Dataset columns: output (string, 64 to 73.2k chars), input (string, 208 to 73.3k chars), instruction (string, 1 distinct value).
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 13 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
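Note on the pattern: the fixed open() above drops the per-class client bootstrap entirely and calls super.open(), where the one-client-per-JVM logic presumably now lives. A minimal sketch of such centralised initialisation, assuming a hypothetical AbstractSpout parent and a placeholder Client type (neither is StormCrawler's actual API):

    import java.util.Map;

    abstract class AbstractSpout {
        // shared, JVM-wide client; every read and write goes through the class lock
        private static Client client;

        protected void open(Map<String, Object> conf) {
            synchronized (AbstractSpout.class) {
                if (client == null) {
                    client = Client.connect(conf); // connect exactly once per JVM
                }
            }
        }

        protected static Client client() {
            synchronized (AbstractSpout.class) {
                return client;
            }
        }

        /** Stand-in for the real Elasticsearch client. */
        static final class Client {
            static Client connect(Map<String, Object> conf) {
                return new Client();
            }
        }
    }

Guarding the lazy write and the later reads with one lock in one place avoids the trap in the vulnerable version, where the write is synchronised on the spout's class object but the static field is later read (client.admin()...) with no synchronisation at all.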
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 52 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); _collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); }
#vulnerable code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); this._collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); } #location 15 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
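Even in the fixed nextTuple() above, isEmpty() followed by remove() is a check-then-act sequence; it is only safe while every access to buffer stays on the spout's executor thread or behind a common lock. A defensive sketch using an atomic poll instead (emit and populateBuffer are hypothetical stand-ins):

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    class NextTupleSketch {
        // shared between the spout thread and the asynchronous query callback
        private final Queue<String> buffer = new ConcurrentLinkedQueue<>();

        public void nextTuple() {
            String url = buffer.poll(); // atomic check-and-remove: no isEmpty()/remove() gap
            if (url != null) {
                emit(url);
                return;
            }
            populateBuffer();
        }

        private void emit(String url) { /* emit the tuple, anchored on the URL */ }

        private void populateBuffer() { /* issue the next asynchronous query */ }
    }

poll() returns null on an empty queue rather than throwing, so the emptiness check and the removal cannot be split apart by a concurrent consumer.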
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 64 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 21 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); _collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); }
#vulnerable code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); this._collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); } #location 21 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override protected void populateBuffer() { // not used yet or returned empty results if (queryDate == null) { queryDate = new Date(); lastTimeResetToNOW = Instant.now(); lastStartOffset = 0; } // been running same query for too long and paging deep? else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) { LOG.info("Reached max start offset {}", lastStartOffset); lastStartOffset = 0; } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( queryDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(lastStartOffset); sourceBuilder.size(maxBucketNum); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } if (StringUtils.isNotBlank(totalSortField)) { sourceBuilder.sort(new FieldSortBuilder(totalSortField) .order(SortOrder.ASC)); } CollapseBuilder collapse = new CollapseBuilder(partitionField); // group expansion -> sends sub queries for each bucket if (maxURLsPerBucket > 1) { InnerHitBuilder ihb = new InnerHitBuilder(); ihb.setSize(maxURLsPerBucket); ihb.setName("urls_per_bucket"); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { List<SortBuilder<?>> sorts = new LinkedList<>(); FieldSortBuilder bucketsorter = SortBuilders.fieldSort( bucketSortField).order(SortOrder.ASC); sorts.add(bucketsorter); ihb.setSorts(sorts); } collapse.setInnerHits(ihb); } sourceBuilder.collapse(collapse); request.source(sourceBuilder); // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); }
#vulnerable code @Override protected void populateBuffer() { // not used yet or returned empty results if (lastDate == null) { lastDate = new Date(); lastStartOffset = 0; } // been running same query for too long and paging deep? else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) { LOG.info("Reached max start offset {}", lastStartOffset); lastStartOffset = 0; } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( lastDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(lastStartOffset); sourceBuilder.size(maxBucketNum); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } if (StringUtils.isNotBlank(totalSortField)) { sourceBuilder.sort(new FieldSortBuilder(totalSortField) .order(SortOrder.ASC)); } CollapseBuilder collapse = new CollapseBuilder(partitionField); // group expansion -> sends sub queries for each bucket if (maxURLsPerBucket > 1) { InnerHitBuilder ihb = new InnerHitBuilder(); ihb.setSize(maxURLsPerBucket); ihb.setName("urls_per_bucket"); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { List<SortBuilder<?>> sorts = new LinkedList<>(); FieldSortBuilder bucketsorter = SortBuilders.fieldSort( bucketSortField).order(SortOrder.ASC); sorts.add(bucketsorter); ihb.setSorts(sorts); } collapse.setInnerHits(ihb); } sourceBuilder.collapse(collapse); request.source(sourceBuilder); // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); } #location 5 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
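populateBuffer() above flips isInQuery to true just before dispatching the asynchronous search, and the response listener clears it. If the method could ever be entered from two threads, a compareAndSet closes the window between the check and the dispatch; a minimal sketch under that assumption (searchAsync and release are illustrative names):

    import java.util.concurrent.atomic.AtomicBoolean;

    class QueryGuardSketch {
        private final AtomicBoolean isInQuery = new AtomicBoolean(false);

        void populateBuffer() {
            // atomically claim the single "query in flight" slot or bail out
            if (!isInQuery.compareAndSet(false, true)) {
                return;
            }
            searchAsync(); // the response/failure callback must call release()
        }

        void release() {
            isInQuery.set(false);
        }

        private void searchAsync() { /* stand-in for client.searchAsync(request, listener) */ }
    }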
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 58 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 60 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeStartESQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; lastStartOffset = 0; } } int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } esQueryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // no more results? if (numBuckets == 0) { lastDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInESQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeStartESQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; // no more results? if (numBuckets == 0) { lastDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; lastStartOffset = 0; } } int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } esQueryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // remove lock isInESQuery.set(false); } #location 17 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
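The reordering is the substance of this patch: in the vulnerable version, numBuckets == 0 nulls lastDate before the reset block calls diffCal.setTime(lastDate), so the callback can dereference null; the fix buffers the hits first and defers the paging-state updates (lastDate, lastStartOffset) until just before the in-flight flag is released. A stripped-down sketch of that ordering (parameters illustrative):

    import java.util.Date;
    import java.util.concurrent.atomic.AtomicBoolean;

    class CallbackOrderingSketch {
        private final AtomicBoolean isInESQuery = new AtomicBoolean(false);
        private Date lastDate = new Date();
        private int lastStartOffset = 0;

        void onResponse(int numBuckets, int maxBucketNum) {
            bufferHits();              // 1. make the results visible first
            if (numBuckets == 0) {     // 2. only now touch the paging state,
                lastDate = null;       //    after everything that reads lastDate has run
                lastStartOffset = 0;
            } else if (numBuckets < maxBucketNum) {
                lastStartOffset = 0;
            } else {
                lastStartOffset += numBuckets;
            }
            isInESQuery.set(false);    // 3. release the in-flight flag last
        }

        private void bufferHits() { /* add the hits to the shared buffer */ }
    }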
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } queryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW .toEpochMilli() + (resetFetchDateAfterNSecs * 1000)); if (Instant.now().isAfter(changeNeededOn)) { LOG.info("lastDate reset based on resetFetchDateAfterNSecs {}", resetFetchDateAfterNSecs); queryDate = null; lastStartOffset = 0; } } // no more results? if (numBuckets == 0) { queryDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; lastStartOffset = 0; } } int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } queryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // no more results? if (numBuckets == 0) { lastDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); } #location 18 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
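Here the Calendar arithmetic on lastDate, which fails the same way if lastDate is nulled before diffCal.setTime(lastDate) runs, is replaced by a deadline derived from an Instant (lastTimeResetToNOW) captured when the query date was set, so the check never dereferences the mutable date at all. A sketch of the idea with the same field names (values illustrative):

    import java.time.Instant;
    import java.util.Date;

    class ResetDeadlineSketch {
        private volatile Date queryDate = new Date();
        private volatile Instant lastTimeResetToNOW = Instant.now();
        private final int resetFetchDateAfterNSecs = 120; // illustrative value

        void maybeReset() {
            if (resetFetchDateAfterNSecs == -1) {
                return;
            }
            // the deadline comes from the immutable Instant, not from queryDate,
            // so a concurrent reset of queryDate cannot trigger a null dereference
            Instant deadline = lastTimeResetToNOW.plusSeconds(resetFetchDateAfterNSecs);
            if (Instant.now().isAfter(deadline)) {
                queryDate = null;
            }
        }
    }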
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 16 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 7 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 55 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void filter(String url, byte[] content, DocumentFragment doc, ParseResult parse) { // check whether the metadata already contains a lang value // in which case we normalise its value and use it Metadata m = parse.get(url).getMetadata(); String extractedValue = m.getFirstValue(extractedKeyName); if (StringUtils.isNotBlank(extractedValue) && extractedValue.length() > 1) { extractedValue = extractedValue.substring(0, 2).toLowerCase( Locale.ENGLISH); LOG.info("Lang: {} extracted from page for {}", extractedValue, url); m.setValue(mdKey, extractedValue); return; } String text = parse.get(url).getText(); if (StringUtils.isBlank(text)) { return; } TextObject textObject = textObjectFactory.forText(text); synchronized (languageDetector) { List<DetectedLanguage> probs = languageDetector .getProbabilities(textObject); if (probs == null || probs.size() == 0) { return; } for (DetectedLanguage lang : probs) { if (lang.getProbability() >= minProb) { String code = lang.getLocale().getLanguage(); parse.get(url).getMetadata().addValue(mdKey, code); } } } }
#vulnerable code @Override public void filter(String url, byte[] content, DocumentFragment doc, ParseResult parse) { // check whether the metadata already contains a lang value // in which case we might want to skip if (mdSkip != null) { String existingVal = parse.get(url).getMetadata() .getFirstValue(mdSkip); if (StringUtils.isNotBlank(existingVal)) { return; } } String text = parse.get(url).getText(); if (StringUtils.isBlank(text)) { return; } TextObject textObject = textObjectFactory.forText(text); synchronized (languageDetector) { List<DetectedLanguage> probs = languageDetector .getProbabilities(textObject); if (probs == null || probs.size() == 0) { return; } for (DetectedLanguage lang : probs) { if (lang.getProbability() >= minProb) { String code = lang.getLocale().getLanguage(); parse.get(url).getMetadata().addValue(mdKey, code); } } } } #location 9 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
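The filter above funnels every call through synchronized (languageDetector) because the detector instance is not thread-safe. Where lock contention across bolt executor threads matters, a detector per thread is one alternative; a sketch with a stand-in Detector type (not the real language-detection API):

    class PerThreadDetectorSketch {
        // one detector instance per thread: no shared mutable state, no lock
        private static final ThreadLocal<Detector> DETECTOR =
                ThreadLocal.withInitial(Detector::new);

        String detect(String text) {
            return DETECTOR.get().language(text);
        }

        /** Stand-in for the real, non-thread-safe detector. */
        static final class Detector {
            String language(String text) {
                return "en"; // placeholder result
            }
        }
    }

The trade-off is memory (one detector per thread, and language models can be large) against the serialisation imposed by a single shared lock.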
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; } } // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it else if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; } } else { oldDate = lastDate; } if (oldDate != null) { lastDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, lastDate, mostRecentDateFound); } } // change the date if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); esQueryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; } } // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it else if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; } } else { oldDate = lastDate; } if (oldDate != null) { lastDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, lastDate, mostRecentDateFound); } } // change the date if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInQuery.set(false); } #location 92 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
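In both versions of onResponse() above, the load-bearing detail is that the buffer is filled and shuffled inside a single synchronized (buffer) block, so a consumer never observes a half-populated or mid-shuffle queue. A condensed sketch of that locking discipline:

    import java.util.Collections;
    import java.util.LinkedList;
    import java.util.List;

    class BufferLockSketch {
        private final LinkedList<String> buffer = new LinkedList<>();

        void onResults(List<String> urls) {
            synchronized (buffer) {
                buffer.addAll(urls);         // all additions...
                Collections.shuffle(buffer); // ...and the shuffle, under one lock
            }
        }

        String next() {
            synchronized (buffer) {
                return buffer.poll(); // consumers take the same lock
            }
        }
    }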
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(queryDate); low.add(Calendar.MINUTE, -recentDateMinGap); Calendar high = Calendar.getInstance(); high.setTime(queryDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = queryDate; } } else { oldDate = queryDate; } if (oldDate != null) { queryDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, queryDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, queryDate, mostRecentDateFound); } } // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW .toEpochMilli() + (resetFetchDateAfterNSecs * 1000)); if (Instant.now().isAfter(changeNeededOn)) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); queryDate = null; } } // change the date if we don't get any results at all if (numBuckets == 0) { queryDate = null; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; Aggregations aggregs = response.getAggregations(); if (aggregs == null) { isInQuery.set(false); return; } SingleBucketAggregation sample = aggregs.get("sample"); if (sample != null) { aggregs = sample.getAggregations(); } Terms agg = aggregs.get("partition"); int numhits = 0; int numBuckets = 0; int alreadyprocessed = 0; Date mostRecentDateFound = null; SimpleDateFormat formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSSX"); synchronized (buffer) { // For each entry Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg .getBuckets().iterator(); while (iterator.hasNext()) { Terms.Bucket entry = iterator.next(); String key = (String) entry.getKey(); // bucket key long docCount = entry.getDocCount(); // Doc count int hitsForThisBucket = 0; // filter results so that we don't include URLs we are already // being processed TopHits topHits = entry.getAggregations().get("docs"); for (SearchHit hit : topHits.getHits().getHits()) { hitsForThisBucket++; Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString()); // consider only the first document of the last bucket // for optimising the nextFetchDate if (hitsForThisBucket == 1 && !iterator.hasNext()) { String strDate = (String) keyValues .get("nextFetchDate"); try { mostRecentDateFound = formatter.parse(strDate); } catch (ParseException e) { throw new RuntimeException("can't parse date :" + strDate); } } // is already being processed - skip it! if (beingProcessed.containsKey(url)) { alreadyprocessed++; continue; } Metadata metadata = fromKeyValues(keyValues); buffer.add(new Values(url, metadata)); } if (hitsForThisBucket > 0) numBuckets++; numhits += hitsForThisBucket; LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount, alreadyprocessed); } // Shuffle the URLs so that we don't get blocks of URLs from the // same // host or domain Collections.shuffle((List) buffer); } LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed); queryTimes.addMeasurement(timeTaken); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numhits); // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; } } // optimise the nextFetchDate by getting the most recent value // returned in the query and add to it, unless the previous value is // within n mins in which case we'll keep it else if (mostRecentDateFound != null && recentDateIncrease >= 0) { Calendar potentialNewDate = Calendar.getInstance(); potentialNewDate.setTime(mostRecentDateFound); potentialNewDate.add(Calendar.MINUTE, recentDateIncrease); Date oldDate = null; // check boundaries if (this.recentDateMinGap > 0) { Calendar low = Calendar.getInstance(); low.setTime(lastDate); low.add(Calendar.MINUTE, -recentDateMinGap); 
Calendar high = Calendar.getInstance(); high.setTime(lastDate); high.add(Calendar.MINUTE, recentDateMinGap); if (high.before(potentialNewDate) || low.after(potentialNewDate)) { oldDate = lastDate; } } else { oldDate = lastDate; } if (oldDate != null) { lastDate = potentialNewDate.getTime(); LOG.info( "{} lastDate changed from {} to {} based on mostRecentDateFound {}", logIdprefix, oldDate, lastDate, mostRecentDateFound); } else { LOG.info( "{} lastDate kept at {} based on mostRecentDateFound {}", logIdprefix, lastDate, mostRecentDateFound); } } // change the date if we don't get any results at all if (numBuckets == 0) { lastDate = null; } // remove lock isInQuery.set(false); } #location 107 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
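This record's THREAD_SAFETY_VIOLATION concerns the query date field (lastDate, renamed queryDate in the fix) that the Elasticsearch callback thread mutates while the spout thread reads it. As a minimal sketch of the underlying hazard only (the class and method names below are hypothetical, not StormCrawler's), publishing such a shared timestamp through an AtomicReference makes the cross-thread updates safe:

import java.time.Instant;
import java.util.concurrent.atomic.AtomicReference;

class QueryDateHolder {
    // shared between the spout thread and the search callback thread
    private final AtomicReference<Instant> queryDate = new AtomicReference<>();

    // spout thread: initialise lazily before issuing a query
    Instant getOrInitQueryDate() {
        queryDate.compareAndSet(null, Instant.now());
        return queryDate.get();
    }

    // callback thread: advance or reset the date based on the results
    void onResults(Instant mostRecentDateFound, boolean noResults) {
        if (noResults) {
            queryDate.set(null); // restart from "now" on the next query
        } else if (mostRecentDateFound != null) {
            queryDate.set(mostRecentDateFound);
        }
    }
}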
#fixed code @Override public void onResponse(SearchResponse response) { SearchHits hits = response.getHits(); LOG.info("{} ES query returned {} hits in {} msec", logIdprefix, hits.getHits().length, response.getTook().getMillis()); hasFinished = hits.getHits().length == 0; synchronized (buffer) { // Unlike standard spouts, the scroll queries should never return // the same // document twice -> no need to look in the buffer or cache for (SearchHit hit : hits) { Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); String status = (String) keyValues.get("status"); String nextFetchDate = (String) keyValues.get("nextFetchDate"); Metadata metadata = fromKeyValues(keyValues); metadata.setValue( AbstractStatusUpdaterBolt.AS_IS_NEXTFETCHDATE_METADATA, nextFetchDate); buffer.add(new Values(url, metadata, Status.valueOf(status))); } } scrollId = response.getScrollId(); // remove lock markQueryReceivedNow(); }
#vulnerable code @Override public void onResponse(SearchResponse response) { SearchHits hits = response.getHits(); LOG.info("{} ES query returned {} hits in {} msec", logIdprefix, hits.getHits().length, response.getTook().getMillis()); hasStarted = true; synchronized (buffer) { // Unlike standard spouts, the scroll queries should never return // the same // document twice -> no need to look in the buffer or cache for (SearchHit hit : hits) { Map<String, Object> keyValues = hit.getSourceAsMap(); String url = (String) keyValues.get("url"); String status = (String) keyValues.get("status"); String nextFetchDate = (String) keyValues.get("nextFetchDate"); Metadata metadata = fromKeyValues(keyValues); metadata.setValue( AbstractStatusUpdaterBolt.AS_IS_NEXTFETCHDATE_METADATA, nextFetchDate); buffer.add(new Values(url, metadata, Status.valueOf(status))); } } scrollId = response.getScrollId(); // remove lock markQueryReceivedNow(); } #location 6 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
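Here the fix renames hasStarted to hasFinished and inverts its meaning (set when a scroll query returns no hits). The flagged write presumably happens on the callback thread while another thread polls the flag; unless the field is volatile or otherwise synchronized, the reader may never observe the update. A hypothetical illustration of the safe version of that pattern:

class ScrollState {
    // written by the search callback, polled by the spout thread
    private volatile boolean hasFinished = false;

    void onResponse(int hitCount) {
        hasFinished = (hitCount == 0);
    }

    boolean isFinished() {
        return hasFinished;
    }
}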
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 24 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
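The fixed open() delegates client creation to super.open(), centralising it in one place. The vulnerable version initialises a static client under synchronized (AggregationSpout.class) but later reads it without any synchronization, which is the classic unsafe-publication pattern. A generic sketch (all names are placeholders) of lazy shared initialisation done safely with a volatile field and double-checked locking:

class ClientHolder {
    private static volatile Object client; // stands in for the real ES client

    static Object get() {
        Object local = client;             // one volatile read
        if (local == null) {
            synchronized (ClientHolder.class) {
                local = client;
                if (local == null) {
                    client = local = createClient();
                }
            }
        }
        return local;
    }

    private static Object createClient() {
        return new Object(); // placeholder for the real connection setup
    }
}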
#fixed code public static Client getClient(Map stormConf, String boltType) { List<String> hosts = new LinkedList<>(); Object addresses = stormConf.get("es." + boltType + ".addresses"); // list if (addresses instanceof PersistentVector) { hosts.addAll((PersistentVector) addresses); } // single value? else { hosts.add(addresses.toString()); } String clustername = ConfUtils.getString(stormConf, "es." + boltType + ".cluster.name", "elasticsearch"); // Use Node client if no host is specified // ES will try to find the cluster automatically // and join it if (hosts.size() == 0) { Node node = org.elasticsearch.node.NodeBuilder .nodeBuilder() .settings( ImmutableSettings.settingsBuilder().put( "http.enabled", false)) .clusterName(clustername).client(true).node(); return node.client(); } // if a transport address has been specified // use the transport client - even if it is localhost Settings settings = ImmutableSettings.settingsBuilder() .put("cluster.name", clustername).build(); TransportClient tc = new TransportClient(settings); for (String host : hosts) { String[] hostPort = host.split(":"); // no port specified? use default one int port = 9300; if (hostPort.length == 2) { port = Integer.parseInt(hostPort[1].trim()); } InetSocketTransportAddress ista = new InetSocketTransportAddress( hostPort[0].trim(), port); tc.addTransportAddress(ista); } return tc; }
#vulnerable code public static Client getClient(Map stormConf, String boltType) { String host = ConfUtils.getString(stormConf, "es." + boltType + ".hostname"); String clustername = ConfUtils.getString(stormConf, "es." + boltType + ".cluster.name", "elasticsearch"); // Use Node client if no host is specified // ES will try to find the cluster automatically // and join it if (StringUtils.isBlank(host)) { Node node = org.elasticsearch.node.NodeBuilder .nodeBuilder() .settings( ImmutableSettings.settingsBuilder().put( "http.enabled", false)) .clusterName(clustername).client(true).node(); return node.client(); } // if a transport address has been specified // use the transport client - even if it is localhost Settings settings = ImmutableSettings.settingsBuilder() .put("cluster.name", clustername).build(); return new TransportClient(settings) .addTransportAddress(new InetSocketTransportAddress(host, 9300)); } #location 26 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code; please generate the patch based on the information provided.
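Infer reports the RESOURCE_LEAK at the TransportClient construction: the client is created and configured in a single expression, so if anything between construction and return throws, it is never closed. The fix also switches from a single hostname to a list of addresses configured in a loop. A hedged, generic sketch of the close-on-failure pattern, using only java.io types and hypothetical helper names:

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

class ClientFactory {
    static Closeable buildConfiguredClient(List<String> hosts) throws IOException {
        Closeable client = openClient();
        try {
            for (String host : hosts) {
                configure(client, host); // e.g. parsing "host:port" may throw
            }
            return client;
        } catch (RuntimeException | IOException e) {
            client.close(); // release the resource on the failure path
            throw e;
        }
    }

    private static Closeable openClient() {
        return () -> { /* release sockets here */ };
    }

    private static void configure(Closeable client, String host) throws IOException {
        // stand-in for addTransportAddress(host, port)
    }
}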
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 9 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
#fixed code @Override public void store(String url, Status status, Metadata metadata, Date nextFetch) throws Exception { // the mysql insert statement String query = tableName + " (url, status, nextfetchdate, metadata, bucket, host)" + " values (?, ?, ?, ?, ?, ?)"; StringBuffer mdAsString = new StringBuffer(); for (String mdKey : metadata.keySet()) { String[] vals = metadata.getValues(mdKey); for (String v : vals) { mdAsString.append("\t").append(mdKey).append("=").append(v); } } int partition = 0; String partitionKey = partitioner.getPartition(url, metadata); if (maxNumBuckets > 1) { // determine which shard to send to based on the host / domain / IP partition = Math.abs(partitionKey.hashCode() % maxNumBuckets); } // create in table if does not already exist if (status.equals(Status.DISCOVERED)) { query = "INSERT IGNORE INTO " + query; } else query = "REPLACE INTO " + query; PreparedStatement preparedStmt = connection.prepareStatement(query); preparedStmt.setString(1, url); preparedStmt.setString(2, status.toString()); preparedStmt.setObject(3, nextFetch); preparedStmt.setString(4, mdAsString.toString()); preparedStmt.setInt(5, partition); preparedStmt.setString(6, partitionKey); long start = System.currentTimeMillis(); // execute the preparedstatement preparedStmt.execute(); eventCounter.scope("sql_query_number").incrBy(1); averagedMetrics.scope("sql_execute_time").update( System.currentTimeMillis() - start); preparedStmt.close(); }
#vulnerable code @Override public void store(String url, Status status, Metadata metadata, Date nextFetch) throws Exception { // the mysql insert statement String query = tableName + " (url, status, nextfetchdate, metadata, bucket)" + " values (?, ?, ?, ?, ?)"; StringBuffer mdAsString = new StringBuffer(); for (String mdKey : metadata.keySet()) { String[] vals = metadata.getValues(mdKey); for (String v : vals) { mdAsString.append("\t").append(mdKey).append("=").append(v); } } int partition = 0; if (maxNumBuckets > 1) { // determine which queue to send to based on the host / domain / IP String partitionKey = partitioner.getPartition(url, metadata); partition = Math.abs(partitionKey.hashCode() % maxNumBuckets); } // create in table if does not already exist if (status.equals(Status.DISCOVERED)) { query = "INSERT IGNORE INTO " + query; } else query = "REPLACE INTO " + query; PreparedStatement preparedStmt = connection.prepareStatement(query); preparedStmt.setString(1, url); preparedStmt.setString(2, status.toString()); preparedStmt.setObject(3, nextFetch); preparedStmt.setString(4, mdAsString.toString()); preparedStmt.setInt(5, partition); long start = System.currentTimeMillis(); // execute the preparedstatement preparedStmt.execute(); eventCounter.scope("sql_query_number").incrBy(1); averagedMetrics.scope("sql_execute_time").update( System.currentTimeMillis() - start); preparedStmt.close(); } #location 22 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code; please generate the patch based on the information provided.
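The NULL_DEREFERENCE in store() appears to arise because the partition key returned by the partitioner may be null, in which case hashing it throws; the fix also computes the key unconditionally so it can be stored in the new host column. A small null-safe bucketing sketch (names illustrative, not the project's):

static int bucketFor(String partitionKey, int maxNumBuckets) {
    if (maxNumBuckets <= 1 || partitionKey == null) {
        return 0; // degrade gracefully to a single bucket
    }
    return Math.abs(partitionKey.hashCode() % maxNumBuckets);
}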
#fixed code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); _collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); }
#vulnerable code @Override public void nextTuple() { // inactive? if (active == false) return; // have anything in the buffer? if (!buffer.isEmpty()) { Values fields = buffer.remove(); String url = fields.get(0).toString(); beingProcessed.add(url); this._collector.emit(fields, url); eventCounter.scope("emitted").incrBy(1); return; } // re-populate the buffer populateBuffer(); } #location 16 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
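Both versions of nextTuple() poll a buffer that an asynchronous query callback also fills; the flagged violation appears to be that the isEmpty()/remove() pair runs without the lock the callback holds. A hypothetical sketch showing the check and the removal done atomically under one monitor:

import java.util.LinkedList;
import java.util.Queue;

class BufferedEmitter {
    private final Queue<String> buffer = new LinkedList<>();

    // callback thread: add results under the buffer's monitor
    void onAsyncResults(Iterable<String> urls) {
        synchronized (buffer) {
            for (String u : urls) {
                buffer.add(u);
            }
        }
    }

    // spout thread: poll() combines the emptiness check and the removal
    String nextOrNull() {
        synchronized (buffer) {
            return buffer.poll();
        }
    }
}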
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 43 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 52 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); super.open(stormConf, context, collector); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); partitionField = ConfUtils.getString(stormConf, ESStatusRoutingFieldParamName); bucketSortField = ConfUtils.getString(stormConf, ESStatusBucketSortFieldParamName, bucketSortField); totalSortField = ConfUtils.getString(stormConf, ESStatusGlobalSortFieldParamName); maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName, 1); maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName, 10); minDelayBetweenQueries = ConfUtils.getLong(stormConf, ESStatusMinDelayParamName, 2000); // one ES client per JVM synchronized (AggregationSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { logIdprefix = "[" + context.getThisComponentId() + " #" + context.getThisTaskIndex() + "] "; // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("{} assigned shard ID {}", logIdprefix, shardID); } _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 19 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
#fixed code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, StandardCharsets.UTF_8); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } RegexRule rule = createRule(line); if (rule != null) { rules.add(rule); } } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; }
#vulnerable code private List<RegexRule> readRules(String rulesFile) { List<RegexRule> rules = new ArrayList<RegexRule>(); try { InputStream regexStream = getClass().getClassLoader() .getResourceAsStream(rulesFile); Reader reader = new InputStreamReader(regexStream, StandardCharsets.UTF_8); BufferedReader in = new BufferedReader(reader); String line; while ((line = in.readLine()) != null) { if (line.length() == 0) { continue; } char first = line.charAt(0); boolean sign = false; switch (first) { case '+': sign = true; break; case '-': sign = false; break; case ' ': case '\n': case '#': // skip blank & comment lines continue; default: throw new IOException("Invalid first character: " + line); } String regex = line.substring(1); LOG.trace("Adding rule [{}]", regex); RegexRule rule = createRule(sign, regex); rules.add(rule); } } catch (IOException e) { LOG.error("There was an error reading the default-regex-filters file"); e.printStackTrace(); } return rules; } #location 31 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code; please generate the patch based on the information provided.
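Neither version of readRules closes the reader; the refactor into createRule(line) mainly removes the mid-loop throw that Infer flagged as the RESOURCE_LEAK path. The idiomatic cure is try-with-resources, which closes the stream on every exit path. A self-contained sketch with illustrative names:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

class RuleReader {
    static List<String> readLines(ClassLoader loader, String resource) {
        List<String> lines = new ArrayList<>();
        InputStream raw = loader.getResourceAsStream(resource);
        if (raw == null) {
            return lines; // resource not found on the classpath
        }
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(raw, StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                if (!line.isEmpty()) {
                    lines.add(line);
                }
            }
        } catch (IOException e) {
            // log and return whatever was read so far
        }
        return lines;
    }
}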
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status"); docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status"); maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); // one ES client per JVM synchronized (ElasticSearchSpout.class) { try { if (client == null) { client = ElasticSearchConnection.getClient(stormConf, ESBoltType); } } catch (Exception e1) { LOG.error("Can't connect to ElasticSearch", e1); throw new RuntimeException(e1); } } // if more than one instance is used we expect their number to be the // same as the number of shards int totalTasks = context .getComponentTasks(context.getThisComponentId()).size(); if (totalTasks > 1) { // determine the number of shards so that we can restrict the // search ClusterSearchShardsRequest request = new ClusterSearchShardsRequest( indexName); ClusterSearchShardsResponse shardresponse = client.admin() .cluster().searchShards(request).actionGet(); ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups(); if (totalTasks != shardgroups.length) { throw new RuntimeException( "Number of ES spout instances should be the same as number of shards (" + shardgroups.length + ") but is " + totalTasks); } shardID = shardgroups[context.getThisTaskIndex()].getShardId(); LOG.info("Assigned shard ID {}", shardID); } partitioner = new URLPartitioner(); partitioner.configure(stormConf); _collector = collector; this.eventCounter = context.registerMetric("counters", new MultiCountMetric(), 10); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); context.registerMetric("buffer_size", new IMetric() { @Override public Object getValueAndReset() { return buffer.size(); } }, 10); } #location 60 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
#fixed code @Override protected void populateBuffer() { if (queryDate == null) { queryDate = new Date(); lastTimeResetToNOW = Instant.now(); } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( queryDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(0); sourceBuilder.size(0); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(BucketOrder.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); sourceBuilder.aggregation(sab); } else { sourceBuilder.aggregation(aggregations); } request.source(sourceBuilder); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); }
#vulnerable code @Override protected void populateBuffer() { if (lastDate == null) { lastDate = new Date(); } String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print( lastDate.getTime()); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate") .lte(formattedLastDate); if (filterQuery != null) { queryBuilder = boolQuery().must(queryBuilder).filter( QueryBuilders.queryStringQuery(filterQuery)); } SearchRequest request = new SearchRequest(indexName).types(docType) .searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(queryBuilder); sourceBuilder.from(0); sourceBuilder.size(0); sourceBuilder.explain(false); sourceBuilder.trackTotalHits(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(BucketOrder.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); sourceBuilder.aggregation(sab); } else { sourceBuilder.aggregation(aggregations); } request.source(sourceBuilder); // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { request.preference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, request.toString()); isInQuery.set(true); client.searchAsync(request, this); } #location 5 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
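Here the unguarded lazy initialisation, if (lastDate == null) lastDate = new Date(), is a check-then-act race: the query callback can set the field to null between the check and the write. The fix renames the field to queryDate and records lastTimeResetToNOW alongside it, but the underlying pattern is worth isolating (hypothetical names):

import java.util.Date;

class QueryClock {
    private Date queryDate; // guarded by "this"

    // spout thread: atomic check-then-init
    synchronized Date ensureQueryDate() {
        if (queryDate == null) {
            queryDate = new Date();
        }
        return queryDate;
    }

    // callback thread: reset so the next query starts from "now"
    synchronized void reset() {
        queryDate = null;
    }
}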
#fixed code @Override public Date schedule(Status status, Metadata metadata) { LOG.debug("Scheduling status: {}, metadata: {}", status, metadata); String signature = metadata.getFirstValue(SIGNATURE_KEY); String oldSignature = metadata.getFirstValue(SIGNATURE_OLD_KEY); if (status != Status.FETCHED) { // reset all metadata metadata.remove(SIGNATURE_MODIFIED_KEY); metadata.remove(FETCH_INTERVAL_KEY); metadata.remove(SIGNATURE_KEY); metadata.remove(SIGNATURE_OLD_KEY); // fall-back to DefaultScheduler return super.schedule(status, metadata); } Calendar now = Calendar.getInstance(Locale.ROOT); String signatureModified = metadata .getFirstValue(SIGNATURE_MODIFIED_KEY); boolean changed = false; final String modifiedTimeString = httpDateFormat.format(now.getTime()); if (metadata.getFirstValue("fetch.statusCode").equals("304")) { // HTTP 304 Not Modified // - no new signature calculated because no content fetched // - do not compare persisted signatures } else if (signature == null || oldSignature == null) { // no decision possible by signature comparison if // - document not parsed (intentionally or not) or // - signature not generated or // - old signature not copied // fall-back to DefaultScheduler LOG.debug("No signature for FETCHED page: {}", metadata); return super.schedule(status, metadata); } else if (signature.equals(oldSignature)) { // unchanged } else { // change detected by signature comparison changed = true; signatureModified = modifiedTimeString; if (setLastModified) { metadata.setValue(HttpHeaders.LAST_MODIFIED, modifiedTimeString); } } String fetchInterval = metadata.getFirstValue(FETCH_INTERVAL_KEY); int interval = defaultfetchInterval; if (fetchInterval != null) { interval = Integer.parseInt(fetchInterval); } else { // initialize from DefaultScheduler Optional<Integer> customInterval = super.checkCustomInterval( metadata, status); if (customInterval.isPresent()) { interval = customInterval.get(); } else { interval = defaultfetchInterval; } fetchInterval = Integer.toString(interval); } if (changed) { // shrink fetch interval (slow down decrementing if already close to // the minimum interval) interval = (int) ((1.0f - fetchIntervalDecRate) * interval + fetchIntervalDecRate * minFetchInterval); LOG.debug( "Signature has changed, fetchInterval decreased from {} to {}", fetchInterval, interval); } else { // no change or not modified, increase fetch interval interval = (int) (interval * (1.0f + fetchIntervalIncRate)); if (interval > maxFetchInterval) { interval = maxFetchInterval; } LOG.debug("Unchanged, fetchInterval increased from {} to {}", fetchInterval, interval); // remove old signature (do not keep same signature twice) metadata.remove(SIGNATURE_OLD_KEY); if (signatureModified == null) { signatureModified = modifiedTimeString; } } metadata.setValue(FETCH_INTERVAL_KEY, Integer.toString(interval)); metadata.setValue(SIGNATURE_MODIFIED_KEY, signatureModified); now.add(Calendar.MINUTE, interval); return now.getTime(); }
#vulnerable code @Override public Date schedule(Status status, Metadata metadata) { LOG.debug("Scheduling status: {}, metadata: {}", status, metadata); String signature = metadata.getFirstValue(SIGNATURE_KEY); String oldSignature = metadata.getFirstValue(SIGNATURE_OLD_KEY); if (status != Status.FETCHED) { // reset all metadata metadata.remove(SIGNATURE_MODIFIED_KEY); metadata.remove(FETCH_INTERVAL_KEY); metadata.remove(SIGNATURE_KEY); metadata.remove(SIGNATURE_OLD_KEY); // fall-back to DefaultScheduler return super.schedule(status, metadata); } Calendar now = Calendar.getInstance(Locale.ROOT); String signatureModified = metadata .getFirstValue(SIGNATURE_MODIFIED_KEY); boolean changed = false; final String modifiedTimeString = httpDateFormat.format(now.getTime()); if (signature == null || oldSignature == null) { // no decision possible by signature comparison if // - document not parsed (intentionally or not) or // - signature not generated or // - old signature not copied if (metadata.getFirstValue("fetch.statusCode").equals("304")) { // HTTP 304 Not Modified } else { // fall-back to DefaultScheduler LOG.debug("No signature for FETCHED page: {}", metadata); return super.schedule(status, metadata); } } else if (signature.equals(oldSignature)) { // unchanged, remove old signature (do not keep same signature // twice) metadata.remove(SIGNATURE_OLD_KEY); if (signatureModified == null) signatureModified = modifiedTimeString; } else { // change detected by signature comparison changed = true; signatureModified = modifiedTimeString; if (setLastModified) metadata.setValue(HttpHeaders.LAST_MODIFIED, modifiedTimeString); } String fetchInterval = metadata.getFirstValue(FETCH_INTERVAL_KEY); int interval = defaultfetchInterval; if (fetchInterval != null) { interval = Integer.parseInt(fetchInterval); } else { // initialize from DefaultScheduler Optional<Integer> customInterval = super.checkCustomInterval( metadata, status); if (customInterval.isPresent()) interval = customInterval.get(); else interval = defaultfetchInterval; fetchInterval = Integer.toString(interval); } if (changed) { // shrink fetch interval (slow down decrementing if already close to // the minimum interval) interval = (int) ((1.0f - fetchIntervalDecRate) * interval + fetchIntervalDecRate * minFetchInterval); LOG.debug( "Signature has changed, fetchInterval decreased from {} to {}", fetchInterval, interval); } else { // no change or not modified, increase fetch interval interval = (int) (interval * (1.0f + fetchIntervalIncRate)); if (interval > maxFetchInterval) interval = maxFetchInterval; LOG.debug("Unchanged, fetchInterval increased from {} to {}", fetchInterval, interval); } metadata.setValue(FETCH_INTERVAL_KEY, Integer.toString(interval)); metadata.setValue(SIGNATURE_MODIFIED_KEY, signatureModified); now.add(Calendar.MINUTE, interval); return now.getTime(); } #location 35 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code; please generate the patch based on the information provided.
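The NULL_DEREFERENCE in schedule() comes from calling .equals() on the result of metadata.getFirstValue("fetch.statusCode"), which may be null; the fix hoists the 304 check out of the null-signature branch, though the call itself still assumes a non-null value. The conventional null-safe idiom puts the constant first (java.util.Objects.equals is the symmetric alternative):

static boolean isNotModified(String statusCode) {
    // constant-first comparison is null-safe: returns false when
    // statusCode is null instead of throwing NullPointerException
    return "304".equals(statusCode);
}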
#fixed code private static String getCharsetFromBOM(final byte[] byteData) { try (BOMInputStream bomIn = new BOMInputStream( new ByteArrayInputStream(byteData))) { ByteOrderMark bom = bomIn.getBOM(); if (bom != null) { return bom.getCharsetName(); } } catch (IOException e) { return null; } return null; }
#vulnerable code private static String getCharsetFromBOM(final byte[] byteData) { BOMInputStream bomIn = new BOMInputStream(new ByteArrayInputStream( byteData)); try { ByteOrderMark bom = bomIn.getBOM(); if (bom != null) { return bom.getCharsetName(); } } catch (IOException e) { return null; } return null; } #location 5 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code; please generate the patch based on the information provided.
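This pair shows the canonical try-with-resources fix for a RESOURCE_LEAK: the vulnerable version opens the BOMInputStream and can return without closing it. Building only on the calls already present in the record plus the JDK, a usage sketch that decodes bytes with the detected charset and a UTF-8 fallback might look like this:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.ByteOrderMark;
import org.apache.commons.io.input.BOMInputStream;

class BomDecoder {
    static String decode(byte[] data) {
        Charset cs = StandardCharsets.UTF_8; // fallback when no BOM is found
        try (BOMInputStream bomIn = new BOMInputStream(new ByteArrayInputStream(data))) {
            ByteOrderMark bom = bomIn.getBOM();
            if (bom != null) {
                cs = Charset.forName(bom.getCharsetName());
            }
        } catch (IOException e) {
            // in-memory stream; keep the UTF-8 default
        }
        // note: the BOM bytes themselves are still at the start of data
        return new String(data, cs);
    }
}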
#fixed code @Override public void nextTuple() { if (!active) return; // synchronize access to buffer needed in case of asynchronous // queries to the backend synchronized (buffer) { // force the refresh of the buffer even if the buffer is not empty if (!isInQuery.get() && triggerQueries()) { populateBuffer(); } if (!buffer.isEmpty()) { // track how long the buffer had been empty for if (timestampEmptyBuffer != -1) { eventCounter.scope("empty.buffer").incrBy( System.currentTimeMillis() - timestampEmptyBuffer); timestampEmptyBuffer = -1; } List<Object> fields = buffer.remove(); String url = fields.get(0).toString(); this._collector.emit(fields, url); beingProcessed.put(url, null); in_buffer.remove(url); eventCounter.scope("emitted").incrBy(1); return; } else if (timestampEmptyBuffer == -1) { timestampEmptyBuffer = System.currentTimeMillis(); } } if (isInQuery.get() || throttleQueries() > 0) { // sleep for a bit but not too much in order to give ack/fail a // chance Utils.sleep(10); return; } // re-populate the buffer populateBuffer(); timeLastQuerySent = System.currentTimeMillis(); }
#vulnerable code @Override public void nextTuple() { if (!active) return; // synchronize access to buffer needed in case of asynchronous // queries to the backend synchronized (buffer) { if (!buffer.isEmpty()) { // track how long the buffer had been empty for if (timestampEmptyBuffer != -1) { eventCounter.scope("empty.buffer").incrBy( System.currentTimeMillis() - timestampEmptyBuffer); timestampEmptyBuffer = -1; } List<Object> fields = buffer.remove(); String url = fields.get(0).toString(); this._collector.emit(fields, url); beingProcessed.put(url, null); eventCounter.scope("emitted").incrBy(1); return; } else if (timestampEmptyBuffer == -1) { timestampEmptyBuffer = System.currentTimeMillis(); } } if (isInQuery.get() || throttleQueries() > 0) { // sleep for a bit but not too much in order to give ack/fail a // chance Utils.sleep(10); return; } // re-populate the buffer populateBuffer(); timeLastQuery = System.currentTimeMillis(); } #location 27 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
#fixed code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; lastStartOffset = 0; } } int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } queryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // no more results? if (numBuckets == 0) { lastDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); }
#vulnerable code @Override public void onResponse(SearchResponse response) { long timeTaken = System.currentTimeMillis() - timeLastQuery; SearchHit[] hits = response.getHits().getHits(); int numBuckets = hits.length; // reset the value for next fetch date if the previous one is too old if (resetFetchDateAfterNSecs != -1) { Calendar diffCal = Calendar.getInstance(); diffCal.setTime(lastDate); diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs); // compare to now if (diffCal.before(Calendar.getInstance())) { LOG.info( "{} lastDate set to null based on resetFetchDateAfterNSecs {}", logIdprefix, resetFetchDateAfterNSecs); lastDate = null; lastStartOffset = 0; } } int alreadyprocessed = 0; int numDocs = 0; synchronized (buffer) { for (SearchHit hit : hits) { Map<String, SearchHits> innerHits = hit.getInnerHits(); // wanted just one per bucket : no inner hits if (innerHits == null) { numDocs++; if (!addHitToBuffer(hit)) { alreadyprocessed++; } continue; } // more than one per bucket SearchHits inMyBucket = innerHits.get("urls_per_bucket"); for (SearchHit subHit : inMyBucket.getHits()) { numDocs++; if (!addHitToBuffer(subHit)) { alreadyprocessed++; } } } // Shuffle the URLs so that we don't get blocks of URLs from the // same host or domain if (numBuckets != numDocs) { Collections.shuffle((List) buffer); } } esQueryTimes.addMeasurement(timeTaken); // could be derived from the count of query times above eventCounter.scope("ES_queries").incrBy(1); eventCounter.scope("ES_docs").incrBy(numDocs); eventCounter.scope("already_being_processed").incrBy(alreadyprocessed); LOG.info( "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed", logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed); // no more results? if (numBuckets == 0) { lastDate = null; lastStartOffset = 0; } // still got some results but paging won't help else if (numBuckets < maxBucketNum) { lastStartOffset = 0; } else { lastStartOffset += numBuckets; } // remove lock isInQuery.set(false); } #location 54 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
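In this aggregation variant the flagged line is the shared measurement sink (esQueryTimes, renamed queryTimes) updated from the search callback while a metrics consumer reads it; whether the rename alone resolves the race depends on code not shown in this record. As a generic, hedged sketch, JDK accumulators give thread-safe metric updates without explicit locking:

import java.util.concurrent.atomic.LongAdder;

class QueryTimes {
    private final LongAdder totalMillis = new LongAdder();
    private final LongAdder count = new LongAdder();

    void addMeasurement(long millis) { // safe from any callback thread
        totalMillis.add(millis);
        count.increment();
    }

    // metrics thread; the two resets are not atomic together, which is
    // usually acceptable for periodic reporting
    long averageAndReset() {
        long n = count.sumThenReset();
        long total = totalMillis.sumThenReset();
        return n == 0 ? 0 : total / n;
    }
}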
#fixed code @Override protected void populateBuffer() { if (lastDate == null) { lastDate = new Date(); } String formattedLastDate = String.format(DATEFORMAT, lastDate); LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, formattedLastDate); QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery( "nextFetchDate").lte(formattedLastDate); SearchRequestBuilder srb = client.prepareSearch(indexName) .setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(rangeQueryBuilder).setFrom(0).setSize(0) .setExplain(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(Terms.Order.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); srb.addAggregation(sab); } else { srb.addAggregation(aggregations); } // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { srb.setPreference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, srb.toString()); timeStartESQuery = System.currentTimeMillis(); isInESQuery.set(true); srb.execute(this); }
#vulnerable code @Override protected void populateBuffer() { if (lastDate == null) { lastDate = String.format(DATEFORMAT, new Date()); } LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, lastDate); QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery( "nextFetchDate").lte(lastDate); SearchRequestBuilder srb = client.prepareSearch(indexName) .setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(rangeQueryBuilder).setFrom(0).setSize(0) .setExplain(false); TermsAggregationBuilder aggregations = AggregationBuilders .terms("partition").field(partitionField).size(maxBucketNum); TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs") .size(maxURLsPerBucket).explain(false); // sort within a bucket if (StringUtils.isNotBlank(bucketSortField)) { FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField) .order(SortOrder.ASC); tophits.sort(sorter); } aggregations.subAggregation(tophits); // sort between buckets if (StringUtils.isNotBlank(totalSortField)) { MinAggregationBuilder minBuilder = AggregationBuilders.min( "top_hit").field(totalSortField); aggregations.subAggregation(minBuilder); aggregations.order(Terms.Order.aggregation("top_hit", true)); } if (sample) { DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder( "sample"); sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket); sab.shardSize(maxURLsPerBucket * maxBucketNum); sab.subAggregation(aggregations); srb.addAggregation(sab); } else { srb.addAggregation(aggregations); } // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html // _shards:2,3 if (shardID != -1) { srb.setPreference("_shards:" + shardID); } // dump query to log LOG.debug("{} ES query {}", logIdprefix, srb.toString()); timeStartESQuery = System.currentTimeMillis(); isInESQuery.set(true); srb.execute(this); } #location 5 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the information provided.
#fixed code @Override public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) { maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1); maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName, 100); randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true); maxSecSinceQueriedDate = ConfUtils.getInt(stormConf, ESMaxSecsSinceQueriedDateParamName, -1); sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName, "nextFetchDate"); super.open(stormConf, context, collector); partitioner = new URLPartitioner(); partitioner.configure(stormConf); context.registerMetric("beingProcessed", new IMetric() { @Override public Object getValueAndReset() { return beingProcessed.size(); } }, 10); }
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
            "status");
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
            "status");

    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName,
            100);

    randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true);

    maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
            ESMaxSecsSinceQueriedDateParamName, -1);

    sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
            "nextFetchDate");

    // one ES client per JVM
    synchronized (ElasticSearchSpout.class) {
        try {
            if (client == null) {
                client = ElasticSearchConnection.getClient(stormConf,
                        ESBoltType);
            }
        } catch (Exception e1) {
            LOG.error("Can't connect to ElasticSearch", e1);
            throw new RuntimeException(e1);
        }
    }

    // if more than one instance is used we expect their number to be the
    // same as the number of shards
    int totalTasks = context.getComponentTasks(context.getThisComponentId())
            .size();
    if (totalTasks > 1) {
        // determine the number of shards so that we can restrict the
        // search
        ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
                indexName);
        ClusterSearchShardsResponse shardresponse = client.admin().cluster()
                .searchShards(request).actionGet();
        ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
        if (totalTasks != shardgroups.length) {
            throw new RuntimeException(
                    "Number of ES spout instances should be the same as number of shards ("
                            + shardgroups.length + ") but is " + totalTasks);
        }
        shardID = shardgroups[context.getThisTaskIndex()].getShardId();
        LOG.info("Assigned shard ID {}", shardID);
    }

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    _collector = collector;

    this.eventCounter = context.registerMetric("counters",
            new MultiCountMetric(), 10);

    context.registerMetric("beingProcessed", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return beingProcessed.size();
        }
    }, 10);

    context.registerMetric("buffer_size", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return buffer.size();
        }
    }, 10);
}
#location 18
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName,
            100);

    randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true);

    maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
            ESMaxSecsSinceQueriedDateParamName, -1);

    sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
            "nextFetchDate");

    super.open(stormConf, context, collector);

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    context.registerMetric("beingProcessed", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return beingProcessed.size();
        }
    }, 10);
}
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
            "status");
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
            "status");

    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName,
            100);

    randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true);

    maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
            ESMaxSecsSinceQueriedDateParamName, -1);

    sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
            "nextFetchDate");

    // one ES client per JVM
    synchronized (ElasticSearchSpout.class) {
        try {
            if (client == null) {
                client = ElasticSearchConnection.getClient(stormConf,
                        ESBoltType);
            }
        } catch (Exception e1) {
            LOG.error("Can't connect to ElasticSearch", e1);
            throw new RuntimeException(e1);
        }
    }

    // if more than one instance is used we expect their number to be the
    // same as the number of shards
    int totalTasks = context.getComponentTasks(context.getThisComponentId())
            .size();
    if (totalTasks > 1) {
        // determine the number of shards so that we can restrict the
        // search
        ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
                indexName);
        ClusterSearchShardsResponse shardresponse = client.admin().cluster()
                .searchShards(request).actionGet();
        ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
        if (totalTasks != shardgroups.length) {
            throw new RuntimeException(
                    "Number of ES spout instances should be the same as number of shards ("
                            + shardgroups.length + ") but is " + totalTasks);
        }
        shardID = shardgroups[context.getThisTaskIndex()].getShardId();
        LOG.info("Assigned shard ID {}", shardID);
    }

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    _collector = collector;

    this.eventCounter = context.registerMetric("counters",
            new MultiCountMetric(), 10);

    context.registerMetric("beingProcessed", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return beingProcessed.size();
        }
    }, 10);

    context.registerMetric("buffer_size", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return buffer.size();
        }
    }, 10);
}
#location 56
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Override
public void onResponse(SearchResponse response) {
    long timeTaken = System.currentTimeMillis() - timeLastQuery;

    SearchHit[] hits = response.getHits().getHits();
    int numBuckets = hits.length;

    int alreadyprocessed = 0;
    int numDocs = 0;

    synchronized (buffer) {
        for (SearchHit hit : hits) {
            Map<String, SearchHits> innerHits = hit.getInnerHits();
            // wanted just one per bucket : no inner hits
            if (innerHits == null) {
                numDocs++;
                if (!addHitToBuffer(hit)) {
                    alreadyprocessed++;
                }
                continue;
            }
            // more than one per bucket
            SearchHits inMyBucket = innerHits.get("urls_per_bucket");
            for (SearchHit subHit : inMyBucket.getHits()) {
                numDocs++;
                if (!addHitToBuffer(subHit)) {
                    alreadyprocessed++;
                }
            }
        }

        // Shuffle the URLs so that we don't get blocks of URLs from the
        // same host or domain
        if (numBuckets != numDocs) {
            Collections.shuffle((List) buffer);
        }
    }

    queryTimes.addMeasurement(timeTaken);

    // could be derived from the count of query times above
    eventCounter.scope("ES_queries").incrBy(1);
    eventCounter.scope("ES_docs").incrBy(numDocs);
    eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);

    LOG.info(
            "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
            logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed);

    // reset the value for next fetch date if the previous one is too old
    if (resetFetchDateAfterNSecs != -1) {
        Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW
                .toEpochMilli() + (resetFetchDateAfterNSecs * 1000));
        if (Instant.now().isAfter(changeNeededOn)) {
            LOG.info("lastDate reset based on resetFetchDateAfterNSecs {}",
                    resetFetchDateAfterNSecs);
            queryDate = null;
            lastStartOffset = 0;
        }
    }

    // no more results?
    if (numBuckets == 0) {
        queryDate = null;
        lastStartOffset = 0;
    }
    // still got some results but paging won't help
    else if (numBuckets < maxBucketNum) {
        lastStartOffset = 0;
    } else {
        lastStartOffset += numBuckets;
    }

    // remove lock
    isInQuery.set(false);
}
#vulnerable code
@Override
public void onResponse(SearchResponse response) {
    long timeTaken = System.currentTimeMillis() - timeLastQuery;

    SearchHit[] hits = response.getHits().getHits();
    int numBuckets = hits.length;

    // reset the value for next fetch date if the previous one is too old
    if (resetFetchDateAfterNSecs != -1) {
        Calendar diffCal = Calendar.getInstance();
        diffCal.setTime(lastDate);
        diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs);
        // compare to now
        if (diffCal.before(Calendar.getInstance())) {
            LOG.info(
                    "{} lastDate set to null based on resetFetchDateAfterNSecs {}",
                    logIdprefix, resetFetchDateAfterNSecs);
            lastDate = null;
            lastStartOffset = 0;
        }
    }

    int alreadyprocessed = 0;
    int numDocs = 0;

    synchronized (buffer) {
        for (SearchHit hit : hits) {
            Map<String, SearchHits> innerHits = hit.getInnerHits();
            // wanted just one per bucket : no inner hits
            if (innerHits == null) {
                numDocs++;
                if (!addHitToBuffer(hit)) {
                    alreadyprocessed++;
                }
                continue;
            }
            // more than one per bucket
            SearchHits inMyBucket = innerHits.get("urls_per_bucket");
            for (SearchHit subHit : inMyBucket.getHits()) {
                numDocs++;
                if (!addHitToBuffer(subHit)) {
                    alreadyprocessed++;
                }
            }
        }

        // Shuffle the URLs so that we don't get blocks of URLs from the
        // same host or domain
        if (numBuckets != numDocs) {
            Collections.shuffle((List) buffer);
        }
    }

    queryTimes.addMeasurement(timeTaken);

    // could be derived from the count of query times above
    eventCounter.scope("ES_queries").incrBy(1);
    eventCounter.scope("ES_docs").incrBy(numDocs);
    eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);

    LOG.info(
            "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
            logIdprefix, numDocs, numBuckets, timeTaken, alreadyprocessed);

    // no more results?
    if (numBuckets == 0) {
        lastDate = null;
        lastStartOffset = 0;
    }
    // still got some results but paging won't help
    else if (numBuckets < maxBucketNum) {
        lastStartOffset = 0;
    } else {
        lastStartOffset += numBuckets;
    }

    // remove lock
    isInQuery.set(false);
}
#location 11
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
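In the onResponse record above, the racy state is the query date: the vulnerable version reads and rewrites lastDate and lastStartOffset from the Elasticsearch callback thread while the spout thread also uses them, and the patch switches the reset bookkeeping to java.time values (queryDate, lastTimeResetToNOW). Where such a timestamp must cross threads, an AtomicReference gives safe publication without a lock; a hedged sketch of that idea only, not the project's actual code:

import java.time.Instant;
import java.util.concurrent.atomic.AtomicReference;

public class ResetClock {
    private final AtomicReference<Instant> lastReset =
            new AtomicReference<>(Instant.now());

    // callback thread: has the reset window expired?
    public boolean needsReset(long resetAfterSecs) {
        Instant deadline = lastReset.get().plusSeconds(resetAfterSecs);
        return Instant.now().isAfter(deadline);
    }

    // spout thread: record that a reset happened
    public void markReset() {
        lastReset.set(Instant.now());
    }
}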
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    partitionField = ConfUtils.getString(stormConf,
            ESStatusRoutingFieldParamName);

    bucketSortField = ConfUtils.getString(stormConf,
            ESStatusBucketSortFieldParamName, bucketSortField);

    totalSortField = ConfUtils.getString(stormConf,
            ESStatusGlobalSortFieldParamName);

    maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName,
            1);
    maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
            10);

    minDelayBetweenQueries = ConfUtils.getLong(stormConf,
            ESStatusMinDelayParamName, 2000);

    super.open(stormConf, context, collector);
}
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
            "status");
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
            "status");

    partitionField = ConfUtils.getString(stormConf,
            ESStatusRoutingFieldParamName);

    bucketSortField = ConfUtils.getString(stormConf,
            ESStatusBucketSortFieldParamName, bucketSortField);

    totalSortField = ConfUtils.getString(stormConf,
            ESStatusGlobalSortFieldParamName);

    maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName,
            1);
    maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
            10);

    minDelayBetweenQueries = ConfUtils.getLong(stormConf,
            ESStatusMinDelayParamName, 2000);

    // one ES client per JVM
    synchronized (AggregationSpout.class) {
        try {
            if (client == null) {
                client = ElasticSearchConnection.getClient(stormConf,
                        ESBoltType);
            }
        } catch (Exception e1) {
            LOG.error("Can't connect to ElasticSearch", e1);
            throw new RuntimeException(e1);
        }
    }

    // if more than one instance is used we expect their number to be the
    // same as the number of shards
    int totalTasks = context.getComponentTasks(context.getThisComponentId())
            .size();
    if (totalTasks > 1) {
        logIdprefix = "[" + context.getThisComponentId() + " #"
                + context.getThisTaskIndex() + "] ";

        // determine the number of shards so that we can restrict the
        // search
        ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
                indexName);
        ClusterSearchShardsResponse shardresponse = client.admin().cluster()
                .searchShards(request).actionGet();
        ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
        if (totalTasks != shardgroups.length) {
            throw new RuntimeException(
                    "Number of ES spout instances should be the same as number of shards ("
                            + shardgroups.length + ") but is " + totalTasks);
        }
        shardID = shardgroups[context.getThisTaskIndex()].getShardId();
        LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
    }

    _collector = collector;

    this.eventCounter = context.registerMetric("counters",
            new MultiCountMetric(), 10);

    context.registerMetric("buffer_size", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return buffer.size();
        }
    }, 10);
}
#location 50
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    partitionField = ConfUtils.getString(stormConf,
            ESStatusRoutingFieldParamName);

    bucketSortField = ConfUtils.getString(stormConf,
            ESStatusBucketSortFieldParamName, bucketSortField);

    totalSortField = ConfUtils.getString(stormConf,
            ESStatusGlobalSortFieldParamName);

    maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName,
            1);
    maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
            10);

    minDelayBetweenQueries = ConfUtils.getLong(stormConf,
            ESStatusMinDelayParamName, 2000);

    super.open(stormConf, context, collector);
}
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
            "status");
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
            "status");

    partitionField = ConfUtils.getString(stormConf,
            ESStatusRoutingFieldParamName);

    bucketSortField = ConfUtils.getString(stormConf,
            ESStatusBucketSortFieldParamName, bucketSortField);

    totalSortField = ConfUtils.getString(stormConf,
            ESStatusGlobalSortFieldParamName);

    maxURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxURLsParamName,
            1);
    maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
            10);

    minDelayBetweenQueries = ConfUtils.getLong(stormConf,
            ESStatusMinDelayParamName, 2000);

    // one ES client per JVM
    synchronized (AggregationSpout.class) {
        try {
            if (client == null) {
                client = ElasticSearchConnection.getClient(stormConf,
                        ESBoltType);
            }
        } catch (Exception e1) {
            LOG.error("Can't connect to ElasticSearch", e1);
            throw new RuntimeException(e1);
        }
    }

    // if more than one instance is used we expect their number to be the
    // same as the number of shards
    int totalTasks = context.getComponentTasks(context.getThisComponentId())
            .size();
    if (totalTasks > 1) {
        logIdprefix = "[" + context.getThisComponentId() + " #"
                + context.getThisTaskIndex() + "] ";

        // determine the number of shards so that we can restrict the
        // search
        ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
                indexName);
        ClusterSearchShardsResponse shardresponse = client.admin().cluster()
                .searchShards(request).actionGet();
        ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
        if (totalTasks != shardgroups.length) {
            throw new RuntimeException(
                    "Number of ES spout instances should be the same as number of shards ("
                            + shardgroups.length + ") but is " + totalTasks);
        }
        shardID = shardgroups[context.getThisTaskIndex()].getShardId();
        LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
    }

    _collector = collector;

    this.eventCounter = context.registerMetric("counters",
            new MultiCountMetric(), 10);

    context.registerMetric("buffer_size", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return buffer.size();
        }
    }, 10);
}
#location 66
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Override
public void execute(Tuple input) {
    // triggered by the arrival of a tuple
    // be it a tick or normal one
    flushQueues();

    if (isTickTuple(input)) {
        _collector.ack(input);
        return;
    }

    CountMetric metric = metricGauge.scope("activethreads");
    metric.getValueAndReset();
    metric.incrBy(this.activeThreads.get());

    metric = metricGauge.scope("in queues");
    metric.getValueAndReset();
    metric.incrBy(this.fetchQueues.inQueues.get());

    metric = metricGauge.scope("queues");
    metric.getValueAndReset();
    metric.incrBy(this.fetchQueues.queues.size());

    LOG.info("[Fetcher #" + taskIndex + "] Threads : "
            + this.activeThreads.get() + "\tqueues : "
            + this.fetchQueues.queues.size() + "\tin_queues : "
            + this.fetchQueues.inQueues.get());

    String url = input.getStringByField("url");

    // check whether this tuple has a url field
    if (url == null) {
        LOG.info("[Fetcher #" + taskIndex + "] Missing url field for tuple "
                + input);
        // ignore silently
        _collector.ack(input);
        return;
    }

    fetchQueues.addFetchItem(input);
}
#vulnerable code
@Override
public void execute(Tuple input) {
    // main thread in charge of acking and failing
    // see
    // https://github.com/nathanmarz/storm/wiki/Troubleshooting#nullpointerexception-from-deep-inside-storm
    int acked = 0;
    int failed = 0;
    int emitted = 0;

    // emit with or without anchors
    // before acking
    synchronized (emitQueue) {
        for (Object[] toemit : this.emitQueue) {
            String streamID = (String) toemit[0];
            Tuple anchor = (Tuple) toemit[1];
            Values vals = (Values) toemit[2];
            if (anchor == null)
                _collector.emit(streamID, vals);
            else
                _collector.emit(streamID, Arrays.asList(anchor), vals);
        }
        emitted = emitQueue.size();
        emitQueue.clear();
    }

    // have a tick tuple to make sure we don't get starved
    synchronized (ackQueue) {
        for (Tuple toack : this.ackQueue) {
            _collector.ack(toack);
        }
        acked = ackQueue.size();
        ackQueue.clear();
    }

    synchronized (failQueue) {
        for (Tuple toack : this.failQueue) {
            _collector.fail(toack);
        }
        failed = failQueue.size();
        failQueue.clear();
    }

    if (acked + failed + emitted > 0)
        LOG.info("[Fetcher #" + taskIndex + "] Acked : " + acked
                + "\tFailed : " + failed + "\tEmitted : " + emitted);

    if (isTickTuple(input)) {
        _collector.ack(input);
        return;
    }

    CountMetric metric = metricGauge.scope("activethreads");
    metric.getValueAndReset();
    metric.incrBy(this.activeThreads.get());

    metric = metricGauge.scope("in queues");
    metric.getValueAndReset();
    metric.incrBy(this.fetchQueues.inQueues.get());

    metric = metricGauge.scope("queues");
    metric.getValueAndReset();
    metric.incrBy(this.fetchQueues.queues.size());

    LOG.info("[Fetcher #" + taskIndex + "] Threads : "
            + this.activeThreads.get() + "\tqueues : "
            + this.fetchQueues.queues.size() + "\tin_queues : "
            + this.fetchQueues.inQueues.get());

    String url = input.getStringByField("url");

    // check whether this tuple has a url field
    if (url == null) {
        LOG.info("[Fetcher #" + taskIndex + "] Missing url field for tuple "
                + input);
        // ignore silently
        _collector.ack(input);
        return;
    }

    fetchQueues.addFetchItem(input);
}
#location 74
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
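The fetcher record above is the same race seen from the bolt side: the collector is only meant to be used from the executor thread (the Storm troubleshooting link quoted in the vulnerable code is about exactly that failure mode), so worker threads must hand work over rather than emit or ack directly. The patch centralizes the draining in a single flushQueues() call at the top of execute(). The hand-off pattern, as a small self-contained sketch with hypothetical names:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Consumer;

public class AckHandOff<T> {
    private final Queue<T> pending = new ConcurrentLinkedQueue<>();

    // called from any fetcher thread
    public void offer(T item) {
        pending.add(item);
    }

    // called only from the executor thread, e.g. at the top of execute(),
    // so the non-thread-safe collector is touched by one thread only
    public void flush(Consumer<T> collector) {
        T item;
        while ((item = pending.poll()) != null) {
            collector.accept(item);
        }
    }
}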
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName,
            100);

    randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true);

    maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
            ESMaxSecsSinceQueriedDateParamName, -1);

    sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
            "nextFetchDate");

    super.open(stormConf, context, collector);

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    context.registerMetric("beingProcessed", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return beingProcessed.size();
        }
    }, 10);
}
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
            "status");
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
            "status");

    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName,
            100);

    randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true);

    maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
            ESMaxSecsSinceQueriedDateParamName, -1);

    sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
            "nextFetchDate");

    // one ES client per JVM
    synchronized (ElasticSearchSpout.class) {
        try {
            if (client == null) {
                client = ElasticSearchConnection.getClient(stormConf,
                        ESBoltType);
            }
        } catch (Exception e1) {
            LOG.error("Can't connect to ElasticSearch", e1);
            throw new RuntimeException(e1);
        }
    }

    // if more than one instance is used we expect their number to be the
    // same as the number of shards
    int totalTasks = context.getComponentTasks(context.getThisComponentId())
            .size();
    if (totalTasks > 1) {
        // determine the number of shards so that we can restrict the
        // search
        ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
                indexName);
        ClusterSearchShardsResponse shardresponse = client.admin().cluster()
                .searchShards(request).actionGet();
        ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
        if (totalTasks != shardgroups.length) {
            throw new RuntimeException(
                    "Number of ES spout instances should be the same as number of shards ("
                            + shardgroups.length + ") but is " + totalTasks);
        }
        shardID = shardgroups[context.getThisTaskIndex()].getShardId();
        LOG.info("Assigned shard ID {}", shardID);
    }

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    _collector = collector;

    this.eventCounter = context.registerMetric("counters",
            new MultiCountMetric(), 10);

    context.registerMetric("beingProcessed", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return beingProcessed.size();
        }
    }, 10);

    context.registerMetric("buffer_size", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return buffer.size();
        }
    }, 10);
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName,
            100);

    randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true);

    maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
            ESMaxSecsSinceQueriedDateParamName, -1);

    sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
            "nextFetchDate");

    super.open(stormConf, context, collector);

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    context.registerMetric("beingProcessed", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return beingProcessed.size();
        }
    }, 10);
}
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
            "status");
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
            "status");

    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    maxBufferSize = ConfUtils.getInt(stormConf, ESStatusBufferSizeParamName,
            100);

    randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName, true);

    maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
            ESMaxSecsSinceQueriedDateParamName, -1);

    sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
            "nextFetchDate");

    // one ES client per JVM
    synchronized (ElasticSearchSpout.class) {
        try {
            if (client == null) {
                client = ElasticSearchConnection.getClient(stormConf,
                        ESBoltType);
            }
        } catch (Exception e1) {
            LOG.error("Can't connect to ElasticSearch", e1);
            throw new RuntimeException(e1);
        }
    }

    // if more than one instance is used we expect their number to be the
    // same as the number of shards
    int totalTasks = context.getComponentTasks(context.getThisComponentId())
            .size();
    if (totalTasks > 1) {
        // determine the number of shards so that we can restrict the
        // search
        ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
                indexName);
        ClusterSearchShardsResponse shardresponse = client.admin().cluster()
                .searchShards(request).actionGet();
        ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
        if (totalTasks != shardgroups.length) {
            throw new RuntimeException(
                    "Number of ES spout instances should be the same as number of shards ("
                            + shardgroups.length + ") but is " + totalTasks);
        }
        shardID = shardgroups[context.getThisTaskIndex()].getShardId();
        LOG.info("Assigned shard ID {}", shardID);
    }

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    _collector = collector;

    this.eventCounter = context.registerMetric("counters",
            new MultiCountMetric(), 10);

    context.registerMetric("beingProcessed", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return beingProcessed.size();
        }
    }, 10);

    context.registerMetric("buffer_size", new IMetric() {
        @Override
        public Object getValueAndReset() {
            return buffer.size();
        }
    }, 10);
}
#location 41
#vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml_empty() {
    Result<IntegerProperty> result = parseXCalProperty("", marshaller);

    IntegerProperty prop = result.getValue();
    assertNull(prop.getValue());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml_empty() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller, "");

    Result<IntegerProperty> result = marshaller.parseXml(element, params);

    IntegerProperty prop = result.getValue();
    assertNull(prop.getValue());
    assertWarnings(0, result.getWarnings());
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
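From here on the records switch to an iCalendar library's marshaller tests (the types match biweekly's API; that identification is an inference). The NULL_DEREFERENCE findings all point at the same spot: each vulnerable test builds a DOM element with xcalPropertyElement(...) and immediately dereferences it, or the Result it yields, without a null check, which is what the analyzer flags. The patched tests route everything through a shared parseXCalProperty helper so the null handling lives in one place. A hedged sketch of what such a helper plausibly looks like, with signatures inferred from these tests rather than taken from the library's real test utilities:

static <T extends ICalProperty> Result<T> parseXCalProperty(String innerXml,
        ICalPropertyMarshaller<T> marshaller) {
    Element element = xcalPropertyElement(marshaller, innerXml);
    if (element == null) {
        // fail fast instead of letting null flow into parseXml()
        throw new AssertionError("could not build xCal property element");
    }
    return marshaller.parseXml(element, new ICalParameters());
}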
#fixed code
@Test
public void parseXml() {
    Result<Geo> result = parseXCalProperty(
            "<latitude>12.34</latitude><longitude>56.78</longitude>",
            marshaller);

    Geo prop = result.getValue();
    assertEquals(12.34, prop.getLatitude(), 0.001);
    assertEquals(56.78, prop.getLongitude(), 0.001);
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller,
            "<latitude>12.34</latitude><longitude>56.78</longitude>");

    Result<Geo> result = marshaller.parseXml(element, params);

    Geo prop = result.getValue();
    assertEquals(12.34, prop.getLatitude(), 0.001);
    assertEquals(56.78, prop.getLongitude(), 0.001);
    assertWarnings(0, result.getWarnings());
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml_uri() {
    Result<Attachment> result = parseXCalProperty(
            "<uri>http://example.com/image.png</uri>", marshaller);

    Attachment prop = result.getValue();
    assertEquals("http://example.com/image.png", prop.getUri());
    assertNull(prop.getData());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml_uri() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller,
            "<uri>http://example.com/image.png</uri>");

    Result<Attachment> result = marshaller.parseXml(element, params);

    Attachment prop = result.getValue();
    assertEquals("http://example.com/image.png", prop.getUri());
    assertNull(prop.getData());
    assertWarnings(0, result.getWarnings());
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml_uri() {
    Attachment prop = new Attachment("image/png",
            "http://example.com/image.png");
    assertWriteXml("<uri>http://example.com/image.png</uri>", prop,
            marshaller);
}
#vulnerable code
@Test
public void writeXml_uri() {
    Attachment prop = new Attachment("image/png",
            "http://example.com/image.png");

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller,
            "<uri>http://example.com/image.png</uri>");
    assertXMLEqual(expected, actual);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml_missing_both() {
    Geo prop = new Geo(null, null);
    assertWriteXml("", prop, marshaller);
}
#vulnerable code
@Test
public void writeXml_missing_both() {
    Geo prop = new Geo(null, null);

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller, "");
    assertXMLEqual(expected, actual);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void escape_newlines() throws Exception {
    ICalendar ical = new ICalendar();
    VEvent event = new VEvent();
    event.setSummary("summary\nof event");
    ical.addEvent(event);

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "BEGIN:VEVENT\r\n" +
            "UID:.*?\r\n" +
            "DTSTAMP:.*?\r\n" +
            "SUMMARY:summary\\\\nof event\r\n" +
        "END:VEVENT\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
}
#vulnerable code
@Test
public void escape_newlines() throws Exception {
    ICalendar ical = new ICalendar();
    VEvent event = new VEvent();
    event.setSummary("summary\nof event");
    ical.addEvent(event);

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "BEGIN:VEVENT\r\n" +
            "UID:.*?\r\n" +
            "DTSTAMP:.*?\r\n" +
            "SUMMARY:summary\\\\nof event\r\n" +
        "END:VEVENT\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
    assertWarnings(0, writer.getWarnings());
}
#location 30
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
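The RESOURCE_LEAK findings (this record and several below) share the classic shape: the ICalWriter is only closed on the path where nothing before close() throws, so a failing write or assertion leaks the writer. Note that the patched tests differ from the vulnerable ones only by dropping the final assertWarnings(..., writer.getWarnings()) call that the finding points at. The conventionally leak-free shape for such writer tests, as a sketch that assumes only the close() method visible in these tests (try-with-resources would be equivalent if ICalWriter is AutoCloseable, which is an assumption here):

static String writeToString(ICalendar ical) throws Exception {
    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    try {
        writer.write(ical);
    } finally {
        writer.close(); // runs even if write() throws
    }
    return sw.toString();
}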
#fixed code
@Test
public void experimental_property() throws Exception {
    ICalendar ical = new ICalendar();
    ical.addExperimentalProperty("X-NUMBER", "1");
    ical.addExperimentalProperty("X-NUMBER", "2");

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "X-NUMBER:1\r\n" +
        "X-NUMBER:2\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
}
#vulnerable code
@Test
public void experimental_property() throws Exception {
    ICalendar ical = new ICalendar();
    ical.addExperimentalProperty("X-NUMBER", "1");
    ical.addExperimentalProperty("X-NUMBER", "2");

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "X-NUMBER:1\r\n" +
        "X-NUMBER:2\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
    assertWarnings(0, writer.getWarnings());
}
#location 25
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml_data() {
    Result<Attachment> result = parseXCalProperty("<binary>"
            + Base64.encodeBase64String("data".getBytes()) + "</binary>",
            marshaller);

    Attachment prop = result.getValue();
    assertNull(prop.getUri());
    assertArrayEquals("data".getBytes(), prop.getData());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml_data() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller, "<binary>"
            + Base64.encodeBase64String("data".getBytes()) + "</binary>");

    Result<Attachment> result = marshaller.parseXml(element, params);

    Attachment prop = result.getValue();
    assertNull(prop.getUri());
    assertArrayEquals("data".getBytes(), prop.getData());
    assertWarnings(0, result.getWarnings());
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml_missing_longitude() {
    Geo prop = new Geo(12.34, null);
    assertWriteXml("<latitude>12.34</latitude>", prop, marshaller);
}
#vulnerable code
@Test
public void writeXml_missing_longitude() {
    Geo prop = new Geo(12.34, null);

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller, "<latitude>12.34</latitude>");
    assertXMLEqual(expected, actual);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml() {
    IntegerProperty prop = new IntegerProperty(5);
    assertWriteXml("<integer>5</integer>", prop, marshaller);
}
#vulnerable code
@Test
public void writeXml() {
    IntegerProperty prop = new IntegerProperty(5);

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller, "<integer>5</integer>");
    assertXMLEqual(expected, actual);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void bad_parameter_value_chars() throws Exception {
    ICalendar ical = new ICalendar();
    ical.getProductId().addParameter("X-TEST", "\"test\"");

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID;X-TEST='test':.*?\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
}
#vulnerable code
@Test
public void bad_parameter_value_chars() throws Exception {
    ICalendar ical = new ICalendar();
    ical.getProductId().addParameter("X-TEST", "\"test\"");

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID;X-TEST='test':.*?\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
    assertWarnings(1, writer.getWarnings());
}
#location 22
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml_missing_latitude() {
    Result<Geo> result = parseXCalProperty("<longitude>56.78</longitude>",
            marshaller);

    Geo prop = result.getValue();
    assertNull(prop.getLatitude());
    assertEquals(56.78, prop.getLongitude(), 0.001);
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml_missing_latitude() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller,
            "<longitude>56.78</longitude>");

    Result<Geo> result = marshaller.parseXml(element, params);

    Geo prop = result.getValue();
    assertNull(prop.getLatitude());
    assertEquals(56.78, prop.getLongitude(), 0.001);
    assertWarnings(0, result.getWarnings());
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void experimental_property_marshaller() throws Exception {
    ICalendar ical = new ICalendar();
    ical.addProperty(new TestProperty("one"));
    ical.addProperty(new TestProperty("two"));

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.registerMarshaller(new TestPropertyMarshaller());
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "X-TEST:one\r\n" +
        "X-TEST:two\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
}
#vulnerable code
@Test
public void experimental_property_marshaller() throws Exception {
    ICalendar ical = new ICalendar();
    ical.addProperty(new TestProperty("one"));
    ical.addProperty(new TestProperty("two"));

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.registerMarshaller(new TestPropertyMarshaller());
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "X-TEST:one\r\n" +
        "X-TEST:two\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
    assertWarnings(0, writer.getWarnings());
}
#location 26
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml() {
    RequestStatus prop = new RequestStatus("1.2.3");
    prop.setDescription("description");
    prop.setExceptionText("data");
    assertWriteXml(
            "<code>1.2.3</code><description>description</description><data>data</data>",
            prop, marshaller);
}
#vulnerable code
@Test
public void writeXml() {
    RequestStatus prop = new RequestStatus("1.2.3");
    prop.setDescription("description");
    prop.setExceptionText("data");

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller,
            "<code>1.2.3</code><description>description</description><data>data</data>");
    assertXMLEqual(expected, actual);
}
#location 8
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml() {
    Result<DateTimePropertyImpl> result = parseXCalProperty(
            "<date-time>2013-06-11T13:43:02Z</date-time>", marshaller);

    DateTimePropertyImpl prop = result.getValue();
    assertEquals(datetime, prop.getValue());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code @Test public void parseXml() { ICalParameters params = new ICalParameters(); Element element = xcalPropertyElement(marshaller, "<date-time>2013-06-11T13:43:02Z</date-time>"); Result<DateTimePropertyImpl> result = marshaller.parseXml(element, params); DateTimePropertyImpl prop = result.getValue(); assertEquals(datetime, prop.getValue()); assertWarnings(0, result.getWarnings()); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void jcal_draft_example2() throws Throwable {
    // see: http://tools.ietf.org/html/draft-ietf-jcardcal-jcal-05#page-27
    // Note: all whitespace is removed from the expected JSON string to make
    // it easier to compare it with the actual result

    VTimezone usEasternTz;
    ICalendar ical = new ICalendar();
    ical.getProperties().clear();
    ical.setProductId("-//ExampleCorp.//ExampleClient//EN");
    ical.setVersion(Version.v2_0());
    {
        usEasternTz = new VTimezone(null);
        usEasternTz.setLastModified(utcFormatter.parse("2004-01-10T03:28:45"));
        usEasternTz.setTimezoneId("US/Eastern");
        {
            DaylightSavingsTime daylight = new DaylightSavingsTime();
            daylight.setDateStart(localFormatter.parse("2000-04-04T02:00:00"));

            RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY);
            rrule.addByDay(1, DayOfWeek.SUNDAY);
            rrule.addByMonth(4);
            daylight.setRecurrenceRule(rrule);

            daylight.addTimezoneName("EDT");
            daylight.setTimezoneOffsetFrom(-5, 0);
            daylight.setTimezoneOffsetTo(-4, 0);

            usEasternTz.addDaylightSavingsTime(daylight);
        }
        {
            StandardTime standard = new StandardTime();
            standard.setDateStart(localFormatter.parse("2000-10-26T02:00:00"));

            RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY);
            rrule.addByDay(1, DayOfWeek.SUNDAY);
            rrule.addByMonth(10);
            standard.setRecurrenceRule(rrule);

            standard.addTimezoneName("EST");
            standard.setTimezoneOffsetFrom(-4, 0);
            standard.setTimezoneOffsetTo(-5, 0);

            usEasternTz.addStandardTime(standard);
        }
        ical.addTimezone(usEasternTz);
    }
    {
        VEvent event = new VEvent();
        event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21"));
        event.setDateStart(usEasternFormatter.parse("2006-01-02T12:00:00"))
                .setTimezone(usEasternTz);
        event.setDuration(new Duration.Builder().hours(1).build());

        RecurrenceRule rrule = new RecurrenceRule(Frequency.DAILY);
        rrule.setCount(5);
        event.setRecurrenceRule(rrule);

        RecurrenceDates rdate = new RecurrenceDates(Arrays.asList(new Period(
                usEasternFormatter.parse("2006-01-02T15:00:00"),
                new Duration.Builder().hours(2).build())));
        rdate.setTimezone(usEasternTz);
        event.addRecurrenceDates(rdate);

        event.setSummary("Event#2");
        event.setDescription("Wearehavingameetingallthisweekat12pmforonehour,withanadditionalmeetingonthefirstday2hourslong.\nPleasebringyourownlunchforthe12pmmeetings.");
        event.setUid("[email protected]");

        ical.addEvent(event);
    }
    {
        VEvent event = new VEvent();
        event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21"));
        event.setDateStart(usEasternFormatter.parse("2006-01-02T14:00:00"))
                .setTimezone(usEasternTz);
        event.setDuration(new Duration.Builder().hours(1).build());
        event.setRecurrenceId(usEasternFormatter.parse("2006-01-04T12:00:00"))
                .setTimezone(usEasternTz);
        event.setSummary("Event#2");
        event.setUid("[email protected]");
        ical.addEvent(event);
    }

    assertWarnings(0, ical.validate());
    assertExample(ical, "jcal-draft-example2.json");
}
#vulnerable code
@Test
public void jcal_draft_example2() throws Throwable {
    // see: http://tools.ietf.org/html/draft-ietf-jcardcal-jcal-05#page-27
    // Note: all whitespace is removed from the expected JSON string to make
    // it easier to compare it with the actual result

    VTimezone usEasternTz;
    ICalendar ical = new ICalendar();
    ical.getProperties().clear();
    ical.setProductId("-//ExampleCorp.//ExampleClient//EN");
    ical.setVersion(Version.v2_0());
    {
        usEasternTz = new VTimezone(null);
        usEasternTz.setLastModified(utcFormatter.parse("2004-01-10T03:28:45"));
        usEasternTz.setTimezoneId("US/Eastern");
        {
            DaylightSavingsTime daylight = new DaylightSavingsTime();
            daylight.setDateStart(localFormatter.parse("2000-04-04T02:00:00"))
                    .setLocalTime(true);

            RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY);
            rrule.addByDay(1, DayOfWeek.SUNDAY);
            rrule.addByMonth(4);
            daylight.setRecurrenceRule(rrule);

            daylight.addTimezoneName("EDT");
            daylight.setTimezoneOffsetFrom(-5, 0);
            daylight.setTimezoneOffsetTo(-4, 0);

            usEasternTz.addDaylightSavingsTime(daylight);
        }
        {
            StandardTime standard = new StandardTime();
            standard.setDateStart(localFormatter.parse("2000-10-26T02:00:00"))
                    .setLocalTime(true);

            RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY);
            rrule.addByDay(1, DayOfWeek.SUNDAY);
            rrule.addByMonth(10);
            standard.setRecurrenceRule(rrule);

            standard.addTimezoneName("EST");
            standard.setTimezoneOffsetFrom(-4, 0);
            standard.setTimezoneOffsetTo(-5, 0);

            usEasternTz.addStandardTime(standard);
        }
        ical.addTimezone(usEasternTz);
    }
    {
        VEvent event = new VEvent();
        event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21"));
        event.setDateStart(usEasternFormatter.parse("2006-01-02T12:00:00"))
                .setTimezone(usEasternTz);
        event.setDuration(new Duration.Builder().hours(1).build());

        RecurrenceRule rrule = new RecurrenceRule(Frequency.DAILY);
        rrule.setCount(5);
        event.setRecurrenceRule(rrule);

        RecurrenceDates rdate = new RecurrenceDates(Arrays.asList(new Period(
                usEasternFormatter.parse("2006-01-02T15:00:00"),
                new Duration.Builder().hours(2).build())));
        rdate.setTimezone(usEasternTz);
        event.addRecurrenceDates(rdate);

        event.setSummary("Event#2");
        event.setDescription("Wearehavingameetingallthisweekat12pmforonehour,withanadditionalmeetingonthefirstday2hourslong.\nPleasebringyourownlunchforthe12pmmeetings.");
        event.setUid("[email protected]");

        ical.addEvent(event);
    }
    {
        VEvent event = new VEvent();
        event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21"));
        event.setDateStart(usEasternFormatter.parse("2006-01-02T14:00:00"))
                .setTimezone(usEasternTz);
        event.setDuration(new Duration.Builder().hours(1).build());
        event.setRecurrenceId(usEasternFormatter.parse("2006-01-04T12:00:00"))
                .setTimezone(usEasternTz);
        event.setSummary("Event#2");
        event.setUid("[email protected]");
        ical.addEvent(event);
    }

    assertWarnings(0, ical.validate());
    assertExample(ical, "jcal-draft-example2.json");
}
#location 16
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
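In the two jCal round-trip records above (and the RFC 5545 and RFC 6321 examples further down), the flagged expression is the chained setter: a call like daylight.setDateStart(...).setLocalTime(true) dereferences whatever setDateStart returns, which the analyzer reports as possibly null, and the patched tests simply drop the chained .setLocalTime(true). If the chained call were still wanted, splitting the chain makes the null assumption explicit; an illustrative shape only, with types inferred from these tests:

static void setStartSafely(DaylightSavingsTime daylight, Date date) {
    DateStart start = daylight.setDateStart(date);
    if (start != null) {
        start.setLocalTime(true); // chain only once the return is known non-null
    }
}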
#fixed code
@Test(expected = CannotParseException.class)
public void parseXml_invalid() {
    parseXCalProperty("<integer>invalid</integer>", marshaller);
}
#vulnerable code
@Test(expected = CannotParseException.class)
public void parseXml_invalid() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller,
            "<integer>invalid</integer>");
    marshaller.parseXml(element, params);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml_missing_longitude() {
    Result<Geo> result = parseXCalProperty("<latitude>12.34</latitude>",
            marshaller);

    Geo prop = result.getValue();
    assertEquals(12.34, prop.getLatitude(), 0.001);
    assertNull(prop.getLongitude());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml_missing_longitude() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller,
            "<latitude>12.34</latitude>");

    Result<Geo> result = marshaller.parseXml(element, params);

    Geo prop = result.getValue();
    assertEquals(12.34, prop.getLatitude(), 0.001);
    assertNull(prop.getLongitude());
    assertWarnings(0, result.getWarnings());
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml_data_type() {
    TextPropertyMarshallerImpl marshaller = new TextPropertyMarshallerImpl(
            Value.CAL_ADDRESS);

    Result<TextPropertyImpl> result = parseXCalProperty(
            "<cal-address>mailto:[email protected]</cal-address>", marshaller);

    TextPropertyImpl prop = result.getValue();
    assertEquals("mailto:[email protected]", prop.getValue());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml_data_type() {
    TextPropertyMarshallerImpl marshaller = new TextPropertyMarshallerImpl(
            Value.CAL_ADDRESS);

    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller,
            "<cal-address>mailto:[email protected]</cal-address>");

    Result<TextPropertyImpl> result = marshaller.parseXml(element, params);

    TextPropertyImpl prop = result.getValue();
    assertEquals("mailto:[email protected]", prop.getValue());
    assertWarnings(0, result.getWarnings());
}
#location 7
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void experimental_component() throws Exception {
    ICalendar ical = new ICalendar();
    ical.addExperimentalComponent("X-VPARTY");

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "BEGIN:X-VPARTY\r\n" +
        "END:X-VPARTY\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
}
#vulnerable code
@Test
public void experimental_component() throws Exception {
    ICalendar ical = new ICalendar();
    ical.addExperimentalComponent("X-VPARTY");

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0\r\n" +
        "PRODID:.*?\r\n" +
        "BEGIN:X-VPARTY\r\n" +
        "END:X-VPARTY\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
    assertWarnings(0, writer.getWarnings());
}
#location 24
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test(expected = CannotParseException.class)
public void parseXml_bad_longitude() {
    parseXCalProperty(
            "<latitude>12.34</latitude><longitude>bad</longitude>",
            marshaller);
}
#vulnerable code
@Test(expected = CannotParseException.class)
public void parseXml_bad_longitude() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller,
            "<latitude>12.34</latitude><longitude>bad</longitude>");
    marshaller.parseXml(element, params);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void jcal_draft_example1() throws Throwable {
    // see: http://tools.ietf.org/html/draft-ietf-jcardcal-jcal-05#page-25
    // Note: all whitespace is removed from the expected JSON string to make
    // it easier to compare it with the actual result

    ICalendar ical = new ICalendar();
    ical.getProperties().clear();
    ical.setCalendarScale(CalendarScale.gregorian());
    ical.setProductId("-//ExampleInc.//ExampleCalendar//EN");
    ical.setVersion(Version.v2_0());
    {
        VEvent event = new VEvent();
        event.getProperties().clear();
        event.setDateTimeStamp(utcFormatter.parse("2008-02-05T19:12:24"));
        event.setDateStart(new DateStart(dateFormatter.parse("2008-10-06"),
                false));
        event.setSummary("Planningmeeting");
        event.setUid("4088E990AD89CB3DBB484909");
        ical.addEvent(event);
    }

    assertWarnings(0, ical.validate());
    assertExample(ical, "jcal-draft-example1.json");
}
#vulnerable code
@Test
public void jcal_draft_example1() throws Throwable {
    // see: http://tools.ietf.org/html/draft-ietf-jcardcal-jcal-05#page-25
    // Note: all whitespace is removed from the expected JSON string to make
    // it easier to compare it with the actual result

    ICalendar ical = new ICalendar();
    ical.getProperties().clear();
    ical.setCalendarScale(CalendarScale.gregorian());
    ical.setProductId("-//ExampleInc.//ExampleCalendar//EN");
    ical.setVersion(Version.v2_0());
    {
        VEvent event = new VEvent();
        event.getProperties().clear();
        event.setDateTimeStamp(utcFormatter.parse("2008-02-05T19:12:24"));
        event.setDateStart(new DateStart(dateFormatter.parse("2008-10-06"),
                false));
        event.setSummary("Planningmeeting");
        event.setUid("4088E990AD89CB3DBB484909");
        ical.addEvent(event);
    }

    assertWarnings(0, ical.validate());

    StringWriter sw = new StringWriter();
    JCalWriter writer = new JCalWriter(sw);
    writer.write(ical);
    writer.close();

    String expected = new String(IOUtils.toByteArray(getClass()
            .getResourceAsStream("jcal-draft-example1.json")));
    expected = expected.replaceAll("\\s", "");

    String actual = sw.toString();

    assertEquals(expected, actual);
}
#location 18
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml_null() {
    TextPropertyImpl prop = new TextPropertyImpl(null);
    assertWriteXml("<text></text>", prop, marshaller);
}
#vulnerable code
@Test
public void writeXml_null() {
    TextPropertyImpl prop = new TextPropertyImpl(null);

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller, "<text></text>");
    assertXMLEqual(expected, actual);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void override_marshaller() throws Exception {
    ICalendar ical = new ICalendar();

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.registerMarshaller(new MyVersionMarshaller());
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0 \\(beta\\)\r\n" +
        "PRODID:.*?\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
}
#vulnerable code
@Test
public void override_marshaller() throws Exception {
    ICalendar ical = new ICalendar();

    StringWriter sw = new StringWriter();
    ICalWriter writer = new ICalWriter(sw);
    writer.registerMarshaller(new MyVersionMarshaller());
    writer.write(ical);
    writer.close();

    //@formatter:off
    String expected =
    "BEGIN:VCALENDAR\r\n" +
        "VERSION:2\\.0 \\(beta\\)\r\n" +
        "PRODID:.*?\r\n" +
    "END:VCALENDAR\r\n";
    //@formatter:on

    String actual = sw.toString();
    assertRegex(expected, actual);
    assertWarnings(0, writer.getWarnings());
}
#location 22
#vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml_null() {
    DateTimePropertyImpl prop = new DateTimePropertyImpl(null);
    assertWriteXml("", prop, marshaller);
}
#vulnerable code
@Test
public void writeXml_null() {
    DateTimePropertyImpl prop = new DateTimePropertyImpl(null);

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller, "");
    assertXMLEqual(expected, actual);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void parseXml() {
    Result<IntegerProperty> result = parseXCalProperty(
            "<integer>5</integer>", marshaller);

    IntegerProperty prop = result.getValue();
    assertIntEquals(5, prop.getValue());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code @Test public void parseXml() { ICalParameters params = new ICalParameters(); Element element = xcalPropertyElement(marshaller, "<integer>5</integer>"); Result<IntegerProperty> result = marshaller.parseXml(element, params); IntegerProperty prop = result.getValue(); assertIntEquals(5, prop.getValue()); assertWarnings(0, result.getWarnings()); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code
@Test
public void parseXml_missing_both() {
    Result<Geo> result = parseXCalProperty("", marshaller);

    Geo prop = result.getValue();
    assertNull(prop.getLatitude());
    assertNull(prop.getLongitude());
    assertWarnings(0, result.getWarnings());
}
#vulnerable code
@Test
public void parseXml_missing_both() {
    ICalParameters params = new ICalParameters();
    Element element = xcalPropertyElement(marshaller, "");

    Result<Geo> result = marshaller.parseXml(element, params);

    Geo prop = result.getValue();
    assertNull(prop.getLatitude());
    assertNull(prop.getLongitude());
    assertWarnings(0, result.getWarnings());
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void writeXml() {
    Geo prop = new Geo(12.34, 56.78);
    assertWriteXml("<latitude>12.34</latitude><longitude>56.78</longitude>",
            prop, marshaller);
}
#vulnerable code
@Test
public void writeXml() {
    Geo prop = new Geo(12.34, 56.78);

    Document actual = xcalProperty(marshaller);
    marshaller.writeXml(prop, XmlUtils.getRootElement(actual));

    Document expected = xcalProperty(marshaller,
            "<latitude>12.34</latitude><longitude>56.78</longitude>");
    assertXMLEqual(expected, actual);
}
#location 6
#vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code
@Test
public void example2() throws Throwable {
    VTimezone usEasternTz;
    ICalendar ical = new ICalendar();
    ical.getProperties().clear();
    ical.setProductId("-//RDU Software//NONSGML HandCal//EN");
    ical.setVersion(Version.v2_0());
    {
        usEasternTz = new VTimezone(null);
        usEasternTz.setTimezoneId("America/New_York");
        {
            StandardTime standard = new StandardTime();
            standard.setDateStart(localFormatter.parse("19981025T020000"));
            standard.setTimezoneOffsetFrom(-4, 0);
            standard.setTimezoneOffsetTo(-5, 0);
            standard.addTimezoneName("EST");
            usEasternTz.addStandardTime(standard);
        }
        {
            DaylightSavingsTime daylight = new DaylightSavingsTime();
            daylight.setDateStart(localFormatter.parse("19990404T020000"));
            daylight.setTimezoneOffsetFrom(-5, 0);
            daylight.setTimezoneOffsetTo(-4, 0);
            daylight.addTimezoneName("EDT");
            usEasternTz.addDaylightSavingsTime(daylight);
        }
        ical.addTimezone(usEasternTz);
    }
    {
        VEvent event = new VEvent();
        event.setDateTimeStamp(utcFormatter.parse("19980309T231000"));
        event.setUid("guid-1.example.com");
        event.setOrganizer("[email protected]");

        Attendee attendee = Attendee.email("[email protected]");
        attendee.setRsvp(true);
        attendee.setRole(Role.REQ_PARTICIPANT);
        attendee.setCalendarUserType(CalendarUserType.GROUP);
        event.addAttendee(attendee);

        event.setDescription("Project XYZ Review Meeting");
        event.addCategories("MEETING");
        event.setClassification(Classification.public_());
        event.setCreated(utcFormatter.parse("19980309T130000"));
        event.setSummary("XYZ Project Review");
        event.setDateStart(usEasternFormatter.parse("19980312T083000"))
                .setTimezone(usEasternTz);
        event.setDateEnd(usEasternFormatter.parse("19980312T093000"))
                .setTimezone(usEasternTz);
        event.setLocation("1CP Conference Room 4350");
        ical.addEvent(event);
    }

    assertWarnings(0, ical.validate());
    assertExample(ical, "rfc5545-example2.ics");
}
#vulnerable code @Test public void example2() throws Throwable { VTimezone usEasternTz; ICalendar ical = new ICalendar(); ical.getProperties().clear(); ical.setProductId("-//RDU Software//NONSGML HandCal//EN"); ical.setVersion(Version.v2_0()); { usEasternTz = new VTimezone(null); usEasternTz.setTimezoneId("America/New_York"); { StandardTime standard = new StandardTime(); standard.setDateStart(localFormatter.parse("19981025T020000")).setLocalTime(true); standard.setTimezoneOffsetFrom(-4, 0); standard.setTimezoneOffsetTo(-5, 0); standard.addTimezoneName("EST"); usEasternTz.addStandardTime(standard); } { DaylightSavingsTime daylight = new DaylightSavingsTime(); daylight.setDateStart(localFormatter.parse("19990404T020000")).setLocalTime(true); daylight.setTimezoneOffsetFrom(-5, 0); daylight.setTimezoneOffsetTo(-4, 0); daylight.addTimezoneName("EDT"); usEasternTz.addDaylightSavingsTime(daylight); } ical.addTimezone(usEasternTz); } { VEvent event = new VEvent(); event.setDateTimeStamp(utcFormatter.parse("19980309T231000")); event.setUid("guid-1.example.com"); event.setOrganizer("[email protected]"); Attendee attendee = Attendee.email("[email protected]"); attendee.setRsvp(true); attendee.setRole(Role.REQ_PARTICIPANT); attendee.setCalendarUserType(CalendarUserType.GROUP); event.addAttendee(attendee); event.setDescription("Project XYZ Review Meeting"); event.addCategories("MEETING"); event.setClassification(Classification.public_()); event.setCreated(utcFormatter.parse("19980309T130000")); event.setSummary("XYZ Project Review"); event.setDateStart(usEasternFormatter.parse("19980312T083000")).setTimezone(usEasternTz); event.setDateEnd(usEasternFormatter.parse("19980312T093000")).setTimezone(usEasternTz); event.setLocation("1CP Conference Room 4350"); ical.addEvent(event); } assertWarnings(0, ical.validate()); assertExample(ical, "rfc5545-example2.ics"); } #location 13 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
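Note on the pair above: the flagged NULL_DEREFERENCE sits in the chained call standard.setDateStart(...).setLocalTime(true), and the fix simply drops the chained call on a possibly-null return value. A minimal standalone sketch of that failure mode, using hypothetical types rather than the library's API:

// Minimal sketch with hypothetical types (not the library's API): chaining a
// second call onto a method whose return value may be null risks an NPE, so
// the returned object is checked before any further call.
class DateHolder {
    private String start;
    // Returns the previous value, which is null the first time.
    String setStart(String value) { String old = this.start; this.start = value; return old; }
}

class ChainingDemo {
    public static void main(String[] args) {
        DateHolder holder = new DateHolder();
        // Vulnerable shape: holder.setStart("19981025").trim() throws NPE here.
        String previous = holder.setStart("19981025");
        if (previous != null) {
            previous = previous.trim(); // dereference only after the null check
        }
        System.out.println("previous=" + previous);
    }
}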
#fixed code @Test public void write_example2() throws Throwable { //see: RFC 6321 p.51 VTimezone usEasternTz; ICalendar ical = new ICalendar(); ical.getProperties().clear(); ical.setProductId("-//Example Inc.//Example Client//EN"); ical.setVersion(Version.v2_0()); { usEasternTz = new VTimezone(null); usEasternTz.setLastModified(utcFormatter.parse("2004-01-10T03:28:45")); usEasternTz.setTimezoneId("US/Eastern"); { DaylightSavingsTime daylight = new DaylightSavingsTime(); daylight.setDateStart(localFormatter.parse("2000-04-04T02:00:00")); RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY); rrule.addByDay(1, DayOfWeek.SUNDAY); rrule.addByMonth(4); daylight.setRecurrenceRule(rrule); daylight.addTimezoneName("EDT"); daylight.setTimezoneOffsetFrom(-5, 0); daylight.setTimezoneOffsetTo(-4, 0); usEasternTz.addDaylightSavingsTime(daylight); } { StandardTime standard = new StandardTime(); standard.setDateStart(localFormatter.parse("2000-10-26T02:00:00")); RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY); rrule.addByDay(-1, DayOfWeek.SUNDAY); rrule.addByMonth(10); standard.setRecurrenceRule(rrule); standard.addTimezoneName("EST"); standard.setTimezoneOffsetFrom(-4, 0); standard.setTimezoneOffsetTo(-5, 0); usEasternTz.addStandardTime(standard); } ical.addTimezone(usEasternTz); } { VEvent event = new VEvent(); event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21")); event.setDateStart(usEasternFormatter.parse("2006-01-02T12:00:00")).setTimezone(usEasternTz); event.setDuration(new Duration.Builder().hours(1).build()); RecurrenceRule rrule = new RecurrenceRule(Frequency.DAILY); rrule.setCount(5); event.setRecurrenceRule(rrule); RecurrenceDates rdate = new RecurrenceDates(Arrays.asList(new Period(usEasternFormatter.parse("2006-01-02T15:00:00"), new Duration.Builder().hours(2).build()))); rdate.setTimezone(usEasternTz); event.addRecurrenceDates(rdate); event.setSummary("Event #2"); event.setDescription("We are having a meeting all this week at 12pm for one hour, with an additional meeting on the first day 2 hours long.\nPlease bring your own lunch for the 12 pm meetings."); event.setUid("[email protected]"); ical.addEvent(event); } { VEvent event = new VEvent(); event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21")); event.setDateStart(usEasternFormatter.parse("2006-01-04T14:00:00")).setTimezone(usEasternTz); event.setDuration(new Duration.Builder().hours(1).build()); event.setRecurrenceId(usEasternFormatter.parse("2006-01-04T12:00:00")).setTimezone(usEasternTz); event.setSummary("Event #2 bis"); event.setUid("[email protected]"); ical.addEvent(event); } assertWarnings(0, ical.validate()); assertExample(ical, "rfc6321-example2.xml"); }
#vulnerable code @Test public void write_example2() throws Throwable { //see: RFC 6321 p.51 VTimezone usEasternTz; ICalendar ical = new ICalendar(); ical.getProperties().clear(); ical.setProductId("-//Example Inc.//Example Client//EN"); ical.setVersion(Version.v2_0()); { usEasternTz = new VTimezone(null); usEasternTz.setLastModified(utcFormatter.parse("2004-01-10T03:28:45")); usEasternTz.setTimezoneId("US/Eastern"); { DaylightSavingsTime daylight = new DaylightSavingsTime(); daylight.setDateStart(localFormatter.parse("2000-04-04T02:00:00")).setLocalTime(true); RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY); rrule.addByDay(1, DayOfWeek.SUNDAY); rrule.addByMonth(4); daylight.setRecurrenceRule(rrule); daylight.addTimezoneName("EDT"); daylight.setTimezoneOffsetFrom(-5, 0); daylight.setTimezoneOffsetTo(-4, 0); usEasternTz.addDaylightSavingsTime(daylight); } { StandardTime standard = new StandardTime(); standard.setDateStart(localFormatter.parse("2000-10-26T02:00:00")).setLocalTime(true); RecurrenceRule rrule = new RecurrenceRule(Frequency.YEARLY); rrule.addByDay(-1, DayOfWeek.SUNDAY); rrule.addByMonth(10); standard.setRecurrenceRule(rrule); standard.addTimezoneName("EST"); standard.setTimezoneOffsetFrom(-4, 0); standard.setTimezoneOffsetTo(-5, 0); usEasternTz.addStandardTime(standard); } ical.addTimezone(usEasternTz); } { VEvent event = new VEvent(); event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21")); event.setDateStart(usEasternFormatter.parse("2006-01-02T12:00:00")).setTimezone(usEasternTz); event.setDuration(new Duration.Builder().hours(1).build()); RecurrenceRule rrule = new RecurrenceRule(Frequency.DAILY); rrule.setCount(5); event.setRecurrenceRule(rrule); RecurrenceDates rdate = new RecurrenceDates(Arrays.asList(new Period(usEasternFormatter.parse("2006-01-02T15:00:00"), new Duration.Builder().hours(2).build()))); rdate.setTimezone(usEasternTz); event.addRecurrenceDates(rdate); event.setSummary("Event #2"); event.setDescription("We are having a meeting all this week at 12pm for one hour, with an additional meeting on the first day 2 hours long.\nPlease bring your own lunch for the 12 pm meetings."); event.setUid("[email protected]"); ical.addEvent(event); } { VEvent event = new VEvent(); event.setDateTimeStamp(utcFormatter.parse("2006-02-06T00:11:21")); event.setDateStart(usEasternFormatter.parse("2006-01-04T14:00:00")).setTimezone(usEasternTz); event.setDuration(new Duration.Builder().hours(1).build()); event.setRecurrenceId(usEasternFormatter.parse("2006-01-04T12:00:00")).setTimezone(usEasternTz); event.setSummary("Event #2 bis"); event.setUid("[email protected]"); ical.addEvent(event); } assertWarnings(0, ical.validate()); assertExample(ical, "rfc6321-example2.xml"); } #location 15 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @Test public void caret_encoding() throws Exception { ICalendar ical = new ICalendar(); ical.getProductId().addParameter("X-TEST", "\"test\""); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.setCaretEncodingEnabled(true); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID;X-TEST=\\^'test\\^':.*?\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); }
#vulnerable code @Test public void caret_encoding() throws Exception { ICalendar ical = new ICalendar(); ical.getProductId().addParameter("X-TEST", "\"test\""); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.setCaretEncodingEnabled(true); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID;X-TEST=\\^'test\\^':.*?\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); assertWarnings(0, writer.getWarnings()); } #location 23 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
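The RESOURCE_LEAK pairs in this file (this one, skipMeException, and savePropertyGroup further down) share one standard defense: open the closeable in a try-with-resources block so close() runs even when a later step throws. A minimal sketch with plain java.io types (not the library's ICalWriter):

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.StringWriter;

// Minimal sketch: the writer is closed on every path, including the path
// where code inside the block throws.
class WriterDemo {
    public static void main(String[] args) throws IOException {
        StringWriter sw = new StringWriter();
        try (BufferedWriter writer = new BufferedWriter(sw)) {
            writer.write("BEGIN:VCALENDAR");
        } // closed (and flushed) here even if write(...) had thrown
        System.out.println(sw);
    }
}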
#fixed code @Test public void experimental_component_marshaller() throws Exception { ICalendar ical = new ICalendar(); ical.addComponent(new Party()); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.registerMarshaller(new PartyMarshaller()); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "BEGIN:X-VPARTY\r\n" + "END:X-VPARTY\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); }
#vulnerable code @Test public void experimental_component_marshaller() throws Exception { ICalendar ical = new ICalendar(); ical.addComponent(new Party()); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.registerMarshaller(new PartyMarshaller()); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "BEGIN:X-VPARTY\r\n" + "END:X-VPARTY\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); assertWarnings(0, writer.getWarnings()); } #location 25 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @Test public void writeXml_null() { IntegerProperty prop = new IntegerProperty(null); assertWriteXml("", prop, marshaller); }
#vulnerable code @Test public void writeXml_null() { IntegerProperty prop = new IntegerProperty(null); Document actual = xcalProperty(marshaller); marshaller.writeXml(prop, XmlUtils.getRootElement(actual)); Document expected = xcalProperty(marshaller, ""); assertXMLEqual(expected, actual); } #location 6 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
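The writeXml_*/parseXml pairs replace repeated DOM boilerplate with a single test helper; the flagged dereference lives in that boilerplate, where a root-element lookup can yield null. A sketch of the helper idea with plain org.w3c.dom types (requireRoot is a hypothetical name):

import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

// Minimal sketch of the helper pattern: instead of repeating root lookups
// (which can yield null) at every call site, one helper performs the lookup
// and fails loudly when the root is missing.
class RootHelperDemo {
    static Element requireRoot(Document doc) {
        Element root = doc.getDocumentElement(); // null for an empty Document
        if (root == null) {
            throw new IllegalStateException("document has no root element");
        }
        return root;
    }

    public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder().newDocument(); // empty: no root element
        try {
            requireRoot(doc);
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        }
    }
}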
#fixed code @Test public void parseXml() { Result<RequestStatus> result = parseXCalProperty("<code>1.2.3</code><description>description</description><data>data</data>", marshaller); RequestStatus prop = result.getValue(); assertEquals("1.2.3", prop.getStatusCode()); assertEquals("description", prop.getDescription()); assertEquals("data", prop.getExceptionText()); assertWarnings(0, result.getWarnings()); }
#vulnerable code @Test public void parseXml() { ICalParameters params = new ICalParameters(); Element element = xcalPropertyElement(marshaller, "<code>1.2.3</code><description>description</description><data>data</data>"); Result<RequestStatus> result = marshaller.parseXml(element, params); RequestStatus prop = result.getValue(); assertEquals("1.2.3", prop.getStatusCode()); assertEquals("description", prop.getDescription()); assertEquals("data", prop.getExceptionText()); assertWarnings(0, result.getWarnings()); } #location 6 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @Test public void writeXml_data() { Attachment prop = new Attachment("image/png", "data".getBytes()); assertWriteXml("<binary>" + Base64.encodeBase64String("data".getBytes()) + "</binary>", prop, marshaller); }
#vulnerable code @Test public void writeXml_data() { Attachment prop = new Attachment("image/png", "data".getBytes()); Document actual = xcalProperty(marshaller); marshaller.writeXml(prop, XmlUtils.getRootElement(actual)); Document expected = xcalProperty(marshaller, "<binary>" + Base64.encodeBase64String("data".getBytes()) + "</binary>"); assertXMLEqual(expected, actual); } #location 6 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @Test public void skipMeException() throws Exception { ICalendar ical = new ICalendar(); ical.addProperty(new TestProperty("value")); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.registerMarshaller(new SkipMeMarshaller()); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); }
#vulnerable code @Test public void skipMeException() throws Exception { ICalendar ical = new ICalendar(); ical.addProperty(new TestProperty("value")); StringWriter sw = new StringWriter(); ICalWriter writer = new ICalWriter(sw); writer.registerMarshaller(new SkipMeMarshaller()); writer.write(ical); writer.close(); //@formatter:off String expected = "BEGIN:VCALENDAR\r\n" + "VERSION:2\\.0\r\n" + "PRODID:.*?\r\n" + "END:VCALENDAR\r\n"; //@formatter:on String actual = sw.toString(); assertRegex(expected, actual); assertWarnings(1, writer.getWarnings()); } #location 23 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code public void createInvertedIndex() { if (currentIndex == null) { currentIndex = Index.createIndex(path,prefix); if (currentIndex == null) { logger.error("No index at ("+path+","+prefix+") to build an inverted index for "); return; } } long beginTimestamp = System.currentTimeMillis(); if (currentIndex.getCollectionStatistics().getNumberOfUniqueTerms() == 0) { logger.error("Index has no terms. Inverted index creation aborted."); return; } if (currentIndex.getCollectionStatistics().getNumberOfDocuments() == 0) { logger.error("Index has no documents. Inverted index creation aborted."); return; } logger.info("Started building the block inverted index..."); invertedIndexBuilder = new BlockInvertedIndexBuilder(currentIndex, "inverted", compressionInvertedConfig); invertedIndexBuilder.createInvertedIndex(); this.finishedInvertedIndexBuild(); try{ currentIndex.flush(); } catch (IOException ioe) { logger.error("Cannot flush index: ", ioe); } long endTimestamp = System.currentTimeMillis(); logger.info("Finished building the block inverted index..."); long seconds = (endTimestamp - beginTimestamp) / 1000; logger.info("Time elapsed for inverted file: " + seconds); }
#vulnerable code public void createInvertedIndex() { if (currentIndex == null) { currentIndex = Index.createIndex(path,prefix); if (currentIndex == null) { logger.error("No index at ("+path+","+prefix+") to build an inverted index for "); } } long beginTimestamp = System.currentTimeMillis(); if (currentIndex.getCollectionStatistics().getNumberOfUniqueTerms() == 0) { logger.error("Index has no terms. Inverted index creation aborted."); return; } if (currentIndex.getCollectionStatistics().getNumberOfDocuments() == 0) { logger.error("Index has no documents. Inverted index creation aborted."); return; } logger.info("Started building the block inverted index..."); invertedIndexBuilder = new BlockInvertedIndexBuilder(currentIndex, "inverted", compressionInvertedConfig); invertedIndexBuilder.createInvertedIndex(); this.finishedInvertedIndexBuild(); try{ currentIndex.flush(); } catch (IOException ioe) { logger.error("Cannot flush index: ", ioe); } long endTimestamp = System.currentTimeMillis(); logger.info("Finished building the block inverted index..."); long seconds = (endTimestamp - beginTimestamp) / 1000; logger.info("Time elapsed for inverted file: " + seconds); } #location 12 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
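This pair and the non-block createInvertedIndex variant further down show the same defect: the null check logs an error but falls through, and the very next statement dereferences the null reference. The fix is the added return. A minimal sketch:

// Minimal sketch: logging a failed lookup without returning leaves the null
// reference alive, and the very next dereference throws. The guard must
// return (or throw), not merely log.
class GuardDemo {
    static Object lookup(String key) { return null; } // simulates a failed Index.createIndex

    static void process(String key) {
        Object index = lookup(key);
        if (index == null) {
            System.err.println("no index for " + key);
            return; // without this return, index.toString() below would throw NPE
        }
        System.out.println(index.toString());
    }

    public static void main(String[] args) {
        process("missing");
    }
}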
#fixed code protected void doEvaluation(int expectedQueryCount, String qrels, float expectedMAP) throws Exception { // Writer w = Files.writeFileWriter(ApplicationSetup.TREC_QRELS); // System.err.println("Writing qrel files files to " + ApplicationSetup.TREC_QRELS); // w.write(qrels + "\n"); // w.close(); TrecTerrier.main(new String[]{"-e", "-Dtrec.qrels=" + qrels}); float MAP = -1.0f; int queryCount = 0; File[] fs = new File(ApplicationSetup.TREC_RESULTS).listFiles(); assertNotNull(fs); for (File f : fs) { if (f.getName().endsWith(".eval")) { BufferedReader br = Files.openFileReader(f); String line = null; while((line = br.readLine()) != null ) { //System.out.println(line); if (line.startsWith("Average Precision:")) { MAP = Float.parseFloat(line.split(":")[1].trim()); } else if (line.startsWith("Number of queries =")) { queryCount = Integer.parseInt(line.split("\\s+=\\s+")[1].trim()); } } br.close(); break; } } assertEquals("Query count was "+ queryCount + " instead of "+ expectedQueryCount, expectedQueryCount, queryCount); assertEquals("MAP was "+MAP + " instead of "+expectedMAP, expectedMAP, MAP, 0.0d); //System.err.println("Tidying results folder:"); //System.err.println("ls "+ ApplicationSetup.TREC_RESULTS); //System.err.println(Arrays.deepToString(new File(ApplicationSetup.TREC_RESULTS).listFiles())); //delete all runs and evaluations fs = new File(ApplicationSetup.TREC_RESULTS).listFiles(); assertNotNull(fs); for (File f :fs) { //System.err.println("Checking file for possible deletion: "+f); if (f.getName().endsWith(".res") || f.getName().endsWith(".eval")) { System.err.println("Removing finished file "+f); if (! f.delete()) System.err.println("Could not remove finished file "+f); } } }
#vulnerable code protected void doEvaluation(int expectedQueryCount, String qrels, float expectedMAP) throws Exception { // Writer w = Files.writeFileWriter(ApplicationSetup.TREC_QRELS); // System.err.println("Writing qrel files files to " + ApplicationSetup.TREC_QRELS); // w.write(qrels + "\n"); // w.close(); TrecTerrier.main(new String[]{"-e", "-Dtrec.qrels=" + qrels}); float MAP = -1.0f; int queryCount = 0; for (File f : new File(ApplicationSetup.TREC_RESULTS).listFiles()) { if (f.getName().endsWith(".eval")) { BufferedReader br = Files.openFileReader(f); String line = null; while((line = br.readLine()) != null ) { //System.out.println(line); if (line.startsWith("Average Precision:")) { MAP = Float.parseFloat(line.split(":")[1].trim()); } else if (line.startsWith("Number of queries =")) { queryCount = Integer.parseInt(line.split("\\s+=\\s+")[1].trim()); } } br.close(); break; } } assertEquals("Query count was "+ queryCount + " instead of "+ expectedQueryCount, expectedQueryCount, queryCount); assertEquals("MAP was "+MAP + " instead of "+expectedMAP, expectedMAP, MAP, 0.0d); //System.err.println("Tidying results folder:"); //System.err.println("ls "+ ApplicationSetup.TREC_RESULTS); //System.err.println(Arrays.deepToString(new File(ApplicationSetup.TREC_RESULTS).listFiles())); //delete all runs and evaluations for (File f : new File(ApplicationSetup.TREC_RESULTS).listFiles()) { //System.err.println("Checking file for possible deletion: "+f); if (f.getName().endsWith(".res") || f.getName().endsWith(".eval")) { System.err.println("Removing finished file "+f); if (! f.delete()) System.err.println("Could not remove finished file "+f); } } } #location 10 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
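The flagged call here is File.listFiles(), which returns null rather than an empty array when the path is not a readable directory; the fix stores the result and asserts it non-null before iterating. A standalone sketch:

import java.io.File;

// Minimal sketch: File.listFiles() returns null (not an empty array) when
// the path is not a readable directory, so the result must be checked
// before iterating.
class ListFilesDemo {
    public static void main(String[] args) {
        File[] files = new File("/no/such/dir").listFiles();
        if (files == null) { // the vulnerable shape iterates directly and throws NPE
            System.err.println("not a readable directory");
            return;
        }
        for (File f : files) {
            System.out.println(f.getName());
        }
    }
}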
#fixed code public void createInvertedIndex() { if (currentIndex == null) { currentIndex = Index.createIndex(path,prefix); if (currentIndex == null) { logger.error("No index at ("+path+","+prefix+") to build an inverted index for "); return; } } final long beginTimestamp = System.currentTimeMillis(); logger.info("Started building the inverted index..."); if (currentIndex.getCollectionStatistics().getNumberOfUniqueTerms() == 0) { logger.error("Index has no terms. Inverted index creation aborted."); return; } if (currentIndex.getCollectionStatistics().getNumberOfDocuments() == 0) { logger.error("Index has no documents. Inverted index creation aborted."); return; } //generate the inverted index logger.info("Started building the inverted index..."); invertedIndexBuilder = new InvertedIndexBuilder(currentIndex, "inverted", compressionInvertedConfig); invertedIndexBuilder.createInvertedIndex(); finishedInvertedIndexBuild(); long endTimestamp = System.currentTimeMillis(); logger.info("Finished building the inverted index..."); long seconds = (endTimestamp - beginTimestamp) / 1000; //long minutes = seconds / 60; logger.info("Time elapsed for inverted file: " + seconds); try{ currentIndex.flush(); } catch (IOException ioe) { logger.warn("Problem flushin index", ioe); } }
#vulnerable code public void createInvertedIndex() { if (currentIndex == null) { currentIndex = Index.createIndex(path,prefix); if (currentIndex == null) { logger.error("No index at ("+path+","+prefix+") to build an inverted index for "); } } final long beginTimestamp = System.currentTimeMillis(); logger.info("Started building the inverted index..."); if (currentIndex.getCollectionStatistics().getNumberOfUniqueTerms() == 0) { logger.error("Index has no terms. Inverted index creation aborted."); return; } if (currentIndex.getCollectionStatistics().getNumberOfDocuments() == 0) { logger.error("Index has no documents. Inverted index creation aborted."); return; } //generate the inverted index logger.info("Started building the inverted index..."); invertedIndexBuilder = new InvertedIndexBuilder(currentIndex, "inverted", compressionInvertedConfig); invertedIndexBuilder.createInvertedIndex(); finishedInvertedIndexBuild(); long endTimestamp = System.currentTimeMillis(); logger.info("Finished building the inverted index..."); long seconds = (endTimestamp - beginTimestamp) / 1000; //long minutes = seconds / 60; logger.info("Time elapsed for inverted file: " + seconds); try{ currentIndex.flush(); } catch (IOException ioe) { logger.warn("Problem flushin index", ioe); } } #location 13 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code public void write(String filename) throws IOException { FileSink output = FileSinkFactory.sinkFor(filename); if(output != null) { write(output, filename); } else { throw new IOException("No sink writer for "+filename); } }
#vulnerable code public void write(String filename) throws IOException { FileSink output = FileSinkFactory.sinkFor(filename); write(output, filename); } #location 3 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
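FileSinkFactory.sinkFor(filename) can return null, and the vulnerable version hands that null straight to write(output, filename); the fix fails fast with a descriptive IOException. The same shape with a hypothetical factory:

import java.io.IOException;

// Minimal sketch with a hypothetical factory: when a lookup can return null,
// failing fast with a descriptive exception beats handing null to a callee
// that will dereference it far from the real cause.
class FactoryDemo {
    static Runnable sinkFor(String name) {
        return name.endsWith(".txt") ? () -> System.out.println("writing " + name) : null;
    }

    static void write(String name) throws IOException {
        Runnable sink = sinkFor(name);
        if (sink == null) {
            throw new IOException("No sink writer for " + name); // fail fast, name the input
        }
        sink.run();
    }

    public static void main(String[] args) throws IOException {
        write("out.txt");
        try {
            write("out.bin");
        } catch (IOException expected) {
            System.out.println(expected.getMessage());
        }
    }
}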
#fixed code public void checkElementStyleGroup(Element element) { StyleGroup oldGroup = getGroup(getElementGroup(element)); // Get the old element "dynamic" status. boolean isDyn = false; // Get the old event set for the given element. StyleGroup.ElementEvents events = null; if (oldGroup != null) { isDyn = oldGroup.isElementDynamic(element); events = oldGroup.getEventsFor(element); } // Remove the element from its old style and add it to insert it in the // correct style. removeElement(element); addElement_(element); // Eventually push the events on the new style group. StyleGroup newGroup = getGroup(getElementGroup(element)); if (newGroup != null && events != null) { for (String event : events.events) pushEventFor(element, event); } for (StyleGroupListener listener : listeners) listener.elementStyleChanged(element, oldGroup, newGroup); // Eventually set the element as dynamic, if it was. if (newGroup != null && isDyn) newGroup.pushElementAsDynamic(element); }
#vulnerable code public void checkElementStyleGroup(Element element) { StyleGroup oldGroup = getGroup(getElementGroup(element)); // Get the old element "dynamic" status. boolean isDyn = oldGroup.isElementDynamic(element); // Get the old event set for the given element. StyleGroup.ElementEvents events = null; if (oldGroup != null) events = oldGroup.getEventsFor(element); // Remove the element from its old style and add it to insert it in the // correct style. removeElement(element); addElement_(element); // Eventually push the events on the new style group. StyleGroup newGroup = getGroup(getElementGroup(element)); if (newGroup != null && events != null) { for (String event : events.events) pushEventFor(element, event); } for (StyleGroupListener listener : listeners) listener.elementStyleChanged(element, oldGroup, newGroup); // Eventually set the element as dynamic, if it was. if (newGroup != null && isDyn) newGroup.pushElementAsDynamic(element); } #location 6 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
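In the vulnerable version, oldGroup.isElementDynamic(element) runs before the null check that guards the other oldGroup read; the fix moves every dereference inside one guard and starts from safe defaults. A minimal sketch of that reordering:

import java.util.HashMap;
import java.util.Map;

// Minimal sketch: every dereference of a nullable value must come after the
// null check, including "small" reads that are easy to overlook. Here both
// reads of the looked-up value sit inside the same guard.
class GuardOrderingDemo {
    public static void main(String[] args) {
        Map<String, String> groups = new HashMap<>();
        String group = groups.get("node#42"); // may be null

        boolean dynamic = false;   // safe defaults used when the lookup fails
        String events = null;
        if (group != null) {       // the vulnerable shape read group.length() before this check
            dynamic = group.length() > 3;
            events = group.toUpperCase();
        }
        System.out.println(dynamic + " " + events);
    }
}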
#fixed code protected void checkZIndexAndShadow(Rule oldRule, Rule newRule) { if (oldRule != null) { if (oldRule.selector.getId() != null || oldRule.selector.getClazz() != null) { // We may accelerate things a bit when a class or id style is // modified, // since only the groups listed in the style are concerned (we // are at the // bottom of the inheritance tree). if (oldRule.getGroups() != null) for (String s : oldRule.getGroups()) { StyleGroup group = groups.get(s); if(group != null) { zIndex.groupChanged(group); shadow.groupChanged(group); } } } else { // For kind styles "NODE", "EDGE", "GRAPH", "SPRITE", we must // reset // the whole Z and shadows for the kind, since several styles // may // have changed. Selector.Type type = oldRule.selector.type; for (StyleGroup group : groups.values()) { if (group.getType() == type) { zIndex.groupChanged(group); shadow.groupChanged(group); } } } } }
#vulnerable code protected void checkZIndexAndShadow(Rule oldRule, Rule newRule) { if (oldRule != null) { if (oldRule.selector.getId() != null || oldRule.selector.getClazz() != null) { // We may accelerate things a bit when a class or id style is // modified, // since only the groups listed in the style are concerned (we // are at the // bottom of the inheritance tree). if (oldRule.getGroups() != null) for (String s : oldRule.getGroups()) { StyleGroup group = groups.get(s); zIndex.groupChanged(group); shadow.groupChanged(group); } } else { // For kind styles "NODE", "EDGE", "GRAPH", "SPRITE", we must // reset // the whole Z and shadows for the kind, since several styles // may // have changed. Selector.Type type = oldRule.selector.type; for (StyleGroup group : groups.values()) { if (group.getType() == type) { zIndex.groupChanged(group); shadow.groupChanged(group); } } } } } #location 14 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code private void print(EventType type, Args args) { if (!enable.get(type)) return; String out = formats.get(type); for (String k : args.keySet()) { Object o = args.get(k); out = out.replace(String.format("%%%s%%", k), o == null ? "null" : o.toString()); } this.out.print(out); this.out.printf("\n"); if (autoflush) this.out.flush(); argsPnP(args); }
#vulnerable code private void print(EventType type, Args args) { if (!enable.get(type)) return; String out = formats.get(type); for (String k : args.keySet()) { Object o = args.get(k); out = out.replace(String.format("%%%s%%", k), o == null ? "null" : o.toString()); } this.out.printf(out); this.out.printf("\n"); if (autoflush) this.out.flush(); argsPnP(args); } #location 13 #vulnerability type CHECKERS_PRINTF_ARGS
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
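CHECKERS_PRINTF_ARGS flags printf(out) because out is assembled from attribute values, so any '%' in the data is parsed as a format conversion. The fix prints the string verbatim. A standalone sketch:

// Minimal sketch: a string assembled from external data must never be passed
// as a printf/format *format string*, because stray '%' sequences are
// interpreted as conversions. Print it verbatim, or pass it as an argument
// to a fixed format.
class PrintfDemo {
    public static void main(String[] args) {
        String fromOutside = "progress: 100%"; // note the lone '%'
        System.out.print(fromOutside + "\n");   // safe: printed verbatim
        System.out.printf("%s%n", fromOutside); // safe: data is an argument
        // Unsafe shape (what the vulnerable code did): System.out.printf(fromOutside);
        // the trailing '%' makes the formatter throw java.util.UnknownFormatConversionException.
    }
}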
#fixed code public static void main(String... args) throws IOException { HashMap<Option, String> options = new HashMap<Option, String>(); LinkedList<String> others = new LinkedList<String>(); for (Option option : Option.values()) if (option.defaultValue != null) options.put(option, option.defaultValue); if (args != null && args.length > 0) { Pattern valueGetter = Pattern.compile("^--\\w[\\w-]*\\w?(?:=(?:\"([^\"]*)\"|([^\\s]*)))$"); for (int i = 0; i < args.length; i++) { if (args[i].matches("^--\\w[\\w-]*\\w?(=(\"[^\"]*\"|[^\\s]*))?$")) { boolean found = false; for (Option option : Option.values()) { if (args[i].startsWith("--" + option.fullopts + "=")) { Matcher m = valueGetter.matcher(args[i]); if (m.matches()) { options.put(option, m.group(1) == null ? m.group(2) : m.group(1)); } found = true; break; } } if (!found) { LOGGER.severe( String.format("unknown option: %s%n", args[i].substring(0, args[i].indexOf('=')))); System.exit(1); } } else if (args[i].matches("^-\\w$")) { boolean found = false; for (Option option : Option.values()) { if (args[i].equals("-" + option.shortopts)) { options.put(option, args[++i]); break; } } if (!found) { LOGGER.severe(String.format("unknown option: %s%n", args[i])); System.exit(1); } } else { others.addLast(args[i]); } } } else { usage(); System.exit(0); } LinkedList<String> errors = new LinkedList<String>(); if (others.size() == 0) { errors.add("dgs file name missing."); } String imagePrefix; OutputType outputType = null; OutputPolicy outputPolicy = null; Resolution resolution = null; Quality quality = null; String logo; String stylesheet; imagePrefix = options.get(Option.IMAGE_PREFIX); try { outputType = OutputType.valueOf(options.get(Option.IMAGE_TYPE)); } catch (IllegalArgumentException e) { errors.add("bad image type: " + options.get(Option.IMAGE_TYPE)); } try { outputPolicy = OutputPolicy.valueOf(options.get(Option.OUTPUT_POLICY)); } catch (IllegalArgumentException e) { errors.add("bad output policy: " + options.get(Option.OUTPUT_POLICY)); } try { quality = Quality.valueOf(options.get(Option.QUALITY)); } catch (IllegalArgumentException e) { errors.add("bad quality: " + options.get(Option.QUALITY)); } logo = options.get(Option.LOGO); stylesheet = options.get(Option.STYLESHEET); try { resolution = Resolutions.valueOf(options.get(Option.IMAGE_RESOLUTION)); } catch (IllegalArgumentException e) { Pattern p = Pattern.compile("^\\s*(\\d+)\\s*x\\s*(\\d+)\\s*$"); Matcher m = p.matcher(options.get(Option.IMAGE_RESOLUTION)); if (m.matches()) { resolution = new CustomResolution(Integer.parseInt(m.group(1)), Integer.parseInt(m.group(2))); } else { errors.add("bad resolution: " + options.get(Option.IMAGE_RESOLUTION)); } } if (stylesheet != null && stylesheet.length() < 1024) { File test = new File(stylesheet); if (test.exists()) { FileReader in = new FileReader(test); char[] buffer = new char[128]; String content = ""; while (in.ready()) { int c = in.read(buffer, 0, 128); content += new String(buffer, 0, c); } stylesheet = content; in.close(); } } { File test = new File(others.peek()); if (!test.exists()) errors.add(String.format("file \"%s\" does not exist", others.peek())); } if (errors.size() > 0) { LOGGER.info(String.format("error:%n")); for (String error : errors) LOGGER.info(String.format("- %s%n", error)); System.exit(1); } FileSourceDGS dgs = new FileSourceDGS(); FileSinkImages fsi = FileSinkImages.createDefault(); fsi.setOutputPolicy(outputPolicy); fsi.setResolution(resolution); fsi.setOutputType(outputType); dgs.addSink(fsi); if (logo != null) fsi.addFilter(new AddLogoFilter(logo, 0, 0)); fsi.setQuality(quality); if (stylesheet != null) fsi.setStyleSheet(stylesheet); boolean next = true; dgs.begin(others.get(0)); while (next) next = dgs.nextStep(); dgs.end(); }
#vulnerable code public static void main(String... args) throws IOException { HashMap<Option, String> options = new HashMap<Option, String>(); LinkedList<String> others = new LinkedList<String>(); for (Option option : Option.values()) if (option.defaultValue != null) options.put(option, option.defaultValue); if (args != null && args.length > 0) { Pattern valueGetter = Pattern.compile("^--\\w[\\w-]*\\w?(?:=(?:\"([^\"]*)\"|([^\\s]*)))$"); for (int i = 0; i < args.length; i++) { if (args[i].matches("^--\\w[\\w-]*\\w?(=(\"[^\"]*\"|[^\\s]*))?$")) { boolean found = false; for (Option option : Option.values()) { if (args[i].startsWith("--" + option.fullopts + "=")) { Matcher m = valueGetter.matcher(args[i]); if (m.matches()) { options.put(option, m.group(1) == null ? m.group(2) : m.group(1)); } found = true; break; } } if (!found) { LOGGER.severe( String.format("unknown option: %s%n", args[i].substring(0, args[i].indexOf('=')))); System.exit(1); } } else if (args[i].matches("^-\\w$")) { boolean found = false; for (Option option : Option.values()) { if (args[i].equals("-" + option.shortopts)) { options.put(option, args[++i]); break; } } if (!found) { LOGGER.severe(String.format("unknown option: %s%n", args[i])); System.exit(1); } } else { others.addLast(args[i]); } } } else { usage(); System.exit(0); } LinkedList<String> errors = new LinkedList<String>(); if (others.size() == 0) { errors.add("dgs file name missing."); } String imagePrefix; OutputType outputType = null; OutputPolicy outputPolicy = null; Resolution resolution = null; Quality quality = null; String logo; String stylesheet; imagePrefix = options.get(Option.IMAGE_PREFIX); try { outputType = OutputType.valueOf(options.get(Option.IMAGE_TYPE)); } catch (IllegalArgumentException e) { errors.add("bad image type: " + options.get(Option.IMAGE_TYPE)); } try { outputPolicy = OutputPolicy.valueOf(options.get(Option.OUTPUT_POLICY)); } catch (IllegalArgumentException e) { errors.add("bad output policy: " + options.get(Option.OUTPUT_POLICY)); } try { quality = Quality.valueOf(options.get(Option.QUALITY)); } catch (IllegalArgumentException e) { errors.add("bad quality: " + options.get(Option.QUALITY)); } logo = options.get(Option.LOGO); stylesheet = options.get(Option.STYLESHEET); try { resolution = Resolutions.valueOf(options.get(Option.IMAGE_RESOLUTION)); } catch (IllegalArgumentException e) { Pattern p = Pattern.compile("^\\s*(\\d+)\\s*x\\s*(\\d+)\\s*$"); Matcher m = p.matcher(options.get(Option.IMAGE_RESOLUTION)); if (m.matches()) { resolution = new CustomResolution(Integer.parseInt(m.group(1)), Integer.parseInt(m.group(2))); } else { errors.add("bad resolution: " + options.get(Option.IMAGE_RESOLUTION)); } } if (stylesheet != null && stylesheet.length() < 1024) { File test = new File(stylesheet); if (test.exists()) { FileReader in = new FileReader(test); char[] buffer = new char[128]; String content = ""; while (in.ready()) { int c = in.read(buffer, 0, 128); content += new String(buffer, 0, c); } stylesheet = content; in.close(); } } { File test = new File(others.peek()); if (!test.exists()) errors.add(String.format("file \"%s\" does not exist", others.peek())); } if (errors.size() > 0) { LOGGER.info(String.format("error:%n")); for (String error : errors) LOGGER.info(String.format("- %s%n", error)); System.exit(1); } FileSourceDGS dgs = new FileSourceDGS(); FileSinkImages fsi = new FileSinkImages(imagePrefix, outputType, resolution, outputPolicy); dgs.addSink(fsi); if (logo != null) fsi.addFilter(new AddLogoFilter(logo, 0, 0)); fsi.setQuality(quality); if (stylesheet != null) fsi.setStyleSheet(stylesheet); boolean next = true; dgs.begin(others.get(0)); while (next) next = dgs.nextStep(); dgs.end(); } #location 142 #vulnerability type THREAD_SAFETY_VIOLATION
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
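The visible diff above is small (a createDefault() factory plus explicit setters instead of the four-argument constructor), and the THREAD_SAFETY_VIOLATION at location 142 is hard to pin down in the flattened text. As a general pattern, though, a static factory over final fields guarantees callers only ever see a fully initialized instance. A hedged sketch of that pattern, not the library's internals:

// Minimal sketch (general pattern, not the library's internals): a static
// factory assigns all final fields before the reference escapes, so any
// thread that later receives the instance sees it fully constructed.
class Sink {
    private final String type;
    private final int width;

    private Sink(String type, int width) {
        this.type = type;
        this.width = width;
    }

    static Sink createDefault() {
        return new Sink("PNG", 1280); // defaults fixed here, in one place
    }

    @Override public String toString() { return type + "@" + width; }
}

class DefaultFactoryDemo {
    public static void main(String[] args) {
        System.out.println(Sink.createDefault());
    }
}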
#fixed code @SuppressWarnings("unchecked") public <T extends Edge> T removeEdge(String from, String to) throws ElementNotFoundException { GraphicNode node0 = (GraphicNode) styleGroups.getNode(from); GraphicNode node1 = (GraphicNode) styleGroups.getNode(to); if (node0 != null && node1 != null) { Collection<GraphicEdge> edges0 = connectivity.get(node0); Collection<GraphicEdge> edges1 = connectivity.get(node1); for (GraphicEdge edge0 : edges0) { for (GraphicEdge edge1 : edges1) { if (edge0 == edge1) { removeEdge(edge0.getId()); return (T) edge0; } } } } return null; }
#vulnerable code @SuppressWarnings("unchecked") public <T extends Edge> T removeEdge(String from, String to) throws ElementNotFoundException { GraphicNode node0 = (GraphicNode) styleGroups.getNode(from); GraphicNode node1 = (GraphicNode) styleGroups.getNode(to); if (node0 != null && node1 != null) { ArrayList<GraphicEdge> edges0 = connectivity.get(node0); ArrayList<GraphicEdge> edges1 = connectivity.get(node1); for (GraphicEdge edge0 : edges0) { for (GraphicEdge edge1 : edges1) { if (edge0 == edge1) { removeEdge(edge0.getId()); return (T) edge0; } } } } return null; } #location 11 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @SuppressWarnings("all") public <T extends Edge> T getEdgeToward(String id) { List<? extends Edge> edges = mygraph.connectivity.get(this); for (Edge edge : edges) { if (edge.getOpposite(this).getId().equals(id)) return (T) edge; } return null; }
#vulnerable code @SuppressWarnings("all") public <T extends Edge> T getEdgeToward(String id) { ArrayList<? extends Edge> edges = mygraph.connectivity.get(this); for (Edge edge : edges) { if (edge.getOpposite(this).getId().equals(id)) return (T) edge; } return null; } #location 5 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
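getEdgeToward, printConnectivity, and the two removeEdge pairs nearby all iterate connectivity.get(node) without a null check, and Map.get returns null for absent keys; the fixes mostly retype to the collection interface. A more defensive shape for the underlying null is getOrDefault with an empty collection:

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Minimal sketch: Map.get() returns null for absent keys, so iterating its
// result directly can throw. getOrDefault with an empty list keeps the loop
// safe without a separate null check.
class MapGetDemo {
    public static void main(String[] args) {
        Map<String, List<String>> connectivity = new HashMap<>();
        connectivity.put("n1", List.of("e1", "e2"));

        for (String edge : connectivity.getOrDefault("n2", Collections.emptyList())) {
            System.out.println(edge); // never reached for an absent key, but no NPE either
        }
        System.out.println("done");
    }
}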
#fixed code public void printConnectivity() { Iterator<GraphicNode> keys = connectivity.keySet().iterator(); System.err.printf("Graphic graph connectivity:%n"); while (keys.hasNext()) { GraphicNode node = keys.next(); System.err.printf(" [%s] -> ", node.getId()); Iterable<GraphicEdge> edges = connectivity.get(node); for (GraphicEdge edge : edges) System.err.printf(" (%s %d)", edge.getId(), edge.getMultiIndex()); System.err.printf("%n"); } }
#vulnerable code public void printConnectivity() { Iterator<GraphicNode> keys = connectivity.keySet().iterator(); System.err.printf("Graphic graph connectivity:%n"); while (keys.hasNext()) { GraphicNode node = keys.next(); System.err.printf(" [%s] -> ", node.getId()); ArrayList<GraphicEdge> edges = connectivity.get(node); for (GraphicEdge edge : edges) System.err.printf(" (%s %d)", edge.getId(), edge.getMultiIndex()); System.err.printf("%n"); } } #location 10 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @SuppressWarnings("unchecked") public <T extends Edge> T removeEdge(String from, String to) throws ElementNotFoundException { GraphicNode node0 = (GraphicNode) styleGroups.getNode(from); GraphicNode node1 = (GraphicNode) styleGroups.getNode(to); if (node0 != null && node1 != null) { Collection<GraphicEdge> edges0 = connectivity.get(node0); Collection<GraphicEdge> edges1 = connectivity.get(node1); for (GraphicEdge edge0 : edges0) { for (GraphicEdge edge1 : edges1) { if (edge0 == edge1) { removeEdge(edge0.getId()); return (T) edge0; } } } } return null; }
#vulnerable code @SuppressWarnings("unchecked") public <T extends Edge> T removeEdge(String from, String to) throws ElementNotFoundException { GraphicNode node0 = (GraphicNode) styleGroups.getNode(from); GraphicNode node1 = (GraphicNode) styleGroups.getNode(to); if (node0 != null && node1 != null) { ArrayList<GraphicEdge> edges0 = connectivity.get(node0); ArrayList<GraphicEdge> edges1 = connectivity.get(node1); for (GraphicEdge edge0 : edges0) { for (GraphicEdge edge1 : edges1) { if (edge0 == edge1) { removeEdge(edge0.getId()); return (T) edge0; } } } } return null; } #location 12 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code protected void insertKeyValues(KeyValues kv) throws IOException { if (kv.key != null) { if (inGraph) { if (kv.key.equals("node") || kv.key.equals("add-node")) { handleAddNode(kv); } else if (kv.key.equals("edge") || kv.key.equals("add-edge")) { handleAddEdge(kv); } else if (kv.key.equals("del-node") || kv.key.equals("-node")) { handleDelNode(kv); } else if (kv.key.equals("del-edge") || kv.key.equals("-edge")) { handleDelEdge(kv); } else if (kv.key.equals("change-node") || kv.key.equals("+node")) { handleChangeNode(kv); } else if (kv.key.equals("change-edge") || kv.key.equals("+edge")) { handleChangeEdge(kv); } else if (kv.key.equals("step")) { handleStep(kv); } else if (kv.key.equals("directed")) { setDirected(getBoolean(kv.get("directed"))); } else { if (kv.key.startsWith("-")) { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key.substring(1), AttributeChangeEvent.REMOVE, null, null); } else { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key, AttributeChangeEvent.ADD, null, compositeAttribute(kv)); } } } else { // XXX Should we consider these events pertain to the graph ? // XXX if (kv.key.startsWith("-")) { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key.substring(1), AttributeChangeEvent.REMOVE, null, null); } else { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key, AttributeChangeEvent.ADD, null, compositeAttribute(kv)); } } } }
#vulnerable code protected void insertKeyValues(KeyValues kv) throws IOException { if (kv.key != null) { if (inGraph) { if (kv.key.equals("node") || kv.key.equals("add-node")) { handleAddNode(kv); } else if (kv.key.equals("edge") || kv.key.equals("add-edge")) { handleAddEdge(kv); } else if (kv.key.equals("del-node") || kv.key.equals("-node")) { handleDelNode(kv); } else if (kv.key.equals("del-edge") || kv.key.equals("-edge")) { handleDelEdge(kv); } else if (kv.key.equals("change-node") || kv.key.equals("+node")) { handleChangeNode(kv); } else if (kv.key.equals("change-edge") || kv.key.equals("+edge")) { handleChangeEdge(kv); } else if (kv.key.equals("step")) { handleStep(kv); } else if (kv.key.equals("directed")) { setDirected(getBoolean((String) kv.get("directed"))); } else { if (kv.key.startsWith("-")) { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key.substring(1), AttributeChangeEvent.REMOVE, null, null); } else { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key, AttributeChangeEvent.ADD, null, compositeAttribute(kv)); } } } else { // XXX Should we consider these events pertain to the graph ? // XXX if (kv.key.startsWith("-")) { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key.substring(1), AttributeChangeEvent.REMOVE, null, null); } else { gml.sendAttributeChangedEvent(sourceId, sourceId, ElementType.GRAPH, kv.key, AttributeChangeEvent.ADD, null, compositeAttribute(kv)); } } } } #location 21 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code private void savePropertyGroup(String fileName, String group, InputStream inputstream) throws IOException { List<PropertyItemVO> items = parseInputFile(inputstream); if (!items.isEmpty()) { String groupFullPath = ZKPaths.makePath(ZKPaths.makePath(nodeAuth.getAuthedNode(), versionMB.getSelectedVersion()), group); String commentFullPath = ZKPaths.makePath(ZKPaths.makePath(nodeAuth.getAuthedNode(), versionMB.getSelectedVersion() + "$"), group); boolean created = nodeService.createProperty(groupFullPath, null); if (created) { for (PropertyItemVO item : items) { nodeService.createProperty(ZKPaths.makePath(groupFullPath, item.getName()), item.getValue()); nodeService.createProperty(ZKPaths.makePath(commentFullPath, item.getName()), item.getComment()); } refreshGroup(); FacesContext.getCurrentInstance().addMessage(null, new FacesMessage("Succesful", fileName + " is uploaded.")); } else { FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Create group with file error.", fileName)); } } else { FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "File is empty.", fileName)); } }
#vulnerable code private void savePropertyGroup(String fileName, String group, InputStream inputstream) throws IOException { Reader reader = new InputStreamReader(inputstream, Charsets.UTF_8); Properties properties = new Properties(); properties.load(reader); if (!properties.isEmpty()) { String authedNode = ZKPaths.makePath(nodeAuth.getAuthedNode(), versionMB.getSelectedVersion()); String groupPath = ZKPaths.makePath(authedNode, group); boolean created = nodeService.createProperty(groupPath, null); if (created) { Map<String, String> map = Maps.fromProperties(properties); for (Entry<String, String> entry : map.entrySet()) { nodeService.createProperty(ZKPaths.makePath(groupPath, entry.getKey()), entry.getValue()); } refreshGroup(); FacesContext.getCurrentInstance().addMessage(null, new FacesMessage("Succesful", fileName + " is uploaded.")); } else { FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Create group with file error.", fileName)); } } else { FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "File is empty.", fileName)); } } #location 4 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code public static Map<String, String> loadLocalProperties(String rootNode, String group) { Preconditions.checkArgument(!Strings.isNullOrEmpty(rootNode) && !Strings.isNullOrEmpty(group), "rootNode or group cannot be empty."); Map<String, String> properties = null; final String localOverrideFile = findLocalOverrideFile(); InputStream in = null; try { in = LocalOverrideFileLoader.class.getClassLoader().getResourceAsStream(localOverrideFile); if (in != null) { final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); final DocumentBuilder builder = factory.newDocumentBuilder(); final Document doc = builder.parse(in); final Element factoriesNode = Preconditions.checkNotNull(doc.getDocumentElement(), "Root xml node node-factories not exists."); Node factoryNode = findChild(factoriesNode, "node-factory", "root", rootNode); if (factoryNode != null) { Node nodeGroup = findChild(factoryNode, "group", "id", group); if (nodeGroup != null) { NodeList childNodes = nodeGroup.getChildNodes(); int nodeCount = childNodes.getLength(); if (nodeCount > 0) { properties = Maps.newHashMap(); for (int i = 0; i < nodeCount; i++) { Node item = childNodes.item(i); if (item.hasAttributes()) { NamedNodeMap attributes = item.getAttributes(); Node keyAttr = attributes.getNamedItem("key"); if (keyAttr != null) { String propKey = keyAttr.getNodeValue(); String propVal = item.getFirstChild().getNodeValue(); if (propKey != null && propVal != null) { properties.put(propKey, propVal); } } } } } } } } } catch (ParserConfigurationException e) { throw Throwables.propagate(e); } catch (SAXException e) { throw Throwables.propagate(e); } catch (IOException e) { throw Throwables.propagate(e); } finally { if (in != null) { try { in.close(); } catch (IOException e) { // IGNORE } } } return properties; }
#vulnerable code public static Map<String, String> loadLocalProperties(String rootNode, String group) { Preconditions.checkArgument(!Strings.isNullOrEmpty(rootNode) && !Strings.isNullOrEmpty(group), "rootNode or group cannot be empty."); Map<String, String> properties = null; final String localOverrideFile = findLocalOverrideFile(); InputStream in = null; try { in = LocalOverrideFileLoader.class.getClassLoader().getResourceAsStream(localOverrideFile); if (in != null) { final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); final DocumentBuilder builder = factory.newDocumentBuilder(); final Document doc = builder.parse(in); final Element factoriesNode = Preconditions.checkNotNull(doc.getDocumentElement(), "Root xml node node-factories not exists."); Node factoryNode = findChild(factoriesNode, "node-factory", "root", rootNode); if (factoriesNode != null) { Node nodeGroup = findChild(factoryNode, "group", "id", group); if (nodeGroup != null) { NodeList childNodes = nodeGroup.getChildNodes(); int nodeCount = childNodes.getLength(); if (nodeCount > 0) { properties = Maps.newHashMap(); for (int i = 0; i < nodeCount; i++) { Node item = childNodes.item(i); if (item.hasAttributes()) { NamedNodeMap attributes = item.getAttributes(); Node keyAttr = attributes.getNamedItem("key"); if (keyAttr != null) { String propKey = keyAttr.getNodeValue(); String propVal = item.getFirstChild().getNodeValue(); if (propKey != null && propVal != null) { properties.put(propKey, propVal); } } } } } } } } } catch (ParserConfigurationException e) { throw Throwables.propagate(e); } catch (SAXException e) { throw Throwables.propagate(e); } catch (IOException e) { throw Throwables.propagate(e); } finally { if (in != null) { try { in.close(); } catch (IOException e) { // IGNORE } } } return properties; } #location 18 #vulnerability type NULL_DEREFERENCE
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
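The bug here is subtle: the vulnerable code null-checks factoriesNode, which is already guaranteed non-null, and then dereferences factoryNode, which findChild can return as null. The fix tests the variable that is actually used. A minimal sketch of the guard-the-wrong-variable mistake:

// Minimal sketch: the guard must test the same variable that is
// dereferenced. Checking a similarly named (already non-null) variable
// leaves the real null unguarded.
class WrongGuardDemo {
    static String findChild(String parent, String name) {
        return name.equals("known") ? parent + "/" + name : null;
    }

    public static void main(String[] args) {
        String parent = "root";                      // known non-null
        String child = findChild(parent, "missing"); // null here
        if (child != null) {                         // guard the value actually used
            System.out.println(child.length());      // checking 'parent' instead would NPE here
        } else {
            System.out.println("child not found");
        }
    }
}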
#fixed code public static void main(String[] args) { String rootNode = "/projectx/modulex"; ZookeeperConfigProfile profile = new ZookeeperConfigProfile("zk.host", rootNode, true); ZookeeperConfigGroup dbConfigs = new ZookeeperConfigGroup(null, profile, "db"); dbConfigs.setConfigLocalCache(new ConfigLocalCache("/your/local/config/folder", rootNode)); }
#vulnerable code public static void main(String[] args) { ZookeeperConfigProfile profile = new ZookeeperConfigProfile("zk.host", "/projectx/modulex", true); profile.setLocalCacheFolder("/your/local/config/folder"); ConfigGroup dbConfigs = new ZookeeperConfigGroup(null, profile, "db"); } #location 5 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code public static void main(String[] args) { ConfigFactory configFactory = new ConfigFactory("zoo.host1:8181", "/projectx/modulex", true); ConfigNode propertyGroup1 = configFactory.getConfigNode("property-group1"); System.out.println(propertyGroup1); // Listen changes propertyGroup1.register(new IObserver() { @Override public void notifiy(String data, String value) { // Some initialization } }); String stringProperty = propertyGroup1.getProperty("string_property_key"); Preconditions.checkState("Welcome here.".equals(stringProperty)); String intProperty = propertyGroup1.getProperty("int_property_key"); Preconditions.checkState(1123 == Integer.parseInt(intProperty)); Object lock = new Object(); synchronized (lock) { try { while (true) lock.wait(); } catch (InterruptedException e) { e.printStackTrace(); } } }
#vulnerable code public static void main(String[] args) { ConfigFactory configFactory = new ConfigFactory("zoo.host1:8181", "/projectx/modulex"); ConfigNode propertyGroup1 = configFactory.getConfigNode("property-group1"); System.out.println(propertyGroup1); // Listen changes propertyGroup1.register(new IObserver() { @Override public void notifiy(String data, String value) { // Some initialization } }); String stringProperty = propertyGroup1.getProperty("string_property_key"); Preconditions.checkState("Welcome here.".equals(stringProperty)); String intProperty = propertyGroup1.getProperty("int_property_key"); Preconditions.checkState(1123 == Integer.parseInt(intProperty)); Object lock = new Object(); synchronized (lock) { try { while (true) lock.wait(); } catch (InterruptedException e) { e.printStackTrace(); } } } #location 4 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
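Both main-method pairs above carry the RESOURCE_LEAK label: config objects that plausibly own connections are created without any managed lifetime. The general defense, sketched with a hypothetical AutoCloseable client (the real config classes' close semantics are not shown in these snippets):

// Minimal sketch: a closeable object created and never stored cannot be
// closed later. Keeping the reference -- ideally inside try-with-resources --
// makes the lifetime explicit.
class HoldReferenceDemo {
    static class Client implements AutoCloseable {
        @Override public void close() { System.out.println("closed"); }
    }

    public static void main(String[] args) {
        // Leaky shape: new Client(); -- nothing can ever close it.
        try (Client client = new Client()) { // reference held, closed deterministically
            System.out.println("using " + client);
        }
    }
}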
#fixed code @ManagedOperation public void setProperty(String name, String value) { if (!environment.getPropertySources().contains(MANAGER_PROPERTY_SOURCE)) { synchronized (map) { if (!environment.getPropertySources().contains(MANAGER_PROPERTY_SOURCE)) { MapPropertySource source = new MapPropertySource( MANAGER_PROPERTY_SOURCE, map); environment.getPropertySources().addFirst(source); } } } if (!value.equals(environment.getProperty(name))) { map.put(name, value); publish(new EnvironmentChangeEvent(publisher, Collections.singleton(name))); } }
#vulnerable code @ManagedOperation public void setProperty(String name, String value) { if (!environment.getPropertySources().contains(MANAGER_PROPERTY_SOURCE)) { synchronized (map) { if (!environment.getPropertySources().contains(MANAGER_PROPERTY_SOURCE)) { MapPropertySource source = new MapPropertySource( MANAGER_PROPERTY_SOURCE, map); environment.getPropertySources().addFirst(source); } } } if (!value.equals(environment.getProperty(name))) { map.put(name, value); publish(new EnvironmentChangeEvent(Collections.singleton(name))); } } #location 16 #vulnerability type THREAD_SAFETY_VIOLATION
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @Override public void close() { executorService.shutdown(); }
#vulnerable code @Override public void close() { if (executorService != null) { synchronized (InetUtils.class) { if (executorService != null) { executorService.shutdown(); executorService = null; } } } } #location 3 #vulnerability type THREAD_SAFETY_VIOLATION
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
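The close() fix replaces a check-then-act sequence (test for null, shut down, null the field) with a single unconditional call; ExecutorService.shutdown() is itself thread-safe and idempotent, so the simpler version leaves no window for two threads to race between the check and the assignment. A runnable sketch:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Minimal sketch: ExecutorService.shutdown() may be called from any thread,
// any number of times, so an unconditional call avoids the check-then-null
// race in the vulnerable version.
class CloseDemo implements AutoCloseable {
    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    @Override
    public void close() {
        executor.shutdown(); // safe and idempotent
    }

    public static void main(String[] args) {
        CloseDemo d = new CloseDemo();
        d.close();
        d.close(); // second call is harmless
    }
}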
#fixed code private String normalizePem(String data) { PEMKeyPair pemKeyPair = null; try (PEMParser pemParser = new PEMParser(new StringReader(data))) { pemKeyPair = (PEMKeyPair) pemParser.readObject(); PrivateKeyInfo privateKeyInfo = pemKeyPair.getPrivateKeyInfo(); StringWriter textWriter = new StringWriter(); try (PemWriter pemWriter = new PemWriter(textWriter)) { PemObjectGenerator pemObjectGenerator = new MiscPEMGenerator( privateKeyInfo); pemWriter.writeObject(pemObjectGenerator); pemWriter.flush(); return textWriter.toString(); } } catch (IOException e) { throw new RuntimeException(e.getMessage(), e); } }
#vulnerable code private String normalizePem(String data) { PEMParser pemParser = new PEMParser(new StringReader(data)); PEMKeyPair pemKeyPair = null; try { pemKeyPair = (PEMKeyPair) pemParser.readObject(); PrivateKeyInfo privateKeyInfo = pemKeyPair.getPrivateKeyInfo(); StringWriter textWriter = new StringWriter(); PemWriter pemWriter = new PemWriter(textWriter); PemObjectGenerator pemObjectGenerator = new MiscPEMGenerator(privateKeyInfo); pemWriter.writeObject(pemObjectGenerator); pemWriter.flush(); return textWriter.toString(); } catch (IOException e) { throw new RuntimeException(e.getMessage(), e); } } #location 5 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
#fixed code @Test void shouldAutoEnableCaching() { ApplicationContextRunner contextRunner = baseApplicationRunner(); contextRunner.run(context -> { assertThat(context.getBeansOfType(CacheManager.class)).hasSize(1); assertThat(((CacheManager) context.getBean("loadBalancerCacheManager")) .getCacheNames()).hasSize(1); assertThat(context.getBean("loadBalancerCacheManager")) .isInstanceOf(CaffeineCacheManager.class); assertThat(((CacheManager) context.getBean("loadBalancerCacheManager")) .getCacheNames()).contains("CachingServiceInstanceListSupplierCache"); }); }
#vulnerable code @Test void shouldAutoEnableCaching() { AnnotationConfigApplicationContext context = setup(""); assertThat(context.getBeansOfType(CacheManager.class)).isNotEmpty(); assertThat(context.getBeansOfType(CacheManager.class).get("cacheManager")) .isNotInstanceOf(NoOpCacheManager.class); } #location 5 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.
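The test fix swaps a hand-built ApplicationContext, which was never closed, for a runner that owns the context's lifecycle and passes it to a lambda. The callback-owns-the-resource shape in miniature:

import java.util.function.Consumer;

// Minimal sketch of the test change above: a callback-style runner owns the
// resource's lifecycle, so the test body can no longer forget to close it.
class RunnerDemo {
    static class Context implements AutoCloseable {
        @Override public void close() { System.out.println("context closed"); }
    }

    static void run(Consumer<Context> body) {
        try (Context ctx = new Context()) { // created and closed by the runner
            body.accept(ctx);
        }
    }

    public static void main(String[] args) {
        run(ctx -> System.out.println("asserting against " + ctx));
    }
}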
#fixed code @Test public void newConnectionManagerWithTTL() { HttpClientConnectionManager connectionManager = new DefaultApacheHttpClientConnectionManagerFactory() .newConnectionManager(false, 2, 6, 56L, TimeUnit.DAYS, null); then(((PoolingHttpClientConnectionManager) connectionManager) .getDefaultMaxPerRoute()).isEqualTo(6); then(((PoolingHttpClientConnectionManager) connectionManager).getMaxTotal()) .isEqualTo(2); Object pool = getField((connectionManager), "pool"); then((Long) getField(pool, "timeToLive")).isEqualTo(new Long(56)); TimeUnit timeUnit = getField(pool, "timeUnit"); then(timeUnit).isEqualTo(TimeUnit.DAYS); }
#vulnerable code @Test public void newConnectionManagerWithTTL() throws Exception { HttpClientConnectionManager connectionManager = new DefaultApacheHttpClientConnectionManagerFactory() .newConnectionManager(false, 2, 6, 56L, TimeUnit.DAYS, null); then(((PoolingHttpClientConnectionManager) connectionManager) .getDefaultMaxPerRoute()).isEqualTo(6); then(((PoolingHttpClientConnectionManager) connectionManager).getMaxTotal()) .isEqualTo(2); Object pool = getField(((PoolingHttpClientConnectionManager) connectionManager), "pool"); then((Long) getField(pool, "timeToLive")).isEqualTo(new Long(56)); TimeUnit timeUnit = getField(pool, "tunit"); then(timeUnit).isEqualTo(TimeUnit.DAYS); } #location 9 #vulnerability type RESOURCE_LEAK
Above is the vulnerable code; please generate the patch based on the given location and vulnerability type.