| input | output | instruction |
|---|---|---|
| stringlengths 205 to 73.3k | stringlengths 64 to 73.2k | stringclasses 1 value |
#vulnerable code
private long loadPredictionModel(Map<Object, PredictionModel> label2model, File file, PrimitiveObjectInspector labelOI, PrimitiveObjectInspector featureOI, WritableFloatObjectInspector weightOI)
throws IOException, SerDeException {
long count = 0L;
if(!file.exists()) {
return count;
}
if(!file.getName().endsWith(".crc")) {
if(file.isDirectory()) {
for(File f : file.listFiles()) {
count += loadPredictionModel(label2model, f, labelOI, featureOI, weightOI);
}
} else {
LazySimpleSerDe serde = HiveUtils.getLineSerde(labelOI, featureOI, weightOI);
StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector();
StructField c1ref = lineOI.getStructFieldRef("c1");
StructField c2ref = lineOI.getStructFieldRef("c2");
StructField c3ref = lineOI.getStructFieldRef("c3");
PrimitiveObjectInspector c1refOI = (PrimitiveObjectInspector) c1ref.getFieldObjectInspector();
PrimitiveObjectInspector c2refOI = (PrimitiveObjectInspector) c2ref.getFieldObjectInspector();
FloatObjectInspector c3refOI = (FloatObjectInspector) c3ref.getFieldObjectInspector();
final BufferedReader reader = HadoopUtils.getBufferedReader(file);
try {
String line;
while((line = reader.readLine()) != null) {
count++;
Text lineText = new Text(line);
Object lineObj = serde.deserialize(lineText);
List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj);
Object f0 = fields.get(0);
Object f1 = fields.get(1);
Object f2 = fields.get(2);
if(f0 == null || f1 == null || f2 == null) {
continue; // avoid the case that key or value is null
}
Object label = c1refOI.getPrimitiveWritableObject(c1refOI.copyObject(f0));
PredictionModel model = label2model.get(label);
if(model == null) {
model = createModel();
label2model.put(label, model);
}
Object k = c2refOI.getPrimitiveWritableObject(c2refOI.copyObject(f1));
float v = c3refOI.get(f2);
model.set(k, new WeightValue(v, false));
}
} finally {
reader.close();
}
}
}
return count;
}
#location 47
#vulnerability type RESOURCE_LEAK
|
#fixed code
private long loadPredictionModel(Map<Object, PredictionModel> label2model, File file, PrimitiveObjectInspector labelOI, PrimitiveObjectInspector featureOI, WritableFloatObjectInspector weightOI)
throws IOException, SerDeException {
long count = 0L;
if(!file.exists()) {
return count;
}
if(!file.getName().endsWith(".crc")) {
if(file.isDirectory()) {
for(File f : file.listFiles()) {
count += loadPredictionModel(label2model, f, labelOI, featureOI, weightOI);
}
} else {
LazySimpleSerDe serde = HiveUtils.getLineSerde(labelOI, featureOI, weightOI);
StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector();
StructField c1ref = lineOI.getStructFieldRef("c1");
StructField c2ref = lineOI.getStructFieldRef("c2");
StructField c3ref = lineOI.getStructFieldRef("c3");
PrimitiveObjectInspector c1refOI = (PrimitiveObjectInspector) c1ref.getFieldObjectInspector();
PrimitiveObjectInspector c2refOI = (PrimitiveObjectInspector) c2ref.getFieldObjectInspector();
FloatObjectInspector c3refOI = (FloatObjectInspector) c3ref.getFieldObjectInspector();
BufferedReader reader = null;
try {
reader = HadoopUtils.getBufferedReader(file);
String line;
while((line = reader.readLine()) != null) {
count++;
Text lineText = new Text(line);
Object lineObj = serde.deserialize(lineText);
List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj);
Object f0 = fields.get(0);
Object f1 = fields.get(1);
Object f2 = fields.get(2);
if(f0 == null || f1 == null || f2 == null) {
continue; // avoid the case that key or value is null
}
Object label = c1refOI.getPrimitiveWritableObject(c1refOI.copyObject(f0));
PredictionModel model = label2model.get(label);
if(model == null) {
model = createModel();
label2model.put(label, model);
}
Object k = c2refOI.getPrimitiveWritableObject(c2refOI.copyObject(f1));
float v = c3refOI.get(f2);
model.set(k, new WeightValue(v, false));
}
} finally {
IOUtils.closeQuietly(reader);
}
}
}
return count;
}
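On Java 7 and later, the same guarantee can come from try-with-resources instead of an explicit finally block; a minimal sketch, assuming plain java.io rather than the HadoopUtils helper used in the patch:
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;

public final class TryWithResourcesSketch {
    // Counts lines in a file; the reader is closed automatically even when
    // readLine() throws, which is the guarantee the try/finally above restores.
    static long countLines(File file) throws IOException {
        long count = 0L;
        try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
            while (reader.readLine() != null) {
                count++;
            }
        }
        return count;
    }
}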
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void test2() {
EuclidDistanceUDF udf = new EuclidDistanceUDF();
List<String> ftvec1 = Arrays.asList("1:1.0", "2:3.0", "3:3.0");
List<String> ftvec2 = Arrays.asList("1:2.0", "3:6.0");
FloatWritable d = udf.evaluate(ftvec1, ftvec2);
Assert.assertEquals((float) Math.sqrt(1.0 + 9.0 + 9.0), d.get(), 0.f);
}
#location 6
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Test
public void test2() {
List<String> ftvec1 = Arrays.asList("1:1.0", "2:3.0", "3:3.0");
List<String> ftvec2 = Arrays.asList("1:2.0", "3:6.0");
double d = EuclidDistanceUDF.euclidDistance(ftvec1, ftvec2);
Assert.assertEquals(Math.sqrt(1.0 + 9.0 + 9.0), d, 0.f);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static double evalPredict(RegressionTree tree, double[] x) throws HiveException,
IOException {
String opScript = tree.predictOpCodegen(StackMachine.SEP);
debugPrint(opScript);
DoubleWritable result = (DoubleWritable) TreePredictByStackMachineUDF.evaluate(opScript, x, false);
return result.get();
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
#fixed code
private static double evalPredict(RegressionTree tree, double[] x) throws HiveException,
IOException {
TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF();
String opScript = tree.predictOpCodegen(StackMachine.SEP);
debugPrint(opScript);
DoubleWritable result = (DoubleWritable) udf.evaluate(opScript, x, false);
udf.close();
return result.get();
}
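If evaluate() throws, the close() call above is skipped; a variant that always releases the UDF, assuming close() is safe to call after a failed evaluate(), moves it into a finally block:
private static double evalPredict(RegressionTree tree, double[] x) throws HiveException,
        IOException {
    TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF();
    try {
        String opScript = tree.predictOpCodegen(StackMachine.SEP);
        debugPrint(opScript);
        DoubleWritable result = (DoubleWritable) udf.evaluate(opScript, x, false);
        return result.get();
    } finally {
        udf.close(); // released even when evaluate() throws
    }
}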
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 9
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
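The ObjectUtils helpers referenced above are expected to own the stream lifecycle themselves; the following is a hypothetical sketch (names and internals assumed, not the actual Hivemall implementation) of how such a helper closes its streams in all cases:
import java.io.ByteArrayInputStream;
import java.io.Externalizable;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.util.zip.InflaterInputStream;

final class DeserializeHelperSketch {
    // Hypothetical helper: reads an Externalizable from a byte range and closes
    // every stream it opened, whether or not readExternal() throws.
    static void readObjectSketch(byte[] src, int length, boolean compressed, Externalizable dst)
            throws IOException, ClassNotFoundException {
        InputStream in = new ByteArrayInputStream(src, 0, length);
        if (compressed) {
            in = new InflaterInputStream(in);
        }
        ObjectInputStream ois = new ObjectInputStream(in);
        try {
            dst.readExternal(ois);
        } finally {
            ois.close(); // also closes the wrapped byte/inflater streams
        }
    }
}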
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException {
ArrayList<String> opScript = tree.predictOpCodegen();
System.out.println(opScript);
VMTreePredictTrustedUDF udf = new VMTreePredictTrustedUDF();
udf.initialize(new ObjectInspector[] {
PrimitiveObjectInspectorFactory.javaStringObjectInspector,
ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) });
IntWritable result = (IntWritable) udf.evaluate(opScript, x, true);
udf.close();
return result.get();
}
#location 10
#vulnerability type RESOURCE_LEAK
|
#fixed code
private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException {
ArrayList<String> opScript = tree.predictOpCodegen();
System.out.println(opScript);
TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF();
udf.initialize(new ObjectInspector[] {
PrimitiveObjectInspectorFactory.javaStringObjectInspector,
ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) });
IntWritable result = (IntWritable) udf.evaluate(opScript, x, true);
udf.close();
return result.get();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static void readObject(@Nonnull final byte[] src, @Nonnull final Externalizable dst)
throws IOException, ClassNotFoundException {
readObject(new FastByteArrayInputStream(src), dst);
}
#location 3
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static void readObject(@Nonnull final byte[] src, @Nonnull final Externalizable dst)
throws IOException, ClassNotFoundException {
readObject(src, src.length, dst);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream();
OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos;
ObjectOutputStream oos = null;
try {
oos = new ObjectOutputStream(wrapped);
_root.writeExternal(oos);
oos.flush();
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(oos);
}
return bos.toByteArray_clear();
}
#location 17
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
try {
if (compress) {
return ObjectUtils.toCompressedBytes(_root);
} else {
return ObjectUtils.toBytes(_root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream();
OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos;
ObjectOutputStream oos = null;
try {
oos = new ObjectOutputStream(wrapped);
_root.writeExternal(oos);
oos.flush();
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(oos);
}
return bos.toByteArray_clear();
}
#location 10
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
try {
if (compress) {
return ObjectUtils.toCompressedBytes(_root);
} else {
return ObjectUtils.toBytes(_root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testEvaluate() {
CosineSimilarityUDF cosine = new CosineSimilarityUDF();
{
List<String> ftvec1 = Arrays.asList("bbb:1.4", "aaa:0.9", "ccc");
Assert.assertEquals(1.f, cosine.evaluate(ftvec1, ftvec1).get(), 0.0);
}
Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")).get(), 0.0);
Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")).get(), 0.0);
Assert.assertEquals(1.f, cosine.evaluate(Arrays.asList("a", "b"), Arrays.asList("a", "b")).get(), 0.0);
Assert.assertEquals(0.5f, cosine.evaluate(Arrays.asList("a", "b"), Arrays.asList("a", "c")).get(), 0.0);
Assert.assertEquals(-1.f, cosine.evaluate(Arrays.asList("a:1.0"), Arrays.asList("a:-1.0")).get(), 0.0);
Assert.assertTrue(cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")).get() > 0.f);
Assert.assertTrue(cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")).get() > 0.f);
Assert.assertTrue((cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange", "apple"))).get() > (cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange"))).get());
Assert.assertEquals(1.0f, cosine.evaluate(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))).get(), 0.0);
Assert.assertEquals(1.0f, cosine.evaluate(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))).get(), 0.0);
{
List<String> tokens1 = Arrays.asList("1:1,2:1,3:1,4:1,5:0,6:1,7:1,8:1,9:0,10:1,11:1".split(","));
List<String> tokens2 = Arrays.asList("1:1,2:1,3:0,4:1,5:1,6:1,7:1,8:0,9:1,10:1,11:1".split(","));
Assert.assertEquals(0.77777f, cosine.evaluate(tokens1, tokens2).get(), 0.00001f);
}
{
List<String> tokens1 = Arrays.asList("1 2 3 4 6 7 8 10 11".split("\\s+"));
List<String> tokens2 = Arrays.asList("1 2 4 5 6 7 9 10 11".split("\\s+"));
double dotp = 1 + 1 + 0 + 1 + 0 + 1 + 1 + 0 + 0 + 1 + 1;
double norm = Math.sqrt(tokens1.size()) * Math.sqrt(tokens2.size());
Assert.assertEquals(dotp / norm, cosine.evaluate(tokens1, tokens2).get(), 0.00001f);
Assert.assertEquals(dotp / norm, cosine.evaluate(tokens1, tokens2).get(), 0.00001f);
Assert.assertEquals(dotp / norm, cosine.evaluate(Arrays.asList("1", "2", "3", "4", "6", "7", "8", "10", "11"), Arrays.asList("1", "2", "4", "5", "6", "7", "9", "10", "11")).get(), 0.00001f);
}
Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("1", "2", "3"), Arrays.asList("4", "5")).get(), 0.0);
Assert.assertEquals(1.f, cosine.evaluate(Arrays.asList("1", "2"), Arrays.asList("1", "2")).get(), 0.0);
}
#location 45
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Test
public void testEvaluate() throws IOException {
CosineSimilarityUDF cosine = new CosineSimilarityUDF();
{
List<String> ftvec1 = Arrays.asList("bbb:1.4", "aaa:0.9", "ccc");
Assert.assertEquals(1.f, cosine.evaluate(ftvec1, ftvec1).get(), 0.0);
}
Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")).get(), 0.0);
Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("a", "b", "c"), Arrays.asList("d", "e")).get(), 0.0);
Assert.assertEquals(1.f, cosine.evaluate(Arrays.asList("a", "b"), Arrays.asList("a", "b")).get(), 0.0);
Assert.assertEquals(0.5f, cosine.evaluate(Arrays.asList("a", "b"), Arrays.asList("a", "c")).get(), 0.0);
Assert.assertEquals(-1.f, cosine.evaluate(Arrays.asList("a:1.0"), Arrays.asList("a:-1.0")).get(), 0.0);
Assert.assertTrue(cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")).get() > 0.f);
Assert.assertTrue(cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "apple")).get() > 0.f);
Assert.assertTrue((cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange", "apple"))).get() > (cosine.evaluate(Arrays.asList("apple", "orange"), Arrays.asList("banana", "orange"))).get());
Assert.assertEquals(1.0f, cosine.evaluate(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))).get(), 0.0);
Assert.assertEquals(1.0f, cosine.evaluate(Arrays.asList("This is a sentence with seven tokens".split(" ")), Arrays.<String> asList("This is a sentence with seven tokens".split(" "))).get(), 0.0);
{
List<String> tokens1 = Arrays.asList("1:1,2:1,3:1,4:1,5:0,6:1,7:1,8:1,9:0,10:1,11:1".split(","));
List<String> tokens2 = Arrays.asList("1:1,2:1,3:0,4:1,5:1,6:1,7:1,8:0,9:1,10:1,11:1".split(","));
Assert.assertEquals(0.77777f, cosine.evaluate(tokens1, tokens2).get(), 0.00001f);
}
{
List<String> tokens1 = Arrays.asList("1 2 3 4 6 7 8 10 11".split("\\s+"));
List<String> tokens2 = Arrays.asList("1 2 4 5 6 7 9 10 11".split("\\s+"));
double dotp = 1 + 1 + 0 + 1 + 0 + 1 + 1 + 0 + 0 + 1 + 1;
double norm = Math.sqrt(tokens1.size()) * Math.sqrt(tokens2.size());
Assert.assertEquals(dotp / norm, cosine.evaluate(tokens1, tokens2).get(), 0.00001f);
Assert.assertEquals(dotp / norm, cosine.evaluate(tokens1, tokens2).get(), 0.00001f);
Assert.assertEquals(dotp / norm, cosine.evaluate(Arrays.asList("1", "2", "3", "4", "6", "7", "8", "10", "11"), Arrays.asList("1", "2", "4", "5", "6", "7", "9", "10", "11")).get(), 0.00001f);
}
Assert.assertEquals(0.f, cosine.evaluate(Arrays.asList("1", "2", "3"), Arrays.asList("4", "5")).get(), 0.0);
Assert.assertEquals(1.f, cosine.evaluate(Arrays.asList("1", "2"), Arrays.asList("1", "2")).get(), 0.0);
cosine.close();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public float getCovariance(float scale) {
assert (num_updates > 0) : num_updates;
return (sum_inv_covar * scale) * num_updates; // Harmonic mean
}
#location 3
#vulnerability type UNSAFE_GUARDED_BY_ACCESS
|
#fixed code
@Override
public float getCovariance(float scale) {
return 1.f / (sum_inv_covar * scale);
}
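A hedged reading of this change: if sum_inv_covar accumulates the inverses 1/covar_i of the individual covariances, then the aggregate covariance is the reciprocal of that (scaled) sum rather than a product with the update count. A minimal sketch of the arithmetic, with the accumulator semantics assumed from the field name:
public final class CovarianceAggregationSketch {
    // Assumed semantics: each update adds 1/covar_i to sumInvCovar, so the
    // combined covariance is 1 / (sumInvCovar * scale).
    public static void main(String[] args) {
        float[] covars = {0.5f, 0.25f, 1.0f};
        float sumInvCovar = 0.f;
        for (float c : covars) {
            sumInvCovar += 1.f / c; // 2 + 4 + 1 = 7
        }
        float scale = 1.f;
        float aggregated = 1.f / (sumInvCovar * scale);
        System.out.println(aggregated); // ~0.142857 = 1/7
    }
}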
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 9
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public final short getClock() {
return totalClock;
}
#location 2
#vulnerability type UNSAFE_GUARDED_BY_ACCESS
|
#fixed code
public final short getClock() {
return globalClock;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static int evalPredict(RegressionTree tree, double[] x) throws HiveException,
IOException {
ArrayList<String> opScript = tree.predictOpCodegen();
System.out.println(opScript);
VMTreePredictTrustedUDF udf = new VMTreePredictTrustedUDF();
udf.initialize(new ObjectInspector[] {
PrimitiveObjectInspectorFactory.javaStringObjectInspector,
ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) });
IntWritable result = (IntWritable) udf.evaluate(opScript, x, true);
udf.close();
return result.get();
}
#location 11
#vulnerability type RESOURCE_LEAK
|
#fixed code
private static int evalPredict(RegressionTree tree, double[] x) throws HiveException,
IOException {
ArrayList<String> opScript = tree.predictOpCodegen();
System.out.println(opScript);
TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF();
udf.initialize(new ObjectInspector[] {
PrimitiveObjectInspectorFactory.javaStringObjectInspector,
ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) });
IntWritable result = (IntWritable) udf.evaluate(opScript, x, true);
udf.close();
return result.get();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 18
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Nonnull
public static FinishableOutputStream createOutputStream(@Nonnull final OutputStream out,
@Nonnull final CompressionAlgorithm algo) {
switch (algo) {
case deflate: {
final DeflaterOutputStream deflate = new DeflaterOutputStream(out);
return new FinishableOutputStreamAdapter(deflate) {
@Override
public void finish() throws IOException {
deflate.finish();
deflate.flush();
IOUtils.finishStream(out);
}
};
}
case deflate_l7: {
final Deflater l7 = new Deflater(7);
final DeflaterOutputStream deflate = new hivemall.utils.io.DeflaterOutputStream(
out, l7);
return new FinishableOutputStreamAdapter(deflate) {
@Override
public void finish() throws IOException {
deflate.finish();
deflate.flush();
IOUtils.finishStream(out);
}
};
}
case xz: {
final LZMA2Options options;
try {
options = new LZMA2Options(LZMA2Options.PRESET_DEFAULT);
} catch (UnsupportedOptionsException e) {
throw new IllegalStateException("LZMA2Option configuration failed", e);
}
final XZOutputStream xz;
try {
xz = new XZOutputStream(out, options);
} catch (IOException e) {
throw new IllegalStateException("Failed to encode by XZ", e);
}
return new FinishableOutputStreamAdapter(xz) {
@Override
public void finish() throws IOException {
xz.finish();
IOUtils.finishStream(out);
}
};
}
case lzma2: { // level 6
final LZMA2Options options;
try {
options = new LZMA2Options(LZMA2Options.PRESET_DEFAULT);
} catch (UnsupportedOptionsException e) {
throw new IllegalStateException("LZMA2Option configuration failed", e);
}
FinishableWrapperOutputStream wrapped = new FinishableWrapperOutputStream(out);
final org.tukaani.xz.FinishableOutputStream lzma2 = options.getOutputStream(wrapped);
return new FinishableOutputStreamAdapter(lzma2) {
@Override
public void finish() throws IOException {
lzma2.finish();
IOUtils.finishStream(out);
}
};
}
case lzma2_l5: {
final LZMA2Options options;
try {
options = new LZMA2Options(5);
} catch (UnsupportedOptionsException e) {
throw new IllegalStateException("LZMA2Option configuration failed", e);
}
FinishableWrapperOutputStream wrapped = new FinishableWrapperOutputStream(out);
final org.tukaani.xz.FinishableOutputStream lzma2 = options.getOutputStream(wrapped);
return new FinishableOutputStreamAdapter(lzma2) {
@Override
public void finish() throws IOException {
lzma2.finish();
IOUtils.finishStream(out);
}
};
}
default:
throw new UnsupportedOperationException("Unsupported compression algorithm: "
+ algo);
}
}
#location 18
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Nonnull
public static FinishableOutputStream createOutputStream(@Nonnull final OutputStream out,
@Nonnull final CompressionAlgorithm algo) {
return createOutputStream(out, algo, DEFAULT_COMPRESSION_LEVEL);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException {
String opScript = tree.predictOpCodegen(StackMachine.SEP);
debugPrint(opScript);
IntWritable result = (IntWritable) TreePredictByStackMachineUDF.evaluate(opScript, x, true);
return result.get();
}
#location 5
#vulnerability type NULL_DEREFERENCE
|
#fixed code
private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException {
TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF();
String opScript = tree.predictOpCodegen(StackMachine.SEP);
debugPrint(opScript);
IntWritable result = (IntWritable) udf.evaluate(opScript, x, true);
udf.close();
return result.get();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void test1() {
EuclidDistanceUDF udf = new EuclidDistanceUDF();
List<String> ftvec1 = Arrays.asList("1:1.0", "2:2.0", "3:3.0");
List<String> ftvec2 = Arrays.asList("1:2.0", "2:4.0", "3:6.0");
FloatWritable d = udf.evaluate(ftvec1, ftvec2);
Assert.assertEquals((float) Math.sqrt(1.0 + 4.0 + 9.0), d.get(), 0.f);
}
#location 6
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Test
public void test1() {
List<String> ftvec1 = Arrays.asList("1:1.0", "2:2.0", "3:3.0");
List<String> ftvec2 = Arrays.asList("1:2.0", "2:4.0", "3:6.0");
double d = EuclidDistanceUDF.euclidDistance(ftvec1, ftvec2);
Assert.assertEquals(Math.sqrt(1.0 + 4.0 + 9.0), d, 0.f);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
final Attribute[] attrs = _attributes;
assert (attrs != null);
FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream();
OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos;
ObjectOutputStream oos = null;
try {
oos = new ObjectOutputStream(wrapped);
_root.writeExternal(oos);
oos.flush();
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(oos);
}
return bos.toByteArray_clear();
}
#location 13
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
try {
if (compress) {
return ObjectUtils.toCompressedBytes(_root);
} else {
return ObjectUtils.toBytes(_root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 9
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 18
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static void loadValues(OpenHashMap<Object, Object> map, File file, PrimitiveObjectInspector keyOI, PrimitiveObjectInspector valueOI)
throws IOException, SerDeException {
if(!file.exists()) {
return;
}
if(!file.getName().endsWith(".crc")) {
if(file.isDirectory()) {
for(File f : file.listFiles()) {
loadValues(map, f, keyOI, valueOI);
}
} else {
LazySimpleSerDe serde = HiveUtils.getKeyValueLineSerde(keyOI, valueOI);
StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector();
StructField keyRef = lineOI.getStructFieldRef("key");
StructField valueRef = lineOI.getStructFieldRef("value");
PrimitiveObjectInspector keyRefOI = (PrimitiveObjectInspector) keyRef.getFieldObjectInspector();
PrimitiveObjectInspector valueRefOI = (PrimitiveObjectInspector) valueRef.getFieldObjectInspector();
final BufferedReader reader = HadoopUtils.getBufferedReader(file);
try {
String line;
while((line = reader.readLine()) != null) {
Text lineText = new Text(line);
Object lineObj = serde.deserialize(lineText);
List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj);
Object f0 = fields.get(0);
Object f1 = fields.get(1);
Object k = keyRefOI.getPrimitiveJavaObject(f0);
Object v = valueRefOI.getPrimitiveWritableObject(valueRefOI.copyObject(f1));
map.put(k, v);
}
} finally {
reader.close();
}
}
}
}
#location 33
#vulnerability type RESOURCE_LEAK
|
#fixed code
private static void loadValues(OpenHashMap<Object, Object> map, File file, PrimitiveObjectInspector keyOI, PrimitiveObjectInspector valueOI)
throws IOException, SerDeException {
if(!file.exists()) {
return;
}
if(!file.getName().endsWith(".crc")) {
if(file.isDirectory()) {
for(File f : file.listFiles()) {
loadValues(map, f, keyOI, valueOI);
}
} else {
LazySimpleSerDe serde = HiveUtils.getKeyValueLineSerde(keyOI, valueOI);
StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector();
StructField keyRef = lineOI.getStructFieldRef("key");
StructField valueRef = lineOI.getStructFieldRef("value");
PrimitiveObjectInspector keyRefOI = (PrimitiveObjectInspector) keyRef.getFieldObjectInspector();
PrimitiveObjectInspector valueRefOI = (PrimitiveObjectInspector) valueRef.getFieldObjectInspector();
BufferedReader reader = null;
try {
reader = HadoopUtils.getBufferedReader(file);
String line;
while((line = reader.readLine()) != null) {
Text lineText = new Text(line);
Object lineObj = serde.deserialize(lineText);
List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj);
Object f0 = fields.get(0);
Object f1 = fields.get(1);
Object k = keyRefOI.getPrimitiveJavaObject(f0);
Object v = valueRefOI.getPrimitiveWritableObject(valueRefOI.copyObject(f1));
map.put(k, v);
}
} finally {
IOUtils.closeQuietly(reader);
}
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void setV(int i, int f, float nextVif) {
assert (i >= 1) : i;
float[] vi = _V.get(i);
vi[f] = nextVif;
}
#location 5
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
public void setV(int i, int f, float nextVif) {
assert (i >= 0) : i;
float[] vi = _V.get(i);
assert (vi != null) : "V[" + i + "] was null";
vi[f] = nextVif;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException {
ArrayList<String> opScript = tree.predictOpCodegen();
debugPrint(opScript);
TreePredictByStackMachineUDF udf = new TreePredictByStackMachineUDF();
udf.initialize(new ObjectInspector[] {
PrimitiveObjectInspectorFactory.javaStringObjectInspector,
ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) });
IntWritable result = (IntWritable) udf.evaluate(opScript, x, true);
udf.close();
return result.get();
}
#location 10
#vulnerability type RESOURCE_LEAK
|
#fixed code
private static int evalPredict(DecisionTree tree, double[] x) throws HiveException, IOException {
String opScript = tree.predictOpCodegen(StackMachine.SEP);
debugPrint(opScript);
IntWritable result = (IntWritable) TreePredictByStackMachineUDF.evaluate(opScript, x, true);
return result.get();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public float getCovariance(float scale) {
assert (num_updates > 0) : num_updates;
return (sum_inv_covar * scale) * num_updates; // Harmonic mean
}
#location 4
#vulnerability type UNSAFE_GUARDED_BY_ACCESS
|
#fixed code
@Override
public float getCovariance(float scale) {
return 1.f / (sum_inv_covar * scale);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 9
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testLargeOpcodes() throws IOException, ParseException, HiveException,
VMRuntimeException {
URL url = new URL("https://gist.githubusercontent.com/myui/b1a8e588f5750e3b658c/raw/a4074d37400dab2b13a2f43d81f5166188d3461a/vmtest01.txt");
InputStream is = new BufferedInputStream(url.openStream());
String opScript = IOUtils.toString(is);
double[] x = new double[] { 36, 2, 1, 2, 0, 436, 1, 0, 0, 13, 0, 567, 1, 595, 2, 1 };
StackMachine sm = new StackMachine();
sm.run(opScript, x);
Double result = sm.getResult();
assertEquals(0.d, result.doubleValue(), 0d);
}
#location 12
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testLargeOpcodes() throws IOException, ParseException, HiveException,
VMRuntimeException {
URL url = new URL("https://gist.githubusercontent.com/myui/b1a8e588f5750e3b658c/raw/a4074d37400dab2b13a2f43d81f5166188d3461a/vmtest01.txt");
InputStream is = new BufferedInputStream(url.openStream());
String opScript = IOUtils.toString(is);
StackMachine sm = new StackMachine();
sm.compile(opScript);
double[] x1 = new double[] { 36, 2, 1, 2, 0, 436, 1, 0, 0, 13, 0, 567, 1, 595, 2, 1 };
sm.eval(x1);
assertEquals(0.d, sm.getResult().doubleValue(), 0d);
double[] x2 = { 31, 2, 1, 2, 0, 354, 1, 0, 0, 30, 0, 502, 1, 9, 2, 2 };
sm.eval(x2);
assertEquals(1.d, sm.getResult().doubleValue(), 0d);
double[] x3 = { 39, 0, 0, 0, 0, 1756, 0, 0, 0, 3, 0, 939, 1, 0, 0, 0 };
sm.eval(x3);
assertEquals(0.d, sm.getResult().doubleValue(), 0d);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
FastMultiByteArrayOutputStream bos = new FastMultiByteArrayOutputStream();
OutputStream wrapped = compress ? new DeflaterOutputStream(bos) : bos;
ObjectOutputStream oos = null;
try {
oos = new ObjectOutputStream(wrapped);
_root.writeExternal(oos);
oos.flush();
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(oos);
}
return bos.toByteArray_clear();
}
#location 15
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Nonnull
public byte[] predictSerCodegen(boolean compress) throws HiveException {
try {
if (compress) {
return ObjectUtils.toCompressedBytes(_root);
} else {
return ObjectUtils.toBytes(_root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while serializing DecisionTree object", ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while serializing DecisionTree object", e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static long loadPredictionModel(PredictionModel model, File file, PrimitiveObjectInspector keyOI, WritableFloatObjectInspector valueOI)
throws IOException, SerDeException {
long count = 0L;
if(!file.exists()) {
return count;
}
if(!file.getName().endsWith(".crc")) {
if(file.isDirectory()) {
for(File f : file.listFiles()) {
count += loadPredictionModel(model, f, keyOI, valueOI);
}
} else {
LazySimpleSerDe serde = HiveUtils.getKeyValueLineSerde(keyOI, valueOI);
StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector();
StructField keyRef = lineOI.getStructFieldRef("key");
StructField valueRef = lineOI.getStructFieldRef("value");
PrimitiveObjectInspector keyRefOI = (PrimitiveObjectInspector) keyRef.getFieldObjectInspector();
FloatObjectInspector varRefOI = (FloatObjectInspector) valueRef.getFieldObjectInspector();
final BufferedReader reader = HadoopUtils.getBufferedReader(file);
try {
String line;
while((line = reader.readLine()) != null) {
count++;
Text lineText = new Text(line);
Object lineObj = serde.deserialize(lineText);
List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj);
Object f0 = fields.get(0);
Object f1 = fields.get(1);
if(f0 == null || f1 == null) {
continue; // avoid the case that key or value is null
}
Object k = keyRefOI.getPrimitiveWritableObject(keyRefOI.copyObject(f0));
float v = varRefOI.get(f1);
model.set(k, new WeightValue(v, false));
}
} finally {
reader.close();
}
}
}
return count;
}
#location 38
#vulnerability type RESOURCE_LEAK
|
#fixed code
private static long loadPredictionModel(PredictionModel model, File file, PrimitiveObjectInspector keyOI, WritableFloatObjectInspector valueOI)
throws IOException, SerDeException {
long count = 0L;
if(!file.exists()) {
return count;
}
if(!file.getName().endsWith(".crc")) {
if(file.isDirectory()) {
for(File f : file.listFiles()) {
count += loadPredictionModel(model, f, keyOI, valueOI);
}
} else {
LazySimpleSerDe serde = HiveUtils.getKeyValueLineSerde(keyOI, valueOI);
StructObjectInspector lineOI = (StructObjectInspector) serde.getObjectInspector();
StructField keyRef = lineOI.getStructFieldRef("key");
StructField valueRef = lineOI.getStructFieldRef("value");
PrimitiveObjectInspector keyRefOI = (PrimitiveObjectInspector) keyRef.getFieldObjectInspector();
FloatObjectInspector varRefOI = (FloatObjectInspector) valueRef.getFieldObjectInspector();
BufferedReader reader = null;
try {
reader = HadoopUtils.getBufferedReader(file);
String line;
while((line = reader.readLine()) != null) {
count++;
Text lineText = new Text(line);
Object lineObj = serde.deserialize(lineText);
List<Object> fields = lineOI.getStructFieldsDataAsList(lineObj);
Object f0 = fields.get(0);
Object f1 = fields.get(1);
if(f0 == null || f1 == null) {
continue; // avoid the case that key or value is null
}
Object k = keyRefOI.getPrimitiveWritableObject(keyRefOI.copyObject(f0));
float v = varRefOI.get(f1);
model.set(k, new WeightValue(v, false));
}
} finally {
IOUtils.closeQuietly(reader);
}
}
}
return count;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 18
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
FastByteArrayInputStream bis = new FastByteArrayInputStream(serializedObj, length);
InputStream wrapped = compressed ? new InflaterInputStream(bis) : bis;
final Node root;
ObjectInputStream ois = null;
try {
ois = new ObjectInputStream(wrapped);
root = new Node();
root.readExternal(ois);
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
} finally {
IOUtils.closeQuietly(ois);
}
return root;
}
#location 18
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static Node deserializeNode(final byte[] serializedObj, final int length,
final boolean compressed) throws HiveException {
final Node root = new Node();
try {
if (compressed) {
ObjectUtils.readCompressedObject(serializedObj, 0, length, root);
} else {
ObjectUtils.readObject(serializedObj, length, root);
}
} catch (IOException ioe) {
throw new HiveException("IOException cause while deserializing DecisionTree object",
ioe);
} catch (Exception e) {
throw new HiveException("Exception cause while deserializing DecisionTree object", e);
}
return root;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public BinaryData download( String resourceId )
throws ResourcesException
{
String resourcePath = getResourcePath( resourceId );
File file = new File( getRootFolder(), resourcePath );
try
{
FileInputStream input = new FileInputStream(file);
byte[] content=IOUtils.toByteArray(input);
//BinaryData data = new BinaryData( content, file.getName() );
BinaryData data = new BinaryData( );
data.setContent(input);
data.setFileName(file.getName());
data.setResourceId( resourceId );
return data;
}
catch ( Exception e )
{
throw new ResourcesException( e );
}
}
#location 17
#vulnerability type RESOURCE_LEAK
|
#fixed code
public BinaryData download( String resourceId )
throws ResourcesException
{
String resourcePath = getResourcePath( resourceId );
File file = new File( getRootFolder(), resourcePath );
try
{
FileInputStream input = new FileInputStream(file);
byte[] content=IOUtils.toByteArray(input);
//BinaryData data = new BinaryData( content, file.getName() );
BinaryData data = new BinaryData( );
data.setContent(content);
data.setFileName(file.getName());
data.setResourceId( resourceId );
return data;
}
catch ( Exception e )
{
throw new ResourcesException( e );
}
}
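The patched version still leaves the FileInputStream open; if closing it is also desired, a try-with-resources variant of the method body (a sketch reusing the identifiers above, not part of the published fix) would be:
try (FileInputStream input = new FileInputStream(file)) {
    byte[] content = IOUtils.toByteArray(input);
    BinaryData data = new BinaryData();
    data.setContent(content);
    data.setFileName(file.getName());
    data.setResourceId(resourceId);
    return data; // input is closed before the value is returned
} catch (Exception e) {
    throw new ResourcesException(e);
}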
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static void main(String[] args) throws Exception {
String fileIn = null;
String fileOut = null;
String templateEngineKind = null;
String jsonData = null;
String jsonFile = null;
IPopulateContextAware contextAware = null;
String arg = null;
for (int i = 0; i < args.length; i++) {
arg = args[i];
if ("-in".equals(arg)) {
fileIn = getValue(args, i);
} else if ("-out".equals(arg)) {
fileOut = getValue(args, i);
} else if ("-engine".equals(arg)) {
templateEngineKind = getValue(args, i);
} else if ("-jsonData".equals(arg)) {
jsonData = getValue(args, i);
contextAware = new JSONPoupluateContextAware(jsonData);
} else if ("-jsonFile".equals(arg)) {
jsonFile = getValue(args, i);
StringWriter jsonDataWriter = new StringWriter();
IOUtils.copy(new FileReader(new File(jsonFile)), jsonDataWriter);
contextAware = new JSONPoupluateContextAware(
jsonDataWriter.toString());
}
}
Tools tools = new Tools();
tools.process(new File(fileIn), new File(fileOut), templateEngineKind,
contextAware);
}
#location 25
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static void main(String[] args) throws Exception {
String fileIn = null;
String fileOut = null;
String templateEngineKind = null;
String jsonData = null;
String jsonFile = null;
String metadataFile = null;
boolean autoGenData = false;
IDataProvider dataProvider = null;
String arg = null;
for (int i = 0; i < args.length; i++) {
arg = args[i];
if ("-in".equals(arg)) {
fileIn = getValue(args, i);
} else if ("-out".equals(arg)) {
fileOut = getValue(args, i);
} else if ("-engine".equals(arg)) {
templateEngineKind = getValue(args, i);
} else if ("-jsonData".equals(arg)) {
jsonData = getValue(args, i);
Map<String, String> parameters = new HashMap<String, String>();
parameters.put("jsonData", jsonData);
dataProvider = DataProviderFactoryRegistry.getRegistry()
.create("json", parameters);
} else if ("-jsonFile".equals(arg)) {
jsonFile = getValue(args, i);
} else if ("-autoGenData".equals(arg)) {
autoGenData = StringUtils.asBoolean(getValue(args, i), false);
} else if ("-metadataFile".equals(arg)) {
metadataFile = getValue(args, i);
}
}
FieldsMetadata fieldsMetadata = null;
if (metadataFile != null) {
fieldsMetadata = FieldsMetadataXMLSerializer.getInstance().load(
new FileInputStream(metadataFile));
}
if (!StringUtils.isEmpty(jsonFile)) {
StringWriter jsonDataWriter = new StringWriter();
File f = new File(jsonFile);
if (!f.exists() && autoGenData && fieldsMetadata != null) {
// Generate JSON
FieldsMetadataJSONSerializer.getInstance().save(fieldsMetadata,
new FileOutputStream(jsonFile), true);
}
IOUtils.copy(new FileReader(f), jsonDataWriter);
Map<String, String> parameters = new HashMap<String, String>();
parameters.put("jsonData", jsonDataWriter.toString());
dataProvider = DataProviderFactoryRegistry.getRegistry().create(
"json", parameters);
}
Tools tools = new Tools();
tools.process(new File(fileIn), new File(fileOut), templateEngineKind,
fieldsMetadata, dataProvider);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testBoldWithB()
throws Exception
{
IContext context = null;
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<b>text</b>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testBoldWithB()
throws Exception
{
IContext context = new MockContext();
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<b>text</b>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void process() {
Collection<BufferedElement> toRemove = new ArrayList<BufferedElement>();
int size = arBufferedRegions.size();
String s = null;
StringBuilder fullContent = new StringBuilder();
boolean fieldFound = false;
ARBufferedRegion currentAR = null;
ARBufferedRegion lastAR = null;
for (int i = 0; i < size; i++) {
currentAR = arBufferedRegions.get(i);
s = currentAR.getTContent();
if (fieldFound) {
fieldFound = !(s == null || s.length() == 0 || Character
.isWhitespace(s.charAt(0)));
} else {
fieldFound = s != null && s.indexOf("$") != -1;
}
if (fieldFound) {
fullContent.append(s);
toRemove.add(currentAR);
} else {
if (fullContent.length() > 0) {
lastAR.setTContent(fullContent.toString());
fullContent.setLength(0);
toRemove.remove(lastAR);
}
}
lastAR = currentAR;
}
if (fullContent.length() > 0) {
lastAR.setTContent(fullContent.toString());
fullContent.setLength(0);
toRemove.remove(lastAR);
}
super.removeAll(toRemove);
}
#location 24
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public void process() {
Collection<BufferedElement> toRemove = new ArrayList<BufferedElement>();
int size = arBufferedRegions.size();
String s = null;
StringBuilder fullContent = new StringBuilder();
boolean fieldFound = false;
ARBufferedRegion currentAR = null;
ARBufferedRegion lastAR = null;
for (int i = 0; i < size; i++) {
currentAR = arBufferedRegions.get(i);
s = currentAR.getTContent();
if (fieldFound) {
fieldFound = !(s == null || s.length() == 0 || Character
.isWhitespace(s.charAt(0)));
} else {
fieldFound = s != null && s.indexOf("$") != -1;
}
if (fieldFound) {
fullContent.append(s);
toRemove.add(currentAR);
} else {
update(toRemove, fullContent, lastAR);
}
lastAR = currentAR;
}
update(toRemove, fullContent, lastAR);
super.removeAll(toRemove);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static void processUpload( ResourcesService client, String resourceId, File out )
throws ResourcesException, IOException
{
FileInputStream input= new FileInputStream(out);
byte[] content=IOUtils.toByteArray(input);
// BinaryData data = new BinaryData( content, out.getName() );
BinaryData data = new BinaryData( );
data.setContent(input);
data.setFileName(out.getName());
data.setResourceId( resourceId );
client.upload( data );
}
#location 13
#vulnerability type RESOURCE_LEAK
|
#fixed code
private static void processUpload( ResourcesService client, String resourceId, File out )
throws ResourcesException, IOException
{
FileInputStream input= new FileInputStream(out);
byte[] content=IOUtils.toByteArray(input);
// BinaryData data = new BinaryData( content, out.getName() );
BinaryData data = new BinaryData( );
data.setContent(content);
data.setFileName(out.getName());
data.setResourceId( resourceId );
client.upload( data );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void processReportWithOptions()
throws IOException
{
WebClient client = WebClient.create( BASE_ADDRESS );
client.path( "processReport" );
client.accept( MediaType.APPLICATION_XML );
ReportAndDataRepresentation report = new ReportAndDataRepresentation();
InputStream in = RESTXDocReportServiceTest.class.getClassLoader().getResourceAsStream( "bo.docx" );
report.setReportID( "reportID1" );
report.setDocument( fr.opensagres.xdocreport.core.io.IOUtils.toByteArray( in ) );
report.setTemplateEngine( "Velocity" );
report.getFieldsMetaData().add( "test" );
report.setTemplateEngine( "Velocity" );
report.setDataContext( new ArrayList<DataContext>() );
WSOptions options = new WSOptions();
options.setFrom( DocumentKind.DOCX.name() );
options.setTo( ConverterTypeTo.PDF.name() );
options.setVia( ConverterTypeVia.ITEXT.name() );
report.setOptions( options );
//client.post( report);
byte[] flux= client.post( report,byte[].class );
assertNotNull(flux);
File aFile= new File( "result.pdf");
FileOutputStream fos= new FileOutputStream( aFile );
fos.write( flux );
fos.close();
}
#location 35
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Test
public void processReportWithOptions()
throws IOException
{
WebClient client = WebClient.create( BASE_ADDRESS );
client.path( "processReport" );
client.accept( MediaType.APPLICATION_XML );
ReportAndDataRepresentation report = new ReportAndDataRepresentation();
InputStream in = RESTXDocReportServiceTest.class.getClassLoader().getResourceAsStream( "bo.docx" );
report.setReportID( "reportID1" );
report.setDocument( fr.opensagres.xdocreport.core.io.IOUtils.toByteArray( in ) );
report.setTemplateEngine( "Velocity" );
report.getFieldsMetaData().add( "test" );
report.setTemplateEngine( "Velocity" );
report.setDataContext( new ArrayList<DataContext>() );
WSOptions options = new WSOptions();
options.setFrom( DocumentKind.DOCX.name() );
options.setTo( ConverterTypeTo.PDF.name() );
options.setVia( ConverterTypeVia.ITEXT.name() );
report.setOptions( options );
//client.post( report);
byte[] flux= client.post( report,byte[].class );
assertNotNull(flux);
//
// File aFile= new File( "target/result.pdf");
// FileOutputStream fos= new FileOutputStream( aFile );
// fos.write( flux );
// fos.close();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private static void processUpload( ResourcesService client, String resources, String out )
throws IOException, ResourcesException
{
if ( StringUtils.isEmpty( resources ) )
{
throw new IOException( "resources must be not empty" );
}
if ( resources.indexOf( ";" ) == -1 )
{
processUpload( client, resources, IOUtils.toByteArray( new FileInputStream( new File( out ) ) ) );
}
else
{
// TODO : manage list of uppload
}
// String[] resources= s.split( ";" );
// String[] outs= out.split( ";" );
}
#location 10
#vulnerability type RESOURCE_LEAK
|
#fixed code
private static void processUpload( ResourcesService client, String resources, String out )
throws IOException, ResourcesException
{
if ( StringUtils.isEmpty( resources ) )
{
throw new IOException( "resources must be not empty" );
}
if ( resources.indexOf( ";" ) == -1 )
{
processUpload( client, resources, new File( out ) );
}
else
{
// TODO : manage list of uppload
}
// String[] resources= s.split( ";" );
// String[] outs= out.split( ";" );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void applyStyles( Style style )
{
this.lastStyleApplied = style;
// width
StyleTableProperties tableProperties = style.getTableProperties();
if ( tableProperties != null )
{
if ( tableProperties.getWidth() != null )
{
super.setTotalWidth( tableProperties.getWidth() );
}
}
// alignment
int alignment = tableProperties.getAlignment();
if ( alignment != Element.ALIGN_UNDEFINED )
{
super.setHorizontalAlignment( alignment );
}
// margins
Float margin = tableProperties.getMargin();
if ( margin != null && margin > 0.0f )
{
super.setPadding( margin );
}
Float marginLeft = tableProperties.getMarginLeft();
if ( marginLeft != null && marginLeft > 0.0f )
{
super.setPaddingLeft( marginLeft );
}
Float marginRight = tableProperties.getMarginRight();
if ( marginRight != null && marginRight > 0.0f )
{
super.setPaddingRight( marginRight );
}
Float marginTop = tableProperties.getMarginTop();
if ( marginTop != null && marginTop > 0.0f )
{
super.setPaddingTop( marginTop );
}
Float marginBottom = tableProperties.getMarginBottom();
if ( marginBottom != null && marginBottom > 0.0f )
{
super.setPaddingBottom( marginBottom );
}
}
#location 16
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public void applyStyles( Style style )
{
this.lastStyleApplied = style;
StyleTableProperties tableProperties = style.getTableProperties();
if ( tableProperties != null )
{
// width
if ( tableProperties.getWidth() != null )
{
super.setTotalWidth( tableProperties.getWidth() );
}
// alignment
int alignment = tableProperties.getAlignment();
if ( alignment != Element.ALIGN_UNDEFINED )
{
super.setHorizontalAlignment( alignment );
}
// margins
Float margin = tableProperties.getMargin();
if ( margin != null && margin > 0.0f )
{
super.setPadding( margin );
}
Float marginLeft = tableProperties.getMarginLeft();
if ( marginLeft != null && marginLeft > 0.0f )
{
super.setPaddingLeft( marginLeft );
}
Float marginRight = tableProperties.getMarginRight();
if ( marginRight != null && marginRight > 0.0f )
{
super.setPaddingRight( marginRight );
}
Float marginTop = tableProperties.getMarginTop();
if ( marginTop != null && marginTop > 0.0f )
{
super.setPaddingTop( marginTop );
}
Float marginBottom = tableProperties.getMarginBottom();
if ( marginBottom != null && marginBottom > 0.0f )
{
super.setPaddingBottom( marginBottom );
}
// table splitting
Boolean mayBreakBetweenRows = tableProperties.getMayBreakBetweenRows();
if ( mayBreakBetweenRows != null )
{
super.setKeepTogether( !mayBreakBetweenRows );
}
}
StyleTableRowProperties tableRowProperties = style.getTableRowProperties();
if ( tableRowProperties != null )
{
// keep together
Boolean keepTogether = tableRowProperties.getKeepTogether();
if ( keepTogether != null )
{
// keep together is table row property in open office
// but it is table property in iText
// so we set keep together = true if any of table rows has this property set to true
if ( super.isSplitLate() == false && keepTogether == true )
{
super.setSplitLate( true );
}
}
}
}
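The defect shape here recurs in several later entries: an object is null-checked before one dereference but dereferenced unconditionally for the others. A tiny stand-alone illustration of why the vulnerable ordering throws, using placeholder types rather than the project's real classes:

  class TableProps { int alignment() { return 0; } }
  class DemoStyle { TableProps getTableProperties() { return null; } } // style without table properties
  public class GuardDemo {
      public static void main(String[] args) {
          TableProps p = new DemoStyle().getTableProperties();
          if (p != null) {
              System.out.println(p.alignment()); // safe: inside the guard
          }
          // Calling p.alignment() outside the guard would throw NullPointerException,
          // which is the unguarded path the patch above removes.
      }
  }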
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void loadPNGWithoutUsingImageSizeAndForceWidth()
throws Exception
{
IImageProvider imageProvider =
new FileImageProvider( new File( "src/test/resources/fr/opensagres/xdocreport/document/images/logo.png" ) );
imageProvider.setWidth( 100f );
Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() );
Assert.assertNotNull( imageProvider.getWidth(0) );
Assert.assertEquals( 100f, imageProvider.getWidth(0).floatValue(), 0 );
Assert.assertNull( imageProvider.getHeight(0) );
}
#location 10
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void loadPNGWithoutUsingImageSizeAndForceWidth()
throws Exception
{
IImageProvider imageProvider =
new FileImageProvider( new File( "src/test/resources/fr/opensagres/xdocreport/document/images/logo.png" ) );
imageProvider.setWidth( 100f );
Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() );
Assert.assertNotNull( imageProvider.getWidth(null) );
Assert.assertEquals( 100f, imageProvider.getWidth(null).floatValue(), 0 );
Assert.assertNull( imageProvider.getHeight(null) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testItalicWithI()
throws Exception
{
IContext context = null;
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<i>text</i>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testItalicWithI()
throws Exception
{
IContext context = new MockContext();
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<i>text</i>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void loadPNGWithoutUsingImageSizeAndForceWidth()
throws Exception
{
IImageProvider imageProvider =
new ByteArrayImageProvider( ByteArrayImageProviderTestCase.class.getResourceAsStream( "logo.png" ) );
imageProvider.setWidth( 100f );
Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() );
Assert.assertNotNull( imageProvider.getWidth(0) );
Assert.assertEquals( 100f, imageProvider.getWidth(0).floatValue(), 0 );
Assert.assertNull( imageProvider.getHeight(0) );
}
#location 10
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void loadPNGWithoutUsingImageSizeAndForceWidth()
throws Exception
{
IImageProvider imageProvider =
new ByteArrayImageProvider( ByteArrayImageProviderTestCase.class.getResourceAsStream( "logo.png" ) );
imageProvider.setWidth( 100f );
Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() );
Assert.assertNotNull( imageProvider.getWidth(null) );
Assert.assertEquals( 100f, imageProvider.getWidth(null).floatValue(), 0 );
Assert.assertNull( imageProvider.getHeight(null) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private void visitHeadersFooters( IXWPFMasterPage masterPage, CTSectPr sectPr )
throws Exception
{
// see titlePg at http://officeopenxml.com/WPsection.php i
// Specifies whether the section should have a different header and
// footer
// for its first page.
// If the element is set to true (e.g., <w:titlePg/>),
// then the section will use a first page header;
// if it is false (e.g., <w:titlePg w:val="false"/>)
// (the default value), then the first page uses the odd page header. If
// the element is set to true but the
// first page header type is omitted, then a blank header is created.
boolean ignoreFirstHeaderFooter = !XWPFUtils.isCTOnOff( sectPr.getTitlePg() );
Collection<CTHdrFtrRef> headersRef = sectPr.getHeaderReferenceList();
Collection<CTHdrFtrRef> footersRef = sectPr.getFooterReferenceList();
boolean firstHeaderFooter = false;
for ( CTHdrFtrRef headerRef : headersRef )
{
STHdrFtr type = headerRef.xgetType();
firstHeaderFooter = ( type != null && type.enumValue() == STHdrFtr.FIRST );
if ( !firstHeaderFooter || ( firstHeaderFooter && !ignoreFirstHeaderFooter ) )
{
masterPage.setType( type.enumValue().intValue() );
documentHandler.visitHeaderRef( headerRef, sectPr, masterPage );
}
}
for ( CTHdrFtrRef footerRef : footersRef )
{
STHdrFtr type = footerRef.xgetType();
firstHeaderFooter = ( type != null && type.enumValue() == STHdrFtr.FIRST );
if ( !firstHeaderFooter || ( firstHeaderFooter && !ignoreFirstHeaderFooter ) )
{
masterPage.setType( type.enumValue().intValue() );
documentHandler.visitFooterRef( footerRef, sectPr, masterPage );
}
}
masterPage.setType( STHdrFtr.INT_FIRST );
}
#location 26
#vulnerability type NULL_DEREFERENCE
|
#fixed code
private void visitHeadersFooters( IXWPFMasterPage masterPage, CTSectPr sectPr )
throws Exception
{
// see titlePg at http://officeopenxml.com/WPsection.php i
// Specifies whether the section should have a different header and
// footer
// for its first page.
// If the element is set to true (e.g., <w:titlePg/>),
// then the section will use a first page header;
// if it is false (e.g., <w:titlePg w:val="false"/>)
// (the default value), then the first page uses the odd page header. If
// the element is set to true but the
// first page header type is omitted, then a blank header is created.
boolean titlePage = XWPFUtils.isCTOnOff( sectPr.getTitlePg() );
Map<Integer, Object> previousHeaders = headers;
Map<Integer, Object> previousFooters = footers;
headers = new HashMap<Integer, Object>();
footers = new HashMap<Integer, Object>();
for ( CTHdrFtrRef reference : sectPr.getHeaderReferenceList() )
{
STHdrFtr type = reference.xgetType();
int typeValue = type == null ? STHdrFtr.INT_DEFAULT : type.enumValue().intValue();
if ( typeValue != STHdrFtr.INT_FIRST || titlePage )
{
masterPage.setType( typeValue );
documentHandler.visitHeaderRef( reference, sectPr, masterPage );
masterPage.setType( typeValue );
headers.put( typeValue, masterPage.getHeader() );
}
}
for ( CTHdrFtrRef reference : sectPr.getFooterReferenceList() )
{
STHdrFtr type = reference.xgetType();
int typeValue = type == null ? STHdrFtr.INT_DEFAULT : type.enumValue().intValue();
if ( typeValue != STHdrFtr.INT_FIRST || titlePage )
{
masterPage.setType( typeValue );
documentHandler.visitFooterRef( reference, sectPr, masterPage );
masterPage.setType( typeValue );
footers.put( typeValue, masterPage.getFooter() );
}
}
if ( titlePage )
{
inheritHeader( masterPage, STHdrFtr.INT_FIRST, previousHeaders );
inheritFooter( masterPage, STHdrFtr.INT_FIRST, previousFooters );
}
if ( evenAndOddHeaders )
{
inheritHeader( masterPage, STHdrFtr.INT_EVEN, previousHeaders );
inheritFooter( masterPage, STHdrFtr.INT_EVEN, previousFooters );
}
inheritHeader( masterPage, STHdrFtr.INT_DEFAULT, previousHeaders );
inheritFooter( masterPage, STHdrFtr.INT_DEFAULT, previousFooters );
masterPage.setType( STHdrFtr.INT_FIRST );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void visit( StyleColumnElement ele )
{
StyleSectionProperties sectionProperties = currentStyle.getSectionProperties();
if ( sectionProperties == null )
{
// style:column outside style:section-properties, ignore it
return;
}
StyleColumnsProperties columnsProperties = sectionProperties.getColumnsProperties();
if ( columnsProperties == null )
{
// style:column outside style:columns, ignore it
}
StyleColumnProperties columnProperties = new StyleColumnProperties();
// rel-width
String relWidth = ele.getStyleRelWidthAttribute();
if ( StringUtils.isNotEmpty( relWidth ) )
{
columnProperties.setRelWidth( ODFUtils.getRelativeSize( relWidth ) );
}
columnsProperties.getColumnProperties().add( columnProperties );
super.visit( ele );
}
#location 26
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
public void visit( StyleColumnElement ele )
{
List<StyleColumnProperties> styleColumnPropertiesList = currentStyle.getColumnPropertiesList();
if ( styleColumnPropertiesList == null )
{
styleColumnPropertiesList = new ArrayList<StyleColumnProperties>();
currentStyle.setColumnPropertiesList( styleColumnPropertiesList );
}
StyleColumnProperties columnProperties = new StyleColumnProperties();
// rel-width
String relWidth = ele.getStyleRelWidthAttribute();
if ( StringUtils.isNotEmpty( relWidth ) )
{
columnProperties.setRelWidth( ODFUtils.getRelativeSize( relWidth ) );
}
styleColumnPropertiesList.add( columnProperties );
super.visit( ele );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testBoldWithStrong()
throws Exception
{
IContext context = null;
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<strong>text</strong>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testBoldWithStrong()
throws Exception
{
IContext context = new MockContext();
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<strong>text</strong>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:b /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void loadPNGWithoutUsingImageSizeAndForceWidth()
throws Exception
{
IImageProvider imageProvider = new ClassPathImageProvider( ClassPathImageProviderTestCase.class, "logo.png" );
imageProvider.setWidth( 100f );
Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() );
Assert.assertNotNull( imageProvider.getWidth(0) );
Assert.assertEquals( 100f, imageProvider.getWidth(0).floatValue(), 0 );
Assert.assertNull( imageProvider.getHeight(0) );
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void loadPNGWithoutUsingImageSizeAndForceWidth()
throws Exception
{
IImageProvider imageProvider = new ClassPathImageProvider( ClassPathImageProviderTestCase.class, "logo.png" );
imageProvider.setWidth( 100f );
Assert.assertEquals( ImageFormat.png, imageProvider.getImageFormat() );
Assert.assertNotNull( imageProvider.getWidth(null) );
Assert.assertEquals( 100f, imageProvider.getWidth(null).floatValue(), 0 );
Assert.assertNull( imageProvider.getHeight(null) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testItalicWithEm()
throws Exception
{
IContext context = null;
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<em>text</em>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testItalicWithEm()
throws Exception
{
IContext context = new MockContext();
BufferedElement parent = null;
ITextStylingTransformer formatter = HTMLTextStylingTransformer.INSTANCE;
IDocumentHandler handler = new DocxDocumentHandler( parent, context, "word/document.xml" );
formatter.transform( "<em>text</em>", handler );
Assert.assertEquals( "<w:r><w:rPr><w:i /></w:rPr><w:t xml:space=\"preserve\" >text</w:t></w:r>",
handler.getTextBody() );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void process() {
Collection<BufferedElement> toRemove = new ArrayList<BufferedElement>();
int size = arBufferedRegions.size();
String s = null;
StringBuilder fullContent = new StringBuilder();
boolean fieldFound = false;
ARBufferedRegion currentAR = null;
ARBufferedRegion lastAR = null;
for (int i = 0; i < size; i++) {
currentAR = arBufferedRegions.get(i);
s = currentAR.getTContent();
if (fieldFound) {
fieldFound = !(s == null || s.length() == 0 || Character
.isWhitespace(s.charAt(0)));
} else {
fieldFound = s != null && s.indexOf("$") != -1;
}
if (fieldFound) {
fullContent.append(s);
toRemove.add(currentAR);
} else {
if (fullContent.length() > 0) {
lastAR.setTContent(fullContent.toString());
fullContent.setLength(0);
toRemove.remove(lastAR);
}
}
lastAR = currentAR;
}
if (fullContent.length() > 0) {
lastAR.setTContent(fullContent.toString());
fullContent.setLength(0);
toRemove.remove(lastAR);
}
super.removeAll(toRemove);
}
#location 24
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public void process() {
Collection<BufferedElement> toRemove = new ArrayList<BufferedElement>();
int size = arBufferedRegions.size();
String s = null;
StringBuilder fullContent = new StringBuilder();
boolean fieldFound = false;
ARBufferedRegion currentAR = null;
ARBufferedRegion lastAR = null;
for (int i = 0; i < size; i++) {
currentAR = arBufferedRegions.get(i);
s = currentAR.getTContent();
if (fieldFound) {
fieldFound = !(s == null || s.length() == 0 || Character
.isWhitespace(s.charAt(0)));
} else {
fieldFound = s != null && s.indexOf("$") != -1;
}
if (fieldFound) {
fullContent.append(s);
toRemove.add(currentAR);
} else {
update(toRemove, fullContent, lastAR);
}
lastAR = currentAR;
}
update(toRemove, fullContent, lastAR);
super.removeAll(toRemove);
}
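The patch factors the duplicated flush block into an update helper whose body is not shown in this entry. Based on the code it replaces in the vulnerable version, a plausible sketch of that helper (an assumption, not the verified commit) is:

  private void update(Collection<BufferedElement> toRemove, StringBuilder fullContent, ARBufferedRegion lastAR) {
      if (fullContent.length() > 0) {
          // content is only accumulated after at least one region has been visited,
          // so lastAR has been assigned whenever this branch is taken
          lastAR.setTContent(fullContent.toString());
          fullContent.setLength(0);
          toRemove.remove(lastAR);
      }
  }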
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static void main(final String[] args) throws Exception {
// Right now we accept one parameter, the number of nodes in the cluster.
final int clusterSize;
if (args.length > 0) {
clusterSize = Integer.parseInt(args[0]);
} else {
clusterSize = 1;
}
logger.info("Starting up kafka cluster with {} brokers", clusterSize);
// Create a test cluster
final KafkaTestCluster kafkaTestCluster = new KafkaTestCluster(clusterSize);
// Start the cluster.
kafkaTestCluster.start();
kafkaTestCluster
.getKafkaBrokers()
.stream()
.forEach((broker) -> logger.info("Started broker with Id {} at {}", broker.getBrokerId(), broker.getConnectString()));
logger.info("Cluster started at: {}", kafkaTestCluster.getKafkaConnectString());
// Wait forever.
Thread.currentThread().join();
}
#location 23
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static void main(final String[] args) throws Exception {
// Right now we accept one parameter, the number of nodes in the cluster.
final int clusterSize;
if (args.length > 0) {
clusterSize = Integer.parseInt(args[0]);
} else {
clusterSize = 1;
}
logger.info("Starting up kafka cluster with {} brokers", clusterSize);
// Create a test cluster
final KafkaTestCluster kafkaTestCluster = new KafkaTestCluster(clusterSize);
// Start the cluster.
kafkaTestCluster.start();
// Create a topic
final String topicName = "TestTopicA";
final KafkaTestUtils utils = new KafkaTestUtils(kafkaTestCluster);
utils.createTopic(topicName, clusterSize, (short) clusterSize);
// Publish some data into that topic
for (int partition = 0; partition < clusterSize; partition++) {
utils.produceRecords(1000, topicName, partition);
}
kafkaTestCluster
.getKafkaBrokers()
.stream()
.forEach((broker) -> logger.info("Started broker with Id {} at {}", broker.getBrokerId(), broker.getConnectString()));
logger.info("Cluster started at: {}", kafkaTestCluster.getKafkaConnectString());
// Wait forever.
Thread.currentThread().join();
}
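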
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public Class<? extends Deserializer> getDeserializerClass(final String jarName, final String classpath) throws LoaderException {
try {
final String absolutePath = getPathForJar(jarName).toString();
final URL jarUrl = new URL("file://" + absolutePath);
final ClassLoader pluginClassLoader = new PluginClassLoader(jarUrl);
final Class loadedClass = pluginClassLoader.loadClass(classpath);
if (!Deserializer.class.isAssignableFrom(loadedClass)) {
throw new WrongImplementationException("Class does not implement " + Deserializer.class.getName(), null);
}
return loadedClass;
} catch (MalformedURLException exception) {
throw new LoaderException("Unable to load jar " + jarName, exception);
} catch (ClassNotFoundException exception) {
throw new UnableToFindClassException("Unable to find class " + classpath + " in jar " + jarName, exception);
}
}
#location 6
#vulnerability type RESOURCE_LEAK
|
#fixed code
public Class<? extends Deserializer> getDeserializerClass(final String jarName, final String classpath) throws LoaderException {
try {
final String absolutePath = getPathForJar(jarName).toString();
final URL jarUrl = new URL("file://" + absolutePath);
final ClassLoader pluginClassLoader = new PluginClassLoader(jarUrl);
return getDeserializerClass(pluginClassLoader, classpath);
} catch (MalformedURLException exception) {
throw new LoaderException("Unable to load jar " + jarName, exception);
}
}
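The patched method delegates to a getDeserializerClass(ClassLoader, String) overload that is not included in this entry. Judging from the checks removed from the vulnerable version, that overload presumably looks roughly like the following sketch (an assumption, not the verified source):

  private Class<? extends Deserializer> getDeserializerClass(final ClassLoader classLoader, final String classpath)
      throws LoaderException {
      try {
          final Class<?> loadedClass = classLoader.loadClass(classpath);
          if (!Deserializer.class.isAssignableFrom(loadedClass)) {
              throw new WrongImplementationException("Class does not implement " + Deserializer.class.getName(), null);
          }
          // the unchecked cast mirrors the raw return in the original method
          return (Class<? extends Deserializer>) loadedClass;
      } catch (ClassNotFoundException exception) {
          throw new UnableToFindClassException("Unable to find class " + classpath, exception);
      }
  }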
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@RequestMapping(path = "/create", method = RequestMethod.GET)
public String createViewForm(final ViewForm viewForm, final Model model) {
// Setup breadcrumbs
if (!model.containsAttribute("BreadCrumbs")) {
setupBreadCrumbs(model, "Create", null);
}
// Retrieve all clusters
model.addAttribute("clusters", clusterRepository.findAllByOrderByNameAsc());
// Retrieve all message formats
model.addAttribute("defaultMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(true));
model.addAttribute("customMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(false));
// If we have a cluster Id
model.addAttribute("topics", new ArrayList<>());
model.addAttribute("partitions", new ArrayList<>());
// Retrieve all filters
model.addAttribute("filterList", filterRepository.findAllByOrderByNameAsc());
model.addAttribute("filterParameters", new HashMap<Long, Map<String, String>>());
if (viewForm.getClusterId() != null) {
// Lets load the topics now
// Retrieve cluster
final Cluster cluster = clusterRepository.findOne(viewForm.getClusterId());
if (cluster != null) {
try (final KafkaOperations operations = kafkaOperationsFactory.create(cluster, getLoggedInUserId())) {
final TopicList topics = operations.getAvailableTopics();
model.addAttribute("topics", topics.getTopics());
// If we have a selected topic
if (viewForm.getTopic() != null && !"!".equals(viewForm.getTopic())) {
final TopicDetails topicDetails = operations.getTopicDetails(viewForm.getTopic());
model.addAttribute("partitions", topicDetails.getPartitions());
}
}
}
}
return "configuration/view/create";
}
#location 35
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@RequestMapping(path = "/create", method = RequestMethod.GET)
public String createViewForm(final ViewForm viewForm, final Model model) {
// Setup breadcrumbs
if (!model.containsAttribute("BreadCrumbs")) {
setupBreadCrumbs(model, "Create", null);
}
// Retrieve all clusters
model.addAttribute("clusters", clusterRepository.findAllByOrderByNameAsc());
// Retrieve all message formats
model.addAttribute("defaultMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(true));
model.addAttribute("customMessageFormats", messageFormatRepository.findByIsDefaultFormatOrderByNameAsc(false));
// If we have a cluster Id
model.addAttribute("topics", new ArrayList<>());
model.addAttribute("partitions", new ArrayList<>());
// Retrieve all filters
model.addAttribute("filterList", filterRepository.findAllByOrderByNameAsc());
model.addAttribute("filterParameters", new HashMap<Long, Map<String, String>>());
if (viewForm.getClusterId() != null) {
// Lets load the topics now
// Retrieve cluster
clusterRepository.findById(viewForm.getClusterId()).ifPresent((cluster) -> {
try (final KafkaOperations operations = kafkaOperationsFactory.create(cluster, getLoggedInUserId())) {
final TopicList topics = operations.getAvailableTopics();
model.addAttribute("topics", topics.getTopics());
// If we have a selected topic
if (viewForm.getTopic() != null && !"!".equals(viewForm.getTopic())) {
final TopicDetails topicDetails = operations.getTopicDetails(viewForm.getTopic());
model.addAttribute("partitions", topicDetails.getPartitions());
}
}
});
}
return "configuration/view/create";
}
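The functional change is the move from the nullable findOne(id) to the Optional-returning findById(id), apparently the Spring Data 2.x repository API, which forces an explicit presence check. The lambda above is, apart from the partition handling, equivalent to the plainer form sketched here, reusing only calls already present in this method:

  Optional<Cluster> maybeCluster = clusterRepository.findById(viewForm.getClusterId());
  if (maybeCluster.isPresent()) {
      final Cluster cluster = maybeCluster.get();
      try (final KafkaOperations operations = kafkaOperationsFactory.create(cluster, getLoggedInUserId())) {
          model.addAttribute("topics", operations.getAvailableTopics().getTopics());
          // (partition handling elided for brevity)
      }
  }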
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException {
PDPageContentStream articleTitle = new PDPageContentStream(this.document, this.currentPage, true, true);
articleTitle.beginText();
articleTitle.setFont(font, fontSize);
articleTitle.moveTextPositionByAmount(getMargin(), yStart);
articleTitle.setNonStrokingColor(Color.black);
articleTitle.drawString(title);
articleTitle.endText();
if (textType != null) {
switch (textType) {
case HIGHLIGHT:
throw new NotImplementedException();
case SQUIGGLY:
throw new NotImplementedException();
case STRIKEOUT:
throw new NotImplementedException();
case UNDERLINE:
float y = (float) (yStart - 1.5);
float titleWidth = font.getStringWidth(title) / 1000 * fontSize;
articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y);
break;
default:
break;
}
}
articleTitle.close();
yStart = (float) (yStart - (fontSize / 1.5));
}
#location 14
#vulnerability type RESOURCE_LEAK
|
#fixed code
public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException {
PDPageContentStream articleTitle = createPdPageContentStream();
articleTitle.beginText();
articleTitle.setFont(font, fontSize);
articleTitle.moveTextPositionByAmount(getMargin(), yStart);
articleTitle.setNonStrokingColor(Color.black);
articleTitle.drawString(title);
articleTitle.endText();
if (textType != null) {
switch (textType) {
case HIGHLIGHT:
throw new NotImplementedException();
case SQUIGGLY:
throw new NotImplementedException();
case STRIKEOUT:
throw new NotImplementedException();
case UNDERLINE:
float y = (float) (yStart - 1.5);
float titleWidth = font.getStringWidth(title) / 1000 * fontSize;
articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y);
break;
default:
break;
}
}
articleTitle.close();
yStart = (float) (yStart - (fontSize / 1.5));
}
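The only functional change in this patch (and in the two identical drawTitle entries further down) is that stream creation moves behind a createPdPageContentStream() helper whose body is not shown. Presumably it just centralises the constructor call from the vulnerable version, roughly:

  // Sketch of the assumed helper; it appends to the current page's content stream.
  private PDPageContentStream createPdPageContentStream() throws IOException {
      return new PDPageContentStream(this.document, this.currentPage, true, true);
  }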
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void SampleTest2() throws IOException, COSVisitorException {
//Set margins
float margin = 10;
List<String[]> facts = getFacts();
//A list of bookmarks of all the tables
List<PDOutlineItem> bookmarks = new ArrayList<PDOutlineItem>();
//Initialize Document
PDDocument doc = new PDDocument();
PDPage page = addNewPage(doc);
//Initialize table
float tableWidth = page.findMediaBox().getWidth()-(2*margin);
float top = page.findMediaBox().getHeight() - (2 * margin);
boolean drawContent = true;
boolean drawLines = true;
Table table = new Table(top,tableWidth, margin, doc, page, drawLines, drawContent);
//Create Header row
Row headerRow = table.createRow(15f);
Cell cell = headerRow.createCell(100,"Awesome Facts About Belgium");
cell.setFont(PDType1Font.HELVETICA_BOLD);
cell.setFillColor(Color.BLACK);cell.setTextColor(Color.WHITE);
table.setHeader(headerRow);
//Create 2 column row
Row row = table.createRow(15f);
cell = row.createCell(75,"Source:");
cell.setFont(PDType1Font.HELVETICA);
cell = row.createCell(25,"http://www.factsofbelgium.com/");
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
//Create Fact header row
Row factHeaderrow = table.createRow(15f);
cell = factHeaderrow.createCell((100/3) * 2 ,"Fact");
cell.setFont(PDType1Font.HELVETICA);
cell.setFontSize(6);
cell.setFillColor(Color.LIGHT_GRAY);
cell = factHeaderrow.createCell((100/3),"Tags");
cell.setFillColor(Color.LIGHT_GRAY);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6);
//Add multiple rows with random facts about Belgium
int bookmarkid = 0;
for(String[] fact : facts) {
row = table.createRow(10f);
cell = row.createCell((100/3)*2 ,fact[0]+ " " + fact[0]+ " " + fact[0]);
cell.setFont(PDType1Font.HELVETICA);cell.setFontSize(6);
//Create a bookmark for each record
PDOutlineItem outlineItem = new PDOutlineItem();
outlineItem.setTitle((++bookmarkid ) + ") " + fact[0]);
row.setBookmark( outlineItem);
for(int i = 1; i< fact.length; i++) {
cell = row.createCell((100/9) ,fact[i]);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6);
//Set colors
if(fact[i].contains("beer"))cell.setFillColor(Color.yellow);
if(fact[i].contains("champion"))cell.setTextColor(Color.GREEN);
}
}
table.draw();
//Get all bookmarks of previous table
bookmarks.addAll(table.getBookmarks());
//Create document outline
PDDocumentOutline outline = new PDDocumentOutline();
for(PDOutlineItem bm : bookmarks) {
outline.appendChild(bm);
}
doc.getDocumentCatalog().setDocumentOutline(outline);
//Save the document
File file = new File("target/BoxableSample2.pdf");
Files.createParentDirs(file);
doc.save(file);
doc.close();
}
#location 72
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Test
public void SampleTest2() throws IOException, COSVisitorException {
//Set margins
float margin = 10;
List<String[]> facts = getFacts();
//A list of bookmarks of all the tables
List<PDOutlineItem> bookmarks = new ArrayList<PDOutlineItem>();
//Initialize Document
PDDocument doc = new PDDocument();
PDPage page = addNewPage(doc);
//Initialize table
float tableWidth = page.findMediaBox().getWidth()-(2*margin);
float yStartNewPage = page.findMediaBox().getHeight() - (2 * margin);
boolean drawContent = true;
boolean drawLines = true;
float yStart = yStartNewPage;
float bottomMargin = 70;
BaseTable table = new BaseTable(yStart,yStartNewPage,bottomMargin,tableWidth, margin, doc, page, drawLines, drawContent);
//Create Header row
Row headerRow = table.createRow(15f);
Cell cell = headerRow.createCell(100,"Awesome Facts About Belgium");
cell.setFont(PDType1Font.HELVETICA_BOLD);
cell.setFillColor(Color.BLACK);cell.setTextColor(Color.WHITE);
table.setHeader(headerRow);
//Create 2 column row
Row row = table.createRow(15f);
cell = row.createCell(75,"Source:");
cell.setFont(PDType1Font.HELVETICA);
cell = row.createCell(25,"http://www.factsofbelgium.com/");
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
//Create Fact header row
Row factHeaderrow = table.createRow(15f);
cell = factHeaderrow.createCell((100/3) * 2 ,"Fact");
cell.setFont(PDType1Font.HELVETICA);
cell.setFontSize(6);
cell.setFillColor(Color.LIGHT_GRAY);
cell = factHeaderrow.createCell((100/3),"Tags");
cell.setFillColor(Color.LIGHT_GRAY);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6);
//Add multiple rows with random facts about Belgium
int bookmarkid = 0;
for(String[] fact : facts) {
row = table.createRow(10f);
cell = row.createCell((100/3)*2 ,fact[0]+ " " + fact[0]+ " " + fact[0]);
cell.setFont(PDType1Font.HELVETICA);cell.setFontSize(6);
//Create a bookmark for each record
PDOutlineItem outlineItem = new PDOutlineItem();
outlineItem.setTitle((++bookmarkid ) + ") " + fact[0]);
row.setBookmark( outlineItem);
for(int i = 1; i< fact.length; i++) {
cell = row.createCell((100/9) ,fact[i]);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);cell.setFontSize(6);
//Set colors
if(fact[i].contains("beer"))cell.setFillColor(Color.yellow);
if(fact[i].contains("champion"))cell.setTextColor(Color.GREEN);
}
}
table.draw();
//Get all bookmarks of previous table
bookmarks.addAll(table.getBookmarks());
//Create document outline
PDDocumentOutline outline = new PDDocumentOutline();
for(PDOutlineItem bm : bookmarks) {
outline.appendChild(bm);
}
doc.getDocumentCatalog().setDocumentOutline(outline);
//Save the document
File file = new File("target/BoxableSample2.pdf");
Files.createParentDirs(file);
doc.save(file);
doc.close();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException {
PDPageContentStream articleTitle = new PDPageContentStream(this.document, this.currentPage, true, true);
articleTitle.beginText();
articleTitle.setFont(font, fontSize);
articleTitle.moveTextPositionByAmount(getMargin(), yStart);
articleTitle.setNonStrokingColor(Color.black);
articleTitle.drawString(title);
articleTitle.endText();
if (textType != null) {
switch (textType) {
case HIGHLIGHT:
throw new NotImplementedException();
case SQUIGGLY:
throw new NotImplementedException();
case STRIKEOUT:
throw new NotImplementedException();
case UNDERLINE:
float y = (float) (yStart - 1.5);
float titleWidth = font.getStringWidth(title) / 1000 * fontSize;
articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y);
break;
default:
break;
}
}
articleTitle.close();
yStart = (float) (yStart - (fontSize / 1.5));
}
#location 14
#vulnerability type RESOURCE_LEAK
|
#fixed code
public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException {
PDPageContentStream articleTitle = createPdPageContentStream();
articleTitle.beginText();
articleTitle.setFont(font, fontSize);
articleTitle.moveTextPositionByAmount(getMargin(), yStart);
articleTitle.setNonStrokingColor(Color.black);
articleTitle.drawString(title);
articleTitle.endText();
if (textType != null) {
switch (textType) {
case HIGHLIGHT:
throw new NotImplementedException();
case SQUIGGLY:
throw new NotImplementedException();
case STRIKEOUT:
throw new NotImplementedException();
case UNDERLINE:
float y = (float) (yStart - 1.5);
float titleWidth = font.getStringWidth(title) / 1000 * fontSize;
articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y);
break;
default:
break;
}
}
articleTitle.close();
yStart = (float) (yStart - (fontSize / 1.5));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void Sample1 () throws IOException, COSVisitorException {
//Set margins
float margin = 10;
List<String[]> facts = getFacts();
//Initialize Document
PDDocument doc = new PDDocument();
PDPage page = addNewPage(doc);
float top = page.findMediaBox().getHeight() - (2 * margin);
//Initialize table
float tableWidth = page.findMediaBox().getWidth() - (2 * margin);
boolean drawContent = false;
Table table = new Table(top,tableWidth, margin, doc, page, true, drawContent);
//Create Header row
Row headerRow = table.createRow(15f);
Cell cell = headerRow.createCell(100, "Awesome Facts About Belgium");
cell.setFont(PDType1Font.HELVETICA_BOLD);
cell.setFillColor(Color.BLACK);
cell.setTextColor(Color.WHITE);
table.setHeader(headerRow);
//Create 2 column row
Row row = table.createRow(15f);
cell = row.createCell(30,"Source:");
cell.setFont(PDType1Font.HELVETICA);
cell = row.createCell(70, "http://www.factsofbelgium.com/");
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
//Create Fact header row
Row factHeaderrow = table.createRow(15f);
cell = factHeaderrow.createCell((100 / 3) * 2, "Fact");
cell.setFont(PDType1Font.HELVETICA);
cell.setFontSize(6);
cell.setFillColor(Color.LIGHT_GRAY);
cell = factHeaderrow.createCell((100 / 3), "Tags");
cell.setFillColor(Color.LIGHT_GRAY);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
cell.setFontSize(6);
//Add multiple rows with random facts about Belgium
for (String[] fact : facts) {
row = table.createRow(10f);
cell = row.createCell((100 / 3) * 2, fact[0]);
cell.setFont(PDType1Font.HELVETICA);
cell.setFontSize(6);
for (int i = 1; i < fact.length; i++) {
cell = row.createCell((100 / 9), fact[i]);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
cell.setFontSize(6);
//Set colors
if (fact[i].contains("beer")) cell.setFillColor(Color.yellow);
if (fact[i].contains("champion")) cell.setTextColor(Color.GREEN);
}
}
table.draw();
//Close Stream and save pdf
File file = new File("target/BoxableSample1.pdf");
Files.createParentDirs(file);
doc.save(file);
doc.close();
}
#location 67
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Test
public void Sample1 () throws IOException, COSVisitorException {
//Set margins
float margin = 10;
List<String[]> facts = getFacts();
//Initialize Document
PDDocument doc = new PDDocument();
PDPage page = addNewPage(doc);
float yStartNewPage = page.findMediaBox().getHeight() - (2 * margin);
//Initialize table
float tableWidth = page.findMediaBox().getWidth() - (2 * margin);
boolean drawContent = false;
float yStart = yStartNewPage;
float bottomMargin = 70;
BaseTable table = new BaseTable(yStart,yStartNewPage, bottomMargin, tableWidth, margin, doc, page, true, drawContent);
//Create Header row
Row headerRow = table.createRow(15f);
Cell cell = headerRow.createCell(100, "Awesome Facts About Belgium");
cell.setFont(PDType1Font.HELVETICA_BOLD);
cell.setFillColor(Color.BLACK);
cell.setTextColor(Color.WHITE);
table.setHeader(headerRow);
//Create 2 column row
Row row = table.createRow(15f);
cell = row.createCell(30,"Source:");
cell.setFont(PDType1Font.HELVETICA);
cell = row.createCell(70, "http://www.factsofbelgium.com/");
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
//Create Fact header row
Row factHeaderrow = table.createRow(15f);
cell = factHeaderrow.createCell((100 / 3) * 2, "Fact");
cell.setFont(PDType1Font.HELVETICA);
cell.setFontSize(6);
cell.setFillColor(Color.LIGHT_GRAY);
cell = factHeaderrow.createCell((100 / 3), "Tags");
cell.setFillColor(Color.LIGHT_GRAY);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
cell.setFontSize(6);
//Add multiple rows with random facts about Belgium
for (String[] fact : facts) {
row = table.createRow(10f);
cell = row.createCell((100 / 3) * 2, fact[0]);
cell.setFont(PDType1Font.HELVETICA);
cell.setFontSize(6);
for (int i = 1; i < fact.length; i++) {
cell = row.createCell((100 / 9), fact[i]);
cell.setFont(PDType1Font.HELVETICA_OBLIQUE);
cell.setFontSize(6);
//Set colors
if (fact[i].contains("beer")) cell.setFillColor(Color.yellow);
if (fact[i].contains("champion")) cell.setTextColor(Color.GREEN);
}
}
table.draw();
//Close Stream and save pdf
File file = new File("target/BoxableSample1.pdf");
Files.createParentDirs(file);
doc.save(file);
doc.close();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException {
PDPageContentStream articleTitle = new PDPageContentStream(this.document, this.currentPage, true, true);
articleTitle.beginText();
articleTitle.setFont(font, fontSize);
articleTitle.moveTextPositionByAmount(getMargin(), yStart);
articleTitle.setNonStrokingColor(Color.black);
articleTitle.drawString(title);
articleTitle.endText();
if (textType != null) {
switch (textType) {
case HIGHLIGHT:
throw new NotImplementedException();
case SQUIGGLY:
throw new NotImplementedException();
case STRIKEOUT:
throw new NotImplementedException();
case UNDERLINE:
float y = (float) (yStart - 1.5);
float titleWidth = font.getStringWidth(title) / 1000 * fontSize;
articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y);
break;
default:
break;
}
}
articleTitle.close();
yStart = (float) (yStart - (fontSize / 1.5));
}
#location 14
#vulnerability type RESOURCE_LEAK
|
#fixed code
public void drawTitle(String title, PDFont font, int fontSize, TextType textType) throws IOException {
PDPageContentStream articleTitle = createPdPageContentStream();
articleTitle.beginText();
articleTitle.setFont(font, fontSize);
articleTitle.moveTextPositionByAmount(getMargin(), yStart);
articleTitle.setNonStrokingColor(Color.black);
articleTitle.drawString(title);
articleTitle.endText();
if (textType != null) {
switch (textType) {
case HIGHLIGHT:
throw new NotImplementedException();
case SQUIGGLY:
throw new NotImplementedException();
case STRIKEOUT:
throw new NotImplementedException();
case UNDERLINE:
float y = (float) (yStart - 1.5);
float titleWidth = font.getStringWidth(title) / 1000 * fontSize;
articleTitle.drawLine(getMargin(), y, getMargin() + titleWidth, y);
break;
default:
break;
}
}
articleTitle.close();
yStart = (float) (yStart - (fontSize / 1.5));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
void store(CipherInputStream is, byte[] checksum) throws IOException {
store.outputStream(checksum)
.<IORunnable>map(u -> () -> {
logger.debug("-- store() - copying chunk into store: 0x{}", Hex.toHexString(checksum));
copy(is, u);
})
.orElse(() -> {
logger.debug("-- store() - store now contains chunk: 0x{}", Hex.toHexString(checksum));
})
.run();
}
#location 10
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
#fixed code
Optional<Chunk> chunk(InputStream inputStream, ChunkServer.ChunkInfo chunkInfo, int index) throws IOException {
logger.trace("<< chunk() - chunkInfo: {} index: {}", chunkInfo, index);
BoundedInputStream bis = new BoundedInputStream(inputStream, chunkInfo.getChunkLength());
bis.setPropagateClose(false);
Optional<Chunk> chunk = chunk(bis, chunkInfo, index);
consume(bis);
logger.trace(">> chunk() - chunk: {}", chunk);
return chunk;
}
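This patched chunk method recurs verbatim as the fix for later entries in this file. It bounds the read to chunkInfo.getChunkLength(), disables close propagation, and then calls a consume helper that is not shown; a reasonable sketch of that helper, assuming its job is to drain whatever the inner chunk(...) call left unread so the underlying container stream stays aligned on the next chunk:

  // Assumed helper: discard any remaining bytes of the bounded chunk stream.
  private static void consume(InputStream is) throws IOException {
      final byte[] buffer = new byte[8192];
      while (is.read(buffer) != -1) {
          // intentionally discard
      }
  }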
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
Optional<List<Chunk>>
assemble(Map<ChunkReference, Chunk> map, List<ChunkReference> references) {
if (map.keySet().containsAll(references)) {
logger.warn("-- assemble() - missing chunks");
return Optional.empty();
}
List<Chunk> chunkList = references.stream()
.map(map::get)
.collect(Collectors.toList());
return Optional.of(chunkList);
}
#location 4
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
#fixed code
Optional<Map<ChunkReference, Chunk>>
fetch(HttpClient httpClient, ChunkKeyEncryptionKeys keks, Map<Integer, StorageHostChunkList> containers,
Asset asset) {
Map<ChunkReference, Chunk> map = new HashMap<>();
for (Map.Entry<Integer, StorageHostChunkList> entry : containers.entrySet()) {
Optional<Map<ChunkReference, Chunk>> chunks = keks.apply(entry.getValue())
.flatMap(kek -> fetch(httpClient, kek, entry.getValue(), entry.getKey()));
if (!chunks.isPresent()) {
return Optional.empty();
}
map.putAll(chunks.get());
}
return Optional.of(map);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
Optional<Chunk> chunk(BoundedInputStream bis, ChunkServer.ChunkInfo chunkInfo, int index) throws IOException {
byte[] checksum = chunkInfo.getChunkChecksum().toByteArray();
return store.chunk(checksum)
.<IOSupplier<Optional<Chunk>>>map(u -> () -> {
logger.debug("-- chunk() - chunk present in store: 0x:{}", Hex.toHexString(checksum));
return Optional.of(u);
})
.orElseGet(() -> () -> {
logger.debug("-- chunk() - chunk not present in store: 0x:{}", Hex.toHexString(checksum));
byte[] chunkEncryptionKey = chunkInfo.getChunkEncryptionKey().toByteArray();
return decrypt(bis, chunkEncryptionKey, checksum, index);
})
.get();
}
#location 13
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
#fixed code
Optional<Chunk> chunk(InputStream inputStream, ChunkServer.ChunkInfo chunkInfo, int index) throws IOException {
logger.trace("<< chunk() - chunkInfo: {} index: {}", chunkInfo, index);
BoundedInputStream bis = new BoundedInputStream(inputStream, chunkInfo.getChunkLength());
bis.setPropagateClose(false);
Optional<Chunk> chunk = chunk(bis, chunkInfo, index);
consume(bis);
logger.trace(">> chunk() - chunk: {}", chunk);
return chunk;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
Optional<Map<ChunkReference, Chunk>>
fetch(HttpClient httpClient, KeyEncryptionKeys keks, Map<Integer, StorageHostChunkList> containers,
Asset asset) {
Map<ChunkReference, Chunk> map = new HashMap<>();
for (Map.Entry<Integer, StorageHostChunkList> entry : containers.entrySet()) {
Optional<Map<ChunkReference, Chunk>> chunks = keks.apply(entry.getValue())
.flatMap(kek -> fetch(httpClient, kek, entry.getValue(), entry.getKey()));
if (!chunks.isPresent()) {
return Optional.empty();
}
map.putAll(chunks.get());
}
return Optional.of(map);
}
#location 6
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
#fixed code
Optional<Map<ChunkReference, Chunk>>
fetch(HttpClient httpClient, ChunkKeyEncryptionKeys keks, Map<Integer, StorageHostChunkList> containers,
Asset asset) {
Map<ChunkReference, Chunk> map = new HashMap<>();
for (Map.Entry<Integer, StorageHostChunkList> entry : containers.entrySet()) {
Optional<Map<ChunkReference, Chunk>> chunks = keks.apply(entry.getValue())
.flatMap(kek -> fetch(httpClient, kek, entry.getValue(), entry.getKey()));
if (!chunks.isPresent()) {
return Optional.empty();
}
map.putAll(chunks.get());
}
return Optional.of(map);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
ChunkServer.StorageHostChunkList adjustExpiryTimestamp(ChunkServer.StorageHostChunkList container, long offset) {
if (!container.getHostInfo().hasExpiry()) {
// Shouldn't happen, can probably remove this check.
logger.warn("-- adjustExpiryTimestamp() - no expiry timestamp: {}", container.getHostInfo());
return setExpiryTimestamp(container, System.currentTimeMillis() + FALLBACK_DURATION_MS);
}
long timestamp = container.getHostInfo().getExpiry() + offset;
return setExpiryTimestamp(container, timestamp);
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
FileGroups adjustExpiryTimestamp(FileGroups fileGroups, Optional<Long> timestampOffset) {
// We adjust the FileGroups timestamps based on machine time/ server time deltas. This allows us to function
// with inaccurate machine clocks.
List<FileChecksumStorageHostChunkLists> fileGroupsList = fileGroups.getFileGroupsList()
.stream()
.map(u -> adjustExpiryTimestamp(u, timestampOffset))
.collect(toList());
return fileGroups
.toBuilder()
.clearFileGroups()
.addAllFileGroups(fileGroupsList)
.build();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
void download(HttpClient httpClient, List<Asset> assets, Path relativePath) throws UncheckedIOException {
Path outputFolder = folder.resolve(relativePath);
keyBagManager.update(httpClient, assets);
XFileKeyFactory fileKeys = new XFileKeyFactory(keyBagManager::keyBag);
FileAssembler fileAssembler = new FileAssembler(fileKeys, outputFolder);
AuthorizedAssets authorizedAssets = authorizeAssets.authorize(httpClient, assets);
assetDownloader.accept(httpClient, authorizedAssets, fileAssembler);
}
#location 9
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
void download(HttpClient httpClient, List<Asset> assets, Path relativePath) {
if (assets.isEmpty()) {
return;
}
Path outputFolder = folder.resolve(relativePath);
keyBagManager.update(httpClient, assets);
XFileKeyFactory fileKeys = new XFileKeyFactory(keyBagManager::keyBag);
FileAssembler fileAssembler = new FileAssembler(fileKeys, outputFolder);
authorizeAssets.apply(httpClient, assets)
.ifPresent(u -> assetDownloader.accept(httpClient, u, fileAssembler));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
Optional<Chunk>
decrypt(BoundedInputStream bis, byte[] chunkEncryptionKey, byte[] checksum, int index) throws IOException {
unwrapKey(chunkEncryptionKey, index)
.map(u -> {
logger.debug("-- decrypt() - key unwrapped: 0x{} chunk: 0x{}",
Hex.toHexString(u), Hex.toHexString(checksum));
return cipherInputStreams.apply(u, bis);
})
.<IORunnable>map(u -> () -> store(u, checksum))
.orElse(() -> {
logger.warn("-- decrypt() - key unwrap failed chunk: 0x{}", Hex.toHexString(checksum));
})
.run();
return store.chunk(checksum);
}
#location 13
#vulnerability type INTERFACE_NOT_THREAD_SAFE
|
#fixed code
Optional<Chunk> chunk(InputStream inputStream, ChunkServer.ChunkInfo chunkInfo, int index) throws IOException {
logger.trace("<< chunk() - chunkInfo: {} index: {}", chunkInfo, index);
BoundedInputStream bis = new BoundedInputStream(inputStream, chunkInfo.getChunkLength());
bis.setPropagateClose(false);
Optional<Chunk> chunk = chunk(bis, chunkInfo, index);
consume(bis);
logger.trace(">> chunk() - chunk: {}", chunk);
return chunk;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static int runCommand(final CCTask task, final File workingDir, final String[] cmdline,
final boolean newEnvironment, final Environment env) throws BuildException {
try {
task.log(Commandline.toString(cmdline), task.getCommandLogLevel());
final Execute exe = new Execute(new LogStreamHandler(task, Project.MSG_INFO, Project.MSG_ERR));
if (System.getProperty("os.name").equals("OS/390")) {
exe.setVMLauncher(false);
}
exe.setAntRun(task.getProject());
exe.setCommandline(cmdline);
exe.setWorkingDirectory(workingDir);
if (env != null) {
final String[] environment = env.getVariables();
if (environment != null) {
for (final String element : environment) {
task.log("Setting environment variable: " + element, Project.MSG_VERBOSE);
}
}
exe.setEnvironment(environment);
}
exe.setNewenvironment(newEnvironment);
return exe.execute();
} catch (final java.io.IOException exc) {
throw new BuildException("Could not launch " + cmdline[0] + ": " + exc, task.getLocation());
}
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public static int runCommand(final CCTask task, final File workingDir, final String[] cmdline,
final boolean newEnvironment, final Environment env) throws BuildException {
try {
task.log(Commandline.toString(cmdline), task.getCommandLogLevel());
/* final Execute exe = new Execute(new LogStreamHandler(task, Project.MSG_INFO, Project.MSG_ERR));
if (System.getProperty("os.name").equals("OS/390")) {
exe.setVMLauncher(false);
}
exe.setAntRun(task.getProject());
exe.setCommandline(cmdline);
exe.setWorkingDirectory(workingDir);
if (env != null) {
final String[] environment = env.getVariables();
if (environment != null) {
for (final String element : environment) {
task.log("Setting environment variable: " + element, Project.MSG_VERBOSE);
}
}
exe.setEnvironment(environment);
}
exe.setNewenvironment(newEnvironment);
return exe.execute();
*/
return CommandExecution.runCommand(cmdline,workingDir,task);
} catch (final java.io.IOException exc) {
throw new BuildException("Could not launch " + cmdline[0] + ": " + exc, task.getLocation());
}
}
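The patch swaps Ant's Execute plumbing for a CommandExecution.runCommand(cmdline, workingDir, task) call whose implementation is not part of this entry. A self-contained sketch of what such a helper could look like with ProcessBuilder; this is an illustration only, and the project's real class may differ, for instance in how it applies the environment settings the commented-out block used to handle:

  import java.io.BufferedReader;
  import java.io.File;
  import java.io.IOException;
  import java.io.InputStreamReader;

  public final class CommandExecution {
      // Hypothetical helper, not the project's verified implementation.
      public static int runCommand(String[] cmdline, File workingDir, CCTask task) throws IOException {
          ProcessBuilder builder = new ProcessBuilder(cmdline)
              .directory(workingDir)
              .redirectErrorStream(true);
          Process process = builder.start();
          try (BufferedReader reader =
                   new BufferedReader(new InputStreamReader(process.getInputStream()))) {
              String line;
              while ((line = reader.readLine()) != null) {
                  task.log(line); // forward tool output to the Ant task log
              }
          }
          try {
              return process.waitFor();
          } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              throw new IOException("Interrupted while waiting for " + cmdline[0], e);
          }
      }
  }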
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public final List/* <AttachedNarArtifact> */getAttachedNarDependencies( List/* <NarArtifacts> */narArtifacts,
AOL archOsLinker, String type )
throws MojoExecutionException, MojoFailureException
{
boolean noarch = false;
AOL aol = archOsLinker;
if ( aol == null )
{
noarch = true;
aol = defaultAOL;
}
List artifactList = new ArrayList();
for ( Iterator i = narArtifacts.iterator(); i.hasNext(); )
{
Artifact dependency = (Artifact) i.next();
NarInfo narInfo = getNarInfo( dependency );
if ( noarch )
{
artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) );
}
// use preferred binding, unless non existing.
String binding = narInfo.getBinding( aol, type != null ? type : Library.STATIC );
// FIXME kludge, but does not work anymore since AOL is now a class
if ( aol.equals( NarConstants.NAR_NO_ARCH ) )
{
// FIXME no handling of local
artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) );
}
else
{
artifactList.addAll( getAttachedNarDependencies( dependency, aol, binding ) );
}
}
return artifactList;
}
#location 24
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public final List/* <AttachedNarArtifact> */getAttachedNarDependencies( List/* <NarArtifacts> */narArtifacts,
AOL archOsLinker, String type )
throws MojoExecutionException, MojoFailureException
{
boolean noarch = false;
AOL aol = archOsLinker;
if ( aol == null )
{
noarch = true;
aol = defaultAOL;
}
List artifactList = new ArrayList();
for ( Iterator i = narArtifacts.iterator(); i.hasNext(); )
{
Artifact dependency = (Artifact) i.next();
if ( noarch )
{
artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) );
}
// FIXME kludge, but does not work anymore since AOL is now a class
if ( aol.equals( NarConstants.NAR_NO_ARCH ) )
{
// FIXME no handling of local
artifactList.addAll( getAttachedNarDependencies( dependency, null, NarConstants.NAR_NO_ARCH ) );
}
else
{
if ( type != null )
{
artifactList.addAll( getAttachedNarDependencies( dependency, aol, type ) );
}
else
{
for ( int j = 0; j < narTypes.length; j++ )
{
artifactList.addAll( getAttachedNarDependencies( dependency, aol, narTypes[j] ));
}
}
}
}
return artifactList;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private boolean isClang() {
final String command = getCommand();
if (command == null) {
return false;
}
if (command.startsWith("clang")) {
return true;
}
if (!GPP_COMMAND.equals(command)) {
return false;
}
final String[] cmd = {
command, "--version"
};
final String[] cmdout = CaptureStreamHandler.execute(cmd).getStdout();
return cmdout.length > 0 && cmdout[0].contains("(clang-");
}
#location 16
#vulnerability type NULL_DEREFERENCE
|
#fixed code
private boolean isClang() {
final String command = getCommand();
if (command == null) {
return false;
}
if (command.startsWith("clang")) {
return true;
}
if (!GPP_COMMAND.equals(command)) {
return false;
}
final String[] cmd = {
command, "--version"
};
final String[] cmdout = CaptureStreamHandler.execute(cmd).getStdout();
return cmdout != null && cmdout.length > 0 && cmdout[0].contains("(clang-");
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 61
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
|
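The patched open above keeps only the configuration that is specific to this spout and delegates the shared Elasticsearch client setup to the parent class via super.open, so the synchronized initialisation lives in a single place. Below is a stripped-down sketch of that delegation pattern; the class and field names are illustrative and do not reproduce the StormCrawler API.

import java.util.Map;

public class SpoutRefactorSketch {

    static class BaseSpout {
        protected static Object client; // shared once per JVM

        protected void open(Map<String, Object> conf) {
            // the only place that guards the shared client initialisation
            synchronized (BaseSpout.class) {
                if (client == null) {
                    client = new Object(); // stands in for the real ES client
                }
            }
        }
    }

    static class AggregationSpout extends BaseSpout {
        private int maxBucketNum;

        @Override
        protected void open(Map<String, Object> conf) {
            // read only the settings this subclass owns ...
            maxBucketNum = ((Number) conf.getOrDefault("maxBucketNum", 10)).intValue();
            // ... then let the parent handle the shared, thread-sensitive part
            super.open(conf);
        }
    }

    public static void main(String[] args) {
        Map<String, Object> conf = new java.util.HashMap<>();
        conf.put("maxBucketNum", 20);
        new AggregationSpout().open(conf);
        System.out.println("client initialised: " + (BaseSpout.client != null));
    }
}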
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 10
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void nextTuple() {
if (!active)
return;
// synchronize access to buffer needed in case of asynchronous
// queries to the backend
synchronized (buffer) {
if (!buffer.isEmpty()) {
// track how long the buffer had been empty for
if (timestampEmptyBuffer != -1) {
eventCounter.scope("empty.buffer").incrBy(
System.currentTimeMillis() - timestampEmptyBuffer);
timestampEmptyBuffer = -1;
}
List<Object> fields = buffer.remove();
String url = fields.get(0).toString();
this._collector.emit(fields, url);
beingProcessed.put(url, null);
eventCounter.scope("emitted").incrBy(1);
return;
} else if (timestampEmptyBuffer == -1) {
timestampEmptyBuffer = System.currentTimeMillis();
}
}
if (isInQuery.get() || throttleQueries() > 0) {
// sleep for a bit but not too much in order to give ack/fail a
// chance
Utils.sleep(10);
return;
}
// re-populate the buffer
populateBuffer();
timeLastQuery = System.currentTimeMillis();
}
#location 37
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void nextTuple() {
if (!active)
return;
// synchronize access to buffer needed in case of asynchronous
// queries to the backend
synchronized (buffer) {
// force the refresh of the buffer even if the buffer is not empty
if (!isInQuery.get() && triggerQueries()) {
populateBuffer();
}
if (!buffer.isEmpty()) {
// track how long the buffer had been empty for
if (timestampEmptyBuffer != -1) {
eventCounter.scope("empty.buffer").incrBy(
System.currentTimeMillis() - timestampEmptyBuffer);
timestampEmptyBuffer = -1;
}
List<Object> fields = buffer.remove();
String url = fields.get(0).toString();
this._collector.emit(fields, url);
beingProcessed.put(url, null);
in_buffer.remove(url);
eventCounter.scope("emitted").incrBy(1);
return;
} else if (timestampEmptyBuffer == -1) {
timestampEmptyBuffer = System.currentTimeMillis();
}
}
if (isInQuery.get() || throttleQueries() > 0) {
// sleep for a bit but not too much in order to give ack/fail a
// chance
Utils.sleep(10);
return;
}
// re-populate the buffer
populateBuffer();
timeLastQuerySent = System.currentTimeMillis();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void onResponse(SearchResponse response) {
long timeTaken = System.currentTimeMillis() - timeStartESQuery;
Aggregations aggregs = response.getAggregations();
SingleBucketAggregation sample = aggregs.get("sample");
if (sample != null) {
aggregs = sample.getAggregations();
}
Terms agg = aggregs.get("partition");
int numhits = 0;
int numBuckets = 0;
int alreadyprocessed = 0;
synchronized (buffer) {
// For each entry
for (Terms.Bucket entry : agg.getBuckets()) {
String key = (String) entry.getKey(); // bucket key
long docCount = entry.getDocCount(); // Doc count
int hitsForThisBucket = 0;
// filter results so that we don't include URLs we are already
// being processed
TopHits topHits = entry.getAggregations().get("docs");
for (SearchHit hit : topHits.getHits().getHits()) {
hitsForThisBucket++;
Map<String, Object> keyValues = hit.sourceAsMap();
String url = (String) keyValues.get("url");
LOG.debug("{} -> id [{}], _source [{}]", logIdprefix,
hit.getId(), hit.getSourceAsString());
// is already being processed - skip it!
if (beingProcessed.containsKey(url)) {
alreadyprocessed++;
continue;
}
Metadata metadata = fromKeyValues(keyValues);
buffer.add(new Values(url, metadata));
}
if (hitsForThisBucket > 0)
numBuckets++;
numhits += hitsForThisBucket;
LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix,
key, hitsForThisBucket, docCount, alreadyprocessed);
}
// Shuffle the URLs so that we don't get blocks of URLs from the
// same
// host or domain
Collections.shuffle((List) buffer);
}
LOG.info(
"{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed);
esQueryTimes.addMeasurement(timeTaken);
eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
eventCounter.scope("ES_queries").incrBy(1);
eventCounter.scope("ES_docs").incrBy(numhits);
// change the date only if we don't get any results at all
if (numBuckets == 0) {
lastDate = null;
}
// remove lock
isInESQuery.set(false);
}
#location 73
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void onResponse(SearchResponse response) {
long timeTaken = System.currentTimeMillis() - timeStartESQuery;
Aggregations aggregs = response.getAggregations();
SingleBucketAggregation sample = aggregs.get("sample");
if (sample != null) {
aggregs = sample.getAggregations();
}
Terms agg = aggregs.get("partition");
int numhits = 0;
int numBuckets = 0;
int alreadyprocessed = 0;
Date mostRecentDateFound = null;
SimpleDateFormat formatter = new SimpleDateFormat(
"yyyy-MM-dd'T'HH:mm:ss.SSSX");
synchronized (buffer) {
// For each entry
for (Terms.Bucket entry : agg.getBuckets()) {
String key = (String) entry.getKey(); // bucket key
long docCount = entry.getDocCount(); // Doc count
int hitsForThisBucket = 0;
// filter results so that we don't include URLs we are already
// being processed
TopHits topHits = entry.getAggregations().get("docs");
for (SearchHit hit : topHits.getHits().getHits()) {
hitsForThisBucket++;
Map<String, Object> keyValues = hit.sourceAsMap();
String url = (String) keyValues.get("url");
// 2017-04-06T10:14:28.662Z
String strDate = (String) keyValues.get("nextFetchDate");
try {
Date nextFetchDate = formatter.parse(strDate);
if (mostRecentDateFound == null
|| nextFetchDate.after(mostRecentDateFound)) {
mostRecentDateFound = nextFetchDate;
}
} catch (ParseException e) {
throw new RuntimeException("can't parse date :"
+ strDate);
}
LOG.debug("{} -> id [{}], _source [{}]", logIdprefix,
hit.getId(), hit.getSourceAsString());
// is already being processed - skip it!
if (beingProcessed.containsKey(url)) {
alreadyprocessed++;
continue;
}
Metadata metadata = fromKeyValues(keyValues);
buffer.add(new Values(url, metadata));
}
if (hitsForThisBucket > 0)
numBuckets++;
numhits += hitsForThisBucket;
LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix,
key, hitsForThisBucket, docCount, alreadyprocessed);
}
// Shuffle the URLs so that we don't get blocks of URLs from the
// same
// host or domain
Collections.shuffle((List) buffer);
}
LOG.info(
"{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed);
esQueryTimes.addMeasurement(timeTaken);
eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
eventCounter.scope("ES_queries").incrBy(1);
eventCounter.scope("ES_docs").incrBy(numhits);
// optimise the nextFetchDate by getting the most recent value
// returned in the query and add to it, unless the previous value is
// within n mins in which case we'll keep it
if (mostRecentDateFound != null && recentDateIncrease >= 0) {
Calendar cal = Calendar.getInstance();
cal.setTime(mostRecentDateFound);
cal.add(Calendar.MINUTE, recentDateIncrease);
Date potentialNewDate = cal.getTime();
Date oldDate = null;
// check boundaries
if (this.recentDateMinGap > 0) {
Calendar low = Calendar.getInstance();
low.setTime(lastDate);
low.add(Calendar.MINUTE, -recentDateMinGap);
Calendar high = Calendar.getInstance();
high.setTime(lastDate);
high.add(Calendar.MINUTE, recentDateMinGap);
if (high.before(potentialNewDate)
|| low.after(potentialNewDate)) {
oldDate = lastDate;
lastDate = potentialNewDate;
}
} else {
oldDate = lastDate;
lastDate = potentialNewDate;
}
if (oldDate != null) {
LOG.info(
"{} lastDate changed from {} to {} based on mostRecentDateFound {}",
logIdprefix, oldDate, lastDate, mostRecentDateFound);
}
}
// change the date only if we don't get any results at all
if (numBuckets == 0) {
lastDate = null;
}
// remove lock
isInESQuery.set(false);
}
|
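Both the vulnerable and the fixed onResponse above take the lock on buffer because the asynchronous search callback fills it while nextTuple drains it on the executor thread. The following is a minimal sketch of that producer/consumer locking in plain Java, independent of Storm or Elasticsearch; the class and method names are illustrative only.

import java.util.LinkedList;
import java.util.Queue;

public class SharedBufferSketch {

    private final Queue<String> buffer = new LinkedList<>();

    // called from the asynchronous search callback thread
    public void onResults(Iterable<String> urls) {
        synchronized (buffer) {
            for (String url : urls) {
                buffer.add(url);
            }
        }
    }

    // called repeatedly from the main executor thread
    public String nextItem() {
        synchronized (buffer) {
            return buffer.poll(); // null when the buffer is empty
        }
    }

    public static void main(String[] args) {
        SharedBufferSketch sketch = new SharedBufferSketch();
        sketch.onResults(java.util.Arrays.asList("http://a.example", "http://b.example"));
        System.out.println(sketch.nextItem());
    }
}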
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void execute(Tuple input) {
// main thread in charge of acking and failing
// see
// https://github.com/nathanmarz/storm/wiki/Troubleshooting#nullpointerexception-from-deep-inside-storm
int acked = 0;
int failed = 0;
int emitted = 0;
// emit with or without anchors
// before acking
synchronized (emitQueue) {
for (Object[] toemit : this.emitQueue) {
String streamID = (String) toemit[0];
Tuple anchor = (Tuple) toemit[1];
Values vals = (Values) toemit[2];
if (anchor == null)
_collector.emit(streamID, vals);
else
_collector.emit(streamID, Arrays.asList(anchor), vals);
}
emitted = emitQueue.size();
emitQueue.clear();
}
// have a tick tuple to make sure we don't get starved
synchronized (ackQueue) {
for (Tuple toack : this.ackQueue) {
_collector.ack(toack);
}
acked = ackQueue.size();
ackQueue.clear();
}
synchronized (failQueue) {
for (Tuple toack : this.failQueue) {
_collector.fail(toack);
}
failed = failQueue.size();
failQueue.clear();
}
if (acked + failed + emitted > 0)
LOG.info("[Fetcher #" + taskIndex + "] Acked : " + acked
+ "\tFailed : " + failed + "\tEmitted : " + emitted);
if (isTickTuple(input)) {
_collector.ack(input);
return;
}
CountMetric metric = metricGauge.scope("activethreads");
metric.getValueAndReset();
metric.incrBy(this.activeThreads.get());
metric = metricGauge.scope("in queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.inQueues.get());
metric = metricGauge.scope("queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.queues.size());
LOG.info("[Fetcher #" + taskIndex + "] Threads : "
+ this.activeThreads.get() + "\tqueues : "
+ this.fetchQueues.queues.size() + "\tin_queues : "
+ this.fetchQueues.inQueues.get());
String url = input.getStringByField("url");
// check whether this tuple has a url field
if (url == null) {
LOG.info("[Fetcher #" + taskIndex
+ "] Missing url field for tuple " + input);
// ignore silently
_collector.ack(input);
return;
}
fetchQueues.addFetchItem(input);
}
#location 50
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void execute(Tuple input) {
// triggered by the arrival of a tuple
// be it a tick or normal one
flushQueues();
if (isTickTuple(input)) {
_collector.ack(input);
return;
}
CountMetric metric = metricGauge.scope("activethreads");
metric.getValueAndReset();
metric.incrBy(this.activeThreads.get());
metric = metricGauge.scope("in queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.inQueues.get());
metric = metricGauge.scope("queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.queues.size());
LOG.info("[Fetcher #" + taskIndex + "] Threads : "
+ this.activeThreads.get() + "\tqueues : "
+ this.fetchQueues.queues.size() + "\tin_queues : "
+ this.fetchQueues.inQueues.get());
String url = input.getStringByField("url");
// check whether this tuple has a url field
if (url == null) {
LOG.info("[Fetcher #" + taskIndex
+ "] Missing url field for tuple " + input);
// ignore silently
_collector.ack(input);
return;
}
fetchQueues.addFetchItem(input);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void execute(Tuple tuple) {
HashMap<String, String[]> metadata = (HashMap<String, String[]>) tuple
.getValueByField("metadata");
// TODO check that we have the right number of fields ?
String isBoolean = KeyValues.getValue(isSitemapKey, metadata);
if (StringUtils.isBlank(isBoolean)
|| !isBoolean.equalsIgnoreCase("true")) {
// just pass it on
this.collector.emit(tuple.getValues());
this.collector.ack(tuple);
return;
}
// it does have the right key/value
byte[] content = tuple.getBinaryByField("content");
String url = tuple.getStringByField("url");
String ct = KeyValues.getValue(HttpHeaders.CONTENT_TYPE, metadata);
List<Values> outlinks = parseSiteMap(url, content, ct);
if (outlinks == null) {
// likely to happen ad lib - just ack
// error has been logged
this.collector.ack(tuple);
return;
}
// send to status stream
for (Values ol : outlinks) {
collector.emit(Constants.StatusStreamName, new Values(ol));
}
// marking the main URL as successfully fetched
collector.emit(Constants.StatusStreamName, new Values(url, metadata,
Status.FETCHED));
this.collector.ack(tuple);
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
public void execute(Tuple tuple) {
HashMap<String, String[]> metadata = (HashMap<String, String[]>) tuple
.getValueByField("metadata");
// TODO check that we have the right number of fields ?
String isSitemap = KeyValues.getValue(isSitemapKey, metadata);
if (!Boolean.valueOf(isSitemap)) {
// just pass it on
this.collector.emit(tuple.getValues());
this.collector.ack(tuple);
return;
}
// it does have the right key/value
byte[] content = tuple.getBinaryByField("content");
String url = tuple.getStringByField("url");
String ct = KeyValues.getValue(HttpHeaders.CONTENT_TYPE, metadata);
List<Values> outlinks = parseSiteMap(url, content, ct);
// send to status stream
for (Values ol : outlinks) {
collector.emit(Constants.StatusStreamName, ol);
}
// marking the main URL as successfully fetched
// regardless of whether we got a parse exception or not
collector.emit(Constants.StatusStreamName, new Values(url, metadata,
Status.FETCHED));
this.collector.ack(tuple);
}
|
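One of the changes in the patch above replaces the isBlank/equalsIgnoreCase test with Boolean.valueOf, which treats a missing (null) metadata value as false without any explicit null check. A short plain-Java sketch of that behaviour:

public class BooleanFlagSketch {
    public static void main(String[] args) {
        String missing = null;   // the metadata key is not present
        String present = "true";

        // Boolean.valueOf(String) returns false for null, so no guard is needed
        System.out.println(Boolean.valueOf(missing)); // false
        System.out.println(Boolean.valueOf(present)); // true
    }
}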
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (lastDate == null) {
lastDate = new Date();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print(
lastDate.getTime());
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
if (filterQuery != null) {
queryBuilder = boolQuery().must(queryBuilder).filter(
QueryBuilders.queryStringQuery(filterQuery));
}
SearchRequest request = new SearchRequest(indexName).types(docType)
.searchType(SearchType.QUERY_THEN_FETCH);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
sourceBuilder.query(queryBuilder);
sourceBuilder.from(lastStartOffset);
sourceBuilder.size(maxBucketNum);
sourceBuilder.explain(false);
sourceBuilder.trackTotalHits(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
request.preference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
sourceBuilder.sort(new FieldSortBuilder(totalSortField)
.order(SortOrder.ASC));
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
sourceBuilder.collapse(collapse);
request.source(sourceBuilder);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, request.toString());
isInQuery.set(true);
client.searchAsync(request, this);
}
#location 15
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (queryDate == null) {
queryDate = new Date();
lastTimeResetToNOW = Instant.now();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print(
queryDate.getTime());
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
if (filterQuery != null) {
queryBuilder = boolQuery().must(queryBuilder).filter(
QueryBuilders.queryStringQuery(filterQuery));
}
SearchRequest request = new SearchRequest(indexName).types(docType)
.searchType(SearchType.QUERY_THEN_FETCH);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
sourceBuilder.query(queryBuilder);
sourceBuilder.from(lastStartOffset);
sourceBuilder.size(maxBucketNum);
sourceBuilder.explain(false);
sourceBuilder.trackTotalHits(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
request.preference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
sourceBuilder.sort(new FieldSortBuilder(totalSortField)
.order(SortOrder.ASC));
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
sourceBuilder.collapse(collapse);
request.source(sourceBuilder);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, request.toString());
isInQuery.set(true);
client.searchAsync(request, this);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void execute(Tuple input) {
// main thread in charge of acking and failing
// see
// https://github.com/nathanmarz/storm/wiki/Troubleshooting#nullpointerexception-from-deep-inside-storm
int acked = 0;
int failed = 0;
int emitted = 0;
// emit with or without anchors
// before acking
synchronized (emitQueue) {
for (Object[] toemit : this.emitQueue) {
String streamID = (String) toemit[0];
Tuple anchor = (Tuple) toemit[1];
Values vals = (Values) toemit[2];
if (anchor == null)
_collector.emit(streamID, vals);
else
_collector.emit(streamID, Arrays.asList(anchor), vals);
}
emitted = emitQueue.size();
emitQueue.clear();
}
// have a tick tuple to make sure we don't get starved
synchronized (ackQueue) {
for (Tuple toack : this.ackQueue) {
_collector.ack(toack);
}
acked = ackQueue.size();
ackQueue.clear();
}
synchronized (failQueue) {
for (Tuple toack : this.failQueue) {
_collector.fail(toack);
}
failed = failQueue.size();
failQueue.clear();
}
if (acked + failed + emitted > 0)
LOG.info("[Fetcher #" + taskIndex + "] Acked : " + acked
+ "\tFailed : " + failed + "\tEmitted : " + emitted);
if (isTickTuple(input)) {
_collector.ack(input);
return;
}
CountMetric metric = metricGauge.scope("activethreads");
metric.getValueAndReset();
metric.incrBy(this.activeThreads.get());
metric = metricGauge.scope("in queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.inQueues.get());
metric = metricGauge.scope("queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.queues.size());
LOG.info("[Fetcher #" + taskIndex + "] Threads : "
+ this.activeThreads.get() + "\tqueues : "
+ this.fetchQueues.queues.size() + "\tin_queues : "
+ this.fetchQueues.inQueues.get());
String url = input.getStringByField("url");
// check whether this tuple has a url field
if (url == null) {
LOG.info("[Fetcher #" + taskIndex
+ "] Missing url field for tuple " + input);
// ignore silently
_collector.ack(input);
return;
}
fetchQueues.addFetchItem(input);
}
#location 62
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void execute(Tuple input) {
// triggered by the arrival of a tuple
// be it a tick or normal one
flushQueues();
if (isTickTuple(input)) {
_collector.ack(input);
return;
}
CountMetric metric = metricGauge.scope("activethreads");
metric.getValueAndReset();
metric.incrBy(this.activeThreads.get());
metric = metricGauge.scope("in queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.inQueues.get());
metric = metricGauge.scope("queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.queues.size());
LOG.info("[Fetcher #" + taskIndex + "] Threads : "
+ this.activeThreads.get() + "\tqueues : "
+ this.fetchQueues.queues.size() + "\tin_queues : "
+ this.fetchQueues.inQueues.get());
String url = input.getStringByField("url");
// check whether this tuple has a url field
if (url == null) {
LOG.info("[Fetcher #" + taskIndex
+ "] Missing url field for tuple " + input);
// ignore silently
_collector.ack(input);
return;
}
fetchQueues.addFetchItem(input);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 7
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
// one ES client per JVM
synchronized (ElasticSearchSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("Assigned shard ID {}", shardID);
}
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 13
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
super.open(stormConf, context, collector);
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
super.open(stormConf, context, collector);
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
// one ES client per JVM
synchronized (AbstractSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
// TODO use the admin API when it gets available
// TODO or the low level one with
// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html
// TODO identify local shards and use those if possible
// ClusterSearchShardsRequest request = new
// ClusterSearchShardsRequest(
// indexName);
// ClusterSearchShardsResponse shardresponse = client.admin()
// .cluster().searchShards(request).actionGet();
// ClusterSearchShardsGroup[] shardgroups =
// shardresponse.getGroups();
// if (totalTasks != shardgroups.length) {
// throw new RuntimeException(
// "Number of ES spout instances should be the same as number of
// shards ("
// + shardgroups.length + ") but is " + totalTasks);
// }
// shardID = shardgroups[context.getThisTaskIndex()].getShardId()
// .getId();
// TEMPORARY simply use the task index as shard index
shardID = context.getThisTaskIndex();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
partitionField = ConfUtils.getString(stormConf,
ESStatusBucketFieldParamName, "metadata.hostname");
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
resetFetchDateAfterNSecs = ConfUtils.getInt(stormConf,
ESStatusResetFetchDateParamName, resetFetchDateAfterNSecs);
filterQuery = ConfUtils.getString(stormConf, ESStatusFilterParamName);
}
#location 76
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
super.open(stormConf, context, collector);
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
// one ES client per JVM
synchronized (AbstractSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
// TODO use the admin API when it gets available
// TODO or the low level one with
// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html
// TODO identify local shards and use those if possible
// ClusterSearchShardsRequest request = new
// ClusterSearchShardsRequest(
// indexName);
// ClusterSearchShardsResponse shardresponse = client.admin()
// .cluster().searchShards(request).actionGet();
// ClusterSearchShardsGroup[] shardgroups =
// shardresponse.getGroups();
// if (totalTasks != shardgroups.length) {
// throw new RuntimeException(
// "Number of ES spout instances should be the same as number of
// shards ("
// + shardgroups.length + ") but is " + totalTasks);
// }
// shardID = shardgroups[context.getThisTaskIndex()].getShardId()
// .getId();
// TEMPORARY simply use the task index as shard index
shardID = context.getThisTaskIndex();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
partitionField = ConfUtils.getString(stormConf,
ESStatusBucketFieldParamName, "metadata.hostname");
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
filterQuery = ConfUtils.getString(stormConf, ESStatusFilterParamName);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
// one ES client per JVM
synchronized (ElasticSearchSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("Assigned shard ID {}", shardID);
}
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 11
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
super.open(stormConf, context, collector);
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void store(String url, Status status, Metadata metadata,
Date nextFetch, Tuple tuple) throws Exception {
String sha256hex = org.apache.commons.codec.digest.DigestUtils
.sha256Hex(url);
// need to synchronize: otherwise it might get added to the cache
// without having been sent to ES
synchronized (waitAck) {
// check that the same URL is not being sent to ES
List<Tuple> alreadySent = waitAck.getIfPresent(sha256hex);
if (alreadySent != null && status.equals(Status.DISCOVERED)) {
// if this object is discovered - adding another version of it
// won't make any difference
LOG.debug(
"Already being sent to ES {} with status {} and ID {}",
url, status, sha256hex);
// ack straight away!
super.ack(tuple, url);
return;
}
}
XContentBuilder builder = jsonBuilder().startObject();
builder.field("url", url);
builder.field("status", status);
// check that we don't overwrite an existing entry
// When create is used, the index operation will fail if a document
// by that id already exists in the index.
boolean create = status.equals(Status.DISCOVERED);
builder.startObject("metadata");
Iterator<String> mdKeys = metadata.keySet().iterator();
while (mdKeys.hasNext()) {
String mdKey = mdKeys.next();
String[] values = metadata.getValues(mdKey);
// periods are not allowed in ES2 - replace with %2E
mdKey = mdKey.replaceAll("\\.", "%2E");
builder.array(mdKey, values);
}
String partitionKey = partitioner.getPartition(url, metadata);
if (partitionKey == null) {
partitionKey = "_DEFAULT_";
}
// store routing key in metadata?
if (StringUtils.isNotBlank(fieldNameForRoutingKey)
&& routingFieldNameInMetadata) {
builder.field(fieldNameForRoutingKey, partitionKey);
}
builder.endObject();
// store routing key outside metadata?
if (StringUtils.isNotBlank(fieldNameForRoutingKey)
&& !routingFieldNameInMetadata) {
builder.field(fieldNameForRoutingKey, partitionKey);
}
builder.field("nextFetchDate", nextFetch);
builder.endObject();
IndexRequest request = new IndexRequest(getIndexName(metadata))
.type(docType);
request.source(builder).id(sha256hex).create(create);
if (doRouting) {
request.routing(partitionKey);
}
synchronized (waitAck) {
List<Tuple> tt = waitAck.getIfPresent(sha256hex);
if (tt == null) {
tt = new LinkedList<>();
waitAck.put(sha256hex, tt);
}
tt.add(tuple);
LOG.debug("Added to waitAck {} with ID {} total {}", url,
sha256hex, tt.size());
}
LOG.debug("Sending to ES buffer {} with ID {}", url, sha256hex);
connection.getProcessor().add(request);
}
#location 68
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void store(String url, Status status, Metadata metadata,
Date nextFetch, Tuple tuple) throws Exception {
String sha256hex = org.apache.commons.codec.digest.DigestUtils
.sha256Hex(url);
// need to synchronize: otherwise it might get added to the cache
// without having been sent to ES
synchronized (waitAck) {
// check that the same URL is not being sent to ES
List<Tuple> alreadySent = waitAck.getIfPresent(sha256hex);
if (alreadySent != null && status.equals(Status.DISCOVERED)) {
// if this object is discovered - adding another version of it
// won't make any difference
LOG.debug(
"Already being sent to ES {} with status {} and ID {}",
url, status, sha256hex);
// ack straight away!
super.ack(tuple, url);
return;
}
}
XContentBuilder builder = jsonBuilder().startObject();
builder.field("url", url);
builder.field("status", status);
// check that we don't overwrite an existing entry
// When create is used, the index operation will fail if a document
// by that id already exists in the index.
boolean create = status.equals(Status.DISCOVERED);
builder.startObject("metadata");
Iterator<String> mdKeys = metadata.keySet().iterator();
while (mdKeys.hasNext()) {
String mdKey = mdKeys.next();
String[] values = metadata.getValues(mdKey);
// periods are not allowed in ES2 - replace with %2E
mdKey = mdKey.replaceAll("\\.", "%2E");
builder.array(mdKey, values);
}
String partitionKey = partitioner.getPartition(url, metadata);
if (partitionKey == null) {
partitionKey = "_DEFAULT_";
}
// store routing key in metadata?
if (StringUtils.isNotBlank(fieldNameForRoutingKey)
&& routingFieldNameInMetadata) {
builder.field(fieldNameForRoutingKey, partitionKey);
}
builder.endObject();
// store routing key outside metadata?
if (StringUtils.isNotBlank(fieldNameForRoutingKey)
&& !routingFieldNameInMetadata) {
builder.field(fieldNameForRoutingKey, partitionKey);
}
builder.field("nextFetchDate", nextFetch);
builder.endObject();
IndexRequest request = new IndexRequest(getIndexName(metadata));
request.source(builder).id(sha256hex).create(create);
if (doRouting) {
request.routing(partitionKey);
}
synchronized (waitAck) {
List<Tuple> tt = waitAck.getIfPresent(sha256hex);
if (tt == null) {
tt = new LinkedList<>();
waitAck.put(sha256hex, tt);
}
tt.add(tuple);
LOG.debug("Added to waitAck {} with ID {} total {}", url,
sha256hex, tt.size());
}
LOG.debug("Sending to ES buffer {} with ID {}", url, sha256hex);
connection.getProcessor().add(request);
}
|
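In the store method above, the waitAck cache is read and updated inside synchronized blocks so that the check-then-act sequence (getIfPresent followed by put) stays atomic with respect to the listener thread that removes entries on acknowledgement. A compact sketch of the same idiom, with a plain HashMap standing in for the Guava cache; the names are illustrative.

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

public class WaitAckSketch {

    private final Map<String, List<String>> waitAck = new HashMap<>();

    // register a tuple as in flight for the given document id
    public void register(String docId, String tuple) {
        synchronized (waitAck) {
            // the check-then-act must happen under the same lock
            List<String> pending = waitAck.get(docId);
            if (pending == null) {
                pending = new LinkedList<>();
                waitAck.put(docId, pending);
            }
            pending.add(tuple);
        }
    }

    // called from the bulk-response listener thread once the id is acknowledged
    public List<String> complete(String docId) {
        synchronized (waitAck) {
            return waitAck.remove(docId);
        }
    }

    public static void main(String[] args) {
        WaitAckSketch sketch = new WaitAckSketch();
        sketch.register("doc-1", "tuple-a");
        sketch.register("doc-1", "tuple-b");
        System.out.println(sketch.complete("doc-1")); // [tuple-a, tuple-b]
    }
}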
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
protected void populateBuffer() {
if (lastDate == null) {
lastDate = String.format(DATEFORMAT, new Date());
}
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
lastDate);
QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery(
"nextFetchDate").lte(lastDate);
SearchRequestBuilder srb = client.prepareSearch(indexName)
.setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(rangeQueryBuilder).setFrom(0).setSize(0)
.setExplain(false);
TermsAggregationBuilder aggregations = AggregationBuilders
.terms("partition").field(partitionField).size(maxBucketNum);
TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs")
.size(maxURLsPerBucket).explain(false);
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField)
.order(SortOrder.ASC);
tophits.sort(sorter);
}
aggregations.subAggregation(tophits);
// sort between buckets
if (StringUtils.isNotBlank(totalSortField)) {
MinAggregationBuilder minBuilder = AggregationBuilders.min(
"top_hit").field(totalSortField);
aggregations.subAggregation(minBuilder);
aggregations.order(Terms.Order.aggregation("top_hit", true));
}
if (sample) {
DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder(
"sample");
sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket);
sab.shardSize(maxURLsPerBucket * maxBucketNum);
sab.subAggregation(aggregations);
srb.addAggregation(sab);
} else {
srb.addAggregation(aggregations);
}
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
srb.setPreference("_shards:" + shardID);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, srb.toString());
timeStartESQuery = System.currentTimeMillis();
isInESQuery.set(true);
srb.execute(this);
}
#location 12
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
protected void populateBuffer() {
if (lastDate == null) {
lastDate = new Date();
}
String formattedLastDate = String.format(DATEFORMAT, lastDate);
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery(
"nextFetchDate").lte(formattedLastDate);
SearchRequestBuilder srb = client.prepareSearch(indexName)
.setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(rangeQueryBuilder).setFrom(0).setSize(0)
.setExplain(false);
TermsAggregationBuilder aggregations = AggregationBuilders
.terms("partition").field(partitionField).size(maxBucketNum);
TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs")
.size(maxURLsPerBucket).explain(false);
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField)
.order(SortOrder.ASC);
tophits.sort(sorter);
}
aggregations.subAggregation(tophits);
// sort between buckets
if (StringUtils.isNotBlank(totalSortField)) {
MinAggregationBuilder minBuilder = AggregationBuilders.min(
"top_hit").field(totalSortField);
aggregations.subAggregation(minBuilder);
aggregations.order(Terms.Order.aggregation("top_hit", true));
}
if (sample) {
DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder(
"sample");
sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket);
sab.shardSize(maxURLsPerBucket * maxBucketNum);
sab.subAggregation(aggregations);
srb.addAggregation(sab);
} else {
srb.addAggregation(aggregations);
}
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
srb.setPreference("_shards:" + shardID);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, srb.toString());
timeStartESQuery = System.currentTimeMillis();
isInESQuery.set(true);
srb.execute(this);
}
|
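The fix above stops keeping lastDate as a pre-formatted String and stores it as a Date instead, formatting it only when the query is built. A minimal sketch of that distinction in plain Java; the format pattern used here is chosen for the sketch and does not claim to match the project's DATEFORMAT constant.

import java.text.SimpleDateFormat;
import java.util.Date;

public class LazyFormattingSketch {

    // keep the state as a Date ...
    private Date lastDate;

    public String buildQueryBound() {
        if (lastDate == null) {
            lastDate = new Date();
        }
        // ... and turn it into text only at query time
        return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss").format(lastDate);
    }

    public static void main(String[] args) {
        System.out.println(new LazyFormattingSketch().buildQueryBound());
    }
}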
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
// one ES client per JVM
synchronized (ElasticSearchSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("Assigned shard ID {}", shardID);
}
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 51
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
super.open(stormConf, context, collector);
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void onResponse(SearchResponse response) {
long timeTaken = System.currentTimeMillis() - timeLastQuery;
Aggregations aggregs = response.getAggregations();
if (aggregs == null) {
isInQuery.set(false);
return;
}
SingleBucketAggregation sample = aggregs.get("sample");
if (sample != null) {
aggregs = sample.getAggregations();
}
Terms agg = aggregs.get("partition");
int numhits = 0;
int numBuckets = 0;
int alreadyprocessed = 0;
Date mostRecentDateFound = null;
SimpleDateFormat formatter = new SimpleDateFormat(
"yyyy-MM-dd'T'HH:mm:ss.SSSX");
synchronized (buffer) {
// For each entry
Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg
.getBuckets().iterator();
while (iterator.hasNext()) {
Terms.Bucket entry = iterator.next();
String key = (String) entry.getKey(); // bucket key
long docCount = entry.getDocCount(); // Doc count
int hitsForThisBucket = 0;
// filter results so that we don't include URLs we are already
// being processed
TopHits topHits = entry.getAggregations().get("docs");
for (SearchHit hit : topHits.getHits().getHits()) {
hitsForThisBucket++;
Map<String, Object> keyValues = hit.getSourceAsMap();
String url = (String) keyValues.get("url");
LOG.debug("{} -> id [{}], _source [{}]", logIdprefix,
hit.getId(), hit.getSourceAsString());
// consider only the first document of the last bucket
// for optimising the nextFetchDate
if (hitsForThisBucket == 1 && !iterator.hasNext()) {
String strDate = (String) keyValues
.get("nextFetchDate");
try {
mostRecentDateFound = formatter.parse(strDate);
} catch (ParseException e) {
throw new RuntimeException("can't parse date :"
+ strDate);
}
}
// is already being processed - skip it!
if (beingProcessed.containsKey(url)) {
alreadyprocessed++;
continue;
}
Metadata metadata = fromKeyValues(keyValues);
buffer.add(new Values(url, metadata));
}
if (hitsForThisBucket > 0)
numBuckets++;
numhits += hitsForThisBucket;
LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix,
key, hitsForThisBucket, docCount, alreadyprocessed);
}
// Shuffle the URLs so that we don't get blocks of URLs from the
// same
// host or domain
Collections.shuffle((List) buffer);
}
LOG.info(
"{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed);
queryTimes.addMeasurement(timeTaken);
eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
eventCounter.scope("ES_queries").incrBy(1);
eventCounter.scope("ES_docs").incrBy(numhits);
// reset the value for next fetch date if the previous one is too old
if (resetFetchDateAfterNSecs != -1) {
Calendar diffCal = Calendar.getInstance();
diffCal.setTime(lastDate);
diffCal.add(Calendar.SECOND, resetFetchDateAfterNSecs);
// compare to now
if (diffCal.before(Calendar.getInstance())) {
LOG.info(
"{} lastDate set to null based on resetFetchDateAfterNSecs {}",
logIdprefix, resetFetchDateAfterNSecs);
lastDate = null;
}
}
// optimise the nextFetchDate by getting the most recent value
// returned in the query and add to it, unless the previous value is
// within n mins in which case we'll keep it
else if (mostRecentDateFound != null && recentDateIncrease >= 0) {
Calendar potentialNewDate = Calendar.getInstance();
potentialNewDate.setTime(mostRecentDateFound);
potentialNewDate.add(Calendar.MINUTE, recentDateIncrease);
Date oldDate = null;
// check boundaries
if (this.recentDateMinGap > 0) {
Calendar low = Calendar.getInstance();
low.setTime(lastDate);
low.add(Calendar.MINUTE, -recentDateMinGap);
Calendar high = Calendar.getInstance();
high.setTime(lastDate);
high.add(Calendar.MINUTE, recentDateMinGap);
if (high.before(potentialNewDate)
|| low.after(potentialNewDate)) {
oldDate = lastDate;
}
} else {
oldDate = lastDate;
}
if (oldDate != null) {
lastDate = potentialNewDate.getTime();
LOG.info(
"{} lastDate changed from {} to {} based on mostRecentDateFound {}",
logIdprefix, oldDate, lastDate, mostRecentDateFound);
} else {
LOG.info(
"{} lastDate kept at {} based on mostRecentDateFound {}",
logIdprefix, lastDate, mostRecentDateFound);
}
}
// change the date if we don't get any results at all
if (numBuckets == 0) {
lastDate = null;
}
// remove lock
isInQuery.set(false);
}
#location 88
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void onResponse(SearchResponse response) {
long timeTaken = System.currentTimeMillis() - timeLastQuery;
Aggregations aggregs = response.getAggregations();
if (aggregs == null) {
isInQuery.set(false);
return;
}
SingleBucketAggregation sample = aggregs.get("sample");
if (sample != null) {
aggregs = sample.getAggregations();
}
Terms agg = aggregs.get("partition");
int numhits = 0;
int numBuckets = 0;
int alreadyprocessed = 0;
Date mostRecentDateFound = null;
SimpleDateFormat formatter = new SimpleDateFormat(
"yyyy-MM-dd'T'HH:mm:ss.SSSX");
synchronized (buffer) {
// For each entry
Iterator<Terms.Bucket> iterator = (Iterator<Bucket>) agg
.getBuckets().iterator();
while (iterator.hasNext()) {
Terms.Bucket entry = iterator.next();
String key = (String) entry.getKey(); // bucket key
long docCount = entry.getDocCount(); // Doc count
int hitsForThisBucket = 0;
// filter results so that we don't include URLs we are already
// being processed
TopHits topHits = entry.getAggregations().get("docs");
for (SearchHit hit : topHits.getHits().getHits()) {
hitsForThisBucket++;
Map<String, Object> keyValues = hit.getSourceAsMap();
String url = (String) keyValues.get("url");
LOG.debug("{} -> id [{}], _source [{}]", logIdprefix,
hit.getId(), hit.getSourceAsString());
// consider only the first document of the last bucket
// for optimising the nextFetchDate
if (hitsForThisBucket == 1 && !iterator.hasNext()) {
String strDate = (String) keyValues
.get("nextFetchDate");
try {
mostRecentDateFound = formatter.parse(strDate);
} catch (ParseException e) {
throw new RuntimeException("can't parse date :"
+ strDate);
}
}
// is already being processed - skip it!
if (beingProcessed.containsKey(url)) {
alreadyprocessed++;
continue;
}
Metadata metadata = fromKeyValues(keyValues);
buffer.add(new Values(url, metadata));
}
if (hitsForThisBucket > 0)
numBuckets++;
numhits += hitsForThisBucket;
LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix,
key, hitsForThisBucket, docCount, alreadyprocessed);
}
// Shuffle the URLs so that we don't get blocks of URLs from the
// same
// host or domain
Collections.shuffle((List) buffer);
}
LOG.info(
"{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed);
queryTimes.addMeasurement(timeTaken);
eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
eventCounter.scope("ES_queries").incrBy(1);
eventCounter.scope("ES_docs").incrBy(numhits);
// optimise the nextFetchDate by getting the most recent value
// returned in the query and add to it, unless the previous value is
// within n mins in which case we'll keep it
if (mostRecentDateFound != null && recentDateIncrease >= 0) {
Calendar potentialNewDate = Calendar.getInstance();
potentialNewDate.setTime(mostRecentDateFound);
potentialNewDate.add(Calendar.MINUTE, recentDateIncrease);
Date oldDate = null;
// check boundaries
if (this.recentDateMinGap > 0) {
Calendar low = Calendar.getInstance();
low.setTime(queryDate);
low.add(Calendar.MINUTE, -recentDateMinGap);
Calendar high = Calendar.getInstance();
high.setTime(queryDate);
high.add(Calendar.MINUTE, recentDateMinGap);
if (high.before(potentialNewDate)
|| low.after(potentialNewDate)) {
oldDate = queryDate;
}
} else {
oldDate = queryDate;
}
if (oldDate != null) {
queryDate = potentialNewDate.getTime();
LOG.info(
"{} lastDate changed from {} to {} based on mostRecentDateFound {}",
logIdprefix, oldDate, queryDate, mostRecentDateFound);
} else {
LOG.info(
"{} lastDate kept at {} based on mostRecentDateFound {}",
logIdprefix, queryDate, mostRecentDateFound);
}
}
// reset the value for next fetch date if the previous one is too old
if (resetFetchDateAfterNSecs != -1) {
Instant changeNeededOn = Instant.ofEpochMilli(lastTimeResetToNOW
.toEpochMilli() + (resetFetchDateAfterNSecs * 1000));
if (Instant.now().isAfter(changeNeededOn)) {
LOG.info(
"{} lastDate set to null based on resetFetchDateAfterNSecs {}",
logIdprefix, resetFetchDateAfterNSecs);
queryDate = null;
}
}
// change the date if we don't get any results at all
if (numBuckets == 0) {
queryDate = null;
}
// remove lock
isInQuery.set(false);
}
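
The patched handler above stops deriving the reset decision from the shared date field itself: queryDate replaces lastDate, and the reset window is measured from lastTimeResetToNOW, an Instant recorded when the spout last (re)initialised the date. A minimal, self-contained sketch of that pattern follows; the class and method signatures are invented for illustration and are not the StormCrawler API.

import java.time.Instant;
import java.util.Date;
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative sketch only: the spout thread owns queryDate, the async
// response handler may clear it, and the reset decision is computed from an
// Instant captured at initialisation rather than from the mutable date field.
class QueryDateResetSketch {
    private volatile Date queryDate;
    private volatile Instant lastTimeResetToNOW = Instant.EPOCH;
    private final AtomicBoolean isInQuery = new AtomicBoolean(false);

    void populateBuffer() {
        if (queryDate == null) {
            queryDate = new Date();
            lastTimeResetToNOW = Instant.now();
        }
        if (isInQuery.compareAndSet(false, true)) {
            // ... issue the asynchronous search bounded by queryDate ...
        }
    }

    void onResponse(int numBuckets, int resetFetchDateAfterNSecs) {
        if (resetFetchDateAfterNSecs != -1) {
            Instant changeNeededOn = lastTimeResetToNOW
                    .plusSeconds(resetFetchDateAfterNSecs);
            if (Instant.now().isAfter(changeNeededOn)) {
                queryDate = null; // next populateBuffer() starts from "now"
            }
        }
        if (numBuckets == 0) {
            queryDate = null;
        }
        isInQuery.set(false); // release the guard last
    }
}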
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void execute(Tuple input) {
// main thread in charge of acking and failing
// see
// https://github.com/nathanmarz/storm/wiki/Troubleshooting#nullpointerexception-from-deep-inside-storm
int acked = 0;
int failed = 0;
int emitted = 0;
// emit with or without anchors
// before acking
synchronized (emitQueue) {
for (Object[] toemit : this.emitQueue) {
String streamID = (String) toemit[0];
Tuple anchor = (Tuple) toemit[1];
Values vals = (Values) toemit[2];
if (anchor == null)
_collector.emit(streamID, vals);
else
_collector.emit(streamID, Arrays.asList(anchor), vals);
}
emitted = emitQueue.size();
emitQueue.clear();
}
// have a tick tuple to make sure we don't get starved
synchronized (ackQueue) {
for (Tuple toack : this.ackQueue) {
_collector.ack(toack);
}
acked = ackQueue.size();
ackQueue.clear();
}
synchronized (failQueue) {
for (Tuple toack : this.failQueue) {
_collector.fail(toack);
}
failed = failQueue.size();
failQueue.clear();
}
if (acked + failed + emitted > 0)
LOG.info("[Fetcher #" + taskIndex + "] Acked : " + acked
+ "\tFailed : " + failed + "\tEmitted : " + emitted);
if (isTickTuple(input)) {
_collector.ack(input);
return;
}
CountMetric metric = metricGauge.scope("activethreads");
metric.getValueAndReset();
metric.incrBy(this.activeThreads.get());
metric = metricGauge.scope("in queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.inQueues.get());
metric = metricGauge.scope("queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.queues.size());
LOG.info("[Fetcher #" + taskIndex + "] Threads : "
+ this.activeThreads.get() + "\tqueues : "
+ this.fetchQueues.queues.size() + "\tin_queues : "
+ this.fetchQueues.inQueues.get());
String url = input.getStringByField("url");
// check whether this tuple has a url field
if (url == null) {
LOG.info("[Fetcher #" + taskIndex
+ "] Missing url field for tuple " + input);
// ignore silently
_collector.ack(input);
return;
}
fetchQueues.addFetchItem(input);
}
#location 81
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void execute(Tuple input) {
// triggered by the arrival of a tuple
// be it a tick or normal one
flushQueues();
if (isTickTuple(input)) {
_collector.ack(input);
return;
}
CountMetric metric = metricGauge.scope("activethreads");
metric.getValueAndReset();
metric.incrBy(this.activeThreads.get());
metric = metricGauge.scope("in queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.inQueues.get());
metric = metricGauge.scope("queues");
metric.getValueAndReset();
metric.incrBy(this.fetchQueues.queues.size());
LOG.info("[Fetcher #" + taskIndex + "] Threads : "
+ this.activeThreads.get() + "\tqueues : "
+ this.fetchQueues.queues.size() + "\tin_queues : "
+ this.fetchQueues.inQueues.get());
String url = input.getStringByField("url");
// check whether this tuple has a url field
if (url == null) {
LOG.info("[Fetcher #" + taskIndex
+ "] Missing url field for tuple " + input);
// ignore silently
_collector.ack(input);
return;
}
fetchQueues.addFetchItem(input);
}
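
The patched execute() above pushes all queue draining into flushQueues(), called for every incoming tuple whether it is a tick or a regular one. That method's body is not part of this entry; the sketch below reconstructs it from the three synchronized drain loops removed from the vulnerable version, so it should be read as an approximation rather than the actual StormCrawler implementation, and it relies on the bolt's existing emitQueue, ackQueue, failQueue, _collector, taskIndex and LOG members.

    // Approximate reconstruction of flushQueues(), assembled from the loops
    // that the patch removed from execute(); the real method may differ.
    private void flushQueues() {
        int acked = 0;
        int failed = 0;
        int emitted = 0;
        // emit with or without anchors before acking
        synchronized (emitQueue) {
            for (Object[] toemit : emitQueue) {
                String streamID = (String) toemit[0];
                Tuple anchor = (Tuple) toemit[1];
                Values vals = (Values) toemit[2];
                if (anchor == null)
                    _collector.emit(streamID, vals);
                else
                    _collector.emit(streamID, Arrays.asList(anchor), vals);
            }
            emitted = emitQueue.size();
            emitQueue.clear();
        }
        synchronized (ackQueue) {
            for (Tuple toack : ackQueue) {
                _collector.ack(toack);
            }
            acked = ackQueue.size();
            ackQueue.clear();
        }
        synchronized (failQueue) {
            for (Tuple tofail : failQueue) {
                _collector.fail(tofail);
            }
            failed = failQueue.size();
            failQueue.clear();
        }
        if (acked + failed + emitted > 0)
            LOG.info("[Fetcher #" + taskIndex + "] Acked : " + acked
                    + "\tFailed : " + failed + "\tEmitted : " + emitted);
    }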
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (lastDate == null) {
lastDate = new Date();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
lastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(String.format(DATEFORMAT, lastDate));
SearchRequestBuilder srb = client.prepareSearch(indexName)
.setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(queryBuilder).setFrom(lastStartOffset)
.setSize(maxBucketNum).setExplain(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
srb.setPreference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(totalSortField)
.order(SortOrder.ASC);
srb.addSort(sorter);
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
srb.setCollapse(collapse);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, srb.toString());
timeStartESQuery = System.currentTimeMillis();
isInESQuery.set(true);
srb.execute(this);
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (lastDate == null) {
lastDate = new Date();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
String formattedLastDate = String.format(DATEFORMAT, lastDate);
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
SearchRequestBuilder srb = client.prepareSearch(indexName)
.setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(queryBuilder).setFrom(lastStartOffset)
.setSize(maxBucketNum).setExplain(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
srb.setPreference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(totalSortField)
.order(SortOrder.ASC);
srb.addSort(sorter);
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
srb.setCollapse(collapse);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, srb.toString());
timeStartESQuery = System.currentTimeMillis();
isInESQuery.set(true);
srb.execute(this);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void execute(Tuple tuple) {
String url = tuple.getStringByField("url");
// Distinguish the value used for indexing
// from the one used for the status
String normalisedurl = valueForURL(tuple);
LOG.info("Indexing {} as {}", url, normalisedurl);
Metadata metadata = (Metadata) tuple.getValueByField("metadata");
String text = tuple.getStringByField("text");
boolean keep = filterDocument(metadata);
if (!keep) {
LOG.info("Filtered {}", url);
eventCounter.scope("Filtered").incrBy(1);
// treat it as successfully processed even if
// we do not index it
_collector.emit(StatusStreamName, tuple, new Values(url, metadata,
Status.FETCHED));
_collector.ack(tuple);
return;
}
String docID = org.apache.commons.codec.digest.DigestUtils
.sha256Hex(normalisedurl);
try {
XContentBuilder builder = jsonBuilder().startObject();
// display text of the document?
if (fieldNameForText() != null) {
builder.field(fieldNameForText(), trimText(text));
}
// send URL as field?
if (fieldNameForURL() != null) {
builder.field(fieldNameForURL(), normalisedurl);
}
// which metadata to display?
Map<String, String[]> keyVals = filterMetadata(metadata);
Iterator<String> iterator = keyVals.keySet().iterator();
while (iterator.hasNext()) {
String fieldName = iterator.next();
String[] values = keyVals.get(fieldName);
if (values.length == 1) {
builder.field(fieldName, values[0]);
} else if (values.length > 1) {
builder.array(fieldName, values);
}
}
builder.endObject();
IndexRequest indexRequest = new IndexRequest(
getIndexName(metadata), docType, docID).source(builder);
DocWriteRequest.OpType optype = DocWriteRequest.OpType.INDEX;
if (create) {
optype = DocWriteRequest.OpType.CREATE;
}
indexRequest.opType(optype);
if (pipeline != null) {
indexRequest.setPipeline(pipeline);
}
connection.getProcessor().add(indexRequest);
eventCounter.scope("Indexed").incrBy(1);
perSecMetrics.scope("Indexed").update(1);
synchronized (waitAck) {
waitAck.put(docID, tuple);
}
} catch (IOException e) {
LOG.error("Error building document for ES", e);
// do not send to status stream so that it gets replayed
_collector.fail(tuple);
if (docID != null) {
synchronized (waitAck) {
waitAck.invalidate(docID);
}
}
}
}
#location 60
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void execute(Tuple tuple) {
String url = tuple.getStringByField("url");
// Distinguish the value used for indexing
// from the one used for the status
String normalisedurl = valueForURL(tuple);
LOG.info("Indexing {} as {}", url, normalisedurl);
Metadata metadata = (Metadata) tuple.getValueByField("metadata");
String text = tuple.getStringByField("text");
boolean keep = filterDocument(metadata);
if (!keep) {
LOG.info("Filtered {}", url);
eventCounter.scope("Filtered").incrBy(1);
// treat it as successfully processed even if
// we do not index it
_collector.emit(StatusStreamName, tuple, new Values(url, metadata,
Status.FETCHED));
_collector.ack(tuple);
return;
}
String docID = org.apache.commons.codec.digest.DigestUtils
.sha256Hex(normalisedurl);
try {
XContentBuilder builder = jsonBuilder().startObject();
// display text of the document?
if (fieldNameForText() != null) {
builder.field(fieldNameForText(), trimText(text));
}
// send URL as field?
if (fieldNameForURL() != null) {
builder.field(fieldNameForURL(), normalisedurl);
}
// which metadata to display?
Map<String, String[]> keyVals = filterMetadata(metadata);
Iterator<String> iterator = keyVals.keySet().iterator();
while (iterator.hasNext()) {
String fieldName = iterator.next();
String[] values = keyVals.get(fieldName);
if (values.length == 1) {
builder.field(fieldName, values[0]);
} else if (values.length > 1) {
builder.array(fieldName, values);
}
}
builder.endObject();
String sha256hex = org.apache.commons.codec.digest.DigestUtils
.sha256Hex(normalisedurl);
IndexRequest indexRequest = new IndexRequest(getIndexName(metadata))
.source(builder).id(sha256hex);
DocWriteRequest.OpType optype = DocWriteRequest.OpType.INDEX;
if (create) {
optype = DocWriteRequest.OpType.CREATE;
}
indexRequest.opType(optype);
if (pipeline != null) {
indexRequest.setPipeline(pipeline);
}
connection.getProcessor().add(indexRequest);
eventCounter.scope("Indexed").incrBy(1);
perSecMetrics.scope("Indexed").update(1);
synchronized (waitAck) {
waitAck.put(docID, tuple);
}
} catch (IOException e) {
LOG.error("Error building document for ES", e);
// do not send to status stream so that it gets replayed
_collector.fail(tuple);
if (docID != null) {
synchronized (waitAck) {
waitAck.invalidate(docID);
}
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
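
Several open() patches in this set make the same move: the per-JVM Elasticsearch client creation, the collector assignment and the metric registration leave the concrete spout and are inherited via super.open(...). The sketch below reconstructs the rough shape of such a shared parent from the lines the patches remove; the class name and the createClient(...) hook are assumptions made for illustration, and the client type is deliberately left abstract because the entries above use different client flavours.

import java.util.Map;

import org.apache.storm.metric.api.MultiCountMetric;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.base.BaseRichSpout;

// Illustrative base spout gathering the code every patched open() stops
// duplicating. Names are invented; this is not the StormCrawler AbstractSpout.
public abstract class SharedClientSpoutSketch extends BaseRichSpout {

    // one Elasticsearch client per JVM; concrete type deliberately elided
    protected static Object client;
    protected SpoutOutputCollector _collector;
    protected MultiCountMetric eventCounter;

    @Override
    public void open(Map stormConf, TopologyContext context,
            SpoutOutputCollector collector) {
        synchronized (SharedClientSpoutSketch.class) {
            if (client == null) {
                try {
                    client = createClient(stormConf);
                } catch (Exception e) {
                    throw new RuntimeException("Can't connect to ElasticSearch", e);
                }
            }
        }
        _collector = collector;
        eventCounter = context.registerMetric("counters",
                new MultiCountMetric(), 10);
    }

    // hypothetical hook: concrete spouts supply the real connection code
    protected abstract Object createClient(Map stormConf) throws Exception;
}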
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 46
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
// one ES client per JVM
synchronized (ElasticSearchSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("Assigned shard ID {}", shardID);
}
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 15
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
super.open(stormConf, context, collector);
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 13
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
// one ES client per JVM
synchronized (ElasticSearchSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("Assigned shard ID {}", shardID);
}
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 52
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
super.open(stormConf, context, collector);
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void nextTuple() {
// inactive?
if (active == false)
return;
// have anything in the buffer?
if (!buffer.isEmpty()) {
Values fields = buffer.remove();
String url = fields.get(0).toString();
beingProcessed.add(url);
this._collector.emit(fields, url);
eventCounter.scope("emitted").incrBy(1);
return;
}
// re-populate the buffer
populateBuffer();
}
#location 15
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void nextTuple() {
// inactive?
if (active == false)
return;
// have anything in the buffer?
if (!buffer.isEmpty()) {
Values fields = buffer.remove();
String url = fields.get(0).toString();
beingProcessed.add(url);
_collector.emit(fields, url);
eventCounter.scope("emitted").incrBy(1);
return;
}
// re-populate the buffer
populateBuffer();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 64
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
protected void populateBuffer() {
if (scrollId == null) {
// scrollID is null because all the documents have been exhausted
if (hasStarted) {
Utils.sleep(10);
return;
}
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.query(QueryBuilders.matchAllQuery());
searchSourceBuilder.size(maxURLsPerBucket * maxBucketNum);
SearchRequest searchRequest = new SearchRequest(indexName);
searchRequest.source(searchSourceBuilder);
searchRequest.scroll(TimeValue.timeValueMinutes(5L));
if (shardID != -1) {
searchRequest.preference("_shards:" + shardID);
}
isInQuery.set(true);
client.searchAsync(searchRequest, RequestOptions.DEFAULT, this);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, searchRequest.toString());
return;
}
SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId);
scrollRequest.scroll(TimeValue.timeValueMinutes(5L));
isInQuery.set(true);
client.scrollAsync(scrollRequest, RequestOptions.DEFAULT, this);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, scrollRequest.toString());
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
protected void populateBuffer() {
if (hasFinished) {
Utils.sleep(10);
return;
}
// initial request
if (scrollId == null) {
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.query(QueryBuilders.matchAllQuery());
searchSourceBuilder.size(maxURLsPerBucket * maxBucketNum);
SearchRequest searchRequest = new SearchRequest(indexName);
searchRequest.source(searchSourceBuilder);
searchRequest.scroll(TimeValue.timeValueMinutes(5L));
if (shardID != -1) {
searchRequest.preference("_shards:" + shardID);
}
isInQuery.set(true);
client.searchAsync(searchRequest, RequestOptions.DEFAULT, this);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, searchRequest.toString());
return;
}
SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId);
scrollRequest.scroll(TimeValue.timeValueMinutes(5L));
isInQuery.set(true);
client.scrollAsync(scrollRequest, RequestOptions.DEFAULT, this);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, scrollRequest.toString());
}
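
In the scroll-based variant above, the patch checks a dedicated hasFinished flag before anything else instead of inferring exhaustion from a cleared scrollId combined with a hasStarted marker. A stripped-down sketch of that state split follows; the field names mirror the patch, but the surrounding class and the handler signature are invented for illustration.

// Illustrative state split: scrollId only identifies the open scroll context,
// while a separate flag records exhaustion, set by the async response handler.
class ScrollStateSketch {
    private volatile String scrollId;
    private volatile boolean hasFinished;

    void populateBuffer() {
        if (hasFinished) {
            return; // nothing left to page through
        }
        if (scrollId == null) {
            // issue the initial search asynchronously; the response handler
            // records the scroll id returned by Elasticsearch
            return;
        }
        // otherwise continue the existing scroll identified by scrollId
    }

    void onResponse(String newScrollId, int hitCount) {
        if (hitCount == 0) {
            hasFinished = true; // mark exhaustion instead of clearing scrollId
        } else {
            scrollId = newScrollId;
        }
    }
}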
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
protected void populateBuffer() {
if (lastDate == null) {
lastDate = new Date();
}
String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print(
lastDate.getTime());
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
if (filterQuery != null) {
queryBuilder = boolQuery().must(queryBuilder).filter(
QueryBuilders.queryStringQuery(filterQuery));
}
SearchRequest request = new SearchRequest(indexName).types(docType)
.searchType(SearchType.QUERY_THEN_FETCH);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
sourceBuilder.query(queryBuilder);
sourceBuilder.from(0);
sourceBuilder.size(0);
sourceBuilder.explain(false);
sourceBuilder.trackTotalHits(false);
TermsAggregationBuilder aggregations = AggregationBuilders
.terms("partition").field(partitionField).size(maxBucketNum);
TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs")
.size(maxURLsPerBucket).explain(false);
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField)
.order(SortOrder.ASC);
tophits.sort(sorter);
}
aggregations.subAggregation(tophits);
// sort between buckets
if (StringUtils.isNotBlank(totalSortField)) {
MinAggregationBuilder minBuilder = AggregationBuilders.min(
"top_hit").field(totalSortField);
aggregations.subAggregation(minBuilder);
aggregations.order(BucketOrder.aggregation("top_hit", true));
}
if (sample) {
DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder(
"sample");
sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket);
sab.shardSize(maxURLsPerBucket * maxBucketNum);
sab.subAggregation(aggregations);
sourceBuilder.aggregation(sab);
} else {
sourceBuilder.aggregation(aggregations);
}
request.source(sourceBuilder);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
request.preference("_shards:" + shardID);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, request.toString());
isInQuery.set(true);
client.searchAsync(request, this);
}
#location 9
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
protected void populateBuffer() {
if (queryDate == null) {
queryDate = new Date();
lastTimeResetToNOW = Instant.now();
}
String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print(
queryDate.getTime());
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
if (filterQuery != null) {
queryBuilder = boolQuery().must(queryBuilder).filter(
QueryBuilders.queryStringQuery(filterQuery));
}
SearchRequest request = new SearchRequest(indexName).types(docType)
.searchType(SearchType.QUERY_THEN_FETCH);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
sourceBuilder.query(queryBuilder);
sourceBuilder.from(0);
sourceBuilder.size(0);
sourceBuilder.explain(false);
sourceBuilder.trackTotalHits(false);
TermsAggregationBuilder aggregations = AggregationBuilders
.terms("partition").field(partitionField).size(maxBucketNum);
TopHitsAggregationBuilder tophits = AggregationBuilders.topHits("docs")
.size(maxURLsPerBucket).explain(false);
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField)
.order(SortOrder.ASC);
tophits.sort(sorter);
}
aggregations.subAggregation(tophits);
// sort between buckets
if (StringUtils.isNotBlank(totalSortField)) {
MinAggregationBuilder minBuilder = AggregationBuilders.min(
"top_hit").field(totalSortField);
aggregations.subAggregation(minBuilder);
aggregations.order(BucketOrder.aggregation("top_hit", true));
}
if (sample) {
DiversifiedAggregationBuilder sab = new DiversifiedAggregationBuilder(
"sample");
sab.field(partitionField).maxDocsPerValue(maxURLsPerBucket);
sab.shardSize(maxURLsPerBucket * maxBucketNum);
sab.subAggregation(aggregations);
sourceBuilder.aggregation(sab);
} else {
sourceBuilder.aggregation(aggregations);
}
request.source(sourceBuilder);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
request.preference("_shards:" + shardID);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, request.toString());
isInQuery.set(true);
client.searchAsync(request, this);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (lastDate == null) {
lastDate = new Date();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
lastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(String.format(DATEFORMAT, lastDate));
SearchRequestBuilder srb = client.prepareSearch(indexName)
.setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(queryBuilder).setFrom(lastStartOffset)
.setSize(maxBucketNum).setExplain(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
srb.setPreference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(totalSortField)
.order(SortOrder.ASC);
srb.addSort(sorter);
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
srb.setCollapse(collapse);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, srb.toString());
timeStartESQuery = System.currentTimeMillis();
isInESQuery.set(true);
srb.execute(this);
}
#location 17
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (lastDate == null) {
lastDate = new Date();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
String formattedLastDate = String.format(DATEFORMAT, lastDate);
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
SearchRequestBuilder srb = client.prepareSearch(indexName)
.setTypes(docType).setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(queryBuilder).setFrom(lastStartOffset)
.setSize(maxBucketNum).setExplain(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
srb.setPreference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
FieldSortBuilder sorter = SortBuilders.fieldSort(totalSortField)
.order(SortOrder.ASC);
srb.addSort(sorter);
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
srb.setCollapse(collapse);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, srb.toString());
timeStartESQuery = System.currentTimeMillis();
isInESQuery.set(true);
srb.execute(this);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 21
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private List<RegexRule> readRules(String rulesFile) {
List<RegexRule> rules = new ArrayList<RegexRule>();
try {
InputStream regexStream = getClass().getClassLoader()
.getResourceAsStream(rulesFile);
Reader reader = new InputStreamReader(regexStream, "UTF-8");
BufferedReader in = new BufferedReader(reader);
String line;
while ((line = in.readLine()) != null) {
if (line.length() == 0) {
continue;
}
char first = line.charAt(0);
boolean sign = false;
switch (first) {
case '+':
sign = true;
break;
case '-':
sign = false;
break;
case ' ':
case '\n':
case '#': // skip blank & comment lines
continue;
default:
throw new IOException("Invalid first character: " + line);
}
String regex = line.substring(1);
LOG.trace("Adding rule [{}]", regex);
RegexRule rule = createRule(sign, regex);
rules.add(rule);
}
} catch (IOException e) {
LOG.error("There was an error reading the default-regex-filters file");
e.printStackTrace();
}
return rules;
}
#location 39
#vulnerability type RESOURCE_LEAK
|
#fixed code
private List<RegexRule> readRules(String rulesFile) {
List<RegexRule> rules = new ArrayList<RegexRule>();
try {
InputStream regexStream = getClass().getClassLoader()
.getResourceAsStream(rulesFile);
Reader reader = new InputStreamReader(regexStream, StandardCharsets.UTF_8);
BufferedReader in = new BufferedReader(reader);
String line;
while ((line = in.readLine()) != null) {
if (line.length() == 0) {
continue;
}
char first = line.charAt(0);
boolean sign = false;
switch (first) {
case '+':
sign = true;
break;
case '-':
sign = false;
break;
case ' ':
case '\n':
case '#': // skip blank & comment lines
continue;
default:
throw new IOException("Invalid first character: " + line);
}
String regex = line.substring(1);
LOG.trace("Adding rule [{}]", regex);
RegexRule rule = createRule(sign, regex);
rules.add(rule);
}
} catch (IOException e) {
LOG.error("There was an error reading the default-regex-filters file");
e.printStackTrace();
}
return rules;
}
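
One observation on the entry above: the recorded fix only swaps the charset string for StandardCharsets.UTF_8, and neither version closes the reader or the classpath stream, so the resource flagged as leaking is still never released inside this method. A try-with-resources variant keeping the original parsing logic, and reusing the class's RegexRule, createRule and LOG members, would look roughly like this:

    // Sketch: identical parsing, but the stream and reader are closed via
    // try-with-resources on both the success and the exception paths.
    private List<RegexRule> readRules(String rulesFile) {
        List<RegexRule> rules = new ArrayList<RegexRule>();
        try (InputStream regexStream = getClass().getClassLoader()
                .getResourceAsStream(rulesFile);
                Reader reader = new InputStreamReader(regexStream,
                        StandardCharsets.UTF_8);
                BufferedReader in = new BufferedReader(reader)) {
            String line;
            while ((line = in.readLine()) != null) {
                if (line.length() == 0) {
                    continue;
                }
                char first = line.charAt(0);
                boolean sign = false;
                switch (first) {
                case '+':
                    sign = true;
                    break;
                case '-':
                    sign = false;
                    break;
                case ' ':
                case '\n':
                case '#': // skip blank & comment lines
                    continue;
                default:
                    throw new IOException("Invalid first character: " + line);
                }
                String regex = line.substring(1);
                LOG.trace("Adding rule [{}]", regex);
                rules.add(createRule(sign, regex));
            }
        } catch (IOException e) {
            LOG.error("There was an error reading the default-regex-filters file",
                    e);
        }
        return rules;
    }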
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void nextTuple() {
// inactive?
if (active == false)
return;
// have anything in the buffer?
if (!buffer.isEmpty()) {
Values fields = buffer.remove();
String url = fields.get(0).toString();
beingProcessed.add(url);
this._collector.emit(fields, url);
eventCounter.scope("emitted").incrBy(1);
return;
}
// re-populate the buffer
populateBuffer();
}
#location 21
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void nextTuple() {
// inactive?
if (active == false)
return;
// have anything in the buffer?
if (!buffer.isEmpty()) {
Values fields = buffer.remove();
String url = fields.get(0).toString();
beingProcessed.add(url);
_collector.emit(fields, url);
eventCounter.scope("emitted").incrBy(1);
return;
}
// re-populate the buffer
populateBuffer();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (lastDate == null) {
lastDate = new Date();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print(
lastDate.getTime());
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
if (filterQuery != null) {
queryBuilder = boolQuery().must(queryBuilder).filter(
QueryBuilders.queryStringQuery(filterQuery));
}
SearchRequest request = new SearchRequest(indexName).types(docType)
.searchType(SearchType.QUERY_THEN_FETCH);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
sourceBuilder.query(queryBuilder);
sourceBuilder.from(lastStartOffset);
sourceBuilder.size(maxBucketNum);
sourceBuilder.explain(false);
sourceBuilder.trackTotalHits(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
request.preference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
sourceBuilder.sort(new FieldSortBuilder(totalSortField)
.order(SortOrder.ASC));
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
sourceBuilder.collapse(collapse);
request.source(sourceBuilder);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, request.toString());
isInQuery.set(true);
client.searchAsync(request, this);
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
protected void populateBuffer() {
// not used yet or returned empty results
if (queryDate == null) {
queryDate = new Date();
lastTimeResetToNOW = Instant.now();
lastStartOffset = 0;
}
// been running same query for too long and paging deep?
else if (maxStartOffset != -1 && lastStartOffset > maxStartOffset) {
LOG.info("Reached max start offset {}", lastStartOffset);
lastStartOffset = 0;
}
String formattedLastDate = ISODateTimeFormat.dateTimeNoMillis().print(
queryDate.getTime());
LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix,
formattedLastDate);
QueryBuilder queryBuilder = QueryBuilders.rangeQuery("nextFetchDate")
.lte(formattedLastDate);
if (filterQuery != null) {
queryBuilder = boolQuery().must(queryBuilder).filter(
QueryBuilders.queryStringQuery(filterQuery));
}
SearchRequest request = new SearchRequest(indexName).types(docType)
.searchType(SearchType.QUERY_THEN_FETCH);
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
sourceBuilder.query(queryBuilder);
sourceBuilder.from(lastStartOffset);
sourceBuilder.size(maxBucketNum);
sourceBuilder.explain(false);
sourceBuilder.trackTotalHits(false);
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
// _shards:2,3
if (shardID != -1) {
request.preference("_shards:" + shardID);
}
if (StringUtils.isNotBlank(totalSortField)) {
sourceBuilder.sort(new FieldSortBuilder(totalSortField)
.order(SortOrder.ASC));
}
CollapseBuilder collapse = new CollapseBuilder(partitionField);
// group expansion -> sends sub queries for each bucket
if (maxURLsPerBucket > 1) {
InnerHitBuilder ihb = new InnerHitBuilder();
ihb.setSize(maxURLsPerBucket);
ihb.setName("urls_per_bucket");
// sort within a bucket
if (StringUtils.isNotBlank(bucketSortField)) {
List<SortBuilder<?>> sorts = new LinkedList<>();
FieldSortBuilder bucketsorter = SortBuilders.fieldSort(
bucketSortField).order(SortOrder.ASC);
sorts.add(bucketsorter);
ihb.setSorts(sorts);
}
collapse.setInnerHits(ihb);
}
sourceBuilder.collapse(collapse);
request.source(sourceBuilder);
// dump query to log
LOG.debug("{} ES query {}", logIdprefix, request.toString());
isInQuery.set(true);
client.searchAsync(request, this);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
// one ES client per JVM
synchronized (ElasticSearchSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("Assigned shard ID {}", shardID);
}
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 58
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxInflightParamName, 1);
maxBufferSize = ConfUtils.getInt(stormConf,
ESStatusBufferSizeParamName, 100);
randomSort = ConfUtils.getBoolean(stormConf, ESRandomSortParamName,
true);
maxSecSinceQueriedDate = ConfUtils.getInt(stormConf,
ESMaxSecsSinceQueriedDateParamName, -1);
sortField = ConfUtils.getString(stormConf, ESStatusSortFieldParamName,
"nextFetchDate");
super.open(stormConf, context, collector);
partitioner = new URLPartitioner();
partitioner.configure(stormConf);
context.registerMetric("beingProcessed", new IMetric() {
@Override
public Object getValueAndReset() {
return beingProcessed.size();
}
}, 10);
}
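For this entry, the THREAD_SAFETY_VIOLATION is addressed by removing the spout-local synchronized block that lazily creates the shared Elasticsearch client and delegating that work, along with shard assignment and the common metrics, to super.open(stormConf, context, collector). The sketch below shows the general shape of such a refactoring; it is an assumption-laden illustration, not the actual StormCrawler base class, and AbstractSpoutBase, StatusSpoutSketch and Client are hypothetical placeholders.

import java.util.Map;

// Hypothetical sketch: shared, synchronisation-sensitive initialisation
// lives in one base class that every spout reaches through super.open().
abstract class AbstractSpoutBase {
    // One client per JVM, shared by every spout task in the worker.
    protected static Client client;

    protected void open(Map<String, Object> conf) {
        // Guard the lazy creation with the class monitor so that two tasks
        // opened concurrently cannot each create their own client.
        synchronized (AbstractSpoutBase.class) {
            if (client == null) {
                client = Client.connect(conf);
            }
        }
        // Shard assignment and metric registration would also live here,
        // so the locking policy is defined in exactly one place.
    }
}

class StatusSpoutSketch extends AbstractSpoutBase {
    @Override
    protected void open(Map<String, Object> conf) {
        // Read spout-specific settings first, then defer the shared work.
        super.open(conf);
    }
}

// Placeholder type so the sketch is self-contained and compiles.
class Client {
    static Client connect(Map<String, Object> conf) {
        return new Client();
    }
}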
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
"status");
docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
"status");
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
// one ES client per JVM
synchronized (AggregationSpout.class) {
try {
if (client == null) {
client = ElasticSearchConnection.getClient(stormConf,
ESBoltType);
}
} catch (Exception e1) {
LOG.error("Can't connect to ElasticSearch", e1);
throw new RuntimeException(e1);
}
}
// if more than one instance is used we expect their number to be the
// same as the number of shards
int totalTasks = context
.getComponentTasks(context.getThisComponentId()).size();
if (totalTasks > 1) {
logIdprefix = "[" + context.getThisComponentId() + " #"
+ context.getThisTaskIndex() + "] ";
// determine the number of shards so that we can restrict the
// search
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest(
indexName);
ClusterSearchShardsResponse shardresponse = client.admin()
.cluster().searchShards(request).actionGet();
ClusterSearchShardsGroup[] shardgroups = shardresponse.getGroups();
if (totalTasks != shardgroups.length) {
throw new RuntimeException(
"Number of ES spout instances should be the same as number of shards ("
+ shardgroups.length + ") but is " + totalTasks);
}
shardID = shardgroups[context.getThisTaskIndex()].getShardId();
LOG.info("{} assigned shard ID {}", logIdprefix, shardID);
}
_collector = collector;
this.eventCounter = context.registerMetric("counters",
new MultiCountMetric(), 10);
context.registerMetric("buffer_size", new IMetric() {
@Override
public Object getValueAndReset() {
return buffer.size();
}
}, 10);
}
#location 60
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void open(Map stormConf, TopologyContext context,
SpoutOutputCollector collector) {
partitionField = ConfUtils.getString(stormConf,
ESStatusRoutingFieldParamName);
bucketSortField = ConfUtils.getString(stormConf,
ESStatusBucketSortFieldParamName, bucketSortField);
totalSortField = ConfUtils.getString(stormConf,
ESStatusGlobalSortFieldParamName);
maxURLsPerBucket = ConfUtils.getInt(stormConf,
ESStatusMaxURLsParamName, 1);
maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
10);
minDelayBetweenQueries = ConfUtils.getLong(stormConf,
ESStatusMinDelayParamName, 2000);
super.open(stormConf, context, collector);
}
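This entry is the AggregationSpout counterpart of the previous one: only the spout-specific settings (partition field, bucket sort fields, bucket and URL limits, minimum delay between queries) are read locally, and everything that touches shared state, including the synchronized client creation and the shard check, is again handed off to super.open(stormConf, context, collector), following the same base-class pattern sketched after the previous entry.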
|
Below is the vulnerable code, please generate the patch based on the following information.
|