output stringlengths 64–73.2k | input stringlengths 208–73.3k | instruction stringclasses 1 value |
---|---|---|
#fixed code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommittedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % bucketNumber)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
} | #vulnerable code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % bucketNumber)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
}
#location 2
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
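The THREAD_SAFETY_VIOLATION rows in this dump revolve around shared mutable state (here the bucket array and the first-uncommitted counters) being read and written without a consistent guard. As a minimal, generic sketch of the pattern such fixes aim at (illustrative only, not code from the dataset), a shared counter can be protected with an atomic type instead of a plain field:

```java
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical shared watermark advanced by several threads.
class SharedWatermark {
    // A plain long here could expose stale or torn reads across threads,
    // which is the kind of race these rows are flagged for.
    private final AtomicLong largestDeleted = new AtomicLong();

    // Atomically raise the watermark, keeping the maximum seen so far.
    void raiseTo(long id) {
        largestDeleted.accumulateAndGet(id, Math::max);
    }

    long current() {
        return largestDeleted.get();
    }
}
```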
#fixed code
@Test public void testMultiPutSameRow() throws Exception {
try{
TransactionManager tm = new TransactionManager(hbaseConf);
TTable table1 = new TTable(hbaseConf, TEST_TABLE);
int num=10;
TransactionState t=tm.beginTransaction();
for(int j=0;j<num;j++) {
byte[]data=Bytes.toBytes(j);
Put put=new Put(data);
put.add(Bytes.toBytes(TEST_FAMILY), Bytes.toBytes("value"), data);
table1.put(t,put);
}
int key=15;
Get g=new Get(Bytes.toBytes(key));
Result r=table1.get(t,g);
assertTrue("Found a row that should not exist", r.isEmpty());
tm.tryCommit(t);
} catch (Exception e) {
LOG.error("Exception in test", e);
throw e;
}
} | #vulnerable code
@Test public void testMultiPutSameRow() throws Exception {
try{
TransactionManager tm = new TransactionManager(hbaseConf);
TransactionalTable table1 = new TransactionalTable(hbaseConf, TEST_TABLE);
int num=10;
TransactionState t=tm.beginTransaction();
for(int j=0;j<num;j++) {
byte[]data=Bytes.toBytes(j);
Put put=new Put(data);
put.add(Bytes.toBytes(TEST_FAMILY), Bytes.toBytes("value"), data);
table1.put(t,put);
}
int key=15;
Get g=new Get(Bytes.toBytes(key));
Result r=table1.get(t,g);
assertTrue("Found a row that should not exist", r.isEmpty());
tm.tryCommit(t);
} catch (Exception e) {
LOG.error("Exception in test", e);
throw e;
}
}
#location 16
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
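The RESOURCE_LEAK rows typically come from table or connection handles that are opened in a test and never closed on every path. A minimal sketch of the usual remedy, assuming the handle implements java.io.Closeable (as HBase's HTable does), is try-with-resources; the class and method names below are illustrative, not the project's API:

```java
import java.io.Closeable;
import java.io.IOException;

// Hypothetical stand-in for an HTable/TTable-style handle.
class TableHandle implements Closeable {
    TableHandle(String tableName) { /* open the underlying connection */ }
    void put(byte[] row) { /* write one row */ }
    @Override public void close() { /* release the connection */ }
}

class LeakFreeUsage {
    void writeRow(byte[] row) throws IOException {
        // try-with-resources closes the handle even if put() throws,
        // which is the fix these leak reports usually call for.
        try (TableHandle table = new TableHandle("TEST_TABLE")) {
            table.put(row);
        }
    }
}
```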
#fixed code
public static TSOState getState(TSOServerConfig config){
TSOState returnValue;
if(!config.isRecoveryEnabled()){
LOG.warn("Logger is disabled");
returnValue = new TSOState(new TimestampOracle());
} else {
BookKeeperStateBuilder builder = new BookKeeperStateBuilder(config);
try{
returnValue = builder.buildState();
LOG.info("State built");
} catch (Throwable e) {
LOG.error("Error while building the state.", e);
returnValue = null;
} finally {
builder.shutdown();
}
}
return returnValue;
} | #vulnerable code
public static TSOState getState(TSOServerConfig config){
TSOState returnValue;
if(config.getZkServers() == null){
LOG.warn("Logger is disabled");
returnValue = new TSOState(new TimestampOracle());
} else {
BookKeeperStateBuilder builder = new BookKeeperStateBuilder(config);
try{
returnValue = builder.buildState();
LOG.info("State built");
} catch (Throwable e) {
LOG.error("Error while building the state.", e);
returnValue = null;
} finally {
builder.shutdown();
}
}
return returnValue;
}
#location 10
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void commit(Transaction transaction) throws RollbackException, TransactionException {
if (LOG.isTraceEnabled()) {
LOG.trace("commit " + transaction);
}
if (transaction.getStatus() != Status.RUNNING) {
throw new IllegalArgumentException("Transaction has already been " + transaction.getStatus());
}
// Check rollbackOnly status
if (transaction.isRollbackOnly()) {
rollback(transaction);
throw new RollbackException();
}
// Flush all pending writes
if (!flushTables(transaction)) {
cleanup(transaction);
throw new RollbackException();
}
SyncCommitCallback cb = new SyncCommitCallback();
TimerContext commitTimer = tsoclient.getMetrics().startTimer(Timers.COMMIT);
try {
tsoclient.commit(transaction.getStartTimestamp(), transaction.getRows(), cb);
cb.await();
} catch (Exception e) {
throw new TransactionException("Could not commit", e);
} finally {
commitTimer.stop();
}
if (cb.getException() != null) {
throw new TransactionException("Error committing", cb.getException());
}
if (LOG.isTraceEnabled()) {
LOG.trace("doneCommit " + transaction.getStartTimestamp() + " TS_c: " + cb.getCommitTimestamp()
+ " Success: " + (cb.getResult() == TSOClient.Result.OK));
}
if (cb.getResult() == TSOClient.Result.ABORTED) {
cleanup(transaction);
throw new RollbackException();
}
transaction.setStatus(Status.COMMITTED);
transaction.setCommitTimestamp(cb.getCommitTimestamp());
} | #vulnerable code
public void commit(Transaction transaction) throws RollbackException, TransactionException {
if (LOG.isTraceEnabled()) {
LOG.trace("commit " + transaction);
}
// Check rollbackOnly status
if (transaction.isRollbackOnly()) {
rollback(transaction);
throw new RollbackException();
}
// Flush all pending writes
if (!flushTables(transaction)) {
cleanup(transaction);
throw new RollbackException();
}
SyncCommitCallback cb = new SyncCommitCallback();
TimerContext commitTimer = tsoclient.getMetrics().startTimer(Timers.COMMIT);
try {
tsoclient.commit(transaction.getStartTimestamp(), transaction.getRows(), cb);
cb.await();
} catch (Exception e) {
throw new TransactionException("Could not commit", e);
} finally {
commitTimer.stop();
}
if (cb.getException() != null) {
throw new TransactionException("Error committing", cb.getException());
}
if (LOG.isTraceEnabled()) {
LOG.trace("doneCommit " + transaction.getStartTimestamp() + " TS_c: " + cb.getCommitTimestamp()
+ " Success: " + (cb.getResult() == TSOClient.Result.OK));
}
if (cb.getResult() == TSOClient.Result.ABORTED) {
cleanup(transaction);
throw new RollbackException();
}
transaction.setCommitTimestamp(cb.getCommitTimestamp());
}
#location 21
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void runTestWriteWriteConflict() throws Exception {
TransactionManager tm = new TransactionManager(hbaseConf);
TTable tt = new TTable(hbaseConf, TEST_TABLE);
TransactionState t1 = tm.beginTransaction();
LOG.info("Transaction created " + t1);
TransactionState t2 = tm.beginTransaction();
LOG.info("Transaction created" + t2);
byte[] row = Bytes.toBytes("test-simple");
byte[] fam = Bytes.toBytes(TEST_FAMILY);
byte[] col = Bytes.toBytes("testdata");
byte[] data1 = Bytes.toBytes("testWrite-1");
byte[] data2 = Bytes.toBytes("testWrite-2");
Put p = new Put(row);
p.add(fam, col, data1);
tt.put(t1, p);
Put p2 = new Put(row);
p2.add(fam, col, data2);
tt.put(t2, p2);
tm.tryCommit(t2);
boolean aborted = false;
try {
tm.tryCommit(t1);
assertTrue("Transaction commited successfully", false);
} catch (CommitUnsuccessfulException e) {
aborted = true;
}
assertTrue("Transaction didn't raise exception", aborted);
} | #vulnerable code
@Test
public void runTestWriteWriteConflict() throws Exception {
TransactionManager tm = new TransactionManager(hbaseConf);
TransactionalTable tt = new TransactionalTable(hbaseConf, TEST_TABLE);
TransactionState t1 = tm.beginTransaction();
LOG.info("Transaction created " + t1);
TransactionState t2 = tm.beginTransaction();
LOG.info("Transaction created" + t2);
byte[] row = Bytes.toBytes("test-simple");
byte[] fam = Bytes.toBytes(TEST_FAMILY);
byte[] col = Bytes.toBytes("testdata");
byte[] data1 = Bytes.toBytes("testWrite-1");
byte[] data2 = Bytes.toBytes("testWrite-2");
Put p = new Put(row);
p.add(fam, col, data1);
tt.put(t1, p);
Put p2 = new Put(row);
p2.add(fam, col, data2);
tt.put(t2, p2);
tm.tryCommit(t2);
boolean aborted = false;
try {
tm.tryCommit(t1);
assertTrue("Transaction commited successfully", false);
} catch (CommitUnsuccessfulException e) {
aborted = true;
}
assertTrue("Transaction didn't raise exception", aborted);
}
#location 24
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % bucketNumber)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
} | #vulnerable code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % BKT_NUMBER)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
}
#location 14
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Override
public void run() {
// *** Start the Netty configuration ***
// Start server with Nb of active threads = 2*NB CPU + 1 as maximum.
ChannelFactory factory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("worker-%d").build()), (Runtime
.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap bootstrap = new ServerBootstrap(factory);
// Create the global ChannelGroup
ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
// threads max
// int maxThreads = Runtime.getRuntime().availableProcessors() *2 + 1;
int maxThreads = 5;
// Memory limitation: 1MB by channel, 1GB global, 100 ms of timeout
ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576, 1073741824,
100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
@Override
public int estimateSize(Object o) {
return 1000;
}
}, new ThreadFactoryBuilder().setNameFormat("executor-%d").build());
// TODO use dependency injection
if (config.getFsLog() != null) {
state = FileSystemTimestampOnlyStateBuilder.getState(config);
} else {
state = BookKeeperStateBuilder.getState(this.config);
}
if (state == null) {
LOG.error("Couldn't build state");
return;
}
state.addRecord(new byte[] { LoggerProtocol.LOGSTART }, new AddRecordCallback() {
@Override
public void addRecordComplete(int rc, Object ctx) {
}
}, null);
String metricsConfig = config.getMetrics();
if (metricsConfig != null) {
MetricsUtils.initMetrics(metricsConfig);
}
LOG.info("PARAM MAX_ITEMS: " + state.maxItems);
LOG.info("PARAM BATCH_SIZE: " + state.batchSize);
LOG.info("PARAM LOAD_FACTOR: " + TSOState.LOAD_FACTOR);
LOG.info("PARAM MAX_THREADS: " + maxThreads);
final TSOHandler handler = new TSOHandler(channelGroup, state);
handler.start();
bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
bootstrap.setOption("tcpNoDelay", true);
// setting buffer size can improve I/O
bootstrap.setOption("child.sendBufferSize", 1048576);
bootstrap.setOption("child.receiveBufferSize", 1048576);
// better to have an receive buffer predictor
bootstrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory());
// if the server is sending 1000 messages per sec, optimum write buffer water marks will
// prevent unnecessary throttling, Check NioSocketChannelConfig doc
bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.keepAlive", true);
bootstrap.setOption("child.reuseAddress", true);
bootstrap.setOption("child.connectTimeoutMillis", 60000);
// *** Start the Netty running ***
// Add the parent channel to the group
Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
channelGroup.add(channel);
// Compacter handler
ChannelFactory comFactory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-worker-%d").build()),
(Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
ChannelGroup comGroup = new DefaultChannelGroup("compacter");
final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("decoder", new ObjectDecoder());
pipeline.addLast("encoder", new ObjectEncoder());
pipeline.addLast("handler", comHandler);
return pipeline;
}
});
comBootstrap.setOption("tcpNoDelay", true);
comBootstrap.setOption("child.tcpNoDelay", true);
comBootstrap.setOption("child.keepAlive", true);
comBootstrap.setOption("child.reuseAddress", true);
comBootstrap.setOption("child.connectTimeoutMillis", 100);
comBootstrap.setOption("readWriteFair", true);
channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));
synchronized (lock) {
while (!finish) {
try {
lock.wait();
} catch (InterruptedException e) {
break;
}
}
}
// timestampOracle.stop();
handler.stop();
comHandler.stop();
state.stop();
state = null;
// *** Start the Netty shutdown ***
// Now close all channels
LOG.info("End of channel group");
channelGroup.close().awaitUninterruptibly();
comGroup.close().awaitUninterruptibly();
// Close the executor for Pipeline
LOG.info("End of pipeline executor");
pipelineExecutor.shutdownNow();
// Now release resources
LOG.info("End of resources");
factory.releaseExternalResources();
comFactory.releaseExternalResources();
comBootstrap.releaseExternalResources();
} | #vulnerable code
@Override
public void run() {
// *** Start the Netty configuration ***
// Start server with Nb of active threads = 2*NB CPU + 1 as maximum.
ChannelFactory factory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("worker-%d").build()), (Runtime
.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap bootstrap = new ServerBootstrap(factory);
// Create the global ChannelGroup
ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
// threads max
// int maxThreads = Runtime.getRuntime().availableProcessors() *2 + 1;
int maxThreads = 5;
// Memory limitation: 1MB by channel, 1GB global, 100 ms of timeout
ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576, 1073741824,
100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
@Override
public int estimateSize(Object o) {
return 1000;
}
}, new ThreadFactoryBuilder().setNameFormat("executor-%d").build());
// TODO use dependency injection
if (config.getFsLog() != null) {
state = FileSystemTimestampOnlyStateBuilder.getState(config);
} else {
state = BookKeeperStateBuilder.getState(this.config);
}
if (state == null) {
LOG.error("Couldn't build state");
return;
}
state.addRecord(new byte[] { LoggerProtocol.LOGSTART }, new AddRecordCallback() {
@Override
public void addRecordComplete(int rc, Object ctx) {
}
}, null);
String metricsConfig = config.getMetrics();
if (metricsConfig != null) {
MetricsUtils.initMetrics(metricsConfig);
}
TSOState.BATCH_SIZE = config.getBatchSize();
LOG.info("PARAM MAX_ITEMS: " + TSOState.MAX_ITEMS);
LOG.info("PARAM BATCH_SIZE: " + TSOState.BATCH_SIZE);
LOG.info("PARAM LOAD_FACTOR: " + TSOState.LOAD_FACTOR);
LOG.info("PARAM MAX_THREADS: " + maxThreads);
final TSOHandler handler = new TSOHandler(channelGroup, state);
handler.start();
bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
bootstrap.setOption("tcpNoDelay", true);
// setting buffer size can improve I/O
bootstrap.setOption("child.sendBufferSize", 1048576);
bootstrap.setOption("child.receiveBufferSize", 1048576);
// better to have an receive buffer predictor
bootstrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory());
// if the server is sending 1000 messages per sec, optimum write buffer water marks will
// prevent unnecessary throttling, Check NioSocketChannelConfig doc
bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.keepAlive", true);
bootstrap.setOption("child.reuseAddress", true);
bootstrap.setOption("child.connectTimeoutMillis", 60000);
// *** Start the Netty running ***
// Add the parent channel to the group
Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
channelGroup.add(channel);
// Compacter handler
ChannelFactory comFactory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-worker-%d").build()),
(Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
ChannelGroup comGroup = new DefaultChannelGroup("compacter");
final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("decoder", new ObjectDecoder());
pipeline.addLast("encoder", new ObjectEncoder());
pipeline.addLast("handler", comHandler);
return pipeline;
}
});
comBootstrap.setOption("tcpNoDelay", true);
comBootstrap.setOption("child.tcpNoDelay", true);
comBootstrap.setOption("child.keepAlive", true);
comBootstrap.setOption("child.reuseAddress", true);
comBootstrap.setOption("child.connectTimeoutMillis", 100);
comBootstrap.setOption("readWriteFair", true);
channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));
synchronized (lock) {
while (!finish) {
try {
lock.wait();
} catch (InterruptedException e) {
break;
}
}
}
// timestampOracle.stop();
handler.stop();
comHandler.stop();
state.stop();
state = null;
// *** Start the Netty shutdown ***
// Now close all channels
LOG.info("End of channel group");
channelGroup.close().awaitUninterruptibly();
comGroup.close().awaitUninterruptibly();
// Close the executor for Pipeline
LOG.info("End of pipeline executor");
pipelineExecutor.shutdownNow();
// Now release resources
LOG.info("End of resources");
factory.releaseExternalResources();
comFactory.releaseExternalResources();
comBootstrap.releaseExternalResources();
}
#location 48
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
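In this row the vulnerable server mutates the static field TSOState.BATCH_SIZE at startup and logs the static MAX_ITEMS/BATCH_SIZE values, while the fixed code reads per-instance state.maxItems and state.batchSize. A rough sketch of that refactoring (the names below are illustrative, not the project's real fields):

```java
// Before (race-prone): configuration in mutable statics that one thread
// can overwrite while another thread is reading them.
//   class TSOState { static int BATCH_SIZE; static int MAX_ITEMS; }

// After: each state instance carries its own immutable configuration.
class TsoStateSketch {
    final int batchSize;
    final int maxItems;

    TsoStateSketch(int batchSize, int maxItems) {
        this.batchSize = batchSize;
        this.maxItems = maxItems;
    }
}
```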
#fixed code
@Test(timeOut = 30_000)
public void runTestInterleaveScanWhenATransactionAborts() throws Exception {
TransactionManager tm = newTransactionManager();
TTable tt = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
LOG.info("Transaction created " + t1);
byte[] fam = Bytes.toBytes(TEST_FAMILY);
byte[] col = Bytes.toBytes("testdata");
byte[] data1 = Bytes.toBytes("testWrite-1");
byte[] data2 = Bytes.toBytes("testWrite-2");
byte[] startrow = Bytes.toBytes("test-scan" + 0);
byte[] stoprow = Bytes.toBytes("test-scan" + 9);
byte[] modrow = Bytes.toBytes("test-scan" + 3);
for (int i = 0; i < 10; i++) {
byte[] row = Bytes.toBytes("test-scan" + i);
Put p = new Put(row);
p.add(fam, col, data1);
tt.put(t1, p);
}
tm.commit(t1);
Transaction t2 = tm.begin();
Put p = new Put(modrow);
p.add(fam, col, data2);
tt.put(t2, p);
int modifiedrows = 0;
ResultScanner rs = tt.getScanner(t2, new Scan().setStartRow(startrow).setStopRow(stoprow).addColumn(fam, col));
Result r = rs.next();
while (r != null) {
if (Bytes.equals(data2, r.getValue(fam, col))) {
if (LOG.isTraceEnabled()) {
LOG.trace("Modified :" + Bytes.toString(r.getRow()));
}
modifiedrows++;
}
r = rs.next();
}
assertTrue(modifiedrows == 1, "Expected 1 row modified, but " + modifiedrows + " are.");
tm.rollback(t2);
Transaction tscan = tm.begin();
rs = tt.getScanner(tscan, new Scan().setStartRow(startrow).setStopRow(stoprow).addColumn(fam, col));
r = rs.next();
while (r != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Scan1 :" + Bytes.toString(r.getRow()) + " => " + Bytes.toString(r.getValue(fam, col)));
}
assertTrue(Bytes.equals(data1, r.getValue(fam, col)),
"Unexpected value for SI scan " + tscan + ": " + Bytes.toString(r.getValue(fam, col)));
r = rs.next();
}
} | #vulnerable code
@Test(timeOut = 30_000)
public void runTestInterleaveScanWhenATransactionAborts() throws Exception {
TransactionManager tm = newTransactionManager();
TTable tt = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
LOG.info("Transaction created " + t1);
byte[] fam = Bytes.toBytes(TEST_FAMILY);
byte[] col = Bytes.toBytes("testdata");
byte[] data1 = Bytes.toBytes("testWrite-1");
byte[] data2 = Bytes.toBytes("testWrite-2");
byte[] startrow = Bytes.toBytes("test-scan" + 0);
byte[] stoprow = Bytes.toBytes("test-scan" + 9);
byte[] modrow = Bytes.toBytes("test-scan" + 3);
for (int i = 0; i < 10; i++) {
byte[] row = Bytes.toBytes("test-scan" + i);
Put p = new Put(row);
p.add(fam, col, data1);
tt.put(t1, p);
}
tm.commit(t1);
Transaction t2 = tm.begin();
Put p = new Put(modrow);
p.add(fam, col, data2);
tt.put(t2, p);
int modifiedrows = 0;
ResultScanner rs = tt.getScanner(t2, new Scan().setStartRow(startrow).setStopRow(stoprow).addColumn(fam, col));
Result r = rs.next();
while (r != null) {
if (Bytes.equals(data2, r.getValue(fam, col))) {
if (LOG.isTraceEnabled()) {
LOG.trace("Modified :" + Bytes.toString(r.getRow()));
}
modifiedrows++;
}
r = rs.next();
}
assertTrue(modifiedrows == 1, "Expected 1 row modified, but " + modifiedrows + " are.");
tm.rollback(t2);
Transaction tscan = tm.begin();
rs = tt.getScanner(tscan, new Scan().setStartRow(startrow).setStopRow(stoprow).addColumn(fam, col));
r = rs.next();
while (r != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Scan1 :" + Bytes.toString(r.getRow()) + " => " + Bytes.toString(r.getValue(fam, col)));
}
assertTrue(Bytes.equals(data1, r.getValue(fam, col)),
"Unexpected value for SI scan " + tscan + ": " + Bytes.toString(r.getValue(fam, col)));
r = rs.next();
}
}
#location 46
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test(timeout = 60000)
public void testTimestampRequestSucceedWithMultipleTimeouts() throws Exception {
Configuration clientConfiguration = getClientConfiguration();
clientConfiguration.setProperty(TSOClient.REQUEST_TIMEOUT_IN_MS_CONFKEY, 100);
clientConfiguration.setProperty(TSOClient.REQUEST_MAX_RETRIES_CONFKEY, 10000);
TSOClient client = TSOClient.newBuilder().withConfiguration(clientConf).build();
pauseTSO();
Future<Long> future = client.createTransaction();
while(!isTsoBlockingRequest()) {}
Thread.sleep(1000);
resumeTSO();
future.get();
} | #vulnerable code
@Test(timeout = 60000)
public void testTimestampRequestSucceedWithMultipleTimeouts() throws Exception {
Configuration clientConfiguration = getClientConfiguration();
clientConfiguration.setProperty(TSOClient.REQUEST_TIMEOUT_IN_MS_CONFKEY, 100);
clientConfiguration.setProperty(TSOClient.REQUEST_MAX_RETRIES_CONFKEY, 10000);
TSOClient client = TSOClient.newBuilder().withConfiguration(clientConf)
.withCommitTableClient(getCommitTable().getClient().get()).build();
pauseTSO();
Future<Long> future = client.createTransaction();
while(!isTsoBlockingRequest()) {}
Thread.sleep(1000);
resumeTSO();
future.get();
}
#location 8
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testShadowCellsBasics() throws Exception {
TransactionManager tm = newTransactionManager();
TTable table = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
// Test shadow cell are created properly
Put put = new Put(row);
put.add(family, qualifier, data1);
table.put(t1, put);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertFalse("Shadow cell shouldn't be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
tm.commit(t1);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertTrue("Shadow cell should be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
// Test that we can make a valid read after adding a shadow cell without hitting the commit table
CommitTable.Client commitTableClient = spy(getTSO().getCommitTable().getClient().get());
TSOClient client = TSOClient.newBuilder().withConfiguration(getTSO().getClientConfiguration())
.build();
TransactionManager tm2 = TransactionManager.newBuilder()
.withConfiguration(hbaseConf).withTSOClient(client)
.withCommitTableClient(commitTableClient).build();
Transaction t2 = tm2.begin();
Get get = new Get(row);
get.addColumn(family, qualifier);
Result getResult = table.get(t2, get);
assertTrue("Values should be the same", Arrays.equals(data1, getResult.getValue(family, qualifier)));
verify(commitTableClient, never()).getCommitTimestamp(anyLong());
} | #vulnerable code
@Test
public void testShadowCellsBasics() throws Exception {
TransactionManager tm = newTransactionManager();
TTable table = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
// Test shadow cell are created properly
Put put = new Put(row);
put.add(family, qualifier, data1);
table.put(t1, put);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertFalse("Shadow cell shouldn't be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
tm.commit(t1);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertTrue("Shadow cell should be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
// Test that we can make a valid read after adding a shadow cell without hitting the commit table
CommitTable.Client commitTableClient = spy(getTSO().getCommitTable().getClient().get());
TSOClient client = TSOClient.newBuilder().withConfiguration(getTSO().getClientConfiguration())
.withCommitTableClient(commitTableClient).build();
TransactionManager tm2 = TransactionManager.newBuilder()
.withConfiguration(hbaseConf).withTSOClient(client).build();
Transaction t2 = tm2.begin();
Get get = new Get(row);
get.addColumn(family, qualifier);
Result getResult = table.get(t2, get);
assertTrue("Values should be the same", Arrays.equals(data1, getResult.getValue(family, qualifier)));
verify(commitTableClient, never()).getCommitTimestamp(anyLong());
}
#location 28
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % bucketNumber)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
} | #vulnerable code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % BKT_NUMBER)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
}
#location 2
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test(timeout = 60000)
public void testCommitCanSucceedWithMultipleTimeouts() throws Exception {
Configuration clientConfiguration = getClientConfiguration();
clientConfiguration.setProperty(TSOClient.REQUEST_TIMEOUT_IN_MS_CONFKEY, 100);
clientConfiguration.setProperty(TSOClient.REQUEST_MAX_RETRIES_CONFKEY, 10000);
TSOClient client = TSOClient.newBuilder().withConfiguration(clientConf).build();
long ts1 = client.createTransaction().get();
pauseTSO();
TSOFuture<Long> future = client.commit(ts1, Sets.newSet(c1, c2));
while(!isTsoBlockingRequest()) {}
Thread.sleep(1000);
resumeTSO();
future.get();
} | #vulnerable code
@Test(timeout = 60000)
public void testCommitCanSucceedWithMultipleTimeouts() throws Exception {
Configuration clientConfiguration = getClientConfiguration();
clientConfiguration.setProperty(TSOClient.REQUEST_TIMEOUT_IN_MS_CONFKEY, 100);
clientConfiguration.setProperty(TSOClient.REQUEST_MAX_RETRIES_CONFKEY, 10000);
TSOClient client = TSOClient.newBuilder().withConfiguration(clientConf)
.withCommitTableClient(getCommitTable().getClient().get()).build();
long ts1 = client.createTransaction().get();
pauseTSO();
TSOFuture<Long> future = client.commit(ts1, Sets.newSet(c1, c2));
while(!isTsoBlockingRequest()) {}
Thread.sleep(1000);
resumeTSO();
future.get();
}
#location 8
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Override
public void run() {
// *** Start the Netty configuration ***
// Start server with Nb of active threads = 2*NB CPU + 1 as maximum.
ChannelFactory factory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("worker-%d").build()), (Runtime
.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap bootstrap = new ServerBootstrap(factory);
// Create the global ChannelGroup
ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
// threads max
// int maxThreads = Runtime.getRuntime().availableProcessors() *2 + 1;
int maxThreads = 5;
// Memory limitation: 1MB by channel, 1GB global, 100 ms of timeout
ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576, 1073741824,
100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
@Override
public int estimateSize(Object o) {
return 1000;
}
}, new ThreadFactoryBuilder().setNameFormat("executor-%d").build());
// TODO use dependency injection
if (config.getFsLog() != null) {
state = FileSystemTimestampOnlyStateBuilder.getState(config);
} else {
state = BookKeeperStateBuilder.getState(this.config);
}
if (state == null) {
LOG.error("Couldn't build state");
return;
}
state.addRecord(new byte[] { LoggerProtocol.LOGSTART }, new AddRecordCallback() {
@Override
public void addRecordComplete(int rc, Object ctx) {
}
}, null);
String metricsConfig = config.getMetrics();
if (metricsConfig != null) {
MetricsUtils.initMetrics(metricsConfig);
}
LOG.info("PARAM MAX_ITEMS: " + state.maxItems);
LOG.info("PARAM BATCH_SIZE: " + state.batchSize);
LOG.info("PARAM LOAD_FACTOR: " + TSOState.LOAD_FACTOR);
LOG.info("PARAM MAX_THREADS: " + maxThreads);
final TSOHandler handler = new TSOHandler(channelGroup, state);
handler.start();
bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
bootstrap.setOption("tcpNoDelay", true);
// setting buffer size can improve I/O
bootstrap.setOption("child.sendBufferSize", 1048576);
bootstrap.setOption("child.receiveBufferSize", 1048576);
// better to have an receive buffer predictor
bootstrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory());
// if the server is sending 1000 messages per sec, optimum write buffer water marks will
// prevent unnecessary throttling, Check NioSocketChannelConfig doc
bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.keepAlive", true);
bootstrap.setOption("child.reuseAddress", true);
bootstrap.setOption("child.connectTimeoutMillis", 60000);
// *** Start the Netty running ***
// Add the parent channel to the group
Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
channelGroup.add(channel);
// Compacter handler
ChannelFactory comFactory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-worker-%d").build()),
(Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
ChannelGroup comGroup = new DefaultChannelGroup("compacter");
final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("decoder", new ObjectDecoder());
pipeline.addLast("encoder", new ObjectEncoder());
pipeline.addLast("handler", comHandler);
return pipeline;
}
});
comBootstrap.setOption("tcpNoDelay", true);
comBootstrap.setOption("child.tcpNoDelay", true);
comBootstrap.setOption("child.keepAlive", true);
comBootstrap.setOption("child.reuseAddress", true);
comBootstrap.setOption("child.connectTimeoutMillis", 100);
comBootstrap.setOption("readWriteFair", true);
channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));
synchronized (lock) {
while (!finish) {
try {
lock.wait();
} catch (InterruptedException e) {
break;
}
}
}
// timestampOracle.stop();
handler.stop();
comHandler.stop();
state.stop();
state = null;
// *** Start the Netty shutdown ***
// Now close all channels
LOG.info("End of channel group");
channelGroup.close().awaitUninterruptibly();
comGroup.close().awaitUninterruptibly();
// Close the executor for Pipeline
LOG.info("End of pipeline executor");
pipelineExecutor.shutdownNow();
// Now release resources
LOG.info("End of resources");
factory.releaseExternalResources();
comFactory.releaseExternalResources();
comBootstrap.releaseExternalResources();
} | #vulnerable code
@Override
public void run() {
// *** Start the Netty configuration ***
// Start server with Nb of active threads = 2*NB CPU + 1 as maximum.
ChannelFactory factory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("worker-%d").build()), (Runtime
.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap bootstrap = new ServerBootstrap(factory);
// Create the global ChannelGroup
ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
// threads max
// int maxThreads = Runtime.getRuntime().availableProcessors() *2 + 1;
int maxThreads = 5;
// Memory limitation: 1MB by channel, 1GB global, 100 ms of timeout
ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576, 1073741824,
100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
@Override
public int estimateSize(Object o) {
return 1000;
}
}, new ThreadFactoryBuilder().setNameFormat("executor-%d").build());
// TODO use dependency injection
if (config.getFsLog() != null) {
state = FileSystemTimestampOnlyStateBuilder.getState(config);
} else {
state = BookKeeperStateBuilder.getState(this.config);
}
if (state == null) {
LOG.error("Couldn't build state");
return;
}
state.addRecord(new byte[] { LoggerProtocol.LOGSTART }, new AddRecordCallback() {
@Override
public void addRecordComplete(int rc, Object ctx) {
}
}, null);
String metricsConfig = config.getMetrics();
if (metricsConfig != null) {
MetricsUtils.initMetrics(metricsConfig);
}
TSOState.BATCH_SIZE = config.getBatchSize();
LOG.info("PARAM MAX_ITEMS: " + TSOState.MAX_ITEMS);
LOG.info("PARAM BATCH_SIZE: " + TSOState.BATCH_SIZE);
LOG.info("PARAM LOAD_FACTOR: " + TSOState.LOAD_FACTOR);
LOG.info("PARAM MAX_THREADS: " + maxThreads);
final TSOHandler handler = new TSOHandler(channelGroup, state);
handler.start();
bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
bootstrap.setOption("tcpNoDelay", true);
// setting buffer size can improve I/O
bootstrap.setOption("child.sendBufferSize", 1048576);
bootstrap.setOption("child.receiveBufferSize", 1048576);
// better to have an receive buffer predictor
bootstrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory());
// if the server is sending 1000 messages per sec, optimum write buffer water marks will
// prevent unnecessary throttling, Check NioSocketChannelConfig doc
bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.keepAlive", true);
bootstrap.setOption("child.reuseAddress", true);
bootstrap.setOption("child.connectTimeoutMillis", 60000);
// *** Start the Netty running ***
// Add the parent channel to the group
Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
channelGroup.add(channel);
// Compacter handler
ChannelFactory comFactory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-boss-%d").build()),
Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("compacter-worker-%d").build()),
(Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
ChannelGroup comGroup = new DefaultChannelGroup("compacter");
final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("decoder", new ObjectDecoder());
pipeline.addLast("encoder", new ObjectEncoder());
pipeline.addLast("handler", comHandler);
return pipeline;
}
});
comBootstrap.setOption("tcpNoDelay", true);
comBootstrap.setOption("child.tcpNoDelay", true);
comBootstrap.setOption("child.keepAlive", true);
comBootstrap.setOption("child.reuseAddress", true);
comBootstrap.setOption("child.connectTimeoutMillis", 100);
comBootstrap.setOption("readWriteFair", true);
channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));
synchronized (lock) {
while (!finish) {
try {
lock.wait();
} catch (InterruptedException e) {
break;
}
}
}
// timestampOracle.stop();
handler.stop();
comHandler.stop();
state.stop();
state = null;
// *** Start the Netty shutdown ***
// Now close all channels
LOG.info("End of channel group");
channelGroup.close().awaitUninterruptibly();
comGroup.close().awaitUninterruptibly();
// Close the executor for Pipeline
LOG.info("End of pipeline executor");
pipelineExecutor.shutdownNow();
// Now release resources
LOG.info("End of resources");
factory.releaseExternalResources();
comFactory.releaseExternalResources();
comBootstrap.releaseExternalResources();
}
#location 50
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testCrashAfterCommit() throws Exception {
CommitTable.Client commitTableClient = spy(getTSO().getCommitTable().getClient().get());
TSOClient client = TSOClient.newBuilder().withConfiguration(getTSO().getClientConfiguration())
.build();
TransactionManager tm = spy(TransactionManager.newBuilder()
.withConfiguration(hbaseConf)
.withCommitTableClient(commitTableClient)
.withTSOClient(client).build());
doNothing().when(tm).postCommit(any(Transaction.class));
TTable table = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
// Test shadow cell are created properly
Put put = new Put(row);
put.add(family, qualifier, data1);
table.put(t1, put);
tm.commit(t1);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertFalse("Shadow cell should not be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
Transaction t2 = tm.begin();
Get get = new Get(row);
get.addColumn(family, qualifier);
Result getResult = table.get(t2, get);
assertTrue("Values should be the same", Arrays.equals(data1, getResult.getValue(family, qualifier)));
verify(commitTableClient, times(1)).getCommitTimestamp(anyLong());
} | #vulnerable code
@Test
public void testCrashAfterCommit() throws Exception {
CommitTable.Client commitTableClient = spy(getTSO().getCommitTable().getClient().get());
TSOClient client = TSOClient.newBuilder().withConfiguration(getTSO().getClientConfiguration())
.withCommitTableClient(commitTableClient).build();
TransactionManager tm = spy(TransactionManager.newBuilder()
.withConfiguration(hbaseConf).withTSOClient(client).build());
doNothing().when(tm).postCommit(any(Transaction.class));
TTable table = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
// Test shadow cell are created properly
Put put = new Put(row);
put.add(family, qualifier, data1);
table.put(t1, put);
tm.commit(t1);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertFalse("Shadow cell should not be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
Transaction t2 = tm.begin();
Get get = new Get(row);
get.addColumn(family, qualifier);
Result getResult = table.get(t2, get);
assertTrue("Values should be the same", Arrays.equals(data1, getResult.getValue(family, qualifier)));
verify(commitTableClient, times(1)).getCommitTimestamp(anyLong());
}
#location 6
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % bucketNumber)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
} | #vulnerable code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % BKT_NUMBER)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
}
#location 10
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void stop() {
for (NotificationClient client : clients.values()) {
client.stop();
}
} | #vulnerable code
public void stop() {
if (clientThread != null) {
clientThread.interrupt();
}
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testBasicBehaviour() throws Throwable {
HBaseCommitTable commitTable = new HBaseCommitTable(hbaseConf, TEST_TABLE);
ListenableFuture<Writer> futureWriter = commitTable.getWriter();
Writer writer = futureWriter.get();
ListenableFuture<Client> futureClient = commitTable.getClient();
Client client = futureClient.get();
// Test that the first time the table is empty
assertEquals("Rows should be 0!", 0, rowCount());
// Test the successful creation of 1000 txs in the table
for (int i = 0; i < 1000; i++) {
writer.addCommittedTransaction(i, i + 1);
}
writer.flush().get();
assertEquals("Rows should be 1000!", 1000, rowCount());
// Test the we get the right commit timestamps for each previously inserted tx
for (long i = 0; i < 1000; i++) {
ListenableFuture<Optional<Long>> ctf = client.getCommitTimestamp(i);
Optional<Long> optional = ctf.get();
Long ct = optional.get();
assertEquals("Commit timestamp should be " + (i + 1), (i + 1), (long) ct);
}
assertEquals("Rows should be 1000!", 1000, rowCount());
// Test the successful deletion of the 1000 txs
Future<Void> f = null;
for (long i = 0; i < 1000; i++) {
f = client.completeTransaction(i);
}
f.get();
assertEquals("Rows should be 0!", 0, rowCount());
// Test we don't get a commit timestamp for a non-existent transaction id in the table
ListenableFuture<Optional<Long>> ctf = client.getCommitTimestamp(0);
Optional<Long> optional = ctf.get();
assertFalse("Commit timestamp should not be present", optional.isPresent());
} | #vulnerable code
@Test
public void testBasicBehaviour() throws Throwable {
HTable table = new HTable(hbaseConf, TEST_TABLE);
HBaseCommitTable commitTable = new HBaseCommitTable(table);
ListenableFuture<Writer> futureWriter = commitTable.getWriter();
Writer writer = futureWriter.get();
ListenableFuture<Client> futureClient = commitTable.getClient();
Client client = futureClient.get();
// Test that the first time the table is empty
assertEquals("Rows should be 0!", 0, rowCount());
// Test the successful creation of 1000 txs in the table
for (int i = 0; i < 1000; i++) {
writer.addCommittedTransaction(i, i + 1);
}
writer.flush().get();
assertEquals("Rows should be 1000!", 1000, rowCount());
// Test the we get the right commit timestamps for each previously inserted tx
for (long i = 0; i < 1000; i++) {
ListenableFuture<Optional<Long>> ctf = client.getCommitTimestamp(i);
Optional<Long> optional = ctf.get();
Long ct = optional.get();
assertEquals("Commit timestamp should be " + (i + 1), (i + 1), (long) ct);
}
assertEquals("Rows should be 1000!", 1000, rowCount());
// Test the successful deletion of the 1000 txs
Future<Void> f = null;
for (long i = 0; i < 1000; i++) {
f = client.completeTransaction(i);
}
f.get();
assertEquals("Rows should be 0!", 0, rowCount());
// Test we don't get a commit timestamp for a non-existent transaction id in the table
ListenableFuture<Optional<Long>> ctf = client.getCommitTimestamp(0);
Optional<Long> optional = ctf.get();
assertFalse("Commit timestamp should not be present", optional.isPresent());
}
#location 8
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testShadowCellsBasics() throws Exception {
TransactionManager tm = newTransactionManager();
TTable table = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
// Test shadow cell are created properly
Put put = new Put(row);
put.add(family, qualifier, data1);
table.put(t1, put);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertFalse("Shadow cell shouldn't be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
tm.commit(t1);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertTrue("Shadow cell should be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
// Test that we can make a valid read after adding a shadow cell without hitting the commit table
CommitTable.Client commitTableClient = spy(getTSO().getCommitTable().getClient().get());
TSOClient client = TSOClient.newBuilder().withConfiguration(getTSO().getClientConfiguration())
.build();
TransactionManager tm2 = TransactionManager.newBuilder()
.withConfiguration(hbaseConf).withTSOClient(client)
.withCommitTableClient(commitTableClient).build();
Transaction t2 = tm2.begin();
Get get = new Get(row);
get.addColumn(family, qualifier);
Result getResult = table.get(t2, get);
assertTrue("Values should be the same", Arrays.equals(data1, getResult.getValue(family, qualifier)));
verify(commitTableClient, never()).getCommitTimestamp(anyLong());
} | #vulnerable code
@Test
public void testShadowCellsBasics() throws Exception {
TransactionManager tm = newTransactionManager();
TTable table = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
// Test shadow cell are created properly
Put put = new Put(row);
put.add(family, qualifier, data1);
table.put(t1, put);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertFalse("Shadow cell shouldn't be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
tm.commit(t1);
assertTrue("Cell should be there",
hasCell(table, row, family, qualifier, t1.getStartTimestamp()));
assertTrue("Shadow cell should be there",
hasShadowCell(table, row, family, qualifier, t1.getStartTimestamp()));
// Test that we can make a valid read after adding a shadow cell without hitting the commit table
CommitTable.Client commitTableClient = spy(getTSO().getCommitTable().getClient().get());
TSOClient client = TSOClient.newBuilder().withConfiguration(getTSO().getClientConfiguration())
.withCommitTableClient(commitTableClient).build();
TransactionManager tm2 = TransactionManager.newBuilder()
.withConfiguration(hbaseConf).withTSOClient(client).build();
Transaction t2 = tm2.begin();
Get get = new Get(row);
get.addColumn(family, qualifier);
Result getResult = table.get(t2, get);
assertTrue("Values should be the same", Arrays.equals(data1, getResult.getValue(family, qualifier)));
verify(commitTableClient, never()).getCommitTimestamp(anyLong());
}
#location 17
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % bucketNumber)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
} | #vulnerable code
public Set<Long> raiseLargestDeletedTransaction(long id) {
if (firstUncommitedAbsolute > getAbsolutePosition(id))
return Collections.emptySet();
int maxBucket = getRelativePosition(id);
Set<Long> aborted = new TreeSet<Long>();
for (int i = firstUncommitedBucket; i != maxBucket ; i = (int)((i+1) % BKT_NUMBER)) {
Bucket bucket = buckets[i];
if (bucket != null) {
aborted.addAll(bucket.abortAllUncommited());
buckets[i] = null;
}
}
Bucket bucket = buckets[maxBucket];
if (bucket != null) {
aborted.addAll(bucket.abortUncommited(id));
}
increaseFirstUncommitedBucket();
return aborted;
}
#location 6
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public static void main(String[] args) throws Exception {
Config config = new Config();
new JCommander(config, args);
Configuration hbaseConfig = HBaseConfiguration.create();
final KeyGenerator keygen;
if (config.fullRandomAlgo) {
keygen = new FullRandomKeyGenerator();
} else if (config.badRandomAlgo) {
keygen = new BadRandomKeyGenerator();
} else if (config.bucketingAlgo) {
keygen = new BucketKeyGenerator();
} else if (config.seqAlgo) {
keygen = new SeqKeyGenerator();
} else {
keygen = null;
assert (false);
}
CommitTable commitTable = new HBaseCommitTable(hbaseConfig,
COMMIT_TABLE_DEFAULT_NAME, keygen);
CommitTable.Writer writer = commitTable.getWriter().get();
MetricRegistry metrics = new MetricRegistry();
if (config.graphite != null) {
String parts[] = config.graphite.split(":");
String host = parts[0];
Integer port = Integer.valueOf(parts[1]);
final Graphite graphite = new Graphite(new InetSocketAddress(host, port));
final GraphiteReporter reporter = GraphiteReporter.forRegistry(metrics)
.prefixedWith("omid-hbase." + keygen.getClass().getSimpleName())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
reporter.start(10, TimeUnit.SECONDS);
}
final ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics)
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.build();
reporter.start(10, TimeUnit.SECONDS);
Timer flushTimer = metrics.timer("flush");
Meter commitsMeter = metrics.meter("commits");
int i = 0;
long ts = 0;
while (true) {
writer.addCommittedTransaction(ts++, ts++);
if (i++ == config.batchSize) {
commitsMeter.mark(i);
long start = System.nanoTime();
writer.flush().get();
flushTimer.update((System.nanoTime() - start), TimeUnit.NANOSECONDS);
i = 0;
}
}
} | #vulnerable code
public static void main(String[] args) throws Exception {
Config config = new Config();
new JCommander(config, args);
Configuration hbaseConfig = HBaseConfiguration.create();
HTable commitHTable = new HTable(hbaseConfig, COMMIT_TABLE_DEFAULT_NAME);
if (config.writeBufferSize != -1) {
commitHTable.setWriteBufferSize(config.writeBufferSize);
}
final KeyGenerator keygen;
if (config.fullRandomAlgo) {
keygen = new FullRandomKeyGenerator();
} else if (config.badRandomAlgo) {
keygen = new BadRandomKeyGenerator();
} else if (config.bucketingAlgo) {
keygen = new BucketKeyGenerator();
} else if (config.seqAlgo) {
keygen = new SeqKeyGenerator();
} else {
keygen = null;
assert (false);
}
CommitTable commitTable = new HBaseCommitTable(commitHTable, keygen);
CommitTable.Writer writer = commitTable.getWriter().get();
MetricRegistry metrics = new MetricRegistry();
if (config.graphite != null) {
String parts[] = config.graphite.split(":");
String host = parts[0];
Integer port = Integer.valueOf(parts[1]);
final Graphite graphite = new Graphite(new InetSocketAddress(host, port));
final GraphiteReporter reporter = GraphiteReporter.forRegistry(metrics)
.prefixedWith("omid-hbase." + keygen.getClass().getSimpleName())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
reporter.start(10, TimeUnit.SECONDS);
}
final ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics)
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.build();
reporter.start(10, TimeUnit.SECONDS);
Timer flushTimer = metrics.timer("flush");
Meter commitsMeter = metrics.meter("commits");
int i = 0;
long ts = 0;
while (true) {
writer.addCommittedTransaction(ts++, ts++);
if (i++ == config.batchSize) {
commitsMeter.mark(i);
long start = System.nanoTime();
writer.flush().get();
flushTimer.update((System.nanoTime() - start), TimeUnit.NANOSECONDS);
i = 0;
}
}
}
#location 22
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
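Note: the RESOURCE_LEAK finding above is about the HTable handle that the vulnerable main() opens and never closes; the fixed version sidesteps it by passing the Configuration and table name to HBaseCommitTable and letting the commit table manage its own handle. If the handle really had to be opened in main(), the usual remedy is to scope it with try-with-resources, as in the sketch below (it assumes the same HTable constructor used above and that HTable implements Closeable, as the older HBase client API did):

    try (HTable commitHTable = new HTable(hbaseConfig, COMMIT_TABLE_DEFAULT_NAME)) {
        if (config.writeBufferSize != -1) {
            commitHTable.setWriteBufferSize(config.writeBufferSize);
        }
        // ... run the benchmark with the table; close() happens even if the loop throws ...
    }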
#fixed code
@Test(timeOut = 30_000)
public void runTestInterleaveScan() throws Exception {
TransactionManager tm = newTransactionManager();
TTable tt = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
LOG.info("Transaction created " + t1);
byte[] fam = Bytes.toBytes(TEST_FAMILY);
byte[] col = Bytes.toBytes("testdata");
byte[] data1 = Bytes.toBytes("testWrite-1");
byte[] data2 = Bytes.toBytes("testWrite-2");
byte[] startrow = Bytes.toBytes("test-scan" + 0);
byte[] stoprow = Bytes.toBytes("test-scan" + 9);
byte[] modrow = Bytes.toBytes("test-scan" + 3);
for (int i = 0; i < 10; i++) {
byte[] row = Bytes.toBytes("test-scan" + i);
Put p = new Put(row);
p.add(fam, col, data1);
tt.put(t1, p);
}
tm.commit(t1);
Transaction t2 = tm.begin();
Put p = new Put(modrow);
p.add(fam, col, data2);
tt.put(t2, p);
Transaction tscan = tm.begin();
ResultScanner rs = tt.getScanner(tscan, new Scan().setStartRow(startrow).setStopRow(stoprow));
Result r = rs.next();
int i = 0;
while (r != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Scan1 :" + Bytes.toString(r.getRow()) + " => " + Bytes.toString(r.getValue(fam, col)));
}
LOG.debug("" + ++i);
assertTrue(Bytes.equals(data1, r.getValue(fam, col)),
"Unexpected value for SI scan " + tscan + ": " + Bytes.toString(r.getValue(fam, col)));
r = rs.next();
}
tm.commit(t2);
int modifiedrows = 0;
tscan = tm.begin();
rs = tt.getScanner(tscan, new Scan().setStartRow(startrow).setStopRow(stoprow));
r = rs.next();
while (r != null) {
if (Bytes.equals(data2, r.getValue(fam, col))) {
if (LOG.isTraceEnabled()) {
LOG.trace("Modified :" + Bytes.toString(r.getRow()));
}
modifiedrows++;
}
r = rs.next();
}
assertTrue(modifiedrows == 1, "Expected 1 row modified, but " + modifiedrows + " are.");
} | #vulnerable code
@Test(timeOut = 30_000)
public void runTestInterleaveScan() throws Exception {
TransactionManager tm = newTransactionManager();
TTable tt = new TTable(hbaseConf, TEST_TABLE);
Transaction t1 = tm.begin();
LOG.info("Transaction created " + t1);
byte[] fam = Bytes.toBytes(TEST_FAMILY);
byte[] col = Bytes.toBytes("testdata");
byte[] data1 = Bytes.toBytes("testWrite-1");
byte[] data2 = Bytes.toBytes("testWrite-2");
byte[] startrow = Bytes.toBytes("test-scan" + 0);
byte[] stoprow = Bytes.toBytes("test-scan" + 9);
byte[] modrow = Bytes.toBytes("test-scan" + 3);
for (int i = 0; i < 10; i++) {
byte[] row = Bytes.toBytes("test-scan" + i);
Put p = new Put(row);
p.add(fam, col, data1);
tt.put(t1, p);
}
tm.commit(t1);
Transaction t2 = tm.begin();
Put p = new Put(modrow);
p.add(fam, col, data2);
tt.put(t2, p);
Transaction tscan = tm.begin();
ResultScanner rs = tt.getScanner(tscan, new Scan().setStartRow(startrow).setStopRow(stoprow));
Result r = rs.next();
int i = 0;
while (r != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Scan1 :" + Bytes.toString(r.getRow()) + " => " + Bytes.toString(r.getValue(fam, col)));
}
LOG.debug("" + ++i);
assertTrue(Bytes.equals(data1, r.getValue(fam, col)),
"Unexpected value for SI scan " + tscan + ": " + Bytes.toString(r.getValue(fam, col)));
r = rs.next();
}
tm.commit(t2);
int modifiedrows = 0;
tscan = tm.begin();
rs = tt.getScanner(tscan, new Scan().setStartRow(startrow).setStopRow(stoprow));
r = rs.next();
while (r != null) {
if (Bytes.equals(data2, r.getValue(fam, col))) {
if (LOG.isTraceEnabled()) {
LOG.trace("Modified :" + Bytes.toString(r.getRow()));
}
modifiedrows++;
}
r = rs.next();
}
assertTrue(modifiedrows == 1, "Expected 1 row modified, but " + modifiedrows + " are.");
}
#location 63
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
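Note: the only textual change between the two versions is the table class (TTable vs TransactionalTable); neither version closes the table or the ResultScanner it opens, which is what a RESOURCE_LEAK checker typically flags in code like this. A sketch of the scan loop with the scanner scoped so it is always closed (ResultScanner is Closeable and Iterable in the HBase client API; names are from the test above):

    try (ResultScanner rs = tt.getScanner(tscan, new Scan().setStartRow(startrow).setStopRow(stoprow))) {
        for (Result r : rs) {
            // ... same assertions on r as in the test above ...
        }
    }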
#fixed code
@Override
public TSOState buildState()
throws LoggerException {
try{
CountDownLatch latch = new CountDownLatch(1);
this.zk = new ZooKeeper(config.getZkServers(),
Integer.parseInt(System.getProperty("SESSIONTIMEOUT", Integer.toString(10000))),
new LoggerWatcher(latch));
latch.await();
} catch (Exception e) {
LOG.error("Exception while starting zookeeper client", e);
this.zk = null;
throw LoggerException.create(Code.ZKOPFAILED);
}
LOG.info("Creating bookkeeper client");
try{
bk = new BookKeeper(new ClientConfiguration(), this.zk);
} catch (Exception e) {
LOG.error("Error while creating bookkeeper object", e);
return null;
}
/*
* Create ZooKeeper lock
*/
Context ctx = new Context();
ctx.config = this.config;
zk.create(LoggerConstants.OMID_LOCK_PATH,
new byte[0],
Ids.OPEN_ACL_UNSAFE,
CreateMode.EPHEMERAL,
new LockCreateCallback(),
ctx);
new BookKeeperStateLogger(zk).initialize(new LoggerInitCallback(){
public void loggerInitComplete(int rc, StateLogger sl, Object ctx){
if(rc == Code.OK){
if(LOG.isDebugEnabled()){
LOG.debug("Logger is ok.");
}
((Context) ctx).setLogger(sl);
} else {
LOG.error("Error when initializing logger: " + LoggerException.getMessage(rc));
}
}
}, ctx);
try{
synchronized(ctx){
if(!ctx.isReady()){
// TODO make configurable maximum waiting
ctx.wait();
}
}
} catch (InterruptedException e) {
LOG.error("Interrupted while waiting for state to build up.", e);
ctx.setState(null);
}
return ctx.state;
} | #vulnerable code
@Override
public TSOState buildState()
throws LoggerException {
try{
CountDownLatch latch = new CountDownLatch(1);
this.zk = new ZooKeeper(config.getZkServers(),
Integer.parseInt(System.getProperty("SESSIONTIMEOUT", Integer.toString(10000))),
new LoggerWatcher(latch));
latch.await();
} catch (Exception e) {
LOG.error("Exception while starting zookeeper client", e);
this.zk = null;
throw LoggerException.create(Code.ZKOPFAILED);
}
LOG.info("Creating bookkeeper client");
try{
bk = new BookKeeper(new ClientConfiguration(), this.zk);
} catch (Exception e) {
LOG.error("Error while creating bookkeeper object", e);
return null;
}
/*
* Create ZooKeeper lock
*/
Context ctx = new Context();
zk.create(LoggerConstants.OMID_LOCK_PATH,
new byte[0],
Ids.OPEN_ACL_UNSAFE,
CreateMode.EPHEMERAL,
new LockCreateCallback(),
ctx);
new BookKeeperStateLogger(zk).initialize(new LoggerInitCallback(){
public void loggerInitComplete(int rc, StateLogger sl, Object ctx){
if(rc == Code.OK){
if(LOG.isDebugEnabled()){
LOG.debug("Logger is ok.");
}
((Context) ctx).setLogger(sl);
} else {
LOG.error("Error when initializing logger: " + LoggerException.getMessage(rc));
}
}
}, ctx);
try{
synchronized(ctx){
while(!ctx.isReady()){
ctx.wait();
}
}
} catch (InterruptedException e) {
LOG.error("Interrupted while waiting for state to build up.", e);
ctx.setState(null);
}
return ctx.state;
}
#location 41
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
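Note: both versions hand a shared Context to asynchronous ZooKeeper and BookKeeper callbacks and then block on it; they differ in whether the wait is guarded by an if or a while, and the fixed version also copies the config into the Context before the callbacks can run. The standard guarded-block idiom for this kind of hand-off is sketched below with illustrative method names (awaitReady and markReady are not from the project):

    // Waiting side: re-check the condition in a loop while holding the monitor.
    void awaitReady(Context ctx) throws InterruptedException {
        synchronized (ctx) {
            while (!ctx.isReady()) {
                ctx.wait(); // releases the monitor while parked
            }
        }
    }

    // Callback side: publish state and notify under the same monitor.
    void markReady(Context ctx, StateLogger logger) {
        synchronized (ctx) {
            ctx.setLogger(logger);
            ctx.notifyAll();
        }
    }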
#fixed code
public byte[] getChannelConfigurationBytes() throws InvalidArgumentException, TransactionException {
return getChannelConfigurationBytes(client.getUserContext());
} | #vulnerable code
public byte[] getChannelConfigurationBytes() throws TransactionException {
try {
final Block configBlock = getConfigBlock(getShuffledPeers());
Envelope envelopeRet = Envelope.parseFrom(configBlock.getData().getData(0));
Payload payload = Payload.parseFrom(envelopeRet.getPayload());
ConfigEnvelope configEnvelope = ConfigEnvelope.parseFrom(payload.getData());
return configEnvelope.getConfig().toByteArray();
} catch (Exception e) {
throw new TransactionException(e);
}
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
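Note: the fixed output looks much shorter than the vulnerable input because the real work moved into an overload; the public method now reads the client's mutable user context exactly once and passes it down, instead of re-reading shared client and channel state while the call is in flight. The shape of that delegation (the overload itself is not shown in this pair and is assumed to exist):

    public byte[] getChannelConfigurationBytes() throws InvalidArgumentException, TransactionException {
        // Capture the caller identity once; the overload this delegates to
        // does the fetch using only the values it was handed.
        return getChannelConfigurationBytes(client.getUserContext());
    }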
#fixed code
private Channel reconstructChannel(String name, HFClient client, SampleOrg sampleOrg) throws Exception {
out("Reconstructing %s channel", name);
client.setUserContext(sampleOrg.getPeerAdmin());
Channel newChannel;
if (BAR_CHANNEL_NAME.equals(name)) { // bar channel was stored in samplestore in End2endIT testcase.
/**
* sampleStore.getChannel uses {@link HFClient#deSerializeChannel(byte[])}
*/
newChannel = sampleStore.getChannel(client, name);
if (!IS_FABRIC_V10) {
// Make sure there is one of each type peer at the very least. see End2end for how peers were constructed.
assertFalse(newChannel.getPeers(EnumSet.of(PeerRole.EVENT_SOURCE)).isEmpty());
assertFalse(newChannel.getPeers(PeerRole.NO_EVENT_SOURCE).isEmpty());
}
assertEquals(2, newChannel.getEventHubs().size());
out("Retrieved channel %s from sample store.", name);
} else {
// foo channel do manual reconstruction.
Properties clientTLSProperties = new Properties();
final String clientPEMTLSCertificate = sampleStore.getClientPEMTLSCertificate(sampleOrg);
if (clientPEMTLSCertificate != null) {
clientTLSProperties.put("clientCertBytes", clientPEMTLSCertificate.getBytes(UTF_8));
}
final String clientPEMTLSKey = sampleStore.getClientPEMTLSKey(sampleOrg);
if (clientPEMTLSKey != null) {
clientTLSProperties.put("clientKeyBytes", clientPEMTLSKey.getBytes(UTF_8));
}
newChannel = client.newChannel(name);
for (String ordererName : sampleOrg.getOrdererNames()) {
Properties ordererProperties = (Properties) clientTLSProperties.clone();
ordererProperties.putAll(testConfig.getOrdererProperties(ordererName));
newChannel.addOrderer(client.newOrderer(ordererName, sampleOrg.getOrdererLocation(ordererName),
ordererProperties));
}
boolean everyOther = false;
for (String peerName : sampleOrg.getPeerNames()) {
String peerLocation = sampleOrg.getPeerLocation(peerName);
Properties peerProperties = testConfig.getPeerProperties(peerName);
peerProperties.putAll(clientTLSProperties);
Peer peer = client.newPeer(peerName, peerLocation, peerProperties);
final PeerOptions peerEventingOptions = // we have two peers on one use block on other use filtered
everyOther ?
createPeerOptions().registerEventsForBlocks() :
createPeerOptions().registerEventsForFilteredBlocks();
newChannel.addPeer(peer, IS_FABRIC_V10 ?
createPeerOptions().setPeerRoles(PeerRole.NO_EVENT_SOURCE) : peerEventingOptions);
everyOther = !everyOther;
}
//For testing mix it up. For v1.1 use just peer eventing service for foo channel.
if (IS_FABRIC_V10) {
//Should have no peers with event sources.
assertTrue(newChannel.getPeers(EnumSet.of(PeerRole.EVENT_SOURCE)).isEmpty());
//Should have two peers with all roles but event source.
assertEquals(2, newChannel.getPeers(PeerRole.NO_EVENT_SOURCE).size());
for (String eventHubName : sampleOrg.getEventHubNames()) {
Properties eventhubProperties = (Properties) clientTLSProperties.clone();
eventhubProperties.putAll(testConfig.getEventHubProperties(eventHubName));
EventHub eventHub = client.newEventHub(eventHubName, sampleOrg.getEventHubLocation(eventHubName),
eventhubProperties);
newChannel.addEventHub(eventHub);
}
} else {
//Peers should have all roles. Do some sanity checks that they do.
//Should have two peers with event sources.
assertEquals(2, newChannel.getPeers(EnumSet.of(PeerRole.EVENT_SOURCE)).size());
//Check some other roles too..
assertEquals(2, newChannel.getPeers(EnumSet.of(PeerRole.CHAINCODE_QUERY, PeerRole.LEDGER_QUERY)).size());
assertEquals(2, newChannel.getPeers(PeerRole.ALL).size()); //really same as newChannel.getPeers()
}
assertEquals(IS_FABRIC_V10 ? sampleOrg.getEventHubNames().size() : 0, newChannel.getEventHubs().size());
}
//Just some sanity check tests
assertTrue(newChannel == client.getChannel(name));
assertTrue(client == TestUtils.getField(newChannel, "client"));
assertEquals(name, newChannel.getName());
assertEquals(2, newChannel.getPeers().size());
assertEquals(1, newChannel.getOrderers().size());
assertFalse(newChannel.isShutdown());
assertFalse(newChannel.isInitialized());
byte[] serializedChannelBytes = newChannel.serializeChannel();
//Just checks if channel can be serialized and deserialized .. otherwise this is just a waste :)
// Get channel back.
newChannel.shutdown(true);
newChannel = client.deSerializeChannel(serializedChannelBytes);
assertEquals(2, newChannel.getPeers().size());
assertEquals(1, newChannel.getOrderers().size());
assertNotNull(client.getChannel(name));
assertEquals(newChannel, client.getChannel(name));
assertFalse(newChannel.isInitialized());
assertFalse(newChannel.isShutdown());
newChannel.initialize();
assertTrue(newChannel.isInitialized());
assertFalse(newChannel.isShutdown());
//Begin tests with de-serialized channel.
//Query the actual peer for which channels it belongs to and check it belongs to this channel
for (Peer peer : newChannel.getPeers()) {
Set<String> channels = client.queryChannels(peer);
if (!channels.contains(name)) {
throw new AssertionError(format("Peer %s does not appear to belong to channel %s", peer.getName(), name));
}
}
//Just see if we can get channelConfiguration. Not required for the rest of scenario but should work.
final byte[] channelConfigurationBytes = newChannel.getChannelConfigurationBytes();
Configtx.Config channelConfig = Configtx.Config.parseFrom(channelConfigurationBytes);
assertNotNull(channelConfig);
Configtx.ConfigGroup channelGroup = channelConfig.getChannelGroup();
assertNotNull(channelGroup);
Map<String, Configtx.ConfigGroup> groupsMap = channelGroup.getGroupsMap();
assertNotNull(groupsMap.get("Orderer"));
assertNotNull(groupsMap.get("Application"));
//Before return lets see if we have the chaincode on the peers that we expect from End2endIT
//And if they were instantiated too.
for (Peer peer : newChannel.getPeers()) {
if (!checkInstalledChaincode(client, peer, CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_VERSION)) {
throw new AssertionError(format("Peer %s is missing chaincode name: %s, path:%s, version: %s",
peer.getName(), CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_PATH));
}
if (!checkInstantiatedChaincode(newChannel, peer, CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_VERSION)) {
throw new AssertionError(format("Peer %s is missing instantiated chaincode name: %s, path:%s, version: %s",
peer.getName(), CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_PATH));
}
}
assertTrue(newChannel.isInitialized());
assertFalse(newChannel.isShutdown());
out("Finished reconstructing channel %s.", name);
return newChannel;
} | #vulnerable code
private Channel reconstructChannel(String name, HFClient client, SampleOrg sampleOrg) throws Exception {
out("Reconstructing %s channel", name);
client.setUserContext(sampleOrg.getPeerAdmin());
Channel newChannel;
if (BAR_CHANNEL_NAME.equals(name)) { // bar channel was stored in samplestore in End2endIT testcase.
/**
* sampleStore.getChannel uses {@link HFClient#deSerializeChannel(byte[])}
*/
newChannel = sampleStore.getChannel(client, name);
if (!IS_FABRIC_V10) {
// Make sure there is one of each type peer at the very least. see End2end for how peers were constructed.
assertFalse(newChannel.getPeers(EnumSet.of(PeerRole.EVENT_SOURCE)).isEmpty());
assertFalse(newChannel.getPeers(PeerRole.NO_EVENT_SOURCE).isEmpty());
}
assertEquals(2, newChannel.getEventHubs().size());
out("Retrieved channel %s from sample store.", name);
} else {
// foo channel do manual reconstruction.
newChannel = client.newChannel(name);
for (String ordererName : sampleOrg.getOrdererNames()) {
newChannel.addOrderer(client.newOrderer(ordererName, sampleOrg.getOrdererLocation(ordererName),
testConfig.getOrdererProperties(ordererName)));
}
boolean everyOther = false;
for (String peerName : sampleOrg.getPeerNames()) {
String peerLocation = sampleOrg.getPeerLocation(peerName);
Properties peerProperties = testConfig.getPeerProperties(peerName);
Peer peer = client.newPeer(peerName, peerLocation, peerProperties);
final PeerOptions peerEventingOptions = // we have two peers on one use block on other use filtered
everyOther ?
createPeerOptions().registerEventsForBlocks() :
createPeerOptions().registerEventsForFilteredBlocks();
newChannel.addPeer(peer, IS_FABRIC_V10 ?
createPeerOptions().setPeerRoles(PeerRole.NO_EVENT_SOURCE) : peerEventingOptions);
everyOther = !everyOther;
}
//For testing mix it up. For v1.1 use just peer eventing service for foo channel.
if (IS_FABRIC_V10) {
//Should have no peers with event sources.
assertTrue(newChannel.getPeers(EnumSet.of(PeerRole.EVENT_SOURCE)).isEmpty());
//Should have two peers with all roles but event source.
assertEquals(2, newChannel.getPeers(PeerRole.NO_EVENT_SOURCE).size());
for (String eventHubName : sampleOrg.getEventHubNames()) {
EventHub eventHub = client.newEventHub(eventHubName, sampleOrg.getEventHubLocation(eventHubName),
testConfig.getEventHubProperties(eventHubName));
newChannel.addEventHub(eventHub);
}
} else {
//Peers should have all roles. Do some sanity checks that they do.
//Should have two peers with event sources.
assertEquals(2, newChannel.getPeers(EnumSet.of(PeerRole.EVENT_SOURCE)).size());
//Check some other roles too..
assertEquals(2, newChannel.getPeers(EnumSet.of(PeerRole.CHAINCODE_QUERY, PeerRole.LEDGER_QUERY)).size());
assertEquals(2, newChannel.getPeers(PeerRole.ALL).size()); //really same as newChannel.getPeers()
}
assertEquals(IS_FABRIC_V10 ? sampleOrg.getEventHubNames().size() : 0, newChannel.getEventHubs().size());
}
//Just some sanity check tests
assertTrue(newChannel == client.getChannel(name));
assertTrue(client == TestUtils.getField(newChannel, "client"));
assertEquals(name, newChannel.getName());
assertEquals(2, newChannel.getPeers().size());
assertEquals(1, newChannel.getOrderers().size());
assertFalse(newChannel.isShutdown());
assertFalse(newChannel.isInitialized());
byte[] serializedChannelBytes = newChannel.serializeChannel();
//Just checks if channel can be serialized and deserialized .. otherwise this is just a waste :)
// Get channel back.
newChannel.shutdown(true);
newChannel = client.deSerializeChannel(serializedChannelBytes);
assertEquals(2, newChannel.getPeers().size());
assertEquals(1, newChannel.getOrderers().size());
assertNotNull(client.getChannel(name));
assertEquals(newChannel, client.getChannel(name));
assertFalse(newChannel.isInitialized());
assertFalse(newChannel.isShutdown());
newChannel.initialize();
assertTrue(newChannel.isInitialized());
assertFalse(newChannel.isShutdown());
//Begin tests with de-serialized channel.
//Query the actual peer for which channels it belongs to and check it belongs to this channel
for (Peer peer : newChannel.getPeers()) {
Set<String> channels = client.queryChannels(peer);
if (!channels.contains(name)) {
throw new AssertionError(format("Peer %s does not appear to belong to channel %s", peer.getName(), name));
}
}
//Just see if we can get channelConfiguration. Not required for the rest of scenario but should work.
final byte[] channelConfigurationBytes = newChannel.getChannelConfigurationBytes();
Configtx.Config channelConfig = Configtx.Config.parseFrom(channelConfigurationBytes);
assertNotNull(channelConfig);
Configtx.ConfigGroup channelGroup = channelConfig.getChannelGroup();
assertNotNull(channelGroup);
Map<String, Configtx.ConfigGroup> groupsMap = channelGroup.getGroupsMap();
assertNotNull(groupsMap.get("Orderer"));
assertNotNull(groupsMap.get("Application"));
//Before return lets see if we have the chaincode on the peers that we expect from End2endIT
//And if they were instantiated too.
for (Peer peer : newChannel.getPeers()) {
if (!checkInstalledChaincode(client, peer, CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_VERSION)) {
throw new AssertionError(format("Peer %s is missing chaincode name: %s, path:%s, version: %s",
peer.getName(), CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_PATH));
}
if (!checkInstantiatedChaincode(newChannel, peer, CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_VERSION)) {
throw new AssertionError(format("Peer %s is missing instantiated chaincode name: %s, path:%s, version: %s",
peer.getName(), CHAIN_CODE_NAME, CHAIN_CODE_PATH, CHAIN_CODE_PATH));
}
}
assertTrue(newChannel.isInitialized());
assertFalse(newChannel.isShutdown());
out("Finished reconstructing channel %s.", name);
return newChannel;
}
#location 21
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
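Note: the visible difference is that the fixed version guards the optional client TLS material with explicit null checks before dereferencing it; the same guard shape applies anywhere a getter can legitimately return null (names below are from the fixed version above):

    String clientPEMTLSCertificate = sampleStore.getClientPEMTLSCertificate(sampleOrg);
    if (clientPEMTLSCertificate != null) { // only dereference when present
        clientTLSProperties.put("clientCertBytes", clientPEMTLSCertificate.getBytes(UTF_8));
    }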
#fixed code
Orderer(String name, String url, Properties properties) throws InvalidArgumentException {
if (StringUtil.isNullOrEmpty(name)) {
throw new InvalidArgumentException("Invalid name for orderer");
}
Exception e = checkGrpcUrl(url);
if (e != null) {
throw new InvalidArgumentException(e);
}
this.name = name;
this.url = url;
this.properties = properties == null ? null : (Properties) properties.clone(); //keep our own copy.
} | #vulnerable code
Ab.BroadcastResponse sendTransaction(Common.Envelope transaction) throws Exception {
if (shutdown) {
throw new TransactionException(format("Orderer %s was shutdown.", name));
}
logger.debug(format("Order.sendTransaction name: %s, url: %s", name, url));
OrdererClient localOrdererClient = ordererClient;
if (localOrdererClient == null || !localOrdererClient.isChannelActive()) {
ordererClient = new OrdererClient(this, new Endpoint(url, properties).getChannelBuilder());
localOrdererClient = ordererClient;
}
try {
Ab.BroadcastResponse resp = localOrdererClient.sendTransaction(transaction);
return resp;
} catch (TransactionException e) { //For any error lets start with a fresh connection.
ordererClient = null;
throw e;
} catch (Throwable t) {
ordererClient = null;
throw t;
}
}
#location 11
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
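Note: the vulnerable sendTransaction lazily (re)creates a client kept in a shared field and nulls that field on error, so a concurrent caller can observe null or a half-initialised client. The paired fix shown above changes a different part of the class (the constructor now defensively copies the Properties); the sketch below only illustrates how the lazy reconnect itself could be made race-free, using calls that already appear in the snippet (the clientLock field is illustrative, not the SDK's recorded fix):

    private final Object clientLock = new Object();

    Ab.BroadcastResponse sendTransaction(Common.Envelope transaction) throws Exception {
        OrdererClient client;
        synchronized (clientLock) {
            if (ordererClient == null || !ordererClient.isChannelActive()) {
                ordererClient = new OrdererClient(this, new Endpoint(url, properties).getChannelBuilder());
            }
            client = ordererClient;
        }
        return client.sendTransaction(transaction); // never re-reads the shared field
    }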
#fixed code
Peer(String name, String grpcURL, Properties properties) throws InvalidArgumentException {
Exception e = checkGrpcUrl(grpcURL);
if (e != null) {
throw new InvalidArgumentException("Bad peer url.", e);
}
if (StringUtil.isNullOrEmpty(name)) {
throw new InvalidArgumentException("Invalid name for peer");
}
this.url = grpcURL;
this.name = name;
this.properties = properties == null ? null : (Properties) properties.clone(); //keep our own copy.
} | #vulnerable code
ListenableFuture<FabricProposalResponse.ProposalResponse> sendProposalAsync(FabricProposal.SignedProposal proposal)
throws PeerException, InvalidArgumentException {
checkSendProposal(proposal);
logger.debug(format("peer.sendProposalAsync name:%s, url: %s", name, url));
return endorserClent.sendProposalAsync(proposal);
}
#location 7
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public byte[] getChannelConfigurationBytes() throws TransactionException {
try {
final Block configBlock = getConfigBlock(getRandomPeer());
Envelope envelopeRet = Envelope.parseFrom(configBlock.getData().getData(0));
Payload payload = Payload.parseFrom(envelopeRet.getPayload());
ConfigEnvelope configEnvelope = ConfigEnvelope.parseFrom(payload.getData());
return configEnvelope.getConfig().toByteArray();
} catch (Exception e) {
throw new TransactionException(e);
}
} | #vulnerable code
public byte[] getChannelConfigurationBytes() throws TransactionException {
try {
final Block configBlock = getConfigurationBlock();
Envelope envelopeRet = Envelope.parseFrom(configBlock.getData().getData(0));
Payload payload = Payload.parseFrom(envelopeRet.getPayload());
ConfigEnvelope configEnvelope = ConfigEnvelope.parseFrom(payload.getData());
return configEnvelope.getConfig().toByteArray();
} catch (Exception e) {
throw new TransactionException(e);
}
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testValidateInvalidCertificate() throws IOException, CertificateException {
assertFalse(crypto.validateCertificate(invalidPemCert));
} | #vulnerable code
@Test
public void testValidateInvalidCertificate() throws IOException, CertificateException {
BufferedInputStream pem = new BufferedInputStream(new ByteArrayInputStream(invalidPemCert));
assertFalse(crypto.validateCertificate(invalidPemCert));
}
#location 3
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
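Note: here the fix is simply to delete the BufferedInputStream, which was opened but never used or closed. When such a stream is genuinely needed, try-with-resources keeps the test leak-free even if an assertion throws:

    try (BufferedInputStream pem = new BufferedInputStream(new ByteArrayInputStream(invalidPemCert))) {
        // ... read from pem if the API under test required a stream ...
    }
    assertFalse(crypto.validateCertificate(invalidPemCert));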
#fixed code
protected void parseConfigBlock() throws TransactionException {
try {
Block parseFrom = getConfigBlock(getRandomPeer());
// final Block configBlock = getConfigurationBlock();
logger.debug(format("Channel %s Got config block getting MSP data and anchorPeers data", name));
Envelope envelope = Envelope.parseFrom(parseFrom.getData().getData(0));
Payload payload = Payload.parseFrom(envelope.getPayload());
ConfigEnvelope configEnvelope = ConfigEnvelope.parseFrom(payload.getData());
ConfigGroup channelGroup = configEnvelope.getConfig().getChannelGroup();
Map<String, MSP> newMSPS = traverseConfigGroupsMSP("", channelGroup, new HashMap<>(20));
msps = Collections.unmodifiableMap(newMSPS);
// anchorPeers = Collections.unmodifiableSet(traverseConfigGroupsAnchors("", channelGroup, new HashSet<>()));
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new TransactionException(e);
}
} | #vulnerable code
protected void parseConfigBlock() throws TransactionException {
try {
final Block configBlock = getConfigurationBlock();
logger.debug(format("Channel %s Got config block getting MSP data and anchorPeers data", name));
Envelope envelope = Envelope.parseFrom(configBlock.getData().getData(0));
Payload payload = Payload.parseFrom(envelope.getPayload());
ConfigEnvelope configEnvelope = ConfigEnvelope.parseFrom(payload.getData());
ConfigGroup channelGroup = configEnvelope.getConfig().getChannelGroup();
Map<String, MSP> newMSPS = traverseConfigGroupsMSP("", channelGroup, new HashMap<>(20));
msps = Collections.unmodifiableMap(newMSPS);
// anchorPeers = Collections.unmodifiableSet(traverseConfigGroupsAnchors("", channelGroup, new HashSet<>()));
} catch (TransactionException e) {
logger.error(e.getMessage(), e);
throw e;
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new TransactionException(e);
}
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
boolean hasConnected() {
return connected;
} | #vulnerable code
void initiateEventing(TransactionContext transactionContext, PeerOptions peersOptions) throws TransactionException {
this.transactionContext = transactionContext.retryTransactionSameContext();
if (peerEventingClient == null) {
//PeerEventServiceClient(Peer peer, ManagedChannelBuilder<?> channelBuilder, Properties properties)
// peerEventingClient = new PeerEventServiceClient(this, new HashSet<Channel>(Arrays.asList(new Channel[] {channel})));
peerEventingClient = new PeerEventServiceClient(this, Endpoint.createEndpoint(url, properties), properties, peersOptions);
peerEventingClient.connect(transactionContext);
}
}
#location 10
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
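Note: the flagged method does a check-then-act on the shared peerEventingClient field with no synchronization, so two threads can both see null and both create and connect a client. A minimal sketch that makes the initialisation atomic by synchronizing the method (the SDK's actual fix may differ; all calls below appear in the snippet above):

    synchronized void initiateEventing(TransactionContext transactionContext, PeerOptions peersOptions) throws TransactionException {
        this.transactionContext = transactionContext.retryTransactionSameContext();
        if (peerEventingClient == null) {
            peerEventingClient = new PeerEventServiceClient(this, Endpoint.createEndpoint(url, properties), properties, peersOptions);
            peerEventingClient.connect(transactionContext);
        }
    }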
#fixed code
synchronized boolean connect(final TransactionContext transactionContext) throws EventHubException {
if (connected) {
logger.warn(format("%s already connected.", toString()));
return true;
}
eventStream = null;
final CountDownLatch finishLatch = new CountDownLatch(1);
logger.debug(format("EventHub %s is connecting.", name));
lastConnectedAttempt = System.currentTimeMillis();
Endpoint endpoint = new Endpoint(url, properties);
managedChannel = endpoint.getChannelBuilder().build();
clientTLSCertificateDigest = endpoint.getClientTLSCertificateDigest();
events = EventsGrpc.newStub(managedChannel);
final ArrayList<Throwable> threw = new ArrayList<>();
final StreamObserver<PeerEvents.Event> eventStreamLocal = new StreamObserver<PeerEvents.Event>() {
@Override
public void onNext(PeerEvents.Event event) {
logger.debug(format("EventHub %s got event type: %s", EventHub.this.name, event.getEventCase().name()));
if (event.getEventCase() == PeerEvents.Event.EventCase.BLOCK) {
try {
eventQue.addBEvent(new BlockEvent(EventHub.this, event)); //add to channel queue
} catch (InvalidProtocolBufferException e) {
EventHubException eventHubException = new EventHubException(format("%s onNext error %s", this, e.getMessage()), e);
logger.error(eventHubException.getMessage());
threw.add(eventHubException);
}
} else if (event.getEventCase() == PeerEvents.Event.EventCase.REGISTER) {
connected = true;
connectedTime = System.currentTimeMillis();
finishLatch.countDown();
}
}
@Override
public void onError(Throwable t) {
if (shutdown) { //IF we're shutdown don't try anything more.
logger.trace(format("%s was shutdown.", EventHub.this.toString()));
connected = false;
eventStream = null;
finishLatch.countDown();
return;
}
final boolean isTerminated = managedChannel.isTerminated();
final boolean isChannelShutdown = managedChannel.isShutdown();
logger.error(format("%s terminated is %b shutdown is %b has error %s ", EventHub.this.toString(), isTerminated, isChannelShutdown,
t.getMessage()), new EventHubException(t));
threw.add(t);
finishLatch.countDown();
// logger.error("Error in stream: " + t.getMessage(), new EventHubException(t));
if (t instanceof StatusRuntimeException) {
StatusRuntimeException sre = (StatusRuntimeException) t;
Status sreStatus = sre.getStatus();
logger.error(format("%s :StatusRuntimeException Status %s. Description %s ", EventHub.this, sreStatus + "", sreStatus.getDescription()));
if (sre.getStatus().getCode() == Status.Code.INTERNAL || sre.getStatus().getCode() == Status.Code.UNAVAILABLE) {
connected = false;
eventStream = null;
disconnectedTime = System.currentTimeMillis();
try {
if (!isChannelShutdown) {
managedChannel.shutdownNow();
}
if (null != disconnectedHandler) {
try {
disconnectedHandler.disconnected(EventHub.this);
} catch (Exception e) {
logger.warn(format("Eventhub %s %s", EventHub.this.name, e.getMessage()), e);
eventQue.eventError(e);
}
}
} catch (Exception e) {
logger.warn(format("Eventhub %s Failed shutdown msg: %s", EventHub.this.name, e.getMessage()), e);
}
}
}
}
@Override
public void onCompleted() {
logger.warn(format("Stream completed %s", EventHub.this.toString()));
finishLatch.countDown();
}
};
sender = events.chat(eventStreamLocal);
try {
blockListen(transactionContext);
} catch (CryptoException e) {
throw new EventHubException(e);
}
try {
if (!finishLatch.await(EVENTHUB_CONNECTION_WAIT_TIME, TimeUnit.MILLISECONDS)) {
EventHubException evh = new EventHubException(format("EventHub %s failed to connect in %s ms.", name, EVENTHUB_CONNECTION_WAIT_TIME));
logger.debug(evh.getMessage(), evh);
throw evh;
}
logger.trace(format("Eventhub %s Done waiting for reply!", name));
} catch (InterruptedException e) {
logger.error(e);
}
if (!threw.isEmpty()) {
eventStream = null;
connected = false;
Throwable t = threw.iterator().next();
EventHubException evh = new EventHubException(t.getMessage(), t);
logger.error(format("EventHub %s Error in stream. error: " + t.getMessage(), toString()), evh);
throw evh;
}
logger.debug(format("Eventhub %s connect is done with connect status: %b ", name, connected));
if (connected) {
eventStream = eventStreamLocal;
}
return connected;
} | #vulnerable code
synchronized boolean connect(final TransactionContext transactionContext) throws EventHubException {
if (connected) {
logger.warn(format("%s already connected.", toString()));
return true;
}
eventStream = null;
final CountDownLatch finishLatch = new CountDownLatch(1);
logger.debug(format("EventHub %s is connecting.", name));
lastConnectedAttempt = System.currentTimeMillis();
managedChannel = new Endpoint(url, properties).getChannelBuilder().build();
events = EventsGrpc.newStub(managedChannel);
final ArrayList<Throwable> threw = new ArrayList<>();
final StreamObserver<PeerEvents.Event> eventStreamLocal = new StreamObserver<PeerEvents.Event>() {
@Override
public void onNext(PeerEvents.Event event) {
logger.debug(format("EventHub %s got event type: %s", EventHub.this.name, event.getEventCase().name()));
if (event.getEventCase() == PeerEvents.Event.EventCase.BLOCK) {
try {
eventQue.addBEvent(new BlockEvent(EventHub.this, event)); //add to channel queue
} catch (InvalidProtocolBufferException e) {
EventHubException eventHubException = new EventHubException(format("%s onNext error %s", this, e.getMessage()), e);
logger.error(eventHubException.getMessage());
threw.add(eventHubException);
}
} else if (event.getEventCase() == PeerEvents.Event.EventCase.REGISTER) {
connected = true;
connectedTime = System.currentTimeMillis();
finishLatch.countDown();
}
}
@Override
public void onError(Throwable t) {
if (shutdown) { //IF we're shutdown don't try anything more.
logger.trace(format("%s was shutdown.", EventHub.this.toString()));
connected = false;
eventStream = null;
finishLatch.countDown();
return;
}
final boolean isTerminated = managedChannel.isTerminated();
final boolean isChannelShutdown = managedChannel.isShutdown();
logger.error(format("%s terminated is %b shutdown is %b has error %s ", EventHub.this.toString(), isTerminated, isChannelShutdown,
t.getMessage()), new EventHubException(t));
threw.add(t);
finishLatch.countDown();
// logger.error("Error in stream: " + t.getMessage(), new EventHubException(t));
if (t instanceof StatusRuntimeException) {
StatusRuntimeException sre = (StatusRuntimeException) t;
Status sreStatus = sre.getStatus();
logger.error(format("%s :StatusRuntimeException Status %s. Description %s ", EventHub.this, sreStatus + "", sreStatus.getDescription()));
if (sre.getStatus().getCode() == Status.Code.INTERNAL || sre.getStatus().getCode() == Status.Code.UNAVAILABLE) {
connected = false;
eventStream = null;
disconnectedTime = System.currentTimeMillis();
try {
if (!isChannelShutdown) {
managedChannel.shutdownNow();
}
if (null != disconnectedHandler) {
try {
disconnectedHandler.disconnected(EventHub.this);
} catch (Exception e) {
logger.warn(format("Eventhub %s %s", EventHub.this.name, e.getMessage()), e);
eventQue.eventError(e);
}
}
} catch (Exception e) {
logger.warn(format("Eventhub %s Failed shutdown msg: %s", EventHub.this.name, e.getMessage()), e);
}
}
}
}
@Override
public void onCompleted() {
logger.warn(format("Stream completed %s", EventHub.this.toString()));
finishLatch.countDown();
}
};
sender = events.chat(eventStreamLocal);
try {
blockListen(transactionContext);
} catch (CryptoException e) {
throw new EventHubException(e);
}
try {
if (!finishLatch.await(EVENTHUB_CONNECTION_WAIT_TIME, TimeUnit.MILLISECONDS)) {
EventHubException evh = new EventHubException(format("EventHub %s failed to connect in %s ms.", name, EVENTHUB_CONNECTION_WAIT_TIME));
logger.debug(evh.getMessage(), evh);
throw evh;
}
logger.trace(format("Eventhub %s Done waiting for reply!", name));
} catch (InterruptedException e) {
logger.error(e);
}
if (!threw.isEmpty()) {
eventStream = null;
connected = false;
Throwable t = threw.iterator().next();
EventHubException evh = new EventHubException(t.getMessage(), t);
logger.error(format("EventHub %s Error in stream. error: " + t.getMessage(), toString()), evh);
throw evh;
}
logger.debug(format("Eventhub %s connect is done with connect status: %b ", name, connected));
if (connected) {
eventStream = eventStreamLocal;
}
return connected;
}
#location 14
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
boolean hasConnected() {
return connected;
} | #vulnerable code
void setTLSCertificateKeyPair(TLSCertificateKeyPair tlsCertificateKeyPair) {
properties.put("clientKeyBytes", tlsCertificateKeyPair.getKeyPemBytes());
properties.put("clientCertBytes", tlsCertificateKeyPair.getCertPEMBytes());
Endpoint endpoint = Endpoint.createEndpoint(url, properties);
foundClientTLSCertificateDigest = true;
clientTLSCertificateDigest = endpoint.getClientTLSCertificateDigest();
endorserClent = new EndorserClient(endpoint.getChannelBuilder());
}
#location 6
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
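Note: setTLSCertificateKeyPair mutates several pieces of related state (the shared Properties, the TLS digest, the endorser client and the found flag) with no common lock, so readers can see a partially updated combination. One minimal sketch is to put the mutation, and any readers of those fields, behind the same monitor (all calls below are from the snippet above; the SDK's recorded fix may be different):

    synchronized void setTLSCertificateKeyPair(TLSCertificateKeyPair tlsCertificateKeyPair) {
        properties.put("clientKeyBytes", tlsCertificateKeyPair.getKeyPemBytes());
        properties.put("clientCertBytes", tlsCertificateKeyPair.getCertPEMBytes());
        Endpoint endpoint = Endpoint.createEndpoint(url, properties);
        clientTLSCertificateDigest = endpoint.getClientTLSCertificateDigest();
        endorserClent = new EndorserClient(endpoint.getChannelBuilder());
        foundClientTLSCertificateDigest = true; // published last, under the same lock readers take
    }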
#fixed code
Orderer(String name, String url, Properties properties) throws InvalidArgumentException {
if (StringUtil.isNullOrEmpty(name)) {
throw new InvalidArgumentException("Invalid name for orderer");
}
Exception e = checkGrpcUrl(url);
if (e != null) {
throw new InvalidArgumentException(e);
}
this.name = name;
this.url = url;
this.properties = properties == null ? null : (Properties) properties.clone(); //keep our own copy.
} | #vulnerable code
DeliverResponse[] sendDeliver(Common.Envelope transaction) throws TransactionException {
if (shutdown) {
throw new TransactionException(format("Orderer %s was shutdown.", name));
}
OrdererClient localOrdererClient = ordererClient;
logger.debug(format("Order.sendDeliver name: %s, url: %s", name, url));
if (localOrdererClient == null || !localOrdererClient.isChannelActive()) {
localOrdererClient = new OrdererClient(this, new Endpoint(url, properties).getChannelBuilder());
ordererClient = localOrdererClient;
}
try {
return localOrdererClient.sendDeliver(transaction);
} catch (Throwable t) {
ordererClient = null;
throw t;
}
}
#location 17
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void updateChannelConfiguration(UpdateChannelConfiguration updateChannelConfiguration, Orderer orderer, byte[]... signers) throws TransactionException, InvalidArgumentException {
updateChannelConfiguration(client.getUserContext(), updateChannelConfiguration, orderer, signers);
} | #vulnerable code
public void updateChannelConfiguration(UpdateChannelConfiguration updateChannelConfiguration, Orderer orderer, byte[]... signers) throws TransactionException, InvalidArgumentException {
checkChannelState();
checkOrderer(orderer);
try {
final long startLastConfigIndex = getLastConfigIndex(orderer);
logger.trace(format("startLastConfigIndex: %d. Channel config wait time is: %d",
startLastConfigIndex, CHANNEL_CONFIG_WAIT_TIME));
sendUpdateChannel(updateChannelConfiguration.getUpdateChannelConfigurationAsBytes(), signers, orderer);
long currentLastConfigIndex = -1;
final long nanoTimeStart = System.nanoTime();
//Try to wait to see the channel got updated but don't fail if we don't see it.
do {
currentLastConfigIndex = getLastConfigIndex(orderer);
if (currentLastConfigIndex == startLastConfigIndex) {
final long duration = TimeUnit.MILLISECONDS.convert(System.nanoTime() - nanoTimeStart, TimeUnit.NANOSECONDS);
if (duration > CHANNEL_CONFIG_WAIT_TIME) {
logger.warn(format("Channel %s did not get updated last config after %d ms, Config wait time: %d ms. startLastConfigIndex: %d, currentLastConfigIndex: %d ",
name, duration, CHANNEL_CONFIG_WAIT_TIME, startLastConfigIndex, currentLastConfigIndex));
//waited long enough ..
currentLastConfigIndex = startLastConfigIndex - 1L; // just bail don't throw exception.
} else {
try {
Thread.sleep(ORDERER_RETRY_WAIT_TIME); //try again sleep
} catch (InterruptedException e) {
TransactionException te = new TransactionException("update channel thread Sleep", e);
logger.warn(te.getMessage(), te);
}
}
}
logger.trace(format("currentLastConfigIndex: %d", currentLastConfigIndex));
} while (currentLastConfigIndex == startLastConfigIndex);
} catch (TransactionException e) {
logger.error(format("Channel %s error: %s", name, e.getMessage()), e);
throw e;
} catch (Exception e) {
String msg = format("Channel %s error: %s", name, e.getMessage());
logger.error(msg, e);
throw new TransactionException(msg, e);
}
}
#location 12
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testGetInfo() throws Exception {
if (testConfig.isRunningAgainstFabric10()) {
HFCAInfo info = client.info();
assertNull(info.getVersion());
}
if (!testConfig.isRunningAgainstFabric10()) {
HFCAInfo info = client.info();
assertNotNull("client.info returned null.", info);
String version = info.getVersion();
assertNotNull("client.info.getVersion returned null.", version);
assertTrue(format("Version '%s' didn't match expected pattern", version), version.matches("^\\d+\\.\\d+\\.\\d+($|-.*)"));
}
} | #vulnerable code
@Test
public void testGetInfo() throws Exception {
if (testConfig.isRunningAgainstFabric10()) {
HFCAInfo info = client.info();
assertNull(info.getVersion());
}
if (!testConfig.isRunningAgainstFabric10()) {
HFCAInfo info = client.info();
assertTrue(info.getVersion().contains("1.1.0"));
}
}
#location 11
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
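Note: the vulnerable assertion dereferences info.getVersion() directly and also hard-codes a release string; the fixed test first asserts the value is non-null and then matches a version pattern. The general shape of the guard before dereferencing a possibly-null getter:

    String version = info.getVersion();
    assertNotNull("client.info.getVersion returned null.", version);
    assertTrue(version.matches("^\\d+\\.\\d+\\.\\d+($|-.*)")); // no hard-coded release number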
#fixed code
PeerEventServiceClient(Peer peer, Endpoint endpoint, Properties properties, PeerOptions peerOptions) {
this.channelBuilder = endpoint.getChannelBuilder();
this.filterBlock = peerOptions.isRegisterEventsForFilteredBlocks();
this.peer = peer;
name = peer.getName();
url = peer.getUrl();
channelName = peer.getChannel().getName();
this.peerOptions = peerOptions;
clientTLSCertificateDigest = endpoint.getClientTLSCertificateDigest();
this.channelEventQue = peer.getChannel().getChannelEventQue();
if (null == properties) {
peerEventRegistrationWaitTimeMilliSecs = PEER_EVENT_REGISTRATION_WAIT_TIME;
} else {
String peerEventRegistrationWaitTime = properties.getProperty("peerEventRegistrationWaitTime", Long.toString(PEER_EVENT_REGISTRATION_WAIT_TIME));
long tempPeerWaitTimeMilliSecs = PEER_EVENT_REGISTRATION_WAIT_TIME;
try {
tempPeerWaitTimeMilliSecs = Long.parseLong(peerEventRegistrationWaitTime);
} catch (NumberFormatException e) {
logger.warn(format("Peer event service registration %s wait time %s not parsable.", name, peerEventRegistrationWaitTime), e);
}
peerEventRegistrationWaitTimeMilliSecs = tempPeerWaitTimeMilliSecs;
}
} | #vulnerable code
void connectEnvelope(Envelope envelope) throws TransactionException {
if (shutdown) {
logger.warn(format("Peer %s not connecting is shutdown ", peer));
return;
}
ManagedChannel lmanagedChannel = managedChannel;
if (lmanagedChannel == null || lmanagedChannel.isTerminated() || lmanagedChannel.isShutdown()) {
lmanagedChannel = channelBuilder.build();
managedChannel = lmanagedChannel;
}
try {
DeliverGrpc.DeliverStub broadcast = DeliverGrpc.newStub(lmanagedChannel);
// final DeliverResponse[] ret = new DeliverResponse[1];
// final List<DeliverResponse> retList = new ArrayList<>();
final List<Throwable> throwableList = new ArrayList<>();
final CountDownLatch finishLatch = new CountDownLatch(1);
so = new StreamObserver<DeliverResponse>() {
@Override
public void onNext(DeliverResponse resp) {
// logger.info("Got Broadcast response: " + resp);
logger.trace(format("DeliverResponse channel %s peer %s resp status value:%d status %s, typecase %s ",
channelName, peer.getName(), resp.getStatusValue(), resp.getStatus(), resp.getTypeCase()));
final DeliverResponse.TypeCase typeCase = resp.getTypeCase();
if (typeCase == STATUS) {
logger.debug(format("DeliverResponse channel %s peer %s setting done.",
channelName, peer.getName()));
if (resp.getStatus() == Common.Status.SUCCESS) { // unlike you may think this only happens when all blocks are fetched.
peer.setLastConnectTime(System.currentTimeMillis());
peer.resetReconnectCount();
} else {
throwableList.add(new TransactionException(format("Channel %s peer %s Status returned failure code %d (%s) during peer service event registration",
channelName, peer.getName(), resp.getStatusValue(), resp.getStatus().name())));
}
} else if (typeCase == FILTERED_BLOCK || typeCase == BLOCK) {
if (typeCase == BLOCK) {
logger.trace(format("Channel %s peer %s got event block hex hashcode: %016x, block number: %d",
channelName, peer.getName(), resp.getBlock().hashCode(), resp.getBlock().getHeader().getNumber()));
} else {
logger.trace(format("Channel %s peer %s got event block hex hashcode: %016x, block number: %d",
channelName, peer.getName(), resp.getFilteredBlock().hashCode(), resp.getFilteredBlock().getNumber()));
}
peer.setLastConnectTime(System.currentTimeMillis());
long reconnectCount = peer.getReconnectCount();
if (reconnectCount > 1) {
logger.info(format("Peer eventing service reconnected after %d attempts on channel %s, peer %s, url %s",
reconnectCount, channelName, name, url));
}
peer.resetReconnectCount();
BlockEvent blockEvent = new BlockEvent(peer, resp);
peer.setLastBlockSeen(blockEvent);
channelEventQue.addBEvent(blockEvent);
} else {
logger.error(format("Channel %s peer %s got event block with unknown type: %s, %d",
channelName, peer.getName(), typeCase.name(), typeCase.getNumber()));
throwableList.add(new TransactionException(format("Channel %s peer %s Status got unknown type %s, %d",
channelName, peer.getName(), typeCase.name(), typeCase.getNumber())));
}
finishLatch.countDown();
}
@Override
public void onError(Throwable t) {
ManagedChannel llmanagedChannel = managedChannel;
if (llmanagedChannel != null) {
llmanagedChannel.shutdownNow();
managedChannel = null;
}
if (!shutdown) {
final long reconnectCount = peer.getReconnectCount();
if (PEER_EVENT_RECONNECTION_WARNING_RATE > 1 && reconnectCount % PEER_EVENT_RECONNECTION_WARNING_RATE == 1) {
logger.warn(format("Received error on peer eventing service on channel %s, peer %s, url %s, attempts %d. %s",
channelName, name, url, reconnectCount, t.getMessage()));
} else {
logger.trace(format("Received error on peer eventing service on channel %s, peer %s, url %s, attempts %d. %s",
channelName, name, url, reconnectCount, t.getMessage()));
}
peer.reconnectPeerEventServiceClient(PeerEventServiceClient.this, t);
}
finishLatch.countDown();
}
@Override
public void onCompleted() {
logger.debug(format("DeliverResponse onCompleted channel %s peer %s setting done.",
channelName, peer.getName()));
// done = true;
//There should have been a done before this...
finishLatch.countDown();
}
};
nso = filterBlock ? broadcast.deliverFiltered(so) : broadcast.deliver(so);
nso.onNext(envelope);
// try {
if (!finishLatch.await(peerEventRegistrationWaitTimeMilliSecs, TimeUnit.MILLISECONDS)) {
TransactionException ex = new TransactionException(format(
"Channel %s connect time exceeded for peer eventing service %s, timed out at %d ms.", channelName, name, peerEventRegistrationWaitTimeMilliSecs));
throwableList.add(0, ex);
}
logger.trace("Done waiting for reply!");
if (!throwableList.isEmpty()) {
ManagedChannel llmanagedChannel = managedChannel;
if (llmanagedChannel != null) {
llmanagedChannel.shutdownNow();
managedChannel = null;
}
Throwable throwable = throwableList.get(0);
peer.reconnectPeerEventServiceClient(this, throwable);
}
} catch (InterruptedException e) {
ManagedChannel llmanagedChannel = managedChannel;
if (llmanagedChannel != null) {
llmanagedChannel.shutdownNow();
managedChannel = null;
}
logger.error(e); // not likely
peer.reconnectPeerEventServiceClient(this, e);
} finally {
if (null != nso) {
try {
nso.onCompleted();
} catch (Exception e) { //Best effort only report on debug
logger.debug(format("Exception completing connect with channel %s, name %s, url %s %s",
channelName, name, url, e.getMessage()), e);
}
}
}
}
#location 141
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
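Note: connectEnvelope snapshots managedChannel into a local before using it, but the gRPC callbacks also shut down and null out the same field with no common lock, which is the kind of interleaving the THREAD_SAFETY_VIOLATION report points at. A sketch of one way to make the replace-and-shutdown hand-off explicit (java.util.concurrent.atomic.AtomicReference and the helper name are illustrative, not the SDK's fix):

    private final AtomicReference<ManagedChannel> channelRef = new AtomicReference<>();

    void shutdownChannel() {
        ManagedChannel old = channelRef.getAndSet(null); // atomically take ownership
        if (old != null) {
            old.shutdownNow();
        }
    }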
#fixed code
TBSCertList.CRLEntry[] getRevokes(Date r) throws Exception {
String crl = client.generateCRL(admin, r, null, null, null);
return parseCRL(crl);
} | #vulnerable code
TBSCertList.CRLEntry[] parseCRL(String crl) throws Exception {
Base64.Decoder b64dec = Base64.getDecoder();
final byte[] decode = b64dec.decode(crl.getBytes(UTF_8));
ByteArrayInputStream inStream = new ByteArrayInputStream(decode);
ASN1InputStream asnInputStream = new ASN1InputStream(inStream);
return CertificateList.getInstance(asnInputStream.readObject()).getRevokedCertificates();
}
#location 9
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
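Note: the RESOURCE_LEAK here is the ASN1InputStream (and the ByteArrayInputStream it wraps) that is never closed; the fixed output above only shows getRevokes delegating to parseCRL, so the sketch below illustrates closing the stream rather than the project's recorded fix. Bouncy Castle's ASN1InputStream is Closeable, so try-with-resources works:

    TBSCertList.CRLEntry[] parseCRL(String crl) throws Exception {
        byte[] decoded = Base64.getDecoder().decode(crl.getBytes(UTF_8));
        try (ASN1InputStream asnInputStream = new ASN1InputStream(new ByteArrayInputStream(decoded))) {
            return CertificateList.getInstance(asnInputStream.readObject()).getRevokedCertificates();
        }
    }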
#fixed code
Orderer(String name, String url, Properties properties) throws InvalidArgumentException {
if (StringUtil.isNullOrEmpty(name)) {
throw new InvalidArgumentException("Invalid name for orderer");
}
Exception e = checkGrpcUrl(url);
if (e != null) {
throw new InvalidArgumentException(e);
}
this.name = name;
this.url = url;
this.properties = properties == null ? null : (Properties) properties.clone(); //keep our own copy.
} | #vulnerable code
DeliverResponse[] sendDeliver(Common.Envelope transaction) throws TransactionException {
if (shutdown) {
throw new TransactionException(format("Orderer %s was shutdown.", name));
}
OrdererClient localOrdererClient = ordererClient;
logger.debug(format("Order.sendDeliver name: %s, url: %s", name, url));
if (localOrdererClient == null || !localOrdererClient.isChannelActive()) {
localOrdererClient = new OrdererClient(this, new Endpoint(url, properties).getChannelBuilder());
ordererClient = localOrdererClient;
}
try {
DeliverResponse[] response = localOrdererClient.sendDeliver(transaction);
return response;
} catch (TransactionException e) { //For any error lets start with a fresh connection.
ordererClient = null;
throw e;
} catch (Throwable t) {
ordererClient = null;
throw t;
}
}
#location 11
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void updateChannelConfiguration(UpdateChannelConfiguration updateChannelConfiguration, byte[]... signers) throws TransactionException, InvalidArgumentException {
updateChannelConfiguration(client.getUserContext(), updateChannelConfiguration, getRandomOrderer(), signers);
} | #vulnerable code
public void updateChannelConfiguration(UpdateChannelConfiguration updateChannelConfiguration, byte[]... signers) throws TransactionException, InvalidArgumentException {
updateChannelConfiguration(updateChannelConfiguration, getRandomOrderer(), signers);
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
boolean hasConnected() {
return connected;
} | #vulnerable code
void setTLSCertificateKeyPair(TLSCertificateKeyPair tlsCertificateKeyPair) {
properties.put("clientKeyBytes", tlsCertificateKeyPair.getKeyPemBytes());
properties.put("clientCertBytes", tlsCertificateKeyPair.getCertPEMBytes());
Endpoint endpoint = Endpoint.createEndpoint(url, properties);
foundClientTLSCertificateDigest = true;
clientTLSCertificateDigest = endpoint.getClientTLSCertificateDigest();
endorserClent = new EndorserClient(endpoint.getChannelBuilder());
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
TBSCertList.CRLEntry[] getRevokes(Date r) throws Exception {
String crl = client.generateCRL(admin, r, null, null, null);
return parseCRL(crl);
} | #vulnerable code
TBSCertList.CRLEntry[] getRevokes(Date r) throws Exception {
String crl = client.generateCRL(admin, r, null, null, null);
Base64.Decoder b64dec = Base64.getDecoder();
final byte[] decode = b64dec.decode(crl.getBytes(UTF_8));
ByteArrayInputStream inStream = new ByteArrayInputStream(decode);
ASN1InputStream asnInputStream = new ASN1InputStream(inStream);
return CertificateList.getInstance(asnInputStream.readObject()).getRevokedCertificates();
}
#location 11
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
boolean hasConnected() {
return connected;
} | #vulnerable code
void setProperties(Properties properties) {
this.properties = properties;
}
#location 2
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
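Note: setProperties performs a plain write to a field that other threads read, so the update may never become visible to them. The smallest sketches are a volatile field or synchronized accessors; the defensive copy mirrors what the constructors elsewhere in this file already do (illustrative, not necessarily the SDK's recorded fix):

    private volatile Properties properties;

    void setProperties(Properties properties) {
        this.properties = properties == null ? null : (Properties) properties.clone();
    }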
#fixed code
ServiceDiscovery(Channel channel, Collection<Peer> serviceDiscoveryPeers, TransactionContext transactionContext) {
this.serviceDiscoveryPeers = serviceDiscoveryPeers;
this.channel = channel;
this.channelName = channel.getName();
this.transactionContext = transactionContext.retryTransactionSameContext();
} | #vulnerable code
Map<String, SDChaindcode> discoverEndorserEndpoints(TransactionContext transactionContext, List<List<ServiceDiscoveryChaincodeCalls>> chaincodeNames) throws ServiceDiscoveryException {
if (null == chaincodeNames) {
logger.warn("Discover of chaincode names was null.");
return Collections.emptyMap();
}
if (chaincodeNames.isEmpty()) {
logger.warn("Discover of chaincode names was empty.");
return Collections.emptyMap();
}
if (DEBUG) {
StringBuilder cns = new StringBuilder(1000);
String sep = "";
cns.append("[");
for (List<ServiceDiscoveryChaincodeCalls> s : chaincodeNames) {
ServiceDiscoveryChaincodeCalls n = s.get(0);
cns.append(sep).append(n.write(s.subList(1, s.size())));
sep = ", ";
}
cns.append("]");
logger.debug(format("Channel %s doing discovery for chaincodes: %s", channelName, cns.toString()));
}
ArrayList<Peer> speers = new ArrayList<>(serviceDiscoveryPeers);
Collections.shuffle(speers);
final Map<String, SDChaindcode> ret = new HashMap<>();
SDNetwork sdNetwork = networkDiscovery(transactionContext, false);
ServiceDiscoveryException serviceDiscoveryException = null;
for (Peer serviceDiscoveryPeer : speers) {
serviceDiscoveryException = null;
try {
logger.debug(format("Channel %s doing discovery for chaincodes on peer: %s", channelName, serviceDiscoveryPeer.toString()));
TransactionContext ltransactionContext = transactionContext.retryTransactionSameContext();
final byte[] clientTLSCertificateDigest = serviceDiscoveryPeer.getClientTLSCertificateDigest();
if (null == clientTLSCertificateDigest) {
logger.warn(format("Channel %s peer %s requires mutual tls for service discovery.", channelName, serviceDiscoveryPeer.toString()));
continue;
}
ByteString clientIdent = ltransactionContext.getIdentity().toByteString();
ByteString tlshash = ByteString.copyFrom(clientTLSCertificateDigest);
Protocol.AuthInfo authentication = Protocol.AuthInfo.newBuilder().setClientIdentity(clientIdent).setClientTlsCertHash(tlshash).build();
List<Protocol.Query> fq = new ArrayList<>(chaincodeNames.size());
for (List<ServiceDiscoveryChaincodeCalls> chaincodeName : chaincodeNames) {
if (ret.containsKey(chaincodeName.get(0).getName())) {
continue;
}
LinkedList<Protocol.ChaincodeCall> chaincodeCalls = new LinkedList<>();
chaincodeName.forEach(serviceDiscoveryChaincodeCalls -> chaincodeCalls.add(serviceDiscoveryChaincodeCalls.build()));
List<Protocol.ChaincodeInterest> cinn = new ArrayList<>(1);
chaincodeName.forEach(ServiceDiscoveryChaincodeCalls::build);
Protocol.ChaincodeInterest cci = Protocol.ChaincodeInterest.newBuilder().addAllChaincodes(chaincodeCalls).build();
cinn.add(cci);
Protocol.ChaincodeQuery chaincodeQuery = Protocol.ChaincodeQuery.newBuilder().addAllInterests(cinn).build();
fq.add(Protocol.Query.newBuilder().setChannel(channelName).setCcQuery(chaincodeQuery).build());
}
if (fq.size() == 0) {
//this would be odd but lets take care of it.
break;
}
Protocol.Request request = Protocol.Request.newBuilder().addAllQueries(fq).setAuthentication(authentication).build();
ByteString payloadBytes = request.toByteString();
ByteString signatureBytes = ltransactionContext.signByteStrings(payloadBytes);
Protocol.SignedRequest sr = Protocol.SignedRequest.newBuilder()
.setPayload(payloadBytes).setSignature(signatureBytes).build();
if (IS_TRACE_LEVEL && null != diagnosticFileDumper) { // dump protobuf we sent
logger.trace(format("Service discovery channel %s %s service chaincode query sent %s", channelName, serviceDiscoveryPeer,
diagnosticFileDumper.createDiagnosticProtobufFile(sr.toByteArray())));
}
logger.debug(format("Channel %s peer %s sending chaincode query request", channelName, serviceDiscoveryPeer.toString()));
final Protocol.Response response = serviceDiscoveryPeer.sendDiscoveryRequestAsync(sr).get(SERVICE_DISCOVERY_WAITTIME, TimeUnit.MILLISECONDS);
if (IS_TRACE_LEVEL && null != diagnosticFileDumper) { // dump protobuf we get
logger.trace(format("Service discovery channel %s %s service chaincode query returned %s", channelName, serviceDiscoveryPeer,
diagnosticFileDumper.createDiagnosticProtobufFile(response.toByteArray())));
}
logger.debug(format("Channel %s peer %s completed chaincode query request", channelName, serviceDiscoveryPeer.toString()));
serviceDiscoveryPeer.hasConnected();
for (Protocol.QueryResult queryResult : response.getResultsList()) {
if (queryResult.getResultCase().getNumber() == Protocol.QueryResult.ERROR_FIELD_NUMBER) {
ServiceDiscoveryException discoveryException = new ServiceDiscoveryException(format("Error %s", queryResult.getError().getContent()));
logger.error(discoveryException.getMessage());
continue;
}
if (queryResult.getResultCase().getNumber() != Protocol.QueryResult.CC_QUERY_RES_FIELD_NUMBER) {
ServiceDiscoveryException discoveryException = new ServiceDiscoveryException(format("Error expected chaincode endorsement query but got %s : ", queryResult.getResultCase().toString()));
logger.error(discoveryException.getMessage());
continue;
}
Protocol.ChaincodeQueryResult ccQueryRes = queryResult.getCcQueryRes();
if (ccQueryRes.getContentList().isEmpty()) {
throw new ServiceDiscoveryException(format("Error %s", queryResult.getError().getContent()));
}
for (Protocol.EndorsementDescriptor es : ccQueryRes.getContentList()) {
final String chaincode = es.getChaincode();
List<SDLayout> layouts = new LinkedList<>();
for (Protocol.Layout layout : es.getLayoutsList()) {
SDLayout sdLayout = null;
Map<String, Integer> quantitiesByGroupMap = layout.getQuantitiesByGroupMap();
for (Map.Entry<String, Integer> qmap : quantitiesByGroupMap.entrySet()) {
final String key = qmap.getKey();
final int quantity = qmap.getValue();
if (quantity < 1) {
continue;
}
Protocol.Peers peers = es.getEndorsersByGroupsMap().get(key);
if (peers == null || peers.getPeersCount() == 0) {
continue;
}
List<SDEndorser> sdEndorsers = new LinkedList<>();
for (Protocol.Peer pp : peers.getPeersList()) {
SDEndorser ppp = new SDEndorser(pp, null, null, asLocalhost);
final String endPoint = ppp.getEndpoint();
SDEndorser nppp = sdNetwork.getEndorserByEndpoint(endPoint);
if (null == nppp) {
sdNetwork = networkDiscovery(transactionContext, true);
if (null == sdNetwork) {
throw new ServiceDiscoveryException("Failed to discover network resources.");
}
nppp = sdNetwork.getEndorserByEndpoint(ppp.getEndpoint());
if (null == nppp) {
throw new ServiceDiscoveryException(format("Failed to discover peer endpoint information %s for chaincode %s ", endPoint, chaincode));
}
}
sdEndorsers.add(nppp);
}
if (sdLayout == null) {
sdLayout = new SDLayout();
layouts.add(sdLayout);
}
sdLayout.addGroup(key, quantity, sdEndorsers);
}
}
if (layouts.isEmpty()) {
logger.warn(format("Channel %s chaincode %s discovered no layouts!", channelName, chaincode));
} else {
if (DEBUG) {
StringBuilder sb = new StringBuilder(1000);
sb.append("Channel ").append(channelName)
.append(" found ").append(layouts.size()).append(" layouts for chaincode: ").append(es.getChaincode());
sb.append(", layouts: [");
String sep = "";
for (SDLayout layout : layouts) {
sb.append(sep).append(layout);
sep = ", ";
}
sb.append("]");
logger.debug(sb.toString());
}
ret.put(es.getChaincode(), new SDChaindcode(es.getChaincode(), layouts));
}
}
}
if (ret.size() == chaincodeNames.size()) {
break; // found them all.
}
} catch (ServiceDiscoveryException e) {
logger.warn(format("Service discovery error on peer %s. Error: %s", serviceDiscoveryPeer.toString(), e.getMessage()));
serviceDiscoveryException = e;
} catch (Exception e) {
logger.warn(format("Service discovery error on peer %s. Error: %s", serviceDiscoveryPeer.toString(), e.getMessage()));
serviceDiscoveryException = new ServiceDiscoveryException(e);
}
}
if (null != serviceDiscoveryException) {
throw serviceDiscoveryException;
}
if (ret.size() != chaincodeNames.size()) {
logger.warn((format("Channel %s failed to find all layouts for chaincodes. Expected: %d and found: %d", channelName, chaincodeNames.size(), ret.size())));
}
return ret;
}
#location 43
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
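#note null-safe lookups
The record above is tagged NULL_DEREFERENCE: the discovery-result handling walks nested getters (group -> peers, error payload) where several steps can legitimately return null. A minimal, self-contained sketch of guarding such a lookup before dereferencing; the class and method names below are invented for illustration and are not part of the Fabric SDK.
import java.util.Map;
import java.util.Optional;

public class SafeLookup {

    /** Returns the endpoint for the given group/peer, or empty if any step of the lookup is missing. */
    static Optional<String> endpointFor(Map<String, Map<String, String>> groups, String group, String peer) {
        Map<String, String> peers = groups.get(group);   // may be null for an unknown group
        if (peers == null) {
            return Optional.empty();                     // guard before dereferencing
        }
        return Optional.ofNullable(peers.get(peer));     // the peer itself may also be absent
    }

    public static void main(String[] args) {
        Map<String, Map<String, String>> groups = Map.of("G0", Map.of("peer0", "peer0.org1:7051"));
        System.out.println(endpointFor(groups, "G0", "peer0"));   // Optional[peer0.org1:7051]
        System.out.println(endpointFor(groups, "G1", "peer0"));   // Optional.empty
    }
}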
#fixed code
Peer(String name, String grpcURL, Properties properties) throws InvalidArgumentException {
Exception e = checkGrpcUrl(grpcURL);
if (e != null) {
throw new InvalidArgumentException("Bad peer url.", e);
}
if (StringUtil.isNullOrEmpty(name)) {
throw new InvalidArgumentException("Invalid name for peer");
}
this.url = grpcURL;
this.name = name;
this.properties = properties == null ? null : (Properties) properties.clone(); //keep our own copy.
} | #vulnerable code
FabricProposalResponse.ProposalResponse sendProposal(FabricProposal.SignedProposal proposal)
throws PeerException, InvalidArgumentException {
checkSendProposal(proposal);
logger.debug(format("peer.sendProposalAsync name: %s, url: %s", name, url));
return endorserClent.sendProposal(proposal);
}
#location 7
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
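#note guarding a mutable client field
The vulnerable half reads the mutable endorser-client field without any synchronization, so a concurrent shutdown can clear it between the check and the call. A minimal sketch of one common remedy -- publish the reference through a volatile field and operate on a local copy; RpcClient below is a hypothetical stand-in, not the SDK's endorser client.
public class ClientHolder {

    /** Hypothetical transport client; only its lifecycle matters for this sketch. */
    interface RpcClient {
        String send(String request);
        void close();
    }

    private volatile RpcClient client;   // safe publication of the current client

    void setClient(RpcClient c) {
        this.client = c;
    }

    void shutdown() {
        RpcClient c = client;
        client = null;                   // other threads observe the change
        if (c != null) {
            c.close();
        }
    }

    String send(String request) {
        RpcClient c = client;            // read the field once into a local
        if (c == null) {
            throw new IllegalStateException("client has been shut down");
        }
        return c.send(request);          // the local copy cannot be nulled underneath us
    }

    public static void main(String[] args) {
        ClientHolder holder = new ClientHolder();
        holder.setClient(new RpcClient() {
            public String send(String request) { return "ack:" + request; }
            public void close() { }
        });
        System.out.println(holder.send("proposal"));   // ack:proposal
        holder.shutdown();
    }
}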
#fixed code
private static final Collection<MapArea> createAreasForSimpleMultipolygon(OsmRelation relation,
TLongObjectMap<MapNode> nodeIdMap, OsmEntityProvider db) throws EntityNotFoundException {
assert isSimpleMultipolygon(relation, db);
OsmEntity tagSource = null;
List<MapNode> outerNodes = null;
List<List<MapNode>> holes = new ArrayList<List<MapNode>>();
for (OsmRelationMember member : membersAsList(relation)) {
if (member.getType() == EntityType.Way) {
OsmWay way = db.getWay(member.getId());
if ("inner".equals(member.getRole())) {
List<MapNode> hole = new ArrayList<MapNode>(way.getNumberOfNodes());
for (long nodeId : nodesAsList(way).toArray()) {
hole.add(nodeIdMap.get(nodeId));
}
holes.add(hole);
} else if ("outer".equals(member.getRole())) {
tagSource = relation.getNumberOfTags() > 1 ? relation : way;
outerNodes = new ArrayList<MapNode>(way.getNumberOfNodes());
for (long nodeId : nodesAsList(way).toArray()) {
outerNodes.add(nodeIdMap.get(nodeId));
}
}
}
}
return singleton(new MapArea(tagSource.getId(), tagSource instanceof OsmRelation,
OSMToMapDataConverter.tagsOfEntity(tagSource), outerNodes, holes));
} | #vulnerable code
private static final Collection<MapArea> createAreasForSimpleMultipolygon(OsmRelation relation,
TLongObjectMap<MapNode> nodeIdMap, OsmEntityProvider db) throws EntityNotFoundException {
assert isSimpleMultipolygon(relation, db);
OsmEntity tagSource = null;
List<MapNode> outerNodes = null;
List<List<MapNode>> holes = new ArrayList<List<MapNode>>();
for (OsmRelationMember member : membersAsList(relation)) {
if (member.getType() == EntityType.Way) {
OsmWay way = db.getWay(member.getId());
if ("inner".equals(member.getRole())) {
List<MapNode> hole = new ArrayList<MapNode>(way.getNumberOfNodes());
for (long nodeId : nodesAsList(way).toArray()) {
hole.add(nodeIdMap.get(nodeId));
}
holes.add(hole);
} else if ("outer".equals(member.getRole())) {
tagSource = relation.getNumberOfTags() > 1 ? relation : way;
outerNodes = new ArrayList<MapNode>(way.getNumberOfNodes());
for (long nodeId : nodesAsList(way).toArray()) {
outerNodes.add(nodeIdMap.get(nodeId));
}
}
}
}
return singleton(new MapArea(tagSource, outerNodes, holes));
}
#location 39
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
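#note validating a loop result before use
In the pair above, tagSource is only assigned when a member with role "outer" is found; if no such member exists it stays null and is presumably the flagged dereference. A minimal sketch, independent of the OSM2World types, of failing fast with a clear message instead of letting the null escape.
import java.util.List;

public class RequireOuter {

    record Member(String role, long id) { }

    /** Finds the mandatory "outer" member, or fails with a descriptive error instead of a later NPE. */
    static Member outerMember(List<Member> members) {
        Member outer = null;
        for (Member m : members) {
            if ("outer".equals(m.role())) {
                outer = m;
            }
        }
        if (outer == null) {   // make the assumption explicit right after the loop
            throw new IllegalArgumentException("multipolygon relation has no member with role=outer");
        }
        return outer;
    }

    public static void main(String[] args) {
        System.out.println(outerMember(List.of(new Member("inner", 1L), new Member("outer", 2L))));
    }
}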
#fixed code
public static void main(String[] unparsedArgs) {
/* assume --gui if no parameters are given */
if (unparsedArgs.length == 0) {
System.out.println("No parameters, running graphical interface.\n"
+ "If you want to use the command line, use the --help"
+ " parameter for a list of available parameters.");
unparsedArgs = new String[]{"--gui"};
}
/* parse command line arguments */
CLIArguments args = null;
try {
args = parseArguments(unparsedArgs);
} catch (Exception e) {
System.err.println(e.getMessage());
System.exit(1);
}
/* parse lines from parameter file (if one exists) */
List<CLIArguments> argumentsList = Collections.singletonList(args);
if (args.isParameterFile()) {
argumentsList = new ArrayList<CLIArguments>();
try {
List<String[]> unparsedArgsLines = CLIArgumentsUtil
.getUnparsedParameterGroups(args.getParameterFile());
for (String[] unparsedArgsLine : unparsedArgsLines) {
try {
argumentsList.add(parseArguments(unparsedArgsLine));
} catch (Exception e) {
System.err.println("Could not parse parameters from file:");
System.err.println(unparsedArgsLine);
System.err.println("Ignoring it. Reason:");
System.err.println(e.getMessage());
}
}
} catch (IOException e) {
System.err.println(e.getMessage());
System.exit(1);
}
}
/* collect parameter groups into compatible groups
* (groups of parameter groups that use the same input and config files) */
List<CLIArgumentsGroup> argumentsGroups = new ArrayList<CLIArgumentsGroup>();
for (CLIArguments arguments : argumentsList) {
boolean added = false;
for (CLIArgumentsGroup compatibleGroup : argumentsGroups) {
if (compatibleGroup.isCompatible(arguments)) {
// add to existing compatible group
compatibleGroup.addCLIArguments(arguments);
added = true;
break;
}
}
if (!added) {
// start a new compatible group
argumentsGroups.add(new CLIArgumentsGroup(arguments));
}
}
/* execute conversions */
for (CLIArgumentsGroup argumentsGroup : argumentsGroups) {
if (argumentsList.size() > 1) {
System.out.print("executing conversion for these parameter lines: ");
for (CLIArguments p : argumentsGroup.getCLIArgumentsList()) {
System.out.print(argumentsList.indexOf(p) + " ");
}
System.out.print("\n");
}
executeArgumentsGroup(argumentsGroup);
}
} | #vulnerable code
public static void main(String[] unparsedArgs) {
ProgramMode programMode;
CLIArguments args = null;
/* parse command line arguments */
if (unparsedArgs.length > 0) {
try {
args = CliFactory.parseArguments(CLIArguments.class, unparsedArgs);
} catch (ArgumentValidationException e) {
System.err.println(e.getMessage());
System.exit(1);
}
if (!CLIArgumentsUtil.isValid(args)) {
System.err.println(CLIArgumentsUtil.getErrorString(args));
System.exit(1);
}
programMode = CLIArgumentsUtil.getProgramMode(args);
} else {
System.out.println("No parameters, running graphical interface.\n"
+ "If you want to use the command line, use the --help"
+ " parameter for a list of available parameters.");
programMode = ProgramMode.GUI;
}
/* load configuration file */
Configuration config = new BaseConfiguration();
if (args != null && args.isConfig()) {
try {
config = new PropertiesConfiguration(args.getConfig());
} catch (ConfigurationException e) {
System.err.println("could not read config, ignoring it: ");
System.err.println(e);
}
}
/* run selected mode */
switch (programMode) {
case HELP:
//parser.printHelp();
System.out.println(
CliFactory.createCli(CLIArguments.class).getHelpMessage()
+ "\n\nFor more information, see " + GlobalValues.WIKI_URI);
break;
case VERSION:
System.out.println("OSM2World " + VERSION_STRING);
break;
case GUI:
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch(Exception e) {
System.out.println("Error setting native look and feel: " + e);
}
new ViewerFrame(new Data(), new MessageManager(),
new RenderOptions(), config).setVisible(true);
break;
case CONVERT:
try {
Output.output(config, args);
} catch (IOException e) {
e.printStackTrace();
}
break;
}
}
#location 73
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Override
protected void addPrimitiveToValueBuffer(BufferT buffer,
Primitive primitive) {
/*
* rearrange the lists of vertices, normals and texture coordinates
* to turn triangle strips and triangle fans into separate triangles
*/
List<VectorXYZ> primVertices = primitive.vertices;
List<VectorXYZ> primNormals = primitive.normals;
List<List<VectorXZ>> primTexCoordLists = primitive.texCoordLists;
if (primitive.type == Type.TRIANGLE_STRIP) {
primVertices = triangleVertexListFromTriangleStrip(primVertices);
primNormals = triangleNormalListFromTriangleStrip(primNormals);
if (primTexCoordLists != null) {
List<List<VectorXZ>> newPrimTexCoordLists = new ArrayList<List<VectorXZ>>();
for (List<VectorXZ> primTexCoordList : primTexCoordLists) {
newPrimTexCoordLists.add(triangleVertexListFromTriangleStrip(primTexCoordList));
}
primTexCoordLists = newPrimTexCoordLists;
}
} else if (primitive.type == Type.TRIANGLE_FAN) {
primVertices = triangleVertexListFromTriangleFan(primVertices);
primNormals = triangleVertexListFromTriangleFan(primNormals);
if (primTexCoordLists != null) {
List<List<VectorXZ>> newPrimTexCoordLists = new ArrayList<List<VectorXZ>>();
for (List<VectorXZ> primTexCoordList : primTexCoordLists) {
newPrimTexCoordLists.add(triangleVertexListFromTriangleFan(primTexCoordList));
}
primTexCoordLists = newPrimTexCoordLists;
}
}
List<VectorXYZW> primTangents = null;
if (material.hasBumpMap()) {
primTangents = calculateTangentVectorsForTexLayer(primVertices, primNormals, primTexCoordLists.get(material.getBumpMapInd()));
}
/* put the values into the buffer, in the right order */
for (int i = 0; i < primVertices.size(); i++) {
int count = 0;
assert (primTexCoordLists == null
&& material.getNumTextureLayers() == 0)
|| (primTexCoordLists != null
&& primTexCoordLists.size() == material.getNumTextureLayers())
: "WorldModules need to provide the correct number of tex coords";
if (primTexCoordLists == null && material.getNumTextureLayers() > 0) {
System.out.println(material);
}
for (int t = 0; t < material.getNumTextureLayers(); t++) {
if (!material.hasBumpMap() || t != material.getBumpMapInd()) {
VectorXZ textureCoord = primTexCoordLists.get(t).get(i);
put(buffer, textureCoord);
//System.out.println("put tex coord");
count += 2;
}
}
put(buffer, primNormals.get(i));
count += 3;
if (material.hasBumpMap()) {
put(buffer, primTangents.get(i));
count += 4;
put(buffer, primTexCoordLists.get(material.getBumpMapInd()).get(i));
count += 2;
}
put(buffer, primVertices.get(i));
count += 3;
if (count != JOGLRendererVBO.getValuesPerVertex(material)) {
throw new RuntimeException("put: "+count +" values:" + JOGLRendererVBO.getValuesPerVertex(material));
}
}
} | #vulnerable code
@Override
protected void addPrimitiveToValueBuffer(BufferT buffer,
Primitive primitive) {
/*
* rearrange the lists of vertices, normals and texture coordinates
* to turn triangle strips and triangle fans into separate triangles
*/
List<VectorXYZ> primVertices = primitive.vertices;
List<VectorXYZ> primNormals = primitive.normals;
List<List<VectorXZ>> primTexCoordLists = primitive.texCoordLists;
if (primitive.type == Type.TRIANGLE_STRIP) {
primVertices = triangleVertexListFromTriangleStrip(primVertices);
primNormals = triangleNormalListFromTriangleStrip(primNormals);
if (primTexCoordLists != null) {
List<List<VectorXZ>> newPrimTexCoordLists = new ArrayList<List<VectorXZ>>();
for (List<VectorXZ> primTexCoordList : primTexCoordLists) {
newPrimTexCoordLists.add(triangleVertexListFromTriangleStrip(primTexCoordList));
}
primTexCoordLists = newPrimTexCoordLists;
}
} else if (primitive.type == Type.TRIANGLE_FAN) {
primVertices = triangleVertexListFromTriangleFan(primVertices);
primNormals = triangleVertexListFromTriangleFan(primNormals);
if (primTexCoordLists != null) {
List<List<VectorXZ>> newPrimTexCoordLists = new ArrayList<List<VectorXZ>>();
for (List<VectorXZ> primTexCoordList : primTexCoordLists) {
newPrimTexCoordLists.add(triangleVertexListFromTriangleFan(primTexCoordList));
}
primTexCoordLists = newPrimTexCoordLists;
}
}
List<VectorXYZW> primTangents = null;
if (material.hasBumpMap()) {
primTangents = calculateTangentVectorsForTexLayer(primVertices, primNormals, primTexCoordLists.get(material.getBumpMapInd()));
} else if (material.getNumTextureLayers() > 0) {
primTangents = calculateTangentVectorsForTexLayer(primVertices, primNormals, primTexCoordLists.get(0));
}
/* put the values into the buffer, in the right order */
for (int i = 0; i < primVertices.size(); i++) {
int count = 0;
assert (primTexCoordLists == null
&& material.getNumTextureLayers() == 0)
|| (primTexCoordLists != null
&& primTexCoordLists.size() == material.getNumTextureLayers())
: "WorldModules need to provide the correct number of tex coords";
if (primTexCoordLists == null && material.getNumTextureLayers() > 0) {
System.out.println(material);
}
for (int t = 0; t < material.getNumTextureLayers(); t++) {
if (!material.hasBumpMap() || t != material.getBumpMapInd()) {
VectorXZ textureCoord = primTexCoordLists.get(t).get(i);
put(buffer, textureCoord);
//System.out.println("put tex coord");
count += 2;
}
}
put(buffer, primNormals.get(i));
count += 3;
//System.out.println("put normals");
if (material.getNumTextureLayers() > 0) {
put(buffer, primTangents.get(i));
count += 4;
//System.out.println("put tangent");
}
if (material.hasBumpMap()) {
put(buffer, primTexCoordLists.get(material.getBumpMapInd()).get(i));
count += 2;
//System.out.println("put bm coord");
}
put(buffer, primVertices.get(i));
count += 3;
//System.out.println("put vertices");
if (count != JOGLRendererVBO.getValuesPerVertex(material)) {
throw new RuntimeException("put: "+count +" values:" + JOGLRendererVBO.getValuesPerVertex(material));
}
//System.out.println("values per Material:" + JOGLRendererVBO.getValuesPerVertex(material));
}
}
#location 77
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public static void main(String[] unparsedArgs) {
ProgramMode programMode;
CLIArguments args = null;
/* parse command line arguments */
if (unparsedArgs.length > 0) {
try {
args = CliFactory.parseArguments(CLIArguments.class, unparsedArgs);
} catch (ArgumentValidationException e) {
System.err.println(e.getMessage());
System.exit(1);
}
if (!CLIArgumentsUtil.isValid(args)) {
System.err.println(CLIArgumentsUtil.getErrorString(args));
System.exit(1);
}
programMode = CLIArgumentsUtil.getProgramMode(args);
} else {
System.out.println("No parameters, running graphical interface.\n"
+ "If you want to use the command line, use the --help"
+ " parameter for a list of available parameters.");
programMode = ProgramMode.GUI;
}
/* load configuration file */
Configuration config = new BaseConfiguration();
if (args.isConfig()) {
try {
config = new PropertiesConfiguration(args.getConfig());
} catch (ConfigurationException e) {
System.err.println("could not read config, ignoring it: ");
System.err.println(e);
}
}
/* run selected mode */
switch (programMode) {
case HELP:
//parser.printHelp();
System.out.println(
CliFactory.createCli(CLIArguments.class).getHelpMessage()
+ "\n\nFor more information, see " + GlobalValues.WIKI_URI);
break;
case VERSION:
System.out.println("OSM2World " + VERSION_STRING);
break;
case GUI:
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch(Exception e) {
System.out.println("Error setting native look and feel: " + e);
}
new ViewerFrame(new Data(), new MessageManager(),
new RenderOptions(), config).setVisible(true);
break;
case CONVERT:
try {
Output.output(config, args);
} catch (IOException e) {
e.printStackTrace();
}
break;
}
} | #vulnerable code
public static void main(String[] unparsedArgs) {
ProgramMode programMode;
CLIArguments args = null;
if (unparsedArgs.length > 0) {
try {
args = CliFactory.parseArguments(CLIArguments.class, unparsedArgs);
} catch (ArgumentValidationException e) {
System.err.println(e.getMessage());
System.exit(1);
}
if (!CLIArgumentsUtil.isValid(args)) {
System.err.println(CLIArgumentsUtil.getErrorString(args));
System.exit(1);
}
programMode = CLIArgumentsUtil.getProgramMode(args);
} else {
System.out.println("No parameters, running graphical interface.\n"
+ "If you want to use the command line, use the --help"
+ " parameter for a list of available parameters.");
programMode = ProgramMode.GUI;
}
switch (programMode) {
case HELP:
//parser.printHelp();
System.out.println(
CliFactory.createCli(CLIArguments.class).getHelpMessage()
+ "\n\nFor more information, see " + GlobalValues.WIKI_URI);
break;
case VERSION:
System.out.println("OSM2World " + VERSION_STRING);
break;
case GUI:
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch(Exception e) {
System.out.println("Error setting native look and feel: " + e);
}
new ViewerFrame(new Data(), new MessageManager(), new RenderOptions())
.setVisible(true);
break;
case CONVERT:
try {
Output.output(args);
} catch (IOException e) {
e.printStackTrace();
}
break;
}
}
#location 56
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Override
public void beginObject(WorldObject object) {
finishCurrentObject();
/* start a new object */
currentObject = object;
currentTriangles = new HashMap<Material, FrontendPbfTarget.TriangleData>();
} | #vulnerable code
@Override
public void beginObject(WorldObject object) {
/* build the previous object (if it's inside the bounding box) */
boolean isInsideBbox = true;
if (currentObject != null && currentObject.getPrimaryMapElement() != null) {
MapElement mapElement = currentObject.getPrimaryMapElement();
VectorXZ center = null;
if (mapElement instanceof MapNode) {
center = ((MapNode) mapElement).getPos();
} else if (mapElement instanceof MapWaySegment) {
center = ((MapWaySegment) mapElement).getCenter();
} else if (mapElement instanceof MapArea) {
center = ((MapArea) mapElement).getOuterPolygon().getCenter();
}
isInsideBbox = bbox.contains(center);
}
if (!currentTriangles.isEmpty() && isInsideBbox) {
objects.add(buildCurrentObject());
}
/* start a new object */
currentObject = object;
currentTriangles = new HashMap<Material, FrontendPbfTarget.TriangleData>();
}
#location 22
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
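#note covering every case of a type dispatch
In the vulnerable half, center stays null when the primary map element is neither a node, a way segment, nor an area, and the later bbox.contains(center) call is presumably the flagged dereference. A minimal sketch, with invented types, of a dispatch that handles the unexpected case explicitly instead of leaving a null local behind.
public class CenterOf {

    interface MapElement { }
    record NodeElem(double x, double z) implements MapElement { }
    record WayElem(double x1, double z1, double x2, double z2) implements MapElement { }

    /** Computes a representative point and covers every case, so no caller ever sees null. */
    static double[] center(MapElement e) {
        if (e instanceof NodeElem n) {
            return new double[] { n.x(), n.z() };
        } else if (e instanceof WayElem w) {
            return new double[] { (w.x1() + w.x2()) / 2, (w.z1() + w.z2()) / 2 };
        } else {
            // explicit handling of "anything else" instead of silently falling through
            throw new IllegalArgumentException("unsupported element type: " + e.getClass().getName());
        }
    }

    public static void main(String[] args) {
        double[] c = center(new WayElem(0, 0, 2, 4));
        System.out.println(c[0] + ", " + c[1]);   // 1.0, 2.0
    }
}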
#fixed code
public static final boolean isJOSMGenerated(File file) {
try {
BufferedReader reader = new BufferedReader(new FileReader(file));
for (int i=0; i<100; i++) {
String line = reader.readLine();
if (line != null) {
if (line.contains("generator='JOSM'")) {
reader.close();
return true;
}
}
}
reader.close();
} catch (IOException e) { }
return false;
} | #vulnerable code
public static final boolean isJOSMGenerated(File file) {
try {
BufferedReader reader = new BufferedReader(new FileReader(file));
for (int i=0; i<100; i++) {
String line = reader.readLine();
if (line != null) {
if (line.contains("generator='JOSM'")) {
return true;
}
}
}
reader.close();
} catch (IOException e) { }
return false;
}
#location 11
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
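#note closing readers on every path
The vulnerable half returns from inside the loop without closing the reader, and neither half closes it when readLine() throws -- the classic pattern behind the RESOURCE_LEAK label in this dataset. A minimal sketch of the same "sniff the first lines of a file" logic using try-with-resources, so the reader is closed on return, break and exception alike; the helper name is invented.
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Files;

public class FileSniff {

    /** True if one of the first 100 lines contains the marker; the reader is closed on every exit path. */
    static boolean containsMarker(File file, String marker) {
        try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
            for (int i = 0; i < 100; i++) {
                String line = reader.readLine();
                if (line == null) {
                    break;
                }
                if (line.contains(marker)) {
                    return true;          // close() still runs before the method returns
                }
            }
        } catch (IOException e) {
            // an unreadable file is simply treated as "marker not found"
        }
        return false;
    }

    public static void main(String[] args) throws IOException {
        File f = File.createTempFile("sniff", ".osm");
        Files.writeString(f.toPath(), "<osm generator='JOSM'>\n");
        System.out.println(containsMarker(f, "generator='JOSM'"));   // true
    }
}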
#fixed code
public static int createFragmentShader(GL3 gl, String filename) {
int fragShader = gl.glCreateShader(GL3.GL_FRAGMENT_SHADER);
if (fragShader == 0)
return 0;
String[] fragCode = new String[1];
fragCode[0] = "";
String line;
InputStream stream = loadShaderFile(filename);
if (stream == null) {
throw new RuntimeException("Fragment shader not found in classpath: \""+ filename +"\"");
}
try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) {
while ((line = reader.readLine()) != null) {
fragCode[0] += line + "\n";
}
gl.glShaderSource(fragShader, 1, fragCode, null);
gl.glCompileShader(fragShader);
// acquire compilation status
IntBuffer shaderStatus = IntBuffer.allocate(1);
gl.glGetShaderiv(fragShader, GL3.GL_COMPILE_STATUS, shaderStatus);
// check whether compilation was successful
if (shaderStatus.get() == GL.GL_FALSE) {
printShaderInfoLog(gl, fragShader);
throw new IllegalStateException("compilation error for shader ["
+ filename + "].");
}
printShaderInfoLog(gl, fragShader);
return fragShader;
} catch (IOException e) {
throw new RuntimeException("Failed reading fragment shader \"" + filename + "\".",e);
}
} | #vulnerable code
public static int createFragmentShader(GL3 gl, String filename) {
int fragShader = gl.glCreateShader(GL3.GL_FRAGMENT_SHADER);
if (fragShader == 0)
return 0;
String[] fragCode = new String[1];
fragCode[0] = "";
String line;
InputStream stream = loadShaderFile(filename);
if (stream == null) {
throw new RuntimeException("Fragment shader not found in classpath: \""+ filename +"\"");
}
BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
try {
while ((line = reader.readLine()) != null) {
fragCode[0] += line + "\n";
}
} catch (IOException e) {
throw new RuntimeException("Failed reading fragment shader \"" + filename + "\".",e);
}
gl.glShaderSource(fragShader, 1, fragCode, null);
gl.glCompileShader(fragShader);
// acquire compilation status
IntBuffer shaderStatus = IntBuffer.allocate(1);
gl.glGetShaderiv(fragShader, GL3.GL_COMPILE_STATUS, shaderStatus);
// check whether compilation was successful
if (shaderStatus.get() == GL.GL_FALSE) {
printShaderInfoLog(gl, fragShader);
throw new IllegalStateException("compilation error for shader ["
+ filename + "].");
}
printShaderInfoLog(gl, fragShader);
return fragShader;
}
#location 20
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
private void createComponents(){
for (MapElement element : elements){
IndoorObjectData data = new IndoorObjectData(buildingPart, element);
switch (element.getTags().getValue("indoor")){
case "wall":
walls.add(new IndoorWall(data));
break;
case "room":
rooms.add(new IndoorRoom(data));
break;
case "area":
areas.add(new IndoorArea(data));
break;
case "corridor":
corridors.add(new Corridor(data));
break;
}
}
} | #vulnerable code
private void createComponents(){
for (MapElement element : elements){
switch (element.getTags().getValue("indoor")){
case "wall":
walls.add(new IndoorWall(buildingPart, element));
break;
case "room":
rooms.add(new IndoorRoom((MapArea) element, buildingPart));
break;
case "area":
areas.add(new IndoorArea(buildingPart, (MapArea) element));
break;
case "corridor":
corridors.add(new Corridor(buildingPart, (MapArea) element));
break;
}
}
}
#location 5
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
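#note switching on a possibly-null tag value
Both halves above switch on element.getTags().getValue("indoor"); a switch whose String selector is null throws NullPointerException, which is presumably the flagged dereference. A minimal sketch of classifying such a tag value defensively; the enum and helper are invented for illustration.
public class IndoorKind {

    enum Kind { WALL, ROOM, AREA, CORRIDOR, OTHER }

    /** Classifies the value of an "indoor" tag; null or unknown values map to OTHER instead of throwing. */
    static Kind classify(String indoorValue) {
        if (indoorValue == null) {
            return Kind.OTHER;           // a null selector would make the switch throw NPE
        }
        switch (indoorValue) {
            case "wall":     return Kind.WALL;
            case "room":     return Kind.ROOM;
            case "area":     return Kind.AREA;
            case "corridor": return Kind.CORRIDOR;
            default:         return Kind.OTHER;
        }
    }

    public static void main(String[] args) {
        System.out.println(classify("room"));   // ROOM
        System.out.println(classify(null));     // OTHER
    }
}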
#fixed code
public static final boolean isJOSMGenerated(File file) {
try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
for (int i=0; i<100; i++) {
String line = reader.readLine();
if (line != null) {
if (line.contains("generator='JOSM'")) {
return true;
}
}
}
} catch (IOException e) { }
return false;
} | #vulnerable code
public static final boolean isJOSMGenerated(File file) {
try {
BufferedReader reader = new BufferedReader(new FileReader(file));
for (int i=0; i<100; i++) {
String line = reader.readLine();
if (line != null) {
if (line.contains("generator='JOSM'")) {
return true;
}
}
}
reader.close();
} catch (IOException e) { }
return false;
}
#location 11
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public static final List<String[]> getUnparsedParameterGroups(
File parameterFile) throws IOException {
try (BufferedReader in = new BufferedReader(new FileReader(parameterFile))) {
List<String[]> result = new ArrayList<>();
String line;
while ((line = in.readLine()) != null) {
if (line.startsWith("#")) continue;
if (line.trim().isEmpty()) continue;
List<String> argList = new ArrayList<>();
Pattern regex = Pattern.compile("[^\\s\"']+|\"([^\"]*)\"|'([^']*)'");
Matcher matcher = regex.matcher(line);
while (matcher.find()) {
if (matcher.group(1) != null) {
// Add double-quoted string without the quotes
argList.add(matcher.group(1));
} else if (matcher.group(2) != null) {
// Add single-quoted string without the quotes
argList.add(matcher.group(2));
} else {
// Add unquoted word
argList.add(matcher.group());
}
}
result.add(argList.toArray(new String[argList.size()]));
}
return result;
}
} | #vulnerable code
public static final List<String[]> getUnparsedParameterGroups(
File parameterFile) throws IOException {
List<String[]> result = new ArrayList<String[]>();
BufferedReader in = new BufferedReader(new FileReader(parameterFile));
String line;
while ((line = in.readLine()) != null) {
if (line.startsWith("#")) continue;
if (line.trim().isEmpty()) continue;
List<String> argList = new ArrayList<String>();
Pattern regex = Pattern.compile("[^\\s\"']+|\"([^\"]*)\"|'([^']*)'");
Matcher matcher = regex.matcher(line);
while (matcher.find()) {
if (matcher.group(1) != null) {
// Add double-quoted string without the quotes
argList.add(matcher.group(1));
} else if (matcher.group(2) != null) {
// Add single-quoted string without the quotes
argList.add(matcher.group(2));
} else {
// Add unquoted word
argList.add(matcher.group());
}
}
result.add(argList.toArray(new String[argList.size()]));
}
in.close();
return result;
}
#location 38
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public static final boolean isJOSMGenerated(File file) {
try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
for (int i=0; i<100; i++) {
String line = reader.readLine();
if (line != null) {
if (line.contains("generator='JOSM'")) {
return true;
}
}
}
} catch (IOException e) { }
return false;
} | #vulnerable code
public static final boolean isJOSMGenerated(File file) {
try {
BufferedReader reader = new BufferedReader(new FileReader(file));
for (int i=0; i<100; i++) {
String line = reader.readLine();
if (line != null) {
if (line.contains("generator='JOSM'")) {
return true;
}
}
}
reader.close();
} catch (IOException e) { }
return false;
}
#location 18
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public static void main(String[] unparsedArgs) {
ProgramMode programMode;
CLIArguments args = null;
/* parse command line arguments */
if (unparsedArgs.length > 0) {
try {
args = CliFactory.parseArguments(CLIArguments.class, unparsedArgs);
} catch (ArgumentValidationException e) {
System.err.println(e.getMessage());
System.exit(1);
}
if (!CLIArgumentsUtil.isValid(args)) {
System.err.println(CLIArgumentsUtil.getErrorString(args));
System.exit(1);
}
programMode = CLIArgumentsUtil.getProgramMode(args);
} else {
System.out.println("No parameters, running graphical interface.\n"
+ "If you want to use the command line, use the --help"
+ " parameter for a list of available parameters.");
programMode = ProgramMode.GUI;
}
/* load configuration file */
Configuration config = new BaseConfiguration();
if (args != null && args.isConfig()) {
try {
config = new PropertiesConfiguration(args.getConfig());
} catch (ConfigurationException e) {
System.err.println("could not read config, ignoring it: ");
System.err.println(e);
}
}
/* run selected mode */
switch (programMode) {
case HELP:
//parser.printHelp();
System.out.println(
CliFactory.createCli(CLIArguments.class).getHelpMessage()
+ "\n\nFor more information, see " + GlobalValues.WIKI_URI);
break;
case VERSION:
System.out.println("OSM2World " + VERSION_STRING);
break;
case GUI:
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch(Exception e) {
System.out.println("Error setting native look and feel: " + e);
}
new ViewerFrame(new Data(), new MessageManager(),
new RenderOptions(), config).setVisible(true);
break;
case CONVERT:
try {
Output.output(config, args);
} catch (IOException e) {
e.printStackTrace();
}
break;
}
} | #vulnerable code
public static void main(String[] unparsedArgs) {
ProgramMode programMode;
CLIArguments args = null;
/* parse command line arguments */
if (unparsedArgs.length > 0) {
try {
args = CliFactory.parseArguments(CLIArguments.class, unparsedArgs);
} catch (ArgumentValidationException e) {
System.err.println(e.getMessage());
System.exit(1);
}
if (!CLIArgumentsUtil.isValid(args)) {
System.err.println(CLIArgumentsUtil.getErrorString(args));
System.exit(1);
}
programMode = CLIArgumentsUtil.getProgramMode(args);
} else {
System.out.println("No parameters, running graphical interface.\n"
+ "If you want to use the command line, use the --help"
+ " parameter for a list of available parameters.");
programMode = ProgramMode.GUI;
}
/* load configuration file */
Configuration config = new BaseConfiguration();
if (args.isConfig()) {
try {
config = new PropertiesConfiguration(args.getConfig());
} catch (ConfigurationException e) {
System.err.println("could not read config, ignoring it: ");
System.err.println(e);
}
}
/* run selected mode */
switch (programMode) {
case HELP:
//parser.printHelp();
System.out.println(
CliFactory.createCli(CLIArguments.class).getHelpMessage()
+ "\n\nFor more information, see " + GlobalValues.WIKI_URI);
break;
case VERSION:
System.out.println("OSM2World " + VERSION_STRING);
break;
case GUI:
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch(Exception e) {
System.out.println("Error setting native look and feel: " + e);
}
new ViewerFrame(new Data(), new MessageManager(),
new RenderOptions(), config).setVisible(true);
break;
case CONVERT:
try {
Output.output(config, args);
} catch (IOException e) {
e.printStackTrace();
}
break;
}
}
#location 37
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
protected void slaveDown(ClusterSlotRange slotRange, String host, int port) {
MasterSlaveEntry entry = getEntry(slotRange);
slaveDown(entry, host, port);
} | #vulnerable code
protected void slaveDown(ClusterSlotRange slotRange, String host, int port) {
Collection<RedisPubSubConnection> allPubSubConnections = getEntry(slotRange).slaveDown(host, port);
// reattach listeners to other channels
for (Entry<String, PubSubConnectionEntry> mapEntry : name2PubSubConnection.entrySet()) {
for (RedisPubSubConnection redisPubSubConnection : allPubSubConnections) {
PubSubConnectionEntry entry = mapEntry.getValue();
final String channelName = mapEntry.getKey();
if (!entry.getConnection().equals(redisPubSubConnection)) {
continue;
}
synchronized (entry) {
entry.close();
final Collection<RedisPubSubListener> listeners = entry.getListeners(channelName);
if (entry.getConnection().getPatternChannels().get(channelName) != null) {
Codec subscribeCodec = punsubscribe(channelName);
if (!listeners.isEmpty()) {
Future<PubSubConnectionEntry> future = psubscribe(channelName, subscribeCodec);
future.addListener(new FutureListener<PubSubConnectionEntry>() {
@Override
public void operationComplete(Future<PubSubConnectionEntry> future)
throws Exception {
PubSubConnectionEntry newEntry = future.getNow();
for (RedisPubSubListener redisPubSubListener : listeners) {
newEntry.addListener(channelName, redisPubSubListener);
}
log.debug("resubscribed listeners for '{}' channel-pattern", channelName);
}
});
}
} else {
Codec subscribeCodec = unsubscribe(channelName);
if (!listeners.isEmpty()) {
Future<PubSubConnectionEntry> future = subscribe(channelName, subscribeCodec);
future.addListener(new FutureListener<PubSubConnectionEntry>() {
@Override
public void operationComplete(Future<PubSubConnectionEntry> future)
throws Exception {
PubSubConnectionEntry newEntry = future.getNow();
for (RedisPubSubListener redisPubSubListener : listeners) {
newEntry.addListener(channelName, redisPubSubListener);
}
log.debug("resubscribed listeners for '{}' channel", channelName);
}
});
}
}
}
}
}
}
#location 2
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
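#note keeping compound updates inside the owner of the state
The fixed half pushes the whole "slave down" handling into the entry that owns the connections, while the vulnerable half has the caller iterate shared registries and resubscribe listeners itself -- a compound check-then-act spread across threads. A minimal sketch of confining such a compound operation to the object that owns the collection, under a single lock; the registry type is invented.
import java.util.HashSet;
import java.util.Set;

public class SubscriptionRegistry {

    private final Set<String> channels = new HashSet<>();   // guarded by 'this'

    synchronized void add(String channel) {
        channels.add(channel);
    }

    /** One compound operation under one lock, instead of callers walking the shared set themselves. */
    synchronized Set<String> removeByPrefix(String prefix) {
        Set<String> removed = new HashSet<>();
        channels.removeIf(ch -> {
            if (ch.startsWith(prefix)) {
                removed.add(ch);
                return true;
            }
            return false;
        });
        return removed;
    }

    public static void main(String[] args) {
        SubscriptionRegistry registry = new SubscriptionRegistry();
        registry.add("news.sports");
        registry.add("chat.room1");
        System.out.println(registry.removeByPrefix("news."));   // [news.sports]
    }
}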
#fixed code
@Override
public boolean compareAndSet(long expect, long update) {
RedisConnection<String, Object> conn = connectionManager.connection();
try {
while (true) {
conn.watch(getName());
Long value = ((Number) conn.get(getName())).longValue();
if (value != expect) {
conn.discard();
return false;
}
conn.multi();
conn.set(getName(), update);
if (conn.exec().size() == 1) {
return true;
}
}
} finally {
connectionManager.release(conn);
}
} | #vulnerable code
@Override
public boolean compareAndSet(long expect, long update) {
RedisConnection<String, Object> conn = connectionManager.connection();
try {
while (true) {
conn.watch(getName());
Long value = (Long) conn.get(getName());
if (value != expect) {
conn.discard();
return false;
}
conn.multi();
conn.set(getName(), update);
if (conn.exec().size() == 1) {
return true;
}
}
} finally {
connectionManager.release(conn);
}
}
#location 8
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
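#note unboxing a possibly-null value
In the vulnerable half, conn.get() can return null for an absent key; the comparison value != expect then auto-unboxes the null Long and throws NullPointerException. A minimal sketch of checking the boxed value before unboxing; the storage API is simplified to a plain Object.
public class UnboxSafely {

    /** Compares a possibly-null stored value against an expected primitive without risking an unboxing NPE. */
    static boolean matches(Object stored, long expected) {
        if (!(stored instanceof Number)) {
            return false;                                    // absent or non-numeric never matches
        }
        return ((Number) stored).longValue() == expected;    // unbox only after the type check
    }

    public static void main(String[] args) {
        System.out.println(matches(5L, 5L));      // true
        System.out.println(matches(null, 5L));    // false, no NullPointerException
        System.out.println(matches("5", 5L));     // false
    }
}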
#fixed code
@Override
public boolean remove(Object o) {
return remove(o, 1);
} | #vulnerable code
@Override
public boolean remove(Object o) {
RedisConnection<String, Object> connection = connectionManager.connectionWriteOp();
try {
return connection.lrem(getName(), 1, o) > 0;
} finally {
connectionManager.release(connection);
}
}
#location 5
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testReplaceDepartments(){
List<Department> departments = Departments.defaultDepartments().list();
File tmpDir = Files.createTempDir();
File groups = new File(tmpDir, "departments.csv");
try {
PrintWriter groupPrintWriter = new PrintWriter(new BufferedWriter(new FileWriter(groups, false)));
groupPrintWriter.println("部门名称,部门ID,父部门ID,排序");
for (Department department : departments) {
groupPrintWriter.print(csv(department));
}
groupPrintWriter.close();
String job = Jobs.defaultJobs().replaceDepartments(groups);
while (true) {
JobResult jobResult = Jobs.defaultJobs().getResult(job);
if (3 == jobResult.getStatus()) {
if (100 == jobResult.getPercentage()) {
Assert.assertTrue(true);
}
break;
} else {
System.out.println("正在同步:" + jobResult.getPercentage());
sleep(100);
}
}
} catch(Exception e) {
e.printStackTrace();
Assert.fail("test failed.");
} finally {
try {
FileUtils.forceDelete(tmpDir);
} catch (IOException e) {
}
}
} | #vulnerable code
@Test
public void testReplaceDepartments(){
List<Department> departments = Departments.defaultDepartments().list();
File tmpDir = Files.createTempDir();
File groups = new File(tmpDir, "departments.csv");
try {
PrintWriter groupPrintWriter = new PrintWriter(new BufferedWriter(new FileWriter(groups, false)));
groupPrintWriter.append("部门名称,部门ID,父部门ID,排序").append("\n");
for (Department department : departments) {
groupPrintWriter.append(csv(department));
}
groupPrintWriter.close();
String job = Jobs.defaultJobs().replaceDepartments(groups);
while (true) {
JobResult jobResult = Jobs.defaultJobs().getResult(job);
if (3 == jobResult.getStatus()) {
if (100 == jobResult.getPercentage()) {
Assert.assertTrue(true);
}
break;
} else {
System.out.println("正在同步:" + jobResult.getPercentage());
sleep(100);
}
}
} catch(Exception e) {
e.printStackTrace();
Assert.fail("test failed.");
} finally {
try {
FileUtils.forceDelete(tmpDir);
} catch (IOException e) {
}
}
}
#location 30
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
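#note closing writers in tests
The leak flagged above is presumably the PrintWriter/FileWriter chain: if building or writing a CSV row throws, close() is never reached and the temporary file handle stays open. A minimal sketch of the same "write a header plus rows" step with try-with-resources; the file layout is hypothetical and unrelated to the WeChat API above.
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class CsvDump {

    /** Writes a header plus rows; the writer is flushed and closed even if a row formatter throws. */
    static void writeCsv(Path target, String header, List<String> rows) throws IOException {
        try (PrintWriter out = new PrintWriter(Files.newBufferedWriter(target))) {
            out.println(header);
            for (String row : rows) {
                out.println(row);
            }
        }   // close() runs here on both the normal and the exceptional path
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("departments", ".csv");
        writeCsv(tmp, "name,id,parent,order", List.of("Engineering,2,1,10", "Sales,3,1,20"));
        System.out.println(Files.readAllLines(tmp));
    }
}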
#fixed code
@Test
public void testReplaceUsers(){
List<ReadUser> allUsers = Users.defaultUsers().list();
File tmpDir = Files.createTempDir();
File users = new File(tmpDir, "users.csv");
try {
PrintWriter userPrintWriter = new PrintWriter(new BufferedWriter(new FileWriter(users, false)));
userPrintWriter.println("姓名,帐号,微信号,手机号,邮箱,所在部门,职位");
for(ReadUser user: allUsers) {
CreateUser createUser = new CreateUser();
createUser.setName(user.getName());
createUser.setMobile(user.getMobile());
createUser.setUserId(user.getUserId());
createUser.setPosition(user.getPosition());
createUser.setWeixinId(user.getWeixinId());
createUser.setDepartment(user.getDepartment());
createUser.setEmail(user.getEmail());
userPrintWriter.print(csv(createUser));
}
userPrintWriter.close();
String job = Jobs.defaultJobs().replaceUsers(users);
while (true) {
JobResult jobResult = Jobs.defaultJobs().getResult(job);
if (3 == jobResult.getStatus()) {
if (100 == jobResult.getPercentage()) {
Assert.assertTrue(true);
}
break;
} else {
System.out.println("正在同步:" + jobResult.getPercentage());
sleep(10);
}
}
} catch(Exception e) {
e.printStackTrace();
Assert.fail("test failed.");
} finally {
try {
FileUtils.forceDelete(tmpDir);
} catch (IOException e) {
}
}
} | #vulnerable code
@Test
public void testReplaceUsers(){
List<ReadUser> allUsers = Users.defaultUsers().list();
File tmpDir = Files.createTempDir();
File users = new File(tmpDir, "users.csv");
try {
PrintWriter userPrintWriter = new PrintWriter(new BufferedWriter(new FileWriter(users, false)));
userPrintWriter.append("姓名,帐号,微信号,手机号,邮箱,所在部门,职位").append("\n");
for(ReadUser user: allUsers) {
CreateUser createUser = new CreateUser();
createUser.setName(user.getName());
createUser.setMobile(user.getMobile());
createUser.setUserId(user.getUserId());
createUser.setPosition(user.getPosition());
createUser.setWeixinId(user.getWeixinId());
createUser.setDepartment(user.getDepartment());
createUser.setEmail(user.getEmail());
userPrintWriter.append(csv(createUser));
}
userPrintWriter.close();
String job = Jobs.defaultJobs().replaceUsers(users);
while (true) {
JobResult jobResult = Jobs.defaultJobs().getResult(job);
if (3 == jobResult.getStatus()) {
if (100 == jobResult.getPercentage()) {
Assert.assertTrue(true);
}
break;
} else {
System.out.println("正在同步:" + jobResult.getPercentage());
sleep(10);
}
}
} catch(Exception e) {
e.printStackTrace();
Assert.fail("test failed.");
} finally {
try {
FileUtils.forceDelete(tmpDir);
} catch (IOException e) {
}
}
}
#location 39
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
private void loadMainDict() {
// create the main dictionary instance
_MainDict = new DictSegment((char) 0);
// read the main dictionary file
InputStream is = this.getClass().getClassLoader().getResourceAsStream(cfg.getMainDictionary());
if (is == null) {
throw new RuntimeException("Main Dictionary not found!!!");
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
_MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Main Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
// load the extension dictionaries
this.loadExtDict();
} | #vulnerable code
private void loadMainDict() {
//create the main dictionary instance
_MainDict = new DictSegment((char) 0);
//read the main dictionary file
InputStream is = this.getClass().getClassLoader().getResourceAsStream(cfg.getMainDictionary());
if (is == null) {
throw new RuntimeException("Main Dictionary not found!!!");
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
_MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Main Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
//load the extension dictionaries
this.loadExtDict();
}
#location 20
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
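#note closing the reader closes the stream
Both halves above close only the raw InputStream in the finally block; the BufferedReader created inside the try is never closed, which is presumably what the leak report points at. A minimal sketch of the same word-list loading with try-with-resources on the reader -- closing it also closes the wrapped stream; the helper and its caller are invented.
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class WordListLoader {

    /** Reads one trimmed, lower-cased word per non-blank line; closing the reader closes the stream too. */
    static List<String> readWords(InputStream in) throws IOException {
        List<String> words = new ArrayList<>();
        try (BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8), 512)) {
            String line;
            while ((line = br.readLine()) != null) {
                if (!line.trim().isEmpty()) {
                    words.add(line.trim().toLowerCase());
                }
            }
        }
        return words;
    }

    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream("Lucene\nIK Analyzer\n".getBytes(StandardCharsets.UTF_8));
        System.out.println(readWords(in));   // [lucene, ik analyzer]
    }
}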
#fixed code
private void loadStopWordDict() {
// create the stopword dictionary instance
_StopWordDict = new DictSegment((char) 0);
// load the extension stopword dictionaries
List<String> extStopWordDictFiles = cfg.getExtStopWordDictionarys();
if (extStopWordDictFiles != null) {
InputStream is;
for (String extStopWordDictName : extStopWordDictFiles) {
System.out.println("加载扩展停止词典:" + extStopWordDictName);
// read the extension dictionary file
is = this.getClass().getClassLoader().getResourceAsStream(extStopWordDictName);
// if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
// System.out.println(theWord);
// load the extension stopword entries into memory
_StopWordDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Extension Stop word Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
} | #vulnerable code
private void loadStopWordDict() {
//create the stopword dictionary instance
_StopWordDict = new DictSegment((char) 0);
//load the extension stopword dictionaries
List<String> extStopWordDictFiles = cfg.getExtStopWordDictionarys();
if (extStopWordDictFiles != null) {
InputStream is;
for (String extStopWordDictName : extStopWordDictFiles) {
System.out.println("加载扩展停止词典:" + extStopWordDictName);
//read the extension dictionary file
is = this.getClass().getClassLoader().getResourceAsStream(extStopWordDictName);
//if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
//System.out.println(theWord);
//load the extension stopword entries into memory
_StopWordDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Extension Stop word Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
}
#location 28
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
static UpdateKeeper getInstance() {
return UpdateKeeper.Builder.singleton;
} | #vulnerable code
static UpdateKeeper getInstance() {
if (singleton == null) {
synchronized (UpdateKeeper.class) {
if (singleton == null) {
singleton = new UpdateKeeper();
return singleton;
}
}
}
return singleton;
}
#location 6
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
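#note lazy singletons without broken double-checked locking
The fixed half relies on a nested holder class, while the vulnerable half hand-rolls double-checked locking on a non-volatile field, which the Java memory model does not make safe. A minimal sketch of both the initialization-on-demand holder idiom and the volatile-based variant; the class is generic and not UpdateKeeper itself.
public class LazySingleton {

    private LazySingleton() { }

    /** Holder idiom: the JVM initializes the nested class (and the instance) exactly once, safely. */
    private static final class Holder {
        static final LazySingleton INSTANCE = new LazySingleton();
    }

    static LazySingleton getInstance() {
        return Holder.INSTANCE;
    }

    // Double-checked locking is only correct when the field is volatile:
    private static volatile LazySingleton dclInstance;

    static LazySingleton getInstanceDcl() {
        LazySingleton local = dclInstance;
        if (local == null) {
            synchronized (LazySingleton.class) {
                local = dclInstance;
                if (local == null) {
                    local = dclInstance = new LazySingleton();
                }
            }
        }
        return local;
    }

    public static void main(String[] args) {
        System.out.println(getInstance() == getInstance());   // true
    }
}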
#fixed code
private void loadExtDict() {
// load the extension dictionary configuration
List<String> extDictFiles = cfg.getExtDictionarys();
if (extDictFiles != null) {
InputStream is;
for (String extDictName : extDictFiles) {
// read the extension dictionary file
System.out.println("加载扩展词典:" + extDictName);
is = this.getClass().getClassLoader().getResourceAsStream(extDictName);
// if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
// load the extension entries into the in-memory main dictionary
// System.out.println(theWord);
_MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Extension Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
} | #vulnerable code
private void loadExtDict() {
//load the extension dictionary configuration
List<String> extDictFiles = cfg.getExtDictionarys();
if (extDictFiles != null) {
InputStream is;
for (String extDictName : extDictFiles) {
//read the extension dictionary file
System.out.println("加载扩展词典:" + extDictName);
is = this.getClass().getClassLoader().getResourceAsStream(extDictName);
//if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
//load the extension entries into the in-memory main dictionary
//System.out.println(theWord);
_MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Extension Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
}
#location 26
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
private void loadQuantifierDict() {
// create the quantifier dictionary instance
_QuantifierDict = new DictSegment((char) 0);
// read the quantifier dictionary file
InputStream is = this.getClass().getClassLoader().getResourceAsStream(cfg.getQuantifierDicionary());
if (is == null) {
throw new RuntimeException("Quantifier Dictionary not found!!!");
}
try {
readDict(is, _QuantifierDict);
} catch (IOException ioe) {
System.err.println("Quantifier Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
} | #vulnerable code
private void loadQuantifierDict() {
// create the quantifier dictionary instance
_QuantifierDict = new DictSegment((char) 0);
// read the quantifier dictionary file
InputStream is = this.getClass().getClassLoader().getResourceAsStream(cfg.getQuantifierDicionary());
if (is == null) {
throw new RuntimeException("Quantifier Dictionary not found!!!");
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
_QuantifierDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Quantifier Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
#location 19
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
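#note probable shape of the readDict helper
The fixed half above delegates the line-by-line reading to a readDict(is, dict) helper that the record does not include. The sketch below is a guess at what such a helper plausibly does -- it is not the project's actual implementation -- with a Consumer standing in for DictSegment.fillSegment(char[]).
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.function.Consumer;

public class DictReading {

    /** Streams every non-blank line into the dictionary; the reader is closed even if a line fails. */
    static void readDict(InputStream is, Consumer<char[]> fillSegment) throws IOException {
        try (BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512)) {
            String word;
            while ((word = br.readLine()) != null) {
                if (!word.trim().isEmpty()) {
                    fillSegment.accept(word.trim().toLowerCase().toCharArray());
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream("个\n只\n".getBytes(StandardCharsets.UTF_8));
        readDict(in, chars -> System.out.println(new String(chars)));
    }
}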
#fixed code
private void loadStopWordDict() {
// create the stopword dictionary instance
_StopWordDict = new DictSegment((char) 0);
// load the extension stopword dictionaries
List<String> extStopWordDictFiles = cfg.getExtStopWordDictionarys();
if (extStopWordDictFiles != null) {
InputStream is;
for (String extStopWordDictName : extStopWordDictFiles) {
System.out.println("Load stopwords dictionary:" + extStopWordDictName);
// read the extension dictionary file
is = this.getClass().getClassLoader().getResourceAsStream(extStopWordDictName);
// if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
readDict(is, _StopWordDict);
} catch (IOException ioe) {
System.err.println("Extension Stop word Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
} | #vulnerable code
private void loadStopWordDict() {
// create the stopword dictionary instance
_StopWordDict = new DictSegment((char) 0);
// load the extension stopword dictionaries
List<String> extStopWordDictFiles = cfg.getExtStopWordDictionarys();
if (extStopWordDictFiles != null) {
InputStream is;
for (String extStopWordDictName : extStopWordDictFiles) {
System.out.println("Load stopwords dictionary:" + extStopWordDictName);
// read the extension dictionary file
is = this.getClass().getClassLoader().getResourceAsStream(extStopWordDictName);
// if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
// System.out.println(theWord);
// load the extension stopword entries into memory
_StopWordDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Extension Stop word Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
}
#location 28
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
private void loadQuantifierDict() {
// create the quantifier dictionary instance
_QuantifierDict = new DictSegment((char) 0);
// read the quantifier dictionary file
InputStream is = this.getClass().getClassLoader().getResourceAsStream(cfg.getQuantifierDicionary());
if (is == null) {
throw new RuntimeException("Quantifier Dictionary not found!!!");
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
_QuantifierDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Quantifier Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
} | #vulnerable code
private void loadQuantifierDict() {
//create the quantifier dictionary instance
_QuantifierDict = new DictSegment((char) 0);
//read the quantifier dictionary file
InputStream is = this.getClass().getClassLoader().getResourceAsStream(cfg.getQuantifierDicionary());
if (is == null) {
throw new RuntimeException("Quantifier Dictionary not found!!!");
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
_QuantifierDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Quantifier Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
#location 19
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
private void loadExtDict() {
// load the extension dictionary configuration
List<String> extDictFiles = cfg.getExtDictionarys();
if (extDictFiles != null) {
InputStream is;
for (String extDictName : extDictFiles) {
// read the extension dictionary file
System.out.println("Load extended dictionary:" + extDictName);
is = this.getClass().getClassLoader().getResourceAsStream(extDictName);
// if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
readDict(is, _MainDict);
} catch (IOException ioe) {
System.err.println("Extension Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
} | #vulnerable code
private void loadExtDict() {
// load the extension dictionary configuration
List<String> extDictFiles = cfg.getExtDictionarys();
if (extDictFiles != null) {
InputStream is;
for (String extDictName : extDictFiles) {
// read the extension dictionary file
System.out.println("Load extended dictionary:" + extDictName);
is = this.getClass().getClassLoader().getResourceAsStream(extDictName);
// if the extension dictionary cannot be found, skip it
if (is == null) {
continue;
}
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
// load the extension entries into the in-memory main dictionary
// System.out.println(theWord);
_MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Extension Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
}
#location 26
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public static void reloadDic(List<InputStream> inputStreamList) {
// if the singleton has not been instantiated yet, initialize it first
if (singleton == null) {
Configuration cfg = DefaultConfig.getInstance();
initial(cfg);
}
// iterate over the dictionary streams and load every word read into the main dictionary
for (InputStream is : inputStreamList) {
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
singleton._MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Other Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
if (is != null) {
is.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | #vulnerable code
public static void reloadDic(List<InputStream> inputStreamList) {
if (singleton == null) {
Configuration cfg = DefaultConfig.getInstance();
initial(cfg);
}
for (InputStream is : inputStreamList) {
try {
BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"), 512);
String theWord;
do {
theWord = br.readLine();
if (theWord != null && !"".equals(theWord.trim())) {
singleton._MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
}
} while (theWord != null);
} catch (IOException ioe) {
System.err.println("Other Dictionary loading exception.");
ioe.printStackTrace();
} finally {
try {
if (is != null) {
is.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
#location 16
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Override
public List<DiscoveryNode> buildDynamicNodes() {
if (refreshInterval.millis() != 0) {
if (cachedDiscoNodes != null &&
(refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
if (logger.isTraceEnabled()) logger.trace("using cache to retrieve node list");
return cachedDiscoNodes;
}
lastRefresh = System.currentTimeMillis();
}
logger.debug("start building nodes list using GCE API");
cachedDiscoNodes = Lists.newArrayList();
String ipAddress = null;
try {
InetAddress inetAddress = networkService.resolvePublishHostAddress(null);
if (inetAddress != null) {
ipAddress = inetAddress.getHostAddress();
}
} catch (IOException e) {
// We can't find the publish host address... Hmmm. Too bad :-(
// We won't simply filter it
}
try {
Collection<Instance> instances = gceComputeService.instances();
for (Instance instance : instances) {
String name = instance.getName();
String type = instance.getMachineType();
String image = instance.getImage();
String status = instance.getStatus();
logger.trace("gce instance {} with status {} found.", name, status);
// We don't want to connect to TERMINATED status instances
// See https://github.com/elasticsearch/elasticsearch-cloud-gce/issues/3
if (Status.TERMINATED.equals(status)) {
logger.debug("node {} is TERMINATED. Ignoring", name);
continue;
}
// see if we need to filter by tag
boolean filterByTag = false;
if (tags.length > 0) {
logger.trace("start filtering instance {} with tags {}.", name, tags);
if (instance.getTags() == null || instance.getTags().isEmpty()) {
// If this instance has no tag, we filter it
logger.trace("no tags for this instance but we asked for tags. {} won't be part of the cluster.", name);
filterByTag = true;
} else {
// check that all tags listed are there on the instance
logger.trace("comparing instance tags {} with tags filter {}.", instance.getTags().getItems(), tags);
for (String tag : tags) {
boolean found = false;
for (String instancetag : instance.getTags().getItems()) {
if (instancetag.equals(tag)) {
found = true;
break;
}
}
if (!found) {
filterByTag = true;
break;
}
}
}
}
if (filterByTag) {
logger.trace("*** filtering out instance {} based tags {}, not part of {}", name, tags,
instance.getTags() == null ? "" : instance.getTags().getItems());
continue;
} else {
logger.trace("*** instance {} with tags {} is added to discovery", name, tags);
}
String ip_public = null;
String ip_private = null;
List<NetworkInterface> interfaces = instance.getNetworkInterfaces();
for (NetworkInterface networkInterface : interfaces) {
if (ip_public == null) {
// Trying to get Public IP Address (For future use)
if (networkInterface.getAccessConfigs() != null) {
for (AccessConfig accessConfig : networkInterface.getAccessConfigs()) {
if (Strings.hasText(accessConfig.getNatIP())) {
ip_public = accessConfig.getNatIP();
break;
}
}
}
}
if (ip_private == null) {
ip_private = networkInterface.getNetworkIP();
}
// If we have both public and private, we can stop here
if (ip_private != null && ip_public != null) break;
}
try {
if (ip_private.equals(ipAddress)) {
// We found the current node.
// We can ignore it in the list of DiscoveryNode
logger.trace("current node found. Ignoring {} - {}", name, ip_private);
} else {
TransportAddress[] addresses = transportService.addressesFromString(ip_private);
// we only limit to 1 address, as it makes no sense to ping 100 ports
for (int i = 0; (i < addresses.length && i < UnicastZenPing.LIMIT_PORTS_COUNT); i++) {
logger.trace("adding {}, type {}, image {}, address {}, transport_address {}, status {}", name, type,
image, ip_private, addresses[i], status);
cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + name + "-" + i, addresses[i]));
}
}
} catch (Exception e) {
logger.warn("failed to add {}, address {}", e, name, ip_private);
}
}
} catch (Throwable e) {
logger.warn("Exception caught during discovery {} : {}", e.getClass().getName(), e.getMessage());
logger.trace("Exception caught during discovery", e);
}
logger.debug("using dynamic discovery nodes {}", cachedDiscoNodes);
return cachedDiscoNodes;
} | #vulnerable code
@Override
public List<DiscoveryNode> buildDynamicNodes() {
if (refreshInterval.millis() != 0) {
if (cachedDiscoNodes != null &&
(refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
if (logger.isTraceEnabled()) logger.trace("using cache to retrieve node list");
return cachedDiscoNodes;
}
lastRefresh = System.currentTimeMillis();
}
logger.debug("start building nodes list using GCE API");
cachedDiscoNodes = Lists.newArrayList();
String ipAddress = null;
try {
InetAddress inetAddress = networkService.resolvePublishHostAddress(null);
if (inetAddress != null) {
ipAddress = inetAddress.getHostAddress();
}
} catch (IOException e) {
// We can't find the publish host address... Hmmm. Too bad :-(
// We won't simply filter it
}
try {
Collection<Instance> instances = gceComputeService.instances();
for (Instance instance : instances) {
String name = instance.getName();
String type = instance.getMachineType();
String image = instance.getImage();
String status = instance.getStatus();
// We don't want to connect to TERMINATED status instances
// See https://github.com/elasticsearch/elasticsearch-cloud-gce/issues/3
if (Status.TERMINATED.equals(status)) {
logger.debug("node {} is TERMINATED. Ignoring", name);
continue;
}
// see if we need to filter by tag
boolean filterByTag = false;
if (tags.length > 0) {
if (instance.getTags() == null || instance.getTags().isEmpty()) {
// If this instance has no tag, we filter it
filterByTag = true;
} else {
// check that all tags listed are there on the instance
for (String tag : tags) {
boolean found = false;
for (String instancetag : instance.getTags().getItems()) {
if (instancetag.equals(tag)) {
found = true;
break;
}
}
if (!found) {
filterByTag = true;
break;
}
}
}
}
if (filterByTag) {
logger.trace("filtering out instance {} based tags {}, not part of {}", name, tags,
instance.getTags().getItems());
continue;
}
String ip_public = null;
String ip_private = null;
List<NetworkInterface> interfaces = instance.getNetworkInterfaces();
for (NetworkInterface networkInterface : interfaces) {
if (ip_public == null) {
// Trying to get Public IP Address (For future use)
for (AccessConfig accessConfig : networkInterface.getAccessConfigs()) {
if (Strings.hasText(accessConfig.getNatIP())) {
ip_public = accessConfig.getNatIP();
break;
}
}
}
if (ip_private == null) {
ip_private = networkInterface.getNetworkIP();
}
// If we have both public and private, we can stop here
if (ip_private != null && ip_public != null) break;
}
try {
if (ip_private.equals(ipAddress)) {
// We found the current node.
// We can ignore it in the list of DiscoveryNode
logger.trace("current node found. Ignoring {} - {}", name, ip_private);
} else {
TransportAddress[] addresses = transportService.addressesFromString(ip_private);
// we only limit to 1 address, as it makes no sense to ping 100 ports
for (int i = 0; (i < addresses.length && i < UnicastZenPing.LIMIT_PORTS_COUNT); i++) {
logger.trace("adding {}, type {}, image {}, address {}, transport_address {}, status {}", name, type,
image, ip_private, addresses[i], status);
cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + name + "-" + i, addresses[i]));
}
}
} catch (Exception e) {
logger.warn("failed to add {}, address {}", e, name, ip_private);
}
}
} catch (Throwable e) {
logger.warn("Exception caught during discovery {} : {}", e.getClass().getName(), e.getMessage());
logger.trace("Exception caught during discovery", e);
}
logger.debug("using dynamic discovery nodes {}", cachedDiscoNodes);
return cachedDiscoNodes;
}
#location 67
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 30
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 19
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void harvest(IceMediaStream mediaStream, PortManager portManager, Selector selector) throws HarvestException, NoCandidatesGatheredException {
// Ask each harvester to gather candidates for the media stream
// HOST candidates take precedence and are mandatory
CandidateHarvester hostHarvester = harvesters.get(CandidateType.HOST);
if (hostHarvester != null) {
hostHarvester.harvest(portManager, mediaStream, selector);
} else {
throw new HarvestException("No HOST harvester registered!");
}
// Then comes the SRFLX, which depends on HOST candidates
CandidateHarvester srflxHarvester = harvesters.get(CandidateType.SRFLX);
if (srflxHarvester != null) {
srflxHarvester.harvest(portManager, mediaStream, selector);
}
// RELAY candidates come last
CandidateHarvester relayHarvester = harvesters.get(CandidateType.RELAY);
if (relayHarvester != null) {
relayHarvester.harvest(portManager, mediaStream, selector);
}
// Verify at least one candidate was gathered
if (!mediaStream.hasLocalRtpCandidates()) {
throw new NoCandidatesGatheredException("No RTP candidates were gathered for " + mediaStream.getName() + " stream");
}
// After harvesting all possible candidates, ask the media stream to
// select its default local candidates
mediaStream.getRtpComponent().selectDefaultLocalCandidate();
if (mediaStream.supportsRtcp() && !mediaStream.isRtcpMux()) {
mediaStream.getRtcpComponent().selectDefaultLocalCandidate();
}
} | #vulnerable code
public void harvest(IceMediaStream mediaStream, PortManager portManager, Selector selector) throws HarvestException, NoCandidatesGatheredException {
// Safe copy of currently registered harvesters
Map<CandidateType, CandidateHarvester> copy;
synchronized (this.harvesters) {
copy = new HashMap<CandidateType, CandidateHarvester>(this.harvesters);
}
// Ask each harvester to gather candidates for the media stream
// HOST candidates take precedence and are mandatory
CandidateHarvester hostHarvester = copy.get(CandidateType.HOST);
if (hostHarvester != null) {
hostHarvester.harvest(portManager, mediaStream, selector);
} else {
throw new HarvestException("No HOST harvester registered!");
}
// Then comes the SRFLX, which depends on HOST candidates
CandidateHarvester srflxHarvester = copy.get(CandidateType.SRFLX);
if (srflxHarvester != null) {
srflxHarvester.harvest(portManager, mediaStream, selector);
}
// RELAY candidates come last
CandidateHarvester relayHarvester = copy.get(CandidateType.RELAY);
if (relayHarvester != null) {
relayHarvester.harvest(portManager, mediaStream, selector);
}
// Verify at least one candidate was gathered
if (!mediaStream.hasLocalRtpCandidates()) {
throw new NoCandidatesGatheredException("No RTP candidates were gathered for " + mediaStream.getName() + " stream");
}
// After harvesting all possible candidates, ask the media stream to
// select its default local candidates
mediaStream.getRtpComponent().selectDefaultLocalCandidate();
if (mediaStream.supportsRtcp() && !mediaStream.isRtcpMux()) {
mediaStream.getRtcpComponent().selectDefaultLocalCandidate();
}
}
#location 34
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void leaveRtpSession() {
if (this.joined.get()) {
this.joined.set(false);
/*
* When the participant decides to leave the system, tp is reset to tc, the current time, members and pmembers are
* initialized to 1, initial is set to 1, we_sent is set to false, senders is set to 0, and avg_rtcp_size is set to
* the size of the compound BYE packet.
*
* The calculated interval T is computed. The BYE packet is then scheduled for time tn = tc + T.
*/
this.tp = this.statistics.getCurrentTime();
this.statistics.resetMembers();
this.initial.set(true);
this.statistics.clearSenders();
// XXX Sending the BYE packet NOW, since channel will be closed - hrosa
// long t = this.statistics.rtcpInterval(initial);
// this.tn = resolveDelay(t);
// this.scheduleRtcp(this.tn, RtcpPacketType.RTCP_BYE);
// cancel scheduled task and schedule BYE now
if(this.reportTaskFuture != null) {
this.reportTaskFuture.cancel(true);
}
// Send BYE
// Do not run in separate thread so channel can be properly closed by the owner of this handler
this.statistics.setRtcpPacketType(RtcpPacketType.RTCP_BYE);
this.scheduledTask = new TxTask(RtcpPacketType.RTCP_BYE);
this.scheduledTask.run();
}
} | #vulnerable code
public void leaveRtpSession() {
if (this.joined.get()) {
this.joined.set(false);
/*
* When the participant decides to leave the system, tp is reset to tc, the current time, members and pmembers are
* initialized to 1, initial is set to 1, we_sent is set to false, senders is set to 0, and avg_rtcp_size is set to
* the size of the compound BYE packet.
*
* The calculated interval T is computed. The BYE packet is then scheduled for time tn = tc + T.
*/
this.tp = this.statistics.getCurrentTime();
this.statistics.resetMembers();
this.initial.set(true);
this.statistics.clearSenders();
// XXX Sending the BYE packet NOW, since channel will be closed - hrosa
// long t = this.statistics.rtcpInterval(initial);
// this.tn = resolveDelay(t);
// this.scheduleRtcp(this.tn, RtcpPacketType.RTCP_BYE);
// cancel scheduled task and schedule BYE now
if(this.reportTaskFuture != null) {
this.reportTaskFuture.cancel(true);
}
scheduleNow(RtcpPacketType.RTCP_BYE);
}
}
#location 26
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 17
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testCodec() throws Exception {
boolean testPassed = false;
try {
OpusJni opus = new OpusJni();
opus.initNative();
final int packetSize = 480;
File outputFile = File.createTempFile("opustest", ".tmp");
byte[] output = new byte[2 * packetSize];
try (FileInputStream inputStream = new FileInputStream("src\\test\\resources\\test_sound_mono_48.pcm");
FileOutputStream outputStream = new FileOutputStream(outputFile, false)) {
byte[] input = new byte[packetSize];
short[] inputData = new short[packetSize];
while (inputStream.read(input) == 2 * packetSize) {
ByteBuffer.wrap(input).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(inputData);
byte[] encodedData = opus.encodeNative(inputData);
short[] decodedData = opus.decodeNative(encodedData);
ByteBuffer.wrap(output).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(decodedData);
outputStream.write(output);
}
testPassed = true;
}
opus.closeNative();
outputFile.delete();
} catch (IOException exc) {
log.error("IOException: " + exc.getMessage());
fail("Opus test file access error");
}
assertTrue(testPassed);
} | #vulnerable code
@Test
public void testCodec() throws Exception {
boolean testPassed = false;
try {
final int packetSize = 480;
File outputFile = File.createTempFile("opustest", ".tmp");
FileInputStream inputStream = new FileInputStream("src\\test\\resources\\test_sound_mono_48.pcm");
FileOutputStream outputStream = new FileOutputStream(outputFile, false);
OpusJni opus = new OpusJni();
opus.initNative();
try {
byte[] input = new byte[packetSize];
short[] inputData = new short[packetSize];
byte[] output = new byte[2 * packetSize];
while (inputStream.read(input) == 2 * packetSize) {
ByteBuffer.wrap(input).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(inputData);
byte[] encodedData = opus.encodeNative(inputData);
short[] decodedData = opus.decodeNative(encodedData);
ByteBuffer.wrap(output).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(decodedData);
outputStream.write(output);
}
testPassed = true;
} finally {
inputStream.close();
outputStream.close();
opus.closeNative();
outputFile.delete();
}
} catch (IOException exc) {
log.error("IOException: " + exc.getMessage());
fail("Opus test file access error");
}
assertTrue(testPassed);
}
#location 33
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void harvest(IceMediaStream mediaStream, PortManager portManager, Selector selector) throws HarvestException, NoCandidatesGatheredException {
// Ask each harvester to gather candidates for the media stream
// HOST candidates take precedence and are mandatory
CandidateHarvester hostHarvester = harvesters.get(CandidateType.HOST);
if (hostHarvester != null) {
hostHarvester.harvest(portManager, mediaStream, selector);
} else {
throw new HarvestException("No HOST harvester registered!");
}
// Then comes the SRFLX, which depends on HOST candidates
CandidateHarvester srflxHarvester = harvesters.get(CandidateType.SRFLX);
if (srflxHarvester != null) {
srflxHarvester.harvest(portManager, mediaStream, selector);
}
// RELAY candidates come last
CandidateHarvester relayHarvester = harvesters.get(CandidateType.RELAY);
if (relayHarvester != null) {
relayHarvester.harvest(portManager, mediaStream, selector);
}
// Verify at least one candidate was gathered
if (!mediaStream.hasLocalRtpCandidates()) {
throw new NoCandidatesGatheredException("No RTP candidates were gathered for " + mediaStream.getName() + " stream");
}
// After harvesting all possible candidates, ask the media stream to
// select its default local candidates
mediaStream.getRtpComponent().selectDefaultLocalCandidate();
if (mediaStream.supportsRtcp() && !mediaStream.isRtcpMux()) {
mediaStream.getRtcpComponent().selectDefaultLocalCandidate();
}
} | #vulnerable code
public void harvest(IceMediaStream mediaStream, PortManager portManager, Selector selector) throws HarvestException, NoCandidatesGatheredException {
// Safe copy of currently registered harvesters
Map<CandidateType, CandidateHarvester> copy;
synchronized (this.harvesters) {
copy = new HashMap<CandidateType, CandidateHarvester>(this.harvesters);
}
// Ask each harvester to gather candidates for the media stream
// HOST candidates take precedence and are mandatory
CandidateHarvester hostHarvester = copy.get(CandidateType.HOST);
if (hostHarvester != null) {
hostHarvester.harvest(portManager, mediaStream, selector);
} else {
throw new HarvestException("No HOST harvester registered!");
}
// Then comes the SRFLX, which depends on HOST candidates
CandidateHarvester srflxHarvester = copy.get(CandidateType.SRFLX);
if (srflxHarvester != null) {
srflxHarvester.harvest(portManager, mediaStream, selector);
}
// RELAY candidates come last
CandidateHarvester relayHarvester = copy.get(CandidateType.RELAY);
if (relayHarvester != null) {
relayHarvester.harvest(portManager, mediaStream, selector);
}
// Verify at least one candidate was gathered
if (!mediaStream.hasLocalRtpCandidates()) {
throw new NoCandidatesGatheredException("No RTP candidates were gathered for " + mediaStream.getName() + " stream");
}
// After harvesting all possible candidates, ask the media stream to
// select its default local candidates
mediaStream.getRtpComponent().selectDefaultLocalCandidate();
if (mediaStream.supportsRtcp() && !mediaStream.isRtcpMux()) {
mediaStream.getRtcpComponent().selectDefaultLocalCandidate();
}
}
#location 36
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 16
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 35
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// a packet is outstanding if the timestamp of the arrived packet is less
// than the consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here, try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 23
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
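The thread-safety fix for this write() path consists of taking the jitter-buffer lock at the very top of the method and releasing it in a finally block, so every early return (format check, dead-line drop, duplicate packet) leaves the lock balanced instead of guarding only part of the method. A minimal sketch of that discipline, assuming LOCK is a java.util.concurrent.locks.ReentrantLock protecting the shared frame queue (class and field names here are illustrative, not taken from the original source):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

// Sketch only: a shared queue whose whole write path runs under one lock.
class GuardedFrameQueue {
    private final ReentrantLock lock = new ReentrantLock();
    private final List<Long> queue = new ArrayList<>();

    void write(long timestamp) {
        lock.lock();               // acquire before any check or early return
        try {
            if (timestamp < 0) {
                return;            // early return is safe: finally still releases
            }
            queue.add(timestamp);  // shared state is touched only under the lock
        } finally {
            lock.unlock();         // one release per acquisition, on every path
        }
    }
}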
#fixed code
public Frame read(long timestamp) {
try {
LOCK.lock();
if (queue.size() == 0) {
this.ready = false;
return null;
}
//extract packet
Frame frame = queue.remove(0);
//buffer empty now? - change ready flag.
if (queue.size() == 0) {
this.ready = false;
//arrivalDeadLine = 0;
//set it as 1 ms since otherwise will be dropped by pipe
frame.setDuration(1);
}
arrivalDeadLine = rtpClock.convertToRtpTime(frame.getTimestamp() + frame.getDuration());
//convert duration to nanoseconds
frame.setDuration(frame.getDuration() * 1000000L);
frame.setTimestamp(frame.getTimestamp() * 1000000L);
return frame;
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public Frame read(long timestamp) {
try {
if (queue.size() == 0) {
this.ready = false;
return null;
}
//extract packet
Frame frame = queue.remove(0);
//buffer empty now? - change ready flag.
if (queue.size() == 0) {
this.ready = false;
//arrivalDeadLine = 0;
//set it as 1 ms since otherwise will be dropped by pipe
frame.setDuration(1);
}
arrivalDeadLine = rtpClock.convertToRtpTime(frame.getTimestamp() + frame.getDuration());
//convert duration to nanoseconds
frame.setDuration(frame.getDuration() * 1000000L);
frame.setTimestamp(frame.getTimestamp() * 1000000L);
return frame;
} finally {
LOCK.unlock();
}
}
#location 21
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
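In the unpatched read() above, the finally block calls LOCK.unlock() although the method itself never acquires the lock; with a java.util.concurrent.locks.ReentrantLock that is not just a missing guard, it fails at runtime, which is why the patched version adds LOCK.lock() before touching the queue. A small stand-alone snippet (illustrative, not part of the original class) showing that failure mode:

import java.util.concurrent.locks.ReentrantLock;

public class UnlockWithoutLock {
    public static void main(String[] args) {
        ReentrantLock lock = new ReentrantLock();
        try {
            lock.unlock(); // this thread never called lock()
        } catch (IllegalMonitorStateException e) {
            // ReentrantLock.unlock() requires the calling thread to hold the lock
            System.out.println("unlock() without lock() failed: " + e);
        }
    }
}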
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 109
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
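One detail of the patched write() above: the duplicate-packet branch still calls LOCK.unlock() explicitly before its early return even though the surrounding finally block also unlocks; with a single ReentrantLock acquisition that second release would throw IllegalMonitorStateException, which is why the usual idiom makes the finally block the only release point. A sketch of that idiom, with hypothetical names:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.ReentrantLock;

// Sketch: the finally block is the single unlock site for every exit path.
class SingleUnlockQueue {
    private final ReentrantLock lock = new ReentrantLock();
    private final Deque<Integer> queue = new ArrayDeque<>();
    private int lastSequence = -1;

    void write(int sequence) {
        lock.lock();
        try {
            if (sequence == lastSequence) {
                return;            // duplicate: plain return, finally releases once
            }
            lastSequence = sequence;
            queue.addLast(sequence);
        } finally {
            lock.unlock();         // exactly one release, matching the one lock()
        }
    }
}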
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 33
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 9
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 52
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 29
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Override
public String open(String sdp) throws MgcpConnectionException {
synchronized (this.stateLock) {
switch (this.state) {
case CLOSED:
case HALF_OPEN:
// Update state
this.state = MgcpConnectionState.OPEN;
// Parse remote SDP
try {
this.remoteSdp = SessionDescriptionParser.parse(sdp);
} catch (SdpException e) {
throw new MgcpConnectionException(e.getMessage(), e);
}
// Open connection
openConnection();
if(log.isDebugEnabled()) {
log.debug("Connection " + getHexIdentifier() + " state is " + this.state.name());
}
// Submit timer
if (this.timeout > 0) {
expireIn(this.timeout);
}
break;
default:
throw new MgcpConnectionException("Cannot open connection " + this.getHexIdentifier() + " because state is " + this.state.name());
}
}
return this.localSdp.toString();
} | #vulnerable code
#vulnerable code
@Override
public String open(String sdp) throws MgcpConnectionException {
synchronized (this.stateLock) {
switch (this.state) {
case CLOSED:
case HALF_OPEN:
// Update state
this.state = MgcpConnectionState.OPEN;
// Parse remote SDP
try {
this.remoteSdp = SessionDescriptionParser.parse(sdp);
} catch (SdpException e) {
throw new MgcpConnectionException(e.getMessage(), e);
}
// Open connection
openConnection();
if(log.isDebugEnabled()) {
log.debug("Connection " + getHexIdentifier() + " state is " + this.state.name());
}
// Submit timer
if (this.timeout > 0) {
expireIn(this.timeout);
}
break;
default:
throw new MgcpConnectionException(
"Cannot open connection " + this.getHexIdentifier() + " because state is " + this.state.name());
}
}
return this.localSdp.toString();
}
#location 35
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
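Both versions of open() above wrap the whole CLOSED/HALF_OPEN to OPEN transition in synchronized (this.stateLock), so the state check and the state update happen as one atomic step with respect to other callers. A minimal sketch of that check-then-act guard, using hypothetical names rather than the real MgcpConnection API:

// Hypothetical connection with an atomically guarded open() transition.
class SketchConnection {
    enum State { CLOSED, HALF_OPEN, OPEN }

    private final Object stateLock = new Object();
    private State state = State.CLOSED;

    void open() {
        synchronized (stateLock) {           // check-then-act under one monitor
            switch (state) {
                case CLOSED:
                case HALF_OPEN:
                    state = State.OPEN;      // no other thread can interleave here
                    break;
                default:
                    throw new IllegalStateException("cannot open from " + state);
            }
        }
    }
}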
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 34
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 15
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Test
public void testCodec() throws Exception {
boolean testPassed = false;
try {
OpusJni opus = new OpusJni();
opus.initNative();
final int packetSize = 480;
File outputFile = File.createTempFile("opustest", ".tmp");
byte[] output = new byte[2 * packetSize];
try (FileInputStream inputStream = new FileInputStream("src\\test\\resources\\test_sound_mono_48.pcm");
FileOutputStream outputStream = new FileOutputStream(outputFile, false)) {
byte[] input = new byte[packetSize];
short[] inputData = new short[packetSize];
while (inputStream.read(input) == 2 * packetSize) {
ByteBuffer.wrap(input).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(inputData);
byte[] encodedData = opus.encodeNative(inputData);
short[] decodedData = opus.decodeNative(encodedData);
ByteBuffer.wrap(output).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(decodedData);
outputStream.write(output);
}
testPassed = true;
}
opus.closeNative();
outputFile.delete();
} catch (IOException exc) {
log.error("IOException: " + exc.getMessage());
fail("Opus test file access error");
}
assertTrue(testPassed);
} | #vulnerable code
#vulnerable code
@Test
public void testCodec() throws Exception {
boolean testPassed = false;
try {
final int packetSize = 480;
File outputFile = File.createTempFile("opustest", ".tmp");
FileInputStream inputStream = new FileInputStream("src\\test\\resources\\test_sound_mono_48.pcm");
FileOutputStream outputStream = new FileOutputStream(outputFile, false);
OpusJni opus = new OpusJni();
opus.initNative();
try {
byte[] input = new byte[packetSize];
short[] inputData = new short[packetSize];
byte[] output = new byte[2 * packetSize];
while (inputStream.read(input) == 2 * packetSize) {
ByteBuffer.wrap(input).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(inputData);
byte[] encodedData = opus.encodeNative(inputData);
short[] decodedData = opus.decodeNative(encodedData);
ByteBuffer.wrap(output).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(decodedData);
outputStream.write(output);
}
testPassed = true;
} finally {
inputStream.close();
outputStream.close();
opus.closeNative();
outputFile.delete();
}
} catch (IOException exc) {
log.error("IOException: " + exc.getMessage());
fail("Opus test file access error");
}
assertTrue(testPassed);
}
#location 33
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
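The resource-leak fix above moves the two streams into a try-with-resources header, which closes whatever has already been opened even if a later setup step (such as constructing the second stream) throws before any finally block is reached — the window where the original version leaks the already-opened stream. A minimal sketch of the same pattern outside the Opus test (file paths are placeholders):

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

// Sketch: both streams close automatically, in reverse order of creation,
// whether the copy loop finishes normally or throws.
public class CopyWithResources {
    public static void copy(String from, String to) throws IOException {
        try (FileInputStream in = new FileInputStream(from);
             FileOutputStream out = new FileOutputStream(to)) {
            byte[] buffer = new byte[4096];
            int read;
            while ((read = in.read(buffer)) != -1) {
                out.write(buffer, 0, read);
            }
        }
    }
}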
#fixed code
public Frame read(long timestamp) {
try {
LOCK.lock();
if (queue.size() == 0) {
this.ready = false;
return null;
}
//extract packet
Frame frame = queue.remove(0);
//buffer empty now? - change ready flag.
if (queue.size() == 0) {
this.ready = false;
//arrivalDeadLine = 0;
//set it as 1 ms since otherwise will be dropped by pipe
frame.setDuration(1);
}
arrivalDeadLine = rtpClock.convertToRtpTime(frame.getTimestamp() + frame.getDuration());
//convert duration to nanoseconds
frame.setDuration(frame.getDuration() * 1000000L);
frame.setTimestamp(frame.getTimestamp() * 1000000L);
return frame;
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public Frame read(long timestamp) {
try {
if (queue.size() == 0) {
this.ready = false;
return null;
}
//extract packet
Frame frame = queue.remove(0);
//buffer empty now? - change ready flag.
if (queue.size() == 0) {
this.ready = false;
//arrivalDeadLine = 0;
//set it as 1 ms since otherwise will be dropped by pipe
frame.setDuration(1);
}
arrivalDeadLine = rtpClock.convertToRtpTime(frame.getTimestamp() + frame.getDuration());
//convert duration to nanoseconds
frame.setDuration(frame.getDuration() * 1000000L);
frame.setTimestamp(frame.getTimestamp() * 1000000L);
return frame;
} finally {
LOCK.unlock();
}
}
#location 21
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
@Override
public void stop() {
while (buffer.size() > 0) {
Frame frame = buffer.poll();
if(frame != null) {
frame.recycle();
}
}
super.stop();
} | #vulnerable code
#vulnerable code
@Override
public void stop() {
while (buffer.size() > 0) {
buffer.poll().recycle();
}
super.stop();
}
#location 4
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
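The null-dereference fix above comes from buffer.poll(): on a queue that another thread can drain concurrently, the element implied by buffer.size() > 0 may already be gone by the time poll() runs, so the patched loop checks the returned frame for null before calling recycle(). A small sketch of that defensive drain loop, using a stand-in element type instead of the media frame class:

import java.util.concurrent.ConcurrentLinkedQueue;

// Sketch: poll() may return null under contention even though the queue
// looked non-empty a moment earlier, so the result is checked before use.
class FrameDrain {
    private final ConcurrentLinkedQueue<StringBuilder> buffer = new ConcurrentLinkedQueue<>();

    void drain() {
        while (!buffer.isEmpty()) {
            StringBuilder frame = buffer.poll();   // null if another consumer won the race
            if (frame != null) {
                frame.setLength(0);                // stands in for frame.recycle()
            }
        }
    }
}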
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
#vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 30
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
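The thread-safety fix above takes the lock once at the top of write() and releases it in a finally block, so every return path unlocks exactly once instead of scattering unlock calls before each return. A minimal sketch of that lock-in-try / unlock-in-finally shape follows; the class, field, and constant names (PacketBuffer, frames, CAPACITY) are hypothetical and not taken from the code above.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative only: guard all shared buffer state with one lock/unlock pair.
public class PacketBuffer {

    private final ReentrantLock lock = new ReentrantLock();
    private final Queue<byte[]> frames = new ArrayDeque<>();
    private static final int CAPACITY = 100;

    public void write(byte[] frame) {
        lock.lock();               // acquire once, before reading or writing shared state
        try {
            if (frame == null) {
                return;            // early return is safe: the finally block still unlocks
            }
            if (frames.size() >= CAPACITY) {
                frames.poll();     // drop the oldest frame on overflow
            }
            frames.add(frame);
        } finally {
            lock.unlock();         // exactly one unlock on every exit path
        }
    }

    public byte[] read() {
        lock.lock();
        try {
            return frames.poll();  // may be null when the buffer is empty
        } finally {
            lock.unlock();
        }
    }
}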
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 14
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 41
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
public void write(RtpPacket packet, RTPFormat format) {
try {
LOCK.lock();
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
} finally {
LOCK.unlock();
}
} | #vulnerable code
public void write(RtpPacket packet, RTPFormat format) {
// checking format
if (format == null) {
logger.warn("No format specified. Packet dropped!");
return;
}
if (this.format == null || this.format.getID() != format.getID()) {
this.format = format;
logger.info("Format has been changed: " + this.format.toString());
}
// if this is first packet then synchronize clock
if (isn == -1) {
rtpClock.synchronize(packet.getTimestamp());
isn = packet.getSeqNumber();
initJitter(packet);
} else {
estimateJitter(packet);
}
// update clock rate
rtpClock.setClockRate(this.format.getClockRate());
// drop outstanding packets
// packet is outstanding if its timestamp of arrived packet is less
// then consumer media time
if (packet.getTimestamp() < this.arrivalDeadLine) {
logger.warn("drop packet: dead line=" + arrivalDeadLine + ", packet time=" + packet.getTimestamp() + ", seq=" + packet.getSeqNumber() + ", payload length=" + packet.getPayloadLength() + ", format=" + this.format.toString());
dropCount++;
// checking if not dropping too much
droppedInRaw++;
if (droppedInRaw == QUEUE_SIZE / 2 || queue.size() == 0) {
arrivalDeadLine = 0;
} else {
return;
}
}
Frame f = Memory.allocate(packet.getPayloadLength());
// put packet into buffer irrespective of its sequence number
f.setHeader(null);
f.setSequenceNumber(packet.getSeqNumber());
// here time is in milliseconds
f.setTimestamp(rtpClock.convertToAbsoluteTime(packet.getTimestamp()));
f.setOffset(0);
f.setLength(packet.getPayloadLength());
packet.getPayload(f.getData(), 0);
// set format
f.setFormat(this.format.getFormat());
// make checks only if have packet
if (f != null) {
LOCK.lock();
droppedInRaw = 0;
// find correct position to insert a packet
// use timestamp since its always positive
int currIndex = queue.size() - 1;
while (currIndex >= 0 && queue.get(currIndex).getTimestamp() > f.getTimestamp()) {
currIndex--;
}
// check for duplicate packet
if (currIndex >= 0 && queue.get(currIndex).getSequenceNumber() == f.getSequenceNumber()) {
LOCK.unlock();
return;
}
queue.add(currIndex + 1, f);
// recalculate duration of each frame in queue and overall duration
// since we could insert the frame in the middle of the queue
duration = 0;
if (queue.size() > 1) {
duration = queue.get(queue.size() - 1).getTimestamp() - queue.get(0).getTimestamp();
}
for (int i = 0; i < queue.size() - 1; i++) {
// duration measured by wall clock
long d = queue.get(i + 1).getTimestamp() - queue.get(i).getTimestamp();
// in case of RFC2833 event timestamp remains same
queue.get(i).setDuration(d > 0 ? d : 0);
}
// if overall duration is negative we have some mess here,try to
// reset
if (duration < 0 && queue.size() > 1) {
logger.warn("Something messy happened. Reseting jitter buffer!");
reset();
LOCK.unlock();
return;
}
// overflow?
// only now remove packet if overflow , possibly the same packet we just received
if (queue.size() > QUEUE_SIZE) {
logger.warn("Buffer overflow!");
dropCount++;
queue.remove(0).recycle();
}
LOCK.unlock();
// check if this buffer already full
if (!ready) {
ready = !useBuffer || (duration >= jitterBufferSize && queue.size() > 1);
if (ready && listener != null) {
listener.onFill();
}
}
}
}
#location 111
#vulnerability type THREAD_SAFETY_VIOLATION | Below is the vulnerable code, please generate the patch based on the following information. |
#fixed code
private byte[] processRequest(StunRequest request, InetSocketAddress localPeer, InetSocketAddress remotePeer) throws IOException {
/*
* The agent MUST use a short-term credential to authenticate the
* request and perform a message integrity check.
*/
// Produce Binding Response
TransportAddress transportAddress = new TransportAddress(remotePeer.getAddress(), remotePeer.getPort(), TransportProtocol.UDP);
StunResponse response = StunMessageFactory.createBindingResponse(request, transportAddress);
byte[] transactionID = request.getTransactionId();
try {
response.setTransactionID(transactionID);
} catch (StunException e) {
throw new IOException("Illegal STUN Transaction ID: " + new String(transactionID), e);
}
UsernameAttribute remoteUnameAttribute = (UsernameAttribute) request.getAttribute(StunAttribute.USERNAME);
// Send binding error response if username is null
if (remoteUnameAttribute.getUsername()== null) {
response.setMessageType(StunMessage.BINDING_ERROR_RESPONSE);
response.addAttribute(StunAttributeFactory.createErrorCodeAttribute(ErrorCodeAttribute.BAD_REQUEST,
ErrorCodeAttribute.getDefaultReasonPhrase(ErrorCodeAttribute.BAD_REQUEST)));
return response.encode();
}
String remoteUsername = new String(remoteUnameAttribute.getUsername());
/*
* The agent MUST consider the username to be valid if it consists of
* two values separated by a colon, where the first value is equal to
* the username fragment generated by the agent in an offer or answer
* for a session in-progress.
*/
if(!this.iceAuthenticator.validateUsername(remoteUsername)) {
// TODO return error response
throw new IOException("Invalid username "+ remoteUsername);
}
/*
* The username for the credential is formed by concatenating the
* username fragment provided by the peer with the username fragment of
* the agent sending the request, separated by a colon (":").
*/
int colon = remoteUsername.indexOf(":");
String localUFrag = remoteUsername.substring(0, colon);
String remoteUfrag = remoteUsername.substring(colon);
/*
* An agent MUST include the PRIORITY attribute in its Binding request.
* This priority value will be computed identically to how the priority
* for the local candidate of the pair was computed, except that the
* type preference is set to the value for peer reflexive candidate
* types
*/
long priority = extractPriority(request);
/*
* Add USERNAME and MESSAGE-INTEGRITY attribute in the response. The
* responses utilize the same usernames and passwords as the requests
*/
String localUsername = remoteUfrag.concat(":").concat(localUFrag);
StunAttribute unameAttribute = StunAttributeFactory.createUsernameAttribute(localUsername);
response.addAttribute(unameAttribute);
byte[] localKey = this.iceAuthenticator.getLocalKey(localUFrag);
MessageIntegrityAttribute messageIntegrityAttribute = StunAttributeFactory.createMessageIntegrityAttribute(remoteUsername, localKey);
response.addAttribute(messageIntegrityAttribute);
// If the client issues a USE-CANDIDATE, tell ICE Agent to select the candidate
if (request.containsAttribute(StunAttribute.USE_CANDIDATE)) {
fireStunBindingEvent(localPeer, remotePeer);
}
// Pass response to the server
return response.encode();
} | #vulnerable code
private byte[] processRequest(StunRequest request, InetSocketAddress localPeer, InetSocketAddress remotePeer) throws IOException {
/*
* The agent MUST use a short-term credential to authenticate the
* request and perform a message integrity check.
*/
// Produce Binding Response
TransportAddress transportAddress = new TransportAddress(remotePeer.getAddress(), remotePeer.getPort(), TransportProtocol.UDP);
StunResponse response = StunMessageFactory.createBindingResponse(request, transportAddress);
byte[] transactionID = request.getTransactionId();
try {
response.setTransactionID(transactionID);
} catch (StunException e) {
throw new IOException("Illegal STUN Transaction ID: " + new String(transactionID), e);
}
UsernameAttribute remoteUnameAttribute;
String remoteUsername;
// Send binding error response if username is null
try {
remoteUnameAttribute = (UsernameAttribute) request.getAttribute(StunAttribute.USERNAME);
remoteUsername = new String(remoteUnameAttribute.getUsername());
}
catch(NullPointerException nullPointer) {
response.setMessageType(StunMessage.BINDING_ERROR_RESPONSE);
response.addAttribute(StunAttributeFactory.createErrorCodeAttribute(ErrorCodeAttribute.BAD_REQUEST,
ErrorCodeAttribute.getDefaultReasonPhrase(ErrorCodeAttribute.BAD_REQUEST)));
return response.encode();
}
/*
* The agent MUST consider the username to be valid if it consists of
* two values separated by a colon, where the first value is equal to
* the username fragment generated by the agent in an offer or answer
* for a session in-progress.
*/
if(!this.iceAuthenticator.validateUsername(remoteUsername)) {
// TODO return error response
throw new IOException("Invalid username "+ remoteUsername);
}
/*
* The username for the credential is formed by concatenating the
* username fragment provided by the peer with the username fragment of
* the agent sending the request, separated by a colon (":").
*/
int colon = remoteUsername.indexOf(":");
String localUFrag = remoteUsername.substring(0, colon);
String remoteUfrag = remoteUsername.substring(colon);
/*
* An agent MUST include the PRIORITY attribute in its Binding request.
* This priority value will be computed identically to how the priority
* for the local candidate of the pair was computed, except that the
* type preference is set to the value for peer reflexive candidate
* types
*/
long priority = extractPriority(request);
/*
* Add USERNAME and MESSAGE-INTEGRITY attribute in the response. The
* responses utilize the same usernames and passwords as the requests
*/
String localUsername = remoteUfrag.concat(":").concat(localUFrag);
StunAttribute unameAttribute = StunAttributeFactory.createUsernameAttribute(localUsername);
response.addAttribute(unameAttribute);
byte[] localKey = this.iceAuthenticator.getLocalKey(localUFrag);
MessageIntegrityAttribute messageIntegrityAttribute = StunAttributeFactory.createMessageIntegrityAttribute(remoteUsername, localKey);
response.addAttribute(messageIntegrityAttribute);
// If the client issues a USE-CANDIDATE, tell ICE Agent to select the candidate
if (request.containsAttribute(StunAttribute.USE_CANDIDATE)) {
fireStunBindingEvent(localPeer, remotePeer);
}
// Pass response to the server
return response.encode();
}
#location 22
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
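The null-dereference fix above checks that the username carried in the USERNAME attribute is present and answers with a Binding error response when it is not, instead of letting the dereference throw and catching NullPointerException. A small sketch of that guard-and-error-response shape follows; Request, Handler, and the status strings are hypothetical stand-ins, not the actual STUN types.

import java.util.HashMap;
import java.util.Map;

// Illustrative only: validate a possibly-missing attribute before using it.
final class Request {
    private final Map<String, String> attributes = new HashMap<>();
    String getAttribute(String name) { return attributes.get(name); } // may return null
    void setAttribute(String name, String value) { attributes.put(name, value); }
}

final class Handler {
    String process(Request request) {
        String username = request.getAttribute("USERNAME");
        // Guard before use rather than catching NullPointerException downstream.
        if (username == null || username.isEmpty()) {
            return "400 BAD REQUEST";
        }
        int colon = username.indexOf(':');
        if (colon < 0) {
            return "400 BAD REQUEST"; // malformed credential, handled without an NPE
        }
        String localFragment = username.substring(0, colon);
        return "200 OK for " + localFragment;
    }
}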
#fixed code
public void train(String dataFile, int maxite, float c) throws IOException {
fp = File.createTempFile("train-features", null, new File("./tmp/"));
buildInstanceList(dataFile);
LabelAlphabet postagAlphabet = factory.buildLabelAlphabet("postag");
SFGenerator generator = new SFGenerator();
Linear[] models = new Linear[postagAlphabet.size()];
for (int i = 0; i < postagAlphabet.size(); i++) {
String pos = postagAlphabet.lookupString(i);
InstanceSet instset = readInstanceSet(pos);
LabelAlphabet alphabet = factory.buildLabelAlphabet(pos);
int ysize = alphabet.size();
System.out.printf("Training with data: %s\n", pos);
System.out.printf("Number of labels: %d\n", ysize);
LinearMax solver = new LinearMax(generator, ysize);
ZeroOneLoss loss = new ZeroOneLoss();
Update update = new LinearMaxPAUpdate(loss);
OnlineTrainer trainer = new OnlineTrainer(solver, update, loss,
factory, maxite, c);
models[i] = trainer.train(instset, null);
instset = null;
solver = null;
loss = null;
trainer = null;
System.out.println();
}
factory.setStopIncrement(true);
saveModels(modelfile, models,factory);
fp.delete();
fp = null;
} | #vulnerable code
public void train(String dataFile, int maxite, float c) throws IOException {
fp = File.createTempFile("train-features", null, new File("./tmp/"));
buildInstanceList(dataFile);
LabelAlphabet postagAlphabet = factory.buildLabelAlphabet("postag");
IFeatureAlphabet features = factory.DefaultFeatureAlphabet();
SFGenerator generator = new SFGenerator();
Linear[] models = new Linear[postagAlphabet.size()];
int fsize = features.size();
for (int i = 0; i < postagAlphabet.size(); i++) {
String pos = postagAlphabet.lookupString(i);
InstanceSet instset = readInstanceSet(pos);
LabelAlphabet alphabet = factory.buildLabelAlphabet(pos);
int ysize = alphabet.size();
System.out.printf("Training with data: %s\n", pos);
System.out.printf("Number of labels: %d\n", ysize);
LinearMax solver = new LinearMax(generator, ysize);
ZeroOneLoss loss = new ZeroOneLoss();
Update update = new LinearMaxPAUpdate(loss);
OnlineTrainer trainer = new OnlineTrainer(solver, update, loss,
fsize, maxite, c);
models[i] = trainer.train(instset, null);
instset = null;
solver = null;
loss = null;
trainer = null;
System.out.println();
}
factory.setStopIncrement(true);
saveModels(modelfile, models,factory);
fp.delete();
fp = null;
}
#location 13
#vulnerability type NULL_DEREFERENCE | Below is the vulnerable code, please generate the patch based on the following information. |
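The fix above simply stops fetching the default feature alphabet and passes the factory straight to the trainer, which removes the dereference that could fail when the alphabet has not been built. When such a value really is needed, a fail-fast null check with a descriptive message is the usual alternative. A tiny illustrative sketch follows; Factory, Alphabet, and Trainer are made-up stand-ins, not the library's real classes.

import java.util.Objects;

// Illustrative only: fail fast, with context, when a lookup may return null.
interface Alphabet { int size(); }

interface Factory { Alphabet defaultAlphabet(); } // may return null before initialisation

final class Trainer {
    int featureCount(Factory factory) {
        Alphabet alphabet = Objects.requireNonNull(
                factory.defaultAlphabet(),
                "feature alphabet has not been initialised"); // clear failure instead of an NPE
        return alphabet.size();                               // safe: alphabet cannot be null here
    }
}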
#fixed code
public static void main(String[] args)
{
try
{
String ls_1;
Process process =null;
// File handle = new File("../tmp/ctb_v1/data");
File handle = new File("../tmp/ctb_v6/data/bracketed");
BufferedWriter bout = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream("../tmp/malt.train"), "UTF-8"));
for (File sub : Arrays.asList(handle.listFiles())){
String file = sub.getAbsolutePath();
if(!file.endsWith(".fid"))
continue;
clean(file);
process = Runtime.getRuntime().exec("cmd /c java -jar ../tmp/Penn2Malt.jar "+file+" ../tmp/headrules.txt 3 2 chtb");
BufferedReader bufferedReader = new BufferedReader(
new InputStreamReader(process.getInputStream()));
while ( (ls_1=bufferedReader.readLine()) != null)
{
System.out.println(ls_1);
}
bufferedReader = new BufferedReader(
new InputStreamReader(process.getErrorStream()));
while ( (ls_1=bufferedReader.readLine()) != null)
{
System.out.println(ls_1);
}
}
}
catch(IOException e)
{
System.err.println(e);
}
} | #vulnerable code
public static void main(String[] args)
{
try
{
String ls_1;
Process process =null;
File handle = new File("./tmpdata/ctb/data3");
BufferedWriter bout = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream("./tmpdata/malt.train"), "UTF-8"));
for (File sub : Arrays.asList(handle.listFiles())){
String str = sub.getAbsolutePath();
process = Runtime.getRuntime().exec("cmd /c java -jar ./tmpdata/ctb/Penn2Malt.jar "+str+" ./tmpdata/ctb/headrules.txt 3 2 chtb");
BufferedReader bufferedReader = new BufferedReader(
new InputStreamReader(process.getInputStream()));
while ( (ls_1=bufferedReader.readLine()) != null)
{
System.out.println(ls_1);
}
}
}
catch(IOException e)
{
System.err.println(e);
}
}
#location 21
#vulnerability type RESOURCE_LEAK | Below is the vulnerable code, please generate the patch based on the following information. |
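Resource leaks like the one flagged above typically come from readers and writers that are opened but never closed on every path; try-with-resources is the idiomatic Java remedy, closing each stream even when an exception is thrown. A minimal sketch follows; the command ("echo") and the output path are hypothetical, chosen only to keep the example self-contained.

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

// Illustrative only: copy a child process's output to a file without leaking streams.
public final class ProcessOutputCopy {
    public static void main(String[] args) throws IOException, InterruptedException {
        Process process = new ProcessBuilder("echo", "hello").start();

        // Both streams are closed automatically, even if readLine() or write() throws.
        try (BufferedReader reader = new BufferedReader(
                     new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8));
             BufferedWriter writer = Files.newBufferedWriter(
                     Paths.get("output.txt"), StandardCharsets.UTF_8)) {
            String line;
            while ((line = reader.readLine()) != null) {
                writer.write(line);
                writer.newLine();
            }
        }
        process.waitFor();
    }
}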