input | output | instruction
---|---|---|
#vulnerable code
@Test
public void testPropertiesNotBeingCachedOnVertexOut() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Car", "name", "a");
Vertex v3 = this.sqlgGraph.addVertex(T.label, "Car", "name", "b");
Vertex v4 = this.sqlgGraph.addVertex(T.label, "Car", "name", "c");
v1.addEdge("car", v2);
v1.addEdge("car", v3);
v1.addEdge("car", v4);
this.sqlgGraph.tx().commit();
v1 = this.sqlgGraph.v(v1.id());
List<Vertex> cars = vertexTraversal(v1).out("car").toList();
Assert.assertEquals(3, cars.size());
}
#location 16
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testPropertiesNotBeingCachedOnVertexOut() {
Vertex v1 = this.sqlgGraph.addVertex(T.label, "Person");
Vertex v2 = this.sqlgGraph.addVertex(T.label, "Car", "name", "a");
Vertex v3 = this.sqlgGraph.addVertex(T.label, "Car", "name", "b");
Vertex v4 = this.sqlgGraph.addVertex(T.label, "Car", "name", "c");
v1.addEdge("car", v2);
v1.addEdge("car", v3);
v1.addEdge("car", v4);
this.sqlgGraph.tx().commit();
v1 = this.sqlgGraph.traversal().V(v1.id()).next();
List<Vertex> cars = vertexTraversal(v1).out("car").toList();
Assert.assertEquals(3, cars.size());
} | Below is the vulnerable code, please generate the patch based on the following information. |
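The same null-dereference pattern recurs in the next two records: the deprecated `graph.v(id)` / `graph.e(id)` lookups return null for unknown ids, and any chained call on the result blows up, whereas the traversal API forces the caller to confront the empty case. A minimal sketch of the defensive lookup, assuming the TinkerPop `Graph`/`Vertex` types that sqlg builds on:

```java
import java.util.Iterator;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.Vertex;

class VertexLookupSketch {
    // GraphTraversal implements Iterator, so existence can be checked before
    // next() is called; a bare next() on an empty traversal would throw
    // NoSuchElementException instead of returning null.
    static Vertex requireVertex(Graph graph, Object id) {
        Iterator<Vertex> found = graph.traversal().V(id);
        if (!found.hasNext()) {
            throw new IllegalStateException("No vertex with id " + id);
        }
        return found.next();
    }
}
```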
#vulnerable code
@Test
public void testIdNotLoadedAsProperty() throws Exception {
Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
this.sqlgGraph.tx().commit();
this.sqlgGraph.close();
try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) {
Vertex vv = sqlgGraph1.traversal().V(v.id()).next();
assertFalse(vv.property("ID").isPresent());
Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of(
sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString());
assertFalse(propertyTypeMap.containsKey("ID"));
sqlgGraph1.tx().rollback();
}
}
#location 9
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testIdNotLoadedAsProperty() throws Exception {
Vertex v = this.sqlgGraph.addVertex(T.label, "Person", "name", "a");
this.sqlgGraph.tx().commit();
this.sqlgGraph.close();
try (SqlgGraph sqlgGraph1 = SqlgGraph.open(configuration)) {
Vertex vv = sqlgGraph1.traversal().V(v.id()).next();
Assert.assertFalse(vv.property("ID").isPresent());
Map<String, PropertyType> propertyTypeMap = sqlgGraph1.getTopology().getAllTables().get(SchemaTable.of(
sqlgGraph1.getSqlDialect().getPublicSchema(), "V_Person").toString());
Assert.assertFalse(propertyTypeMap.containsKey("ID"));
sqlgGraph1.tx().rollback();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testLoadEdgeProperties() {
Vertex marko = this.sqlgGraph.addVertex(T.label, "Person", "name", "marko");
Vertex john = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Edge friend = marko.addEdge("friend", john, "weight", 1);
this.sqlgGraph.tx().commit();
Assert.assertEquals(1, this.sqlgGraph.e(friend.id()).property("weight").value());
}
#location 7
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testLoadEdgeProperties() {
Vertex marko = this.sqlgGraph.addVertex(T.label, "Person", "name", "marko");
Vertex john = this.sqlgGraph.addVertex(T.label, "Person", "name", "john");
Edge friend = marko.addEdge("friend", john, "weight", 1);
this.sqlgGraph.tx().commit();
Assert.assertEquals(1, this.sqlgGraph.traversal().E(friend.id()).next().property("weight").value());
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testLoadingDatasourceFromJndi() throws Exception {
SqlgGraph g = SqlgGraph.open(configuration);
assertNotNull(g.getSqlDialect());
assertNotNull(g.getSqlgDataSource().get(configuration.getString("jdbc.url")));
}
#location 5
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void testLoadingDatasourceFromJndi() throws Exception {
try (SqlgGraph g = SqlgGraph.open(configuration)) {
assertNotNull(g.getSqlDialect());
assertEquals(configuration.getString("jdbc.url"), g.getJdbcUrl());
assertNotNull(g.getConnection());
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@POST
@Path(value = "test")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, Object> test(@Context Repository repository,
@FormParam(Notifier.JENKINS_BASE) String jenkinsBase,
@FormParam(Notifier.CLONE_TYPE) String cloneType,
@FormParam(Notifier.CLONE_URL) String cloneUrl,
@FormParam(Notifier.IGNORE_CERTS) boolean ignoreCerts,
@FormParam(Notifier.OMIT_HASH_CODE) boolean omitHashCode) {
if (jenkinsBase == null || cloneType == null || (cloneType.equals("custom") && cloneUrl == null)) {
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", false);
map.put("message", "Settings must be configured");
return map;
}
permissionService.validateForRepository(repository, Permission.REPO_ADMIN);
log.debug("Triggering jenkins notification for repository {}/{}",
repository.getProject().getKey(), repository.getSlug());
/* @todo [email protected]: Send null instead of master and sha1 and
* handle this in notify
*/
NotificationResult result = notifier.notify(repository, jenkinsBase,
ignoreCerts, cloneType, cloneUrl, null, null, omitHashCode);
log.debug("Got response from jenkins: {}", result);
// Shouldn't have to do this but the result isn't being marshalled correctly
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", result.isSuccessful());
map.put("url", result.getUrl());
map.put("message", result.getMessage());
return map;
}
#location 31
#vulnerability type NULL_DEREFERENCE | #fixed code
@POST
@Path(value = "test")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, Object> test(@Context Repository repository,
@FormParam(Notifier.JENKINS_BASE) String jenkinsBase,
@FormParam(Notifier.CLONE_TYPE) String cloneType,
@FormParam(Notifier.CLONE_URL) String cloneUrl,
@FormParam(Notifier.IGNORE_CERTS) boolean ignoreCerts,
@FormParam(Notifier.OMIT_HASH_CODE) boolean omitHashCode) {
if (jenkinsBase == null || cloneType == null || (cloneType.equals("custom") && cloneUrl == null)) {
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", false);
map.put("message", "Settings must be configured");
return map;
}
permissionService.validateForRepository(repository, Permission.REPO_ADMIN);
log.debug("Triggering jenkins notification for repository {}/{}",
repository.getProject().getKey(), repository.getSlug());
/* @todo [email protected]: Send null instead of master and sha1 and
* handle this in notify
*/
NotificationResult result = notifier.notify(repository, jenkinsBase,
ignoreCerts, cloneType, cloneUrl, null, null, omitHashCode, true);
log.debug("Got response from jenkins: {}", result);
// Shouldn't have to do this but the result isn't being marshalled correctly
Map<String, Object> map = new HashMap<String, Object>();
map.put("successful", result.isSuccessful());
map.put("url", result.getUrl());
map.put("message", result.getMessage());
return map;
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public static void main(String[] args) throws Exception {
MongoCollection<Document> eventCollection =
new MongoClient(
new MongoClientURI("mongodb://localhost:27001,localhost:27002,localhost:27003/test?replicatSet=demo-dev")
).getDatabase("test").getCollection("events");
long i = 0;
while (true) {
Document doc = new Document();
doc.put("i", i++);
doc.put("even", i % 2);
eventCollection.insertOne(doc);
//System.out.println("inserted: " + doc);
Thread.sleep(2000L + (long)(1000*Math.random()));
}
}
#location 6
#vulnerability type RESOURCE_LEAK | #fixed code
public static void main(String[] args) throws Exception {
try (MongoClient client = new MongoClient(Connection.URI)) {
MongoCollection<Document> eventCollection =
client.getDatabase("test").getCollection("events");
long i = 0;
while (true) {
Document doc = new Document();
doc.put("i", i++);
doc.put("even", i % 2);
eventCollection.insertOne(doc);
//System.out.println("inserted: " + doc);
Thread.sleep(2000L + (long)(1000*Math.random()));
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
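Both this record and the change-stream one that follows route the connection string through a `Connection.URI` constant that is not part of either snippet. A hypothetical sketch of what such a holder might look like, with the URI value guessed from the vulnerable version (whose `replicatSet` parameter looks like a typo for `replicaSet`):

```java
import com.mongodb.MongoClientURI;

// Hypothetical constants holder; not part of the original records.
final class Connection {
    static final MongoClientURI URI = new MongoClientURI(
            "mongodb://localhost:27001,localhost:27002,localhost:27003/test?replicaSet=demo-dev");

    private Connection() {
        // no instances; constants only
    }
}
```

Centralizing the URI also means the try-with-resources fix applies uniformly: `MongoClient` implements `Closeable`, so both programs now release their connection pools even when the loop exits exceptionally.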
#vulnerable code
public static void main(String[] args) throws Exception {
MongoCollection<Document> eventCollection =
new MongoClient(
new MongoClientURI("mongodb://localhost:27001,localhost:27002,localhost:27003/test?replicatSet=demo-dev")
).getDatabase("test").getCollection("events");
ChangeStreamIterable<Document> changes = eventCollection.watch(asList(
Aggregates.match( and( asList(
in("operationType", asList("insert")),
eq("fullDocument.even", 1L)))
)));
changes.forEach(new Block<ChangeStreamDocument<Document>>() {
@Override
public void apply(ChangeStreamDocument<Document> t) {
System.out.println("received: " + t.getFullDocument());
}
});
}
#location 6
#vulnerability type RESOURCE_LEAK | #fixed code
public static void main(String[] args) throws Exception {
try (MongoClient client = new MongoClient(Connection.URI)) {
MongoCollection<Document> eventCollection =
client.getDatabase("test").getCollection("events");
ChangeStreamIterable<Document> changes = eventCollection.watch(asList(
Aggregates.match( and( asList(
in("operationType", asList("insert")),
eq("fullDocument.even", 1L)))
)));
changes.iterator().forEachRemaining(
change -> System.out.println("received: " + change.getFullDocument())
);
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public void start(int port, SSLContext context, boolean authReq) throws IOException {
SocketConnection socketConnection = new SocketConnection(authReq ? new AuthRequiredServer(server) : server);
socketConnection.connect(new InetSocketAddress(port), context);
}
#location 4
#vulnerability type RESOURCE_LEAK | #fixed code
@Override
public void start(int port, SSLContext context, boolean authReq) throws IOException {
ContainerSocketProcessor server = new ContainerSocketProcessor(this, count, select);
socketConnection = new SocketConnection(authReq ? new AuthRequiredServer(server) : server);
socketConnection.connect(new InetSocketAddress(port), context);
} | Below is the vulnerable code, please generate the patch based on the following information. |
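Promoting `socketConnection` to a field only pays off if a shutdown path eventually closes it; the record omits the rest of the class, so the following stop() method is an assumption about how that field is meant to be used:

```java
// Hypothetical member of the same class as start(); not shown in the record.
public void stop() throws IOException {
    if (socketConnection != null) {
        socketConnection.close(); // releases the listening socket and its selector threads
        socketConnection = null;
    }
}
```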
#vulnerable code
@Override
public FloatProcessor toFloat(int channelNumber, FloatProcessor fp)
{
int width = getWidth();
int height = getHeight();
long size = getNumPixels(this.imageData);
if (fp == null || fp.getWidth()!=width || fp.getHeight()!=height)
fp = new FloatProcessor(width, height, new float[(int)size], super.cm);
int[] origin = Index.create(0, 0, getPlanePosition());
int[] span = Span.singlePlane(width, height, this.imageData.getNumDimensions());
SetFloatValuesOperation<T> floatOp = new SetFloatValuesOperation<T>(this.imageData, origin, span, fp);
Operation.apply(floatOp);
fp.setRoi(getRoi());
fp.setMask(getMask());
fp.setMinAndMax(this.min, this.max);
fp.setThreshold(getMinThreshold(), getMaxThreshold(), ImageProcessor.NO_LUT_UPDATE);
return fp;
}
#location 12
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public FloatProcessor toFloat(int channelNumber, FloatProcessor fp)
{
int width = getWidth();
int height = getHeight();
long size = ImageUtils.getTotalSamples(this.imageData);
if (fp == null || fp.getWidth()!=width || fp.getHeight()!=height)
fp = new FloatProcessor(width, height, new float[(int)size], super.cm); // TODO : notice that we can't get more than 2 gig of floats
int[] origin = Index.create(0, 0, getPlanePosition());
int[] span = Span.singlePlane(width, height, this.imageData.getNumDimensions());
SetFloatValuesOperation<T> floatOp = new SetFloatValuesOperation<T>(this.imageData, origin, span, fp);
Operation.apply(floatOp);
fp.setRoi(getRoi());
fp.setMask(getMask());
fp.setMinAndMax(this.min, this.max);
fp.setThreshold(getMinThreshold(), getMaxThreshold(), ImageProcessor.NO_LUT_UPDATE);
return fp;
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
void undoFilter() {
if (ip!=null) {
ip.reset();
updateAndDraw();
}
}
#location 4
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
void invertLookupTable() {
int nImages = getStackSize();
ip.invertLut();
if (nImages==1)
ip.invert();
else {
ImageStack stack2 = getStack();
for (int i=1; i<=nImages; i++)
stack2.getProcessor(i).invert();
stack2.setColorModel(ip.getColorModel());
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
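Note that the paired "fixed" code in this record shows an unrelated method, so the actual correction for undoFilter() is not visible. The thread-safety violation itself is a classic check-then-act race: another thread can null out the shared `ip` field between the null check and `ip.reset()`. A minimal sketch of the local-snapshot idiom that closes that window, assuming ImageJ's `ImageProcessor` type; this correction is an assumption, not the project's actual fix:

```java
void undoFilter() {
    ImageProcessor snapshot = ip; // read the shared field exactly once
    if (snapshot != null) {
        snapshot.reset();
        updateAndDraw();
    }
}
```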
#vulnerable code
@Override
public void setPixels(Object pixels)
{
int[] position = Index.create(0, 0, getPlanePosition());
setImagePlanePixels(this.imageData, position, pixels);
}
#location 4
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public void setPixels(Object pixels)
{
setImagePlanePixels(this.imageData, getPlanePosition(), pixels);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public AttributeBuilder nodeAttributes(String groupId, String artifactId, String version, String scopes, String effectiveScope) {
Map<String, ? extends AbstractNode> scopedNodes = getScopedNodes();
AbstractNode node = scopedNodes.containsKey(effectiveScope) ? scopedNodes.get(effectiveScope) : getDefaultNode();
return node.createAttributes(groupId, artifactId, version, scopes, node != this.defaultNode && node != EMPTY_NODE);
}
#location 4
#vulnerability type NULL_DEREFERENCE | #fixed code
public AttributeBuilder nodeAttributes(String groupId, String artifactId, String version, String scopes, String effectiveScope) {
AbstractNode node = this.scopeStyles.containsKey(effectiveScope) ? this.scopeStyles.get(effectiveScope) : this.defaultNode;
return node.createAttributes(groupId, artifactId, version, scopes, node != this.defaultNode);
} | Below is the vulnerable code, please generate the patch based on the following information. |
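The containsKey/get pair in the fixed version can be collapsed into a single lookup with `Map.getOrDefault` (Java 8+), which has the same semantics. A minimal sketch with a generic placeholder, since `AbstractNode` is not defined in this record:

```java
import java.util.Map;

class ScopeLookupSketch {
    // Equivalent to: map.containsKey(key) ? map.get(key) : fallback
    static <N> N nodeFor(Map<String, N> scopeStyles, String effectiveScope, N defaultNode) {
        return scopeStyles.getOrDefault(effectiveScope, defaultNode);
    }
}
```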
#vulnerable code
private String downloadNzb(SearchResultEntity result) throws IOException {
Request request = new Request.Builder().url(result.getLink()).build();
Indexer indexerByName = searchModuleProvider.getIndexerByName(result.getIndexer().getName());
Integer timeout = indexerByName.getConfig().getTimeout().orElse(configProvider.getBaseConfig().getSearching().getTimeout());
try (Response response = clientHttpRequestFactory.getOkHttpClientBuilder(request.url().uri()).readTimeout(timeout, TimeUnit.SECONDS).connectTimeout(timeout, TimeUnit.SECONDS).build().newCall(request).execute()) {
return response.body().string();
}
}
#location 6
#vulnerability type NULL_DEREFERENCE | #fixed code
private String downloadNzb(SearchResultEntity result) throws IOException {
Request request = new Request.Builder().url(result.getLink()).build();
Indexer indexerByName = searchModuleProvider.getIndexerByName(result.getIndexer().getName());
Integer timeout = indexerByName.getConfig().getTimeout().orElse(configProvider.getBaseConfig().getSearching().getTimeout());
try (Response response = clientHttpRequestFactory.getOkHttpClientBuilder(request.url().uri()).readTimeout(timeout, TimeUnit.SECONDS).connectTimeout(timeout, TimeUnit.SECONDS).build().newCall(request).execute()) {
if (!response.isSuccessful()) {
throw new IOException("Unsuccessful NZB download from URL " + result.getLink() + ". Message: " + response.message());
}
ResponseBody body = response.body();
if (body == null) {
throw new IOException("NZB downloaded from " + result.getLink() + " is empty");
}
String content = body.string(); //a response body may only be consumed once
if (Strings.isNullOrEmpty(content)) {
throw new IOException("NZB downloaded from " + result.getLink() + " is empty");
}
return content;
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
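OkHttp response bodies are one-shot streams: `body()` may return null, and `string()` consumes the body, so it can only be called once. A minimal self-contained sketch of the read-once pattern, assuming OkHttp 3.x and a placeholder URL:

```java
import java.io.IOException;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.ResponseBody;

class BodyReadSketch {
    static String fetch(OkHttpClient client, String url) throws IOException {
        Request request = new Request.Builder().url(url).build();
        try (Response response = client.newCall(request).execute()) {
            ResponseBody body = response.body();
            if (body == null) {
                throw new IOException("Empty response from " + url);
            }
            String content = body.string(); // consumes the stream; a second call would fail
            if (content.isEmpty()) {
                throw new IOException("Empty response from " + url);
            }
            return content;
        }
    }
}
```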
#vulnerable code
@PreDestroy
public void destroy() {
String osName = System.getProperty("os.name");
boolean isOsWindows = osName.toLowerCase().contains("windows");
if (isOsWindows) {
logger.debug("Initiating removal of windows tray icon (if it exists)");
try {
WindowsTrayIcon.remove();
} catch (Throwable e) {
//An exception might be thrown while shutting down, ignore this
}
}
logger.info("Shutting down");
}
#location 4
#vulnerability type NULL_DEREFERENCE | #fixed code
@PreDestroy
public void destroy() {
boolean isOsWindows = isOsWindows();
if (isOsWindows) {
logger.debug("Initiating removal of windows tray icon (if it exists)");
try {
WindowsTrayIcon.remove();
} catch (Throwable e) {
//An exception might be thrown while shutting down, ignore this
}
}
logger.info("Shutting down");
} | Below is the vulnerable code, please generate the patch based on the following information. |
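Both this record and the matching @PostConstruct one below delegate to an isOsWindows() helper that is not shown. A plausible sketch follows; the null guard matters because System.getProperty can legally return null when the property has been cleared, which is exactly the dereference the original toLowerCase() call risked:

```java
class OsCheckSketch {
    // Hypothetical stand-in for the isOsWindows() helper the fixes call.
    static boolean isOsWindows() {
        String osName = System.getProperty("os.name");
        return osName != null && osName.toLowerCase().contains("windows");
    }
}
```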
#vulnerable code
public void testExecute() throws Exception {
MockWebServer server = new MockWebServer();
Release draftReleaseResponse = new Release();
draftReleaseResponse.setUploadUrl(server.url("/repos/theotherp/nzbhydra2/releases/1/assets").toString());
draftReleaseResponse.setUrl(server.url("/repos/theotherp/nzbhydra2/releases/1").toString());
draftReleaseResponse.setDraft(true);
ArrayList<Asset> assets = new ArrayList<>();
assets.add(new Asset());
assets.add(new Asset());
draftReleaseResponse.setAssets(assets);
Release effectiveReleaseResponse = new Release();
effectiveReleaseResponse.setDraft(false);
MockResponse releaseMockResponse = new MockResponse()
.setResponseCode(200)
.setBody(objectMapper.writeValueAsString(draftReleaseResponse));
server.enqueue(releaseMockResponse);
server.enqueue(new MockResponse().setResponseCode(200)); //Windows asset upload
server.enqueue(new MockResponse().setResponseCode(200)); //Linux asset upload
server.enqueue(new MockResponse().setResponseCode(200).setBody(objectMapper.writeValueAsString(effectiveReleaseResponse))); //Setting the release effective
HttpUrl url = server.url("/repos/theotherp/nzbhydra2/releases");
//Here the magic happens
executePlugin(url);
//Creating the release
verifyDraftReleaseIsCreated(server);
//Uploading the assets
RecordedRequest windowsAssetUploadRequest = server.takeRequest(2, TimeUnit.SECONDS);
assertTrue(windowsAssetUploadRequest.getPath(), windowsAssetUploadRequest.getPath().endsWith("releases/1/assets?name=windowsAsset.txt"));
RecordedRequest linuxAssetUploadRequest = server.takeRequest(2, TimeUnit.SECONDS);
assertTrue(linuxAssetUploadRequest.getPath(), linuxAssetUploadRequest.getPath().endsWith("releases/1/assets?name=linuxAsset.txt"));
//Setting it effective
RecordedRequest setEffectiveRequest = server.takeRequest(2, TimeUnit.SECONDS);
assertTrue(setEffectiveRequest.getPath(), setEffectiveRequest.getPath().endsWith("releases/1"));
String body = new String(setEffectiveRequest.getBody().readByteArray());
Release bodyJson = objectMapper.readValue(body, Release.class);
assertFalse(bodyJson.isDraft());
}
#location 33
#vulnerability type NULL_DEREFERENCE | #fixed code
public void testExecute() throws Exception {
MockWebServer server = getMockWebServer();
HttpUrl url = server.url("/repos/theotherp/nzbhydra2/releases");
//Here the magic happens
File pom = getTestFile("/src/test/resources/org/nzbhydra/github/mavenreleaseplugin/pomWithToken.xml");
assertTrue(pom.exists());
ReleaseMojo releaseMojo = new ReleaseMojo();
releaseMojo = (ReleaseMojo) configureMojo(releaseMojo, extractPluginConfiguration("github-release-plugin", pom
));
releaseMojo.githubReleasesUrl = url.toString();
releaseMojo.windowsAsset = getTestFile("src/test/resources/org/nzbhydra/github/mavenreleaseplugin/windowsAsset.txt");
releaseMojo.linuxAsset = getTestFile("src/test/resources/org/nzbhydra/github/mavenreleaseplugin/linuxAsset.txt");
releaseMojo.execute();
verifyExecution(server);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@PostConstruct
private void addTrayIconIfApplicable() {
String osName = System.getProperty("os.name");
boolean isOsWindows = osName.toLowerCase().contains("windows");
if (isOsWindows) {
logger.info("Adding windows system tray icon");
try {
new WindowsTrayIcon();
} catch (HeadlessException e) {
logger.error("Can't add a windows tray icon because running headless");
}
}
}
#location 4
#vulnerability type NULL_DEREFERENCE | #fixed code
@PostConstruct
private void addTrayIconIfApplicable() {
boolean isOsWindows = isOsWindows();
if (isOsWindows) {
logger.info("Adding windows system tray icon");
try {
new WindowsTrayIcon();
} catch (HeadlessException e) {
logger.error("Can't add a windows tray icon because running headless");
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public BaseConfig originalConfig() throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(BaseConfig.class.getResource("/config/baseConfig.yml").openStream()));
String applicationYmlContent = reader.lines().collect(Collectors.joining("\n"));
return objectMapper.readValue(applicationYmlContent, BaseConfig.class);
}
#location 3
#vulnerability type RESOURCE_LEAK | #fixed code
public BaseConfig originalConfig() throws IOException {
String applicationYmlContent;
try (BufferedReader reader = new BufferedReader(new InputStreamReader(BaseConfig.class.getResource("/config/baseConfig.yml").openStream()))) {
applicationYmlContent = reader.lines().collect(Collectors.joining("\n"));
}
return objectMapper.readValue(applicationYmlContent, BaseConfig.class);
} | Below is the vulnerable code, please generate the patch based on the following information. |
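On Java 9 and later the same read can skip the Reader wrapper entirely. This alternative sketch (assuming the record's BaseConfig class is on the classpath) also guards against getResource returning null, a latent dereference both versions share when the resource is missing:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

class ResourceReadSketch {
    static String readBaseConfig() throws IOException {
        try (InputStream in = BaseConfig.class.getResourceAsStream("/config/baseConfig.yml")) {
            if (in == null) {
                throw new IOException("Resource not found: /config/baseConfig.yml");
            }
            return new String(in.readAllBytes(), StandardCharsets.UTF_8); // Java 9+
        }
    }
}
```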
#vulnerable code
@Test
public void shouldSetEnabledOnDownloadEvent() {
testee.queueCheckEnabled = false;
testee.lastDownload = null;
testee.onNzbDownloadEvent(new FileDownloadEvent(null, null));
assertThat(testee.queueCheckEnabled).isTrue();
assertThat(testee.lastDownload).isNotNull();
}
#location 6
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void shouldSetEnabledOnDownloadEvent() {
testee.queueCheckEnabled = false;
testee.lastDownload = null;
testee.onNzbDownloadEvent(new FileDownloadEvent(new FileDownloadEntity(), new SearchResultEntity()));
assertThat(testee.queueCheckEnabled).isTrue();
assertThat(testee.lastDownload).isNotNull();
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void shouldMigrate() {
HashMap<String, Object> input = new HashMap<>(ImmutableMap.of("main", new HashMap<>(ImmutableMap.of("configVersion", 1))));
HashMap<String, Object> afterMigration = new HashMap<>(ImmutableMap.of("main", new HashMap<>(ImmutableMap.of("configVersion", 2))));
when(configMigrationStepMock.forVersion()).thenReturn(1);
when(configMigrationStepMock.migrate(any())).thenReturn(afterMigration);
testee.steps = Arrays.asList(configMigrationStepMock);
testee.expectedConfigVersion = 2;
Map<String, Object> result = testee.migrate(input);
verify(configMigrationStepMock).migrate(input);
assertThat((int) ((Map<String, Object>) result.get("main")).get("configVersion")).isEqualTo(2);
}
#location 14
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void shouldMigrate() {
BaseConfig input = new BaseConfig();
input.getMain().setConfigVersion(1);
BaseConfig afterMigration = new BaseConfig();
afterMigration.getMain().setConfigVersion(2);
when(configMigrationStepMock.forVersion()).thenReturn(1);
when(configMigrationStepMock.migrate(any())).thenReturn(afterMigration);
testee.steps = Arrays.asList(configMigrationStepMock);
testee.expectedConfigVersion = 2;
BaseConfig result = testee.migrate(input);
verify(configMigrationStepMock).migrate(input);
assertThat(result.getMain().getConfigVersion()).isEqualTo(2);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@RequestMapping(value = {"/api", "/dognzb/api"}, produces = MediaType.TEXT_XML_VALUE)
public ResponseEntity<? extends Object> api(NewznabParameters params, HttpServletRequest request) throws Exception {
if (params.getT() == ActionAttribute.CAPS) {
//throw new RuntimeException("test");
return new ResponseEntity<Object>(NewznabMockBuilder.getCaps(), HttpStatus.OK);
}
if (params.getT() == ActionAttribute.GETNFO) {
NewznabXmlRoot rssRoot = new NewznabXmlRoot();
rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 1));
NewznabXmlItem item = new NewznabXmlItem();
item.setDescription("NFO for NZB with ID " + params.getId());
rssRoot.getRssChannel().getItems().add(item);
return ResponseEntity.ok(rssRoot);
}
String itemTitleBase = params.getApikey();
if (params.getQ() != null && params.getQ().contains("groups")) {
itemTitleBase = "";
}
if (params.getRid() != null && params.getQ() == null) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList());
logger.info("Returning no results for rid based search without query");
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
boolean doGenerateDuplicates = "duplicates".equals(params.getQ());
if (params.getQ() != null && params.getQ().equals("offsettest")) {
NewznabXmlRoot rssRoot = new NewznabXmlRoot();
rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 0));
if (params.getOffset() >= 40) {
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
int start = params.getOffset() == 0 ? 0 : params.getOffset();
int end = Math.min(start + 10 - 1, 40);
rssRoot = NewznabMockBuilder.generateResponse(start, end, "offsetTest", doGenerateDuplicates, Collections.emptyList());
rssRoot.getRssChannel().getNewznabResponse().setTotal(40);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("offsettest2")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("offsettest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("invalidxml")) {
String invalidXml = Resources.toString(Resources.getResource(MockNewznab.class, "invalidXml.xml"), Charsets.UTF_8);
return new ResponseEntity<Object>(invalidXml, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("slash")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("/").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("actualduplicates")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(10).titleBase("actualduplicates").offset(params.getOffset()).titleWords(Collections.emptyList()).total(10).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle(rssRoot.getRssChannel().getItems().get(0).getTitle()));
rssRoot.getRssChannel().getItems().forEach(x -> x.setLink(rssRoot.getRssChannel().getItems().get(0).getLink()));
rssRoot.getRssChannel().getItems().forEach(x -> x.setRssGuid(rssRoot.getRssChannel().getItems().get(0).getRssGuid()));
rssRoot.getRssChannel().getItems().forEach(x -> x.setNewznabAttributes(rssRoot.getRssChannel().getItems().get(0).getNewznabAttributes()));
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("oneresult")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(1).titleBase("oneresult").offset(params.getOffset()).titleWords(Collections.emptyList()).total(1).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("uitest")) {
if (params.getApikey().equals("1")) {
NewznabXmlItem result1 = RssItemBuilder.builder("indexer1-result1").pubDate(Instant.now().minus(1, ChronoUnit.DAYS)).hasNfo(false).grabs(1).size(mbToBytes(1)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5000")))).category("TV").build();
NewznabXmlItem result2 = RssItemBuilder.builder("indexer1-result2").pubDate(Instant.now().minus(2, ChronoUnit.DAYS)).hasNfo(true).grabs(2).size(mbToBytes(2)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5040")))).category("TV SD").build();
NewznabXmlItem result3 = RssItemBuilder.builder("indexer1-result3").pubDate(Instant.now().minus(3, ChronoUnit.DAYS)).comments("comments").grabs(3).size(mbToBytes(3)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5030")))).category("TV HD").build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result1, result2, result3), 0, 3);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
NewznabXmlItem result4 = RssItemBuilder.builder("indexer2-result1").pubDate(Instant.now().minus(4, ChronoUnit.DAYS)).grabs(4).size(mbToBytes(4)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2000")))).category("Movies").build();
NewznabXmlItem result5 = RssItemBuilder.builder("indexer2-result2").pubDate(Instant.now().minus(5, ChronoUnit.DAYS)).grabs(5).size(mbToBytes(5)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2040")))).category("Movies HD").build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result4, result5), 0, 2);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("dognzbtotaltest") && System.getProperty("nomockdognzb") == null) {
if (params.getOffset() >= 300) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList());
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("dognzbtotaltest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getNewznabResponse().setTotal(100);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ((params.getQ() != null && params.getQ().equals("noresults")) || (params.getTvdbid() != null && params.getTvdbid().equals("329089"))) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList());
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("sleep")) {
Thread.sleep(new Random().nextInt(5000));
}
if (params.getQ() != null && params.getQ().equals("sleep10")) {
Thread.sleep(10000);
}
if (params.getQ() != null && params.getQ().equals("sleepforever")) {
Thread.sleep(10000 * 10000);
}
if (params.getQ() != null && params.getQ().contains("movies")) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 100, itemTitleBase, false, Arrays.asList("cam", "ts", "blu-ray 2160p", "web-dl 1080p", "bluray 1080p", "3d bluray"));
rssRoot.getRssChannel().getNewznabResponse().setTotal(100);
rssRoot.getRssChannel().getItems().forEach(x -> x.getNewznabAttributes().add(new NewznabAttribute("coverurl", "https://i.omgwtfnzbs.me/tvdb/697fdaeb0fb1ac87d4d6af684b20593a/697fdaeb0fb1ac87d4d6af684b20593a.jpg")));
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ("oneduplicate".equals(params.getQ())) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder()
.numberOfResults(1)
.titleBase(itemTitleBase)
.generateOneDuplicate(true)
.build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getNewznabResponse().setTotal(1);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ("titlegroup".equals(params.getQ())) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder()
.numberOfResults(1)
.titleBase(itemTitleBase)
.generateOneDuplicate(false)
.build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getNewznabResponse().setTotal(1);
rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle("titlegroup"));
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getTmdbid() != null) {
if (itemTitleBase.equals("tmdberror") || "capscheckerror".equals(params.getApikey())) {
NewznabXmlError rssError = new NewznabXmlError("123", "description");
return new ResponseEntity<Object>(rssError, HttpStatus.OK);
}
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList());
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ("error".equals(params.getQ())) {
NewznabXmlError rssError = new NewznabXmlError("123", "description");
return new ResponseEntity<Object>(rssError, HttpStatus.OK);
}
if (params.getImdbid() != null) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList());
rssRoot.getRssChannel().getNewznabResponse().setTotal(10);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
int endIndex;
int key = 0;
try {
key = Integer.valueOf(itemTitleBase);
} catch (NumberFormatException e) {
endIndex = 0;
}
if (apikeyToResultCount.containsKey(key)) {
endIndex = apikeyToResultCount.get(key);
} else {
endIndex = 0;
}
if (responsesPerApikey.containsKey(endIndex)) {
return new ResponseEntity<Object>(responsesPerApikey.get(endIndex), HttpStatus.OK);
} else {
if (params.getOffset() != null && params.getLimit() != null) {
endIndex = Math.min(params.getOffset() + params.getLimit(), endIndex);
}
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, endIndex, itemTitleBase, doGenerateDuplicates, Collections.emptyList());
rssRoot.getRssChannel().getNewznabResponse().setTotal(endIndex);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
}
#location 180
#vulnerability type NULL_DEREFERENCE | #fixed code
@RequestMapping(value = {"/api", "/dognzb/api"}, produces = MediaType.TEXT_XML_VALUE)
public ResponseEntity<? extends Object> api(NewznabParameters params, HttpServletRequest request) throws Exception {
if (params.getT() == ActionAttribute.CAPS) {
//throw new RuntimeException("test");
return new ResponseEntity<Object>(NewznabMockBuilder.getCaps(), HttpStatus.OK);
}
if (params.getT() == ActionAttribute.GETNFO) {
NewznabXmlRoot rssRoot = new NewznabXmlRoot();
rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 1));
NewznabXmlItem item = new NewznabXmlItem();
item.setDescription("NFO for NZB with ID " + params.getId());
rssRoot.getRssChannel().getItems().add(item);
return ResponseEntity.ok(rssRoot);
}
String itemTitleBase = params.getApikey();
if (params.getQ() != null && params.getQ().contains("groups")) {
itemTitleBase = "";
}
if (params.getRid() != null && params.getQ() == null) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList());
logger.info("Returning no results for rid based search without query");
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
boolean doGenerateDuplicates = "duplicates".equals(params.getQ());
if (params.getQ() != null && params.getQ().equals("offsettest")) {
NewznabXmlRoot rssRoot = new NewznabXmlRoot();
rssRoot.getRssChannel().setNewznabResponse(new NewznabXmlResponse(0, 0));
if (params.getOffset() >= 40) {
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
int start = params.getOffset() == 0 ? 0 : params.getOffset();
int end = Math.min(start + 10 - 1, 40);
rssRoot = NewznabMockBuilder.generateResponse(start, end, "offsetTest", doGenerateDuplicates, Collections.emptyList());
rssRoot.getRssChannel().getNewznabResponse().setTotal(40);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("offsettest2")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("offsettest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("invalidxml")) {
String invalidXml = Resources.toString(Resources.getResource(MockNewznab.class, "invalidXml.xml"), Charsets.UTF_8);
return new ResponseEntity<Object>(invalidXml, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("slash")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("/").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("actualduplicates")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(10).titleBase("actualduplicates").offset(params.getOffset()).titleWords(Collections.emptyList()).total(10).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle(rssRoot.getRssChannel().getItems().get(0).getTitle()));
rssRoot.getRssChannel().getItems().forEach(x -> x.setLink(rssRoot.getRssChannel().getItems().get(0).getLink()));
rssRoot.getRssChannel().getItems().forEach(x -> x.setRssGuid(rssRoot.getRssChannel().getItems().get(0).getRssGuid()));
rssRoot.getRssChannel().getItems().forEach(x -> x.setNewznabAttributes(rssRoot.getRssChannel().getItems().get(0).getNewznabAttributes()));
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("oneresult")) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(1).titleBase("oneresult").offset(params.getOffset()).titleWords(Collections.emptyList()).total(1).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("uitest")) {
if (params.getApikey().equals("1")) {
NewznabXmlItem result1 = RssItemBuilder.builder("indexer1-result1").pubDate(Instant.now().minus(1, ChronoUnit.DAYS)).hasNfo(false).grabs(1).size(mbToBytes(1)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5000")))).category("TV").build();
NewznabXmlItem result2 = RssItemBuilder.builder("indexer1-result2").pubDate(Instant.now().minus(2, ChronoUnit.DAYS)).hasNfo(true).grabs(2).size(mbToBytes(2)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5040")))).category("TV SD").build();
NewznabXmlItem result3 = RssItemBuilder.builder("indexer1-result3").pubDate(Instant.now().minus(3, ChronoUnit.DAYS)).comments("comments").grabs(3).size(mbToBytes(3)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "5030")))).category("TV HD").build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result1, result2, result3), 0, 3);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
NewznabXmlItem result4 = RssItemBuilder.builder("indexer2-result1").pubDate(Instant.now().minus(4, ChronoUnit.DAYS)).grabs(4).size(mbToBytes(4)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2000")))).category("Movies").build();
NewznabXmlItem result5 = RssItemBuilder.builder("indexer2-result2").pubDate(Instant.now().minus(5, ChronoUnit.DAYS)).grabs(5).size(mbToBytes(5)).newznabAttributes(new ArrayList<>(Arrays.asList(new NewznabAttribute("category", "2040")))).category("Movies HD").build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.getRssRoot(Arrays.asList(result4, result5), 0, 2);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("dognzbtotaltest") && System.getProperty("nomockdognzb") == null) {
if (params.getOffset() >= 300) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList());
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
NewznabMockRequest mockRequest = NewznabMockRequest.builder().numberOfResults(100).titleBase("dognzbtotaltest").offset(params.getOffset()).titleWords(Collections.emptyList()).total(300).build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getNewznabResponse().setTotal(100);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ((params.getQ() != null && params.getQ().equals("noresults")) || (params.getTvdbid() != null && params.getTvdbid().equals("329089"))) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, -1, itemTitleBase, false, Collections.emptyList());
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getQ() != null && params.getQ().equals("sleep")) {
Thread.sleep(new Random().nextInt(5000));
}
if (params.getQ() != null && params.getQ().equals("sleep10")) {
Thread.sleep(10000);
}
if (params.getQ() != null && params.getQ().equals("sleepforever")) {
Thread.sleep(10000 * 10000);
}
if (params.getQ() != null && params.getQ().contains("movies")) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 100, itemTitleBase, false, Arrays.asList("cam", "ts", "blu-ray 2160p", "web-dl 1080p", "bluray 1080p", "3d bluray"));
rssRoot.getRssChannel().getNewznabResponse().setTotal(100);
rssRoot.getRssChannel().getItems().forEach(x -> x.getNewznabAttributes().add(new NewznabAttribute("coverurl", "https://i.omgwtfnzbs.me/tvdb/697fdaeb0fb1ac87d4d6af684b20593a/697fdaeb0fb1ac87d4d6af684b20593a.jpg")));
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ("oneduplicate".equals(params.getQ())) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder()
.numberOfResults(1)
.titleBase(itemTitleBase)
.generateOneDuplicate(true)
.build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getNewznabResponse().setTotal(1);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ("titlegroup".equals(params.getQ())) {
NewznabMockRequest mockRequest = NewznabMockRequest.builder()
.numberOfResults(1)
.titleBase(itemTitleBase)
.generateOneDuplicate(false)
.build();
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(mockRequest);
rssRoot.getRssChannel().getNewznabResponse().setTotal(1);
rssRoot.getRssChannel().getItems().forEach(x -> x.setTitle("titlegroup"));
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if (params.getTmdbid() != null) {
if (itemTitleBase.equals("tmdberror") || "capscheckerror".equals(params.getApikey())) {
NewznabXmlError rssError = new NewznabXmlError("123", "description");
return new ResponseEntity<Object>(rssError, HttpStatus.OK);
}
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList());
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
if ("error".equals(params.getQ())) {
NewznabXmlError rssError = new NewznabXmlError("123", "description");
return new ResponseEntity<Object>(rssError, HttpStatus.OK);
}
if (params.getImdbid() != null) {
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, 10, "avengers", doGenerateDuplicates, Collections.emptyList());
rssRoot.getRssChannel().getNewznabResponse().setTotal(10);
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
int endIndex;
int key = 0;
try {
key = Integer.valueOf(itemTitleBase);
} catch (NumberFormatException e) {
endIndex = 0;
}
if (apikeyToResultCount.containsKey(key)) {
endIndex = apikeyToResultCount.get(key);
} else {
endIndex = 0;
}
if (responsesPerApikey.containsKey(endIndex)) {
return new ResponseEntity<Object>(responsesPerApikey.get(endIndex), HttpStatus.OK);
} else {
if (params.getOffset() != null && params.getLimit() != null) {
endIndex = Math.min(params.getOffset() + params.getLimit(), endIndex);
}
NewznabXmlRoot rssRoot = NewznabMockBuilder.generateResponse(0, endIndex, itemTitleBase, doGenerateDuplicates, Collections.emptyList());
rssRoot.getRssChannel().getNewznabResponse().setTotal(endIndex);
if ("randomage".equalsIgnoreCase(params.getQ())) {
for (NewznabXmlItem item : rssRoot.getRssChannel().getItems()) {
item.setPubDate(item.getPubDate().minus(random.nextInt(300) * 24, ChronoUnit.HOURS));
}
}
return new ResponseEntity<Object>(rssRoot, HttpStatus.OK);
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
private void migrateIndexerApiAccesses(Map<Integer, IndexerEntity> oldIdToIndexersMap) throws SQLException {
Statement statement = connection.createStatement();
int countIndexerApiAccesses = getCount(statement, "INDEXERAPIACCESS");
logger.info("Migrating {} indexer API accesses from old database", countIndexerApiAccesses);
eventPublisher.publishEvent(new MigrationMessageEvent("Migrating " + countIndexerApiAccesses + " indexer API access entries"));
ResultSet oldIndexerApiAccesses = statement.executeQuery("SELECT * FROM INDEXERAPIACCESS");
int countMigrated = 1;
IndexerApiAccessEntity entity;
ProgressLogger progressLogger = new ProgressLogger(logger, 5, TimeUnit.SECONDS);
progressLogger.expectedUpdates = countIndexerApiAccesses;
progressLogger.start();
while (oldIndexerApiAccesses.next()) {
entity = new IndexerApiAccessEntity();
entity.setIndexer(oldIdToIndexersMap.get(oldIndexerApiAccesses.getInt("indexer_id")));
entity.setTime(oldIndexerApiAccesses.getTimestamp("time").toInstant());
Object responseTime = oldIndexerApiAccesses.getObject("response_time");
entity.setResponseTime(responseTime != null ? ((Integer) responseTime).longValue() : null);
String error = oldIndexerApiAccesses.getString("error");
entity.setError(error != null ? error.substring(0, Math.min(4000, error.length())) : null);
entity.setAccessType(null);
entity.setResult(oldIndexerApiAccesses.getBoolean("response_successful") ? IndexerAccessResult.SUCCESSFUL : IndexerAccessResult.CONNECTION_ERROR); //Close enough
entity.setAccessType(IndexerApiAccessType.valueOf(oldIndexerApiAccesses.getString("type").toUpperCase()));
entityManager.persist(entity);
progressLogger.lightUpdate();
if (countMigrated++ % 50 == 0) {
entityManager.flush();
entityManager.clear();
}
}
progressLogger.stop();
statement.close();
entityManager.flush();
entityManager.clear();
eventPublisher.publishEvent(new MigrationMessageEvent("Successfully migrated indexer API accesses from old database"));
logger.info("Successfully migrated indexer API accesses from old database");
}
#location 23
#vulnerability type NULL_DEREFERENCE | #fixed code
private void migrateIndexerApiAccesses(Map<Integer, IndexerEntity> oldIdToIndexersMap) throws SQLException {
Statement statement = connection.createStatement();
int countIndexerApiAccesses = getCount(statement, "INDEXERAPIACCESS");
logger.info("Migrating {} indexer API accesses from old database", countIndexerApiAccesses);
eventPublisher.publishEvent(new MigrationMessageEvent("Migrating " + countIndexerApiAccesses + " indexer API access entries"));
ResultSet oldIndexerApiAccesses = statement.executeQuery("SELECT * FROM INDEXERAPIACCESS");
int countMigrated = 1;
IndexerApiAccessEntity entity;
ProgressLogger progressLogger = new ProgressLogger(logger, 5, TimeUnit.SECONDS);
progressLogger.expectedUpdates = countIndexerApiAccesses;
progressLogger.start();
while (oldIndexerApiAccesses.next()) {
entity = new IndexerApiAccessEntity();
entity.setIndexer(oldIdToIndexersMap.get(oldIndexerApiAccesses.getInt("indexer_id")));
entity.setTime(timestampToInstant(oldIndexerApiAccesses.getString("time")));
Object responseTime = oldIndexerApiAccesses.getObject("response_time");
entity.setResponseTime(responseTime != null ? ((Integer) responseTime).longValue() : null);
String error = oldIndexerApiAccesses.getString("error");
entity.setError(error != null ? error.substring(0, Math.min(4000, error.length())) : null);
entity.setAccessType(null);
entity.setResult(oldIndexerApiAccesses.getBoolean("response_successful") ? IndexerAccessResult.SUCCESSFUL : IndexerAccessResult.CONNECTION_ERROR); //Close enough
entity.setAccessType(IndexerApiAccessType.valueOf(oldIndexerApiAccesses.getString("type").toUpperCase()));
entityManager.persist(entity);
progressLogger.lightUpdate();
if (countMigrated++ % 50 == 0) {
entityManager.flush();
entityManager.clear();
}
}
progressLogger.stop();
statement.close();
entityManager.flush();
entityManager.clear();
eventPublisher.publishEvent(new MigrationMessageEvent("Successfully migrated indexer API accesses from old database"));
logger.info("Successfully migrated indexer API accesses from old database");
} | Below is the vulnerable code, please generate the patch based on the following information. |
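The fix reads the column as text and parses it through a timestampToInstant helper whose body is not part of the record; the sketch below is therefore a guess, including the assumption that the legacy column uses the JDBC timestamp escape format (yyyy-mm-dd hh:mm:ss[.fffffffff]):

```java
import java.sql.Timestamp;
import java.time.Instant;

class TimestampSketch {
    // Hypothetical stand-in for the helper the migration calls.
    static Instant timestampToInstant(String raw) {
        if (raw == null) {
            return null; // the entity's time field is then left unset
        }
        return Timestamp.valueOf(raw).toInstant();
    }
}
```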
#vulnerable code
@Override
public void buildVariablesFor(AbstractBuild build, Map<String, String> variables) {
final Executor executor = build.getExecutor();
if (executor != null && executor.getOwner() instanceof DockerComputer) {
final DockerComputer dockerComputer = (DockerComputer) executor.getOwner();
variables.put("DOCKER_CONTAINER_ID", dockerComputer.getContainerId());
variables.put("JENKINS_CLOUD_ID", dockerComputer.getCloudId());
final DockerCloud cloud = dockerComputer.getCloud();
if (cloud.isExposeDockerHost()) {
//replace http:// and https:// from docker-java to tcp://
String dockerHost = cloud.getDockerHost().getUri();
if (dockerHost.startsWith("unix:")) {
dockerHost = "tcp:" + dockerHost.substring(5);
}
variables.put("DOCKER_HOST", dockerHost);
}
}
}
#location 10
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public void buildVariablesFor(AbstractBuild build, Map<String, String> variables) {
final Executor executor = build.getExecutor();
if (executor != null && executor.getOwner() instanceof DockerComputer) {
final DockerComputer dockerComputer = (DockerComputer) executor.getOwner();
variables.put("DOCKER_CONTAINER_ID", dockerComputer.getContainerId());
variables.put("JENKINS_CLOUD_ID", dockerComputer.getCloudId());
final DockerCloud cloud = dockerComputer.getCloud();
if (cloud != null && cloud.isExposeDockerHost()) {
//replace http:// and https:// from docker-java to tcp://
String dockerHost = cloud.getDockerHost().getUri();
if (dockerHost.startsWith("unix:")) {
dockerHost = "tcp:" + dockerHost.substring(5);
}
variables.put("DOCKER_HOST", dockerHost);
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testAddingVmargsInBeforeContainerCreated() throws IOException, InterruptedException {
String vmargs = "-Dhttp.proxyPort=8080";
DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, vmargs));
CreateContainerCmd createCmd = new CreateContainerCmdImpl(createContainerCmd -> null, "hello-world");
createCmd.withName("container-name");
connector.beforeContainerCreated(null, null, createCmd);
String[] env = createCmd.getEnv();
assertNotNull("Environment variable is expected", env);
assertEquals("Environment variable is expected", 1, env.length);
assertTrue("Original environment variable is not found", env[0].endsWith(vmargs));
}
#location 11
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void testAddingVmargsInBeforeContainerCreated() throws IOException, InterruptedException {
// Given
final String vmargs = "-Dhttp.proxyPort=8080";
final DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, vmargs));
final CreateContainerCmd createCmd = mock(CreateContainerCmd.class);
final Map<String, String> containerLabels = new TreeMap<>();
when(createCmd.getLabels()).thenReturn(containerLabels);
DockerTemplate.setNodeNameInContainerConfig(createCmd, "nodeName");
// When
connector.beforeContainerCreated(null, null, createCmd);
// Then
verify(createCmd, times(1)).withEnv(new String[]{
"JAVA_OPT=" + vmargs
});
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public synchronized Collection<NodeProvisioner.PlannedNode> provision(Label label, int excessWorkload) {
try {
LOGGER.log(Level.INFO, "Asked to provision {0} slave(s) for: {1}", new Object[]{excessWorkload,label});
List<NodeProvisioner.PlannedNode> r = new ArrayList<NodeProvisioner.PlannedNode>();
final DockerTemplate t = getTemplate(label);
LOGGER.log(Level.INFO, "Will provision \"{0}\" for: {1}", new Object[]{t.image,label});
while (excessWorkload>0) {
if (!addProvisionedSlave(t.image, t.instanceCap)) {
break;
}
r.add(new NodeProvisioner.PlannedNode(t.getDisplayName(),
Computer.threadPoolForRemoting.submit(new Callable<Node>() {
public Node call() throws Exception {
// TODO: record the output somewhere
DockerSlave slave = null;
try {
slave = t.provision(new StreamTaskListener(System.out));
final Jenkins jenkins = Jenkins.getInstance();
// TODO once the baseline is 1.592+ switch to Queue.withLock
synchronized (jenkins.getQueue()) {
jenkins.addNode(slave);
}
// Docker instances may have a long init script. If we declare
// the provisioning complete by returning without the connect
// operation, NodeProvisioner may decide that it still wants
// one more instance, because it sees that (1) all the slaves
// are offline (because it's still being launched) and
// (2) there's no capacity provisioned yet.
//
// deferring the completion of provisioning until the launch
// goes successful prevents this problem.
slave.toComputer().connect(false).get();
return slave;
}
catch(Exception ex) {
LOGGER.log(Level.SEVERE, "Error in provisioning; slave=" + slave + ", template=" + t);
ex.printStackTrace();
throw Throwables.propagate(ex);
}
finally {
decrementAmiSlaveProvision(t.image);
}
}
})
,t.getNumExecutors()));
excessWorkload -= t.getNumExecutors();
}
return r;
} catch (Exception e) {
LOGGER.log(Level.SEVERE,"Exception while provisioning for: " + label,e);
return Collections.emptyList();
}
}
#location 11
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public synchronized Collection<NodeProvisioner.PlannedNode> provision(Label label, int excessWorkload) {
try {
LOGGER.log(Level.INFO, "Asked to provision {0} slave(s) for: {1}", new Object[]{excessWorkload,label});
List<NodeProvisioner.PlannedNode> r = new ArrayList<NodeProvisioner.PlannedNode>();
final List<DockerTemplate> templates = getTemplates(label);
while (excessWorkload > 0 && !templates.isEmpty()) {
final DockerTemplate t = templates.get(0); // get first
LOGGER.log(Level.INFO, "Will provision \"{0}\" for: {1}", new Object[]{t.image,label});
try {
if (!addProvisionedSlave(t)) {
templates.remove(t);
continue;
}
} catch (Exception e) {
LOGGER.log(Level.WARNING, "Bad template {0}: {1}. Trying next template...",
new Object[]{t.image, e.getMessage()});
templates.remove(t);
continue;
}
r.add(new NodeProvisioner.PlannedNode(t.getDisplayName(),
Computer.threadPoolForRemoting.submit(new Callable<Node>() {
public Node call() throws Exception {
// TODO: record the output somewhere
DockerSlave slave = null;
try {
slave = t.provision(new StreamTaskListener(System.out));
final Jenkins jenkins = Jenkins.getInstance();
// TODO once the baseline is 1.592+ switch to Queue.withLock
synchronized (jenkins.getQueue()) {
jenkins.addNode(slave);
}
// Docker instances may have a long init script. If we declare
// the provisioning complete by returning without the connect
// operation, NodeProvisioner may decide that it still wants
// one more instance, because it sees that (1) all the slaves
// are offline (because it's still being launched) and
// (2) there's no capacity provisioned yet.
//
// deferring the completion of provisioning until the launch
// goes successful prevents this problem.
slave.toComputer().connect(false).get();
return slave;
}
catch(Exception ex) {
LOGGER.log(Level.SEVERE, "Error in provisioning; slave=" + slave + ", template=" + t);
ex.printStackTrace();
throw Throwables.propagate(ex);
}
finally {
decrementAmiSlaveProvision(t.image);
}
}
})
,t.getNumExecutors()));
excessWorkload -= t.getNumExecutors();
}
return r;
} catch (Exception e) {
LOGGER.log(Level.SEVERE,"Exception while provisioning for: " + label,e);
return Collections.emptyList();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testKeepingEvnInBeforeContainerCreated() throws IOException, InterruptedException {
String env1 = "ENV1=val1";
DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, "-Dhttp.proxyPort=8080"));
CreateContainerCmd createCmd = new CreateContainerCmdImpl(createContainerCmd -> null, "hello-world");
createCmd.withName("container-name").withEnv(env1);
connector.beforeContainerCreated(null, null, createCmd);
String[] env = createCmd.getEnv();
assertNotNull("Environment variables are expected", env);
assertEquals("Environment variables are expected", 2, env.length);
assertTrue("Original environment variable is not found", Arrays.asList(env).contains(env1));
}
#location 11
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void testKeepingEvnInBeforeContainerCreated() throws IOException, InterruptedException {
// Given
final String env1 = "ENV1=val1";
final String vmargs = "-Dhttp.proxyPort=8080";
final DockerComputerJNLPConnector connector = new DockerComputerJNLPConnector(new JNLPLauncher(null, vmargs));
final CreateContainerCmd createCmd = mock(CreateContainerCmd.class);
final Map<String, String> containerLabels = new TreeMap<>();
when(createCmd.getLabels()).thenReturn(containerLabels);
DockerTemplate.setNodeNameInContainerConfig(createCmd, "nodeName");
when(createCmd.getEnv()).thenReturn(new String[]{ env1 });
// When
connector.beforeContainerCreated(null, null, createCmd);
// Then
verify(createCmd, times(1)).withEnv(new String[]{
env1,
"JAVA_OPT=" + vmargs
});
} | Below is the vulnerable code, please generate the patch based on the following information. |
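#note
The leaked resource in the vulnerable test is the real CreateContainerCmdImpl, a Closeable command object the test constructs and never closes; the patch sidesteps it entirely by mocking CreateContainerCmd and verifying the expected withEnv interaction. Where a real command object is wanted outside a test, try-with-resources is the usual remedy; a sketch, assuming a docker-java DockerClient named client:

    try (CreateContainerCmd cmd = client.createContainerCmd("hello-world")) {
        cmd.withName("container-name").withEnv("ENV1=val1").exec();
    }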
#vulnerable code
private void pullImage(DockerTemplate dockerTemplate) throws IOException {
final String imageName = dockerTemplate.getDockerTemplateBase().getImage();
if (shouldPullImage(imageName, dockerTemplate.getPullStrategy())) {
LOGGER.info("Pulling image '{}'. This may take awhile...", imageName);
long startTime = System.currentTimeMillis();
PullImageCmd imgCmd = getClient().pullImageCmd(imageName);
final DockerRegistryEndpoint registry = dockerTemplate.getRegistry();
if (registry == null) {
DockerRegistryToken token = registry.getToken(null);
AuthConfig auth = new AuthConfig()
.withRegistryAddress(registry.getUrl())
.withEmail(token.getEmail())
.withRegistrytoken(token.getToken());
imgCmd.withAuthConfig(auth);
}
imgCmd.exec(new PullImageResultCallback()).awaitSuccess();
long pullTime = System.currentTimeMillis() - startTime;
LOGGER.info("Finished pulling image '{}', took {} ms", imageName, pullTime);
}
}
#location 13
#vulnerability type NULL_DEREFERENCE | #fixed code
private void pullImage(DockerTemplate dockerTemplate) throws IOException {
final String imageName = dockerTemplate.getDockerTemplateBase().getImage();
if (shouldPullImage(imageName, dockerTemplate.getPullStrategy())) {
LOGGER.info("Pulling image '{}'. This may take awhile...", imageName);
long startTime = System.currentTimeMillis();
PullImageCmd imgCmd = getClient().pullImageCmd(imageName);
final DockerRegistryEndpoint registry = dockerTemplate.getRegistry();
setRegistryAuthentication(imgCmd, registry);
imgCmd.exec(new PullImageResultCallback()).awaitSuccess();
long pullTime = System.currentTimeMillis() - startTime;
LOGGER.info("Finished pulling image '{}', took {} ms", imageName, pullTime);
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
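#note
The vulnerable guard is inverted: if (registry == null) immediately dereferences registry, so the branch can only ever throw. The patch routes the credentials through setRegistryAuthentication, whose body is not shown in this row; presumably it carries the corrected check, along these lines (hypothetical sketch reusing only the calls already present in the vulnerable version):

    private void setRegistryAuthentication(PullImageCmd imgCmd, DockerRegistryEndpoint registry) throws IOException {
        if (registry != null) { // corrected sense of the original null check
            DockerRegistryToken token = registry.getToken(null);
            imgCmd.withAuthConfig(new AuthConfig()
                    .withRegistryAddress(registry.getUrl())
                    .withEmail(token.getEmail())
                    .withRegistrytoken(token.getToken()));
        }
    }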
#vulnerable code
public RpcResponse send(RpcRequest request) throws Exception {
EventLoopGroup group = new NioEventLoopGroup();
try {
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(group).channel(NioSocketChannel.class).handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel channel) throws Exception {
channel.pipeline().addLast(new RpcEncoder(RpcRequest.class)) // encode the RPC request (so it can be sent)
.addLast(new RpcDecoder(RpcResponse.class)) // decode the RPC response (so it can be handled)
.addLast(RpcClient.this); // use RpcClient to send the RPC request
}
}).option(ChannelOption.SO_KEEPALIVE, true);
ChannelFuture future = bootstrap.connect(host, port).sync();
future.channel().writeAndFlush(request).sync();
synchronized (obj) {
obj.wait(); // no response received yet, so make the thread wait
}
if (response != null) {
future.channel().closeFuture().sync();
}
return response;
} finally {
group.shutdownGracefully();
}
}
#location 24
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
public RpcResponse send(RpcRequest request) throws Exception {
EventLoopGroup group = new NioEventLoopGroup();
try {
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(group)
.channel(NioSocketChannel.class)
.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel channel) throws Exception {
channel.pipeline().addLast(new RpcEncoder(RpcRequest.class)) // encode the RPC request (so it can be sent)
.addLast(new RpcDecoder(RpcResponse.class)) // decode the RPC response (so it can be handled)
.addLast(RpcClient.this); // use RpcClient to send the RPC request
}
})
.option(ChannelOption.SO_TIMEOUT, timeout)
.option(ChannelOption.SO_KEEPALIVE, true);
ChannelFuture future = bootstrap.connect(host, port).sync();
future.channel().writeAndFlush(request).sync();
synchronized (obj) {
obj.wait(); // no response received yet, so make the thread wait
}
if (response != null) {
future.channel().closeFuture().sync();
}
return response;
} finally {
group.shutdownGracefully();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
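#note
The race is on the response field: Netty's I/O thread writes it while the calling thread reads it after waking from obj.wait(), and the patch only bounds the exposure by adding a SO_TIMEOUT option rather than synchronizing the field itself. A future makes the handoff explicit; a minimal sketch, assuming the handler's channelRead0 would call complete(...):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    final class ResponseHolder {
        private final CompletableFuture<RpcResponse> future = new CompletableFuture<>();
        void complete(RpcResponse response) { future.complete(response); } // called on the I/O thread
        RpcResponse await(long timeoutMillis) throws Exception {
            return future.get(timeoutMillis, TimeUnit.MILLISECONDS);       // called by the sender
        }
    }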
#vulnerable code
@Test
public void testDecompressionWithZLIB() throws Exception {
// ZLIB compress message.
byte[] compressMe = this.originalMessage.getBytes();
byte[] compressedMessage = new byte[compressMe.length];
Deflater compressor = new Deflater();
compressor.setInput(compressMe);
compressor.finish();
compressor.deflate(compressedMessage);
// Build a datagram packet.
DatagramPacket gelfMessage = new DatagramPacket(compressedMessage, compressedMessage.length);
// Let the decompression take place.
SimpleGELFClientHandler handler = new SimpleGELFClientHandler(gelfMessage, "foo");
assertEquals(handler.getClientMessage(), this.originalMessage);
}
#location 9
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void testDecompressionWithZLIB() throws Exception {
// Build a datagram packet.
DatagramPacket gelfMessage = GELFTestHelper.buildZLIBCompressedDatagramPacket(this.originalMessage);
// Let the decompression take place.
SimpleGELFClientHandler handler = new SimpleGELFClientHandler(gelfMessage, "foo");
assertEquals(handler.getClientMessage(), this.originalMessage);
} | Below is the vulnerable code, please generate the patch based on the following information. |
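#note
The leaked resource is the Deflater: it allocates native zlib buffers that only end() releases, and the vulnerable test never calls it. The patch moves compression behind GELFTestHelper.buildZLIBCompressedDatagramPacket; done inline, the standard shape is (sketch reusing the byte arrays from the vulnerable test):

    Deflater compressor = new Deflater();
    try {
        compressor.setInput(compressMe);
        compressor.finish();
        compressor.deflate(compressedMessage);
    } finally {
        compressor.end(); // releases the native buffers the vulnerable test leaked
    }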
#vulnerable code
public static double heightDegrees(int n) {
if (hashHeightCache.get(n) == null) {
double a;
if (n % 2 == 0)
a = -1;
else
a = -0.5;
double result = 90 / Math.pow(2, 2.5 * n + a);
hashHeightCache.put(n, result);
}
return hashHeightCache.get(n);
}
#location 11
#vulnerability type NULL_DEREFERENCE | #fixed code
public static double heightDegrees(int n) {
if (hashHeightCache[n - 1] == null) {
double a;
if (n % 2 == 0)
a = -1;
else
a = -0.5;
double result = 90 / Math.pow(2, 2.5 * n + a);
hashHeightCache[n - 1] = result;
}
return hashHeightCache[n - 1];
} | Below is the vulnerable code, please generate the patch based on the following information. |
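#note
Two things go wrong in the vulnerable version: under concurrency, get(n) can return null between the check and the final read, and that final read auto-unboxes whatever comes back. The patch swaps the map for a Double[] indexed by n - 1. Because geohash lengths are bounded, the table can also be filled eagerly, removing both the null handling and the race; a sketch, where MAX_HASH_LENGTH is an assumed constant (e.g. 12):

    private static final double[] HEIGHT_DEGREES = new double[MAX_HASH_LENGTH];
    static {
        for (int n = 1; n <= MAX_HASH_LENGTH; n++) {
            double a = (n % 2 == 0) ? -1 : -0.5;
            HEIGHT_DEGREES[n - 1] = 90 / Math.pow(2, 2.5 * n + a);
        }
    }

    public static double heightDegrees(int n) {
        return HEIGHT_DEGREES[n - 1];
    }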
#vulnerable code
public void add(double lat, double lon, long time, T t, long expiryTime) {
String hash = GeoHash.encodeHash(lat, lon);
for (int i = 1; i <= hash.length(); i++) {
String key = hash.substring(0, i);
if (map.get(key) == null) {
map.put(key, Maps.<Long, T> newTreeMap());
}
map.get(key).put(time, t);
}
}
#location 8
#vulnerability type NULL_DEREFERENCE | #fixed code
public void add(double lat, double lon, long time, T t, long expiryTime) {
String hash = GeoHash.encodeHash(lat, lon);
// full hash length is 12 so this will insert 12 entries
for (int i = 1; i <= hash.length(); i++) {
long key = Base32.decodeBase32(hash.substring(0, i));
if (map.get(key) == null) {
map.put(key, new ConcurrentSkipListMap<Long, Info<T>>());
}
map.get(key).put(time, new Info<T>(key, lat, lon, time, t));
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
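#note
Both versions keep the check-then-act shape, if (map.get(key) == null) followed by put, which can still drop a freshly created sub-map when two writers race on the same key. On a ConcurrentMap, computeIfAbsent closes that window in one step; a sketch using the types from the fixed version (assuming map is a ConcurrentMap<Long, ConcurrentSkipListMap<Long, Info<T>>>):

    map.computeIfAbsent(key, k -> new ConcurrentSkipListMap<Long, Info<T>>())
       .put(time, new Info<T>(key, lat, lon, time, t));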
#vulnerable code
@Test
public void testDeleteViewIndexSequences() throws Exception {
createBaseTable(tableName, false, null, null);
Connection conn1 = getConnection();
Connection conn2 = getConnection();
conn1.createStatement().execute("CREATE VIEW " + VIEW_NAME + " AS SELECT * FROM " + tableName);
conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + VIEW_NAME + " (v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
conn1.createStatement().execute("DROP VIEW " + VIEW_NAME);
conn1.createStatement().execute("DROP TABLE "+ tableName);
admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
ResultSet rs = conn2.createStatement().executeQuery("SELECT "
+ PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
+ PhoenixDatabaseMetaData.SEQUENCE_NAME
+ " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
assertFalse("View index sequences should be deleted.", rs.next());
}
#location 13
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void testDeleteViewIndexSequences() throws Exception {
createBaseTable(tableName, false, null, null);
Connection conn1 = getConnection();
Connection conn2 = getConnection();
String viewName = schemaName + "." + VIEW_NAME;
conn1.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName);
conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + viewName + " (v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_schema like '%"
+ schemaName + "%'";
ResultSet rs = conn1.prepareStatement(query).executeQuery();
assertTrue(rs.next());
assertEquals(MetaDataUtil.getViewIndexSequenceSchemaName(PNameFactory.newName(tableName), isNamespaceMapped),
rs.getString("sequence_schema"));
assertEquals(MetaDataUtil.getViewIndexSequenceName(PNameFactory.newName(tableName), null, isNamespaceMapped),
rs.getString("sequence_name"));
assertEquals(-32767, rs.getInt("current_value"));
assertEquals(1, rs.getInt("increment_by"));
assertFalse(rs.next());
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
conn1.createStatement().execute("DROP VIEW " + viewName);
conn1.createStatement().execute("DROP TABLE "+ tableName);
admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
rs = conn2.createStatement().executeQuery("SELECT "
+ PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
+ PhoenixDatabaseMetaData.SEQUENCE_NAME
+ " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
assertFalse("View index sequences should be deleted.", rs.next());
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
private static Object[] coerceToNewLength(PDataType baseType, Object[] elements, int maxLength) {
Object[] resizedElements = new Object[elements.length];
for (int i = 0; i < elements.length; i++) {
int length = baseType.getMaxLength(elements[i]);
if (length == maxLength) {
resizedElements[i] = elements[i];
} else {
resizedElements[i] = baseType.pad(elements[i],maxLength);
}
}
return resizedElements;
}
#location 4
#vulnerability type NULL_DEREFERENCE | #fixed code
private static Object[] coerceToNewLength(PDataType baseType, Object[] elements, int maxLength) {
Object[] resizedElements = new Object[elements.length];
for (int i = 0; i < elements.length; i++) {
Integer length = baseType.getMaxLength(elements[i]);
if (length != null) {
if (length == maxLength) {
resizedElements[i] = elements[i];
} else {
resizedElements[i] = baseType.pad(elements[i], maxLength);
}
} else {
resizedElements[i] = baseType.pad(elements[i], maxLength);
}
}
return resizedElements;
} | Below is the vulnerable code, please generate the patch based on the following information. |
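#note
The NPE comes from auto-unboxing: int length = baseType.getMaxLength(elements[i]) throws the moment getMaxLength returns null, before any branch runs. The patch keeps the boxed Integer and tests it for null explicitly. The mechanism in isolation:

    Integer boxed = null;
    int n = boxed; // NullPointerException fires here, at the unboxing, not at a method call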
#vulnerable code
private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName,
long clientTimeStamp, RowLock rowLock, int clientVersion, boolean skipAddingIndexes,
boolean skipAddingParentColumns, PTable lockedAncestorTable) throws IOException, SQLException {
Region region = env.getRegion();
final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
// if this region doesn't contain the metadata rows look up the table by using PhoenixRuntime.getTable
if (!region.getRegionInfo().containsRow(key)) {
Properties props = new Properties();
if (tenantId != null) {
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toString(tenantId));
}
if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) {
props.setProperty("CurrentSCN", Long.toString(clientTimeStamp));
}
try (PhoenixConnection connection =
QueryUtil.getConnectionOnServer(props, env.getConfiguration())
.unwrap(PhoenixConnection.class)) {
ConnectionQueryServices queryServices = connection.getQueryServices();
MetaDataMutationResult result =
queryServices.getTable(PNameFactory.newName(tenantId), schemaName,
tableName, HConstants.LATEST_TIMESTAMP, clientTimeStamp,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return result.getTable();
} catch (ClassNotFoundException e) {
}
}
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
// Ask Lars about the expense of this call - if we don't take the lock, we still won't get
// partial results
// get the co-processor environment
// TODO: check that key is within region.getStartKey() and region.getEndKey()
// and return special code to force client to lookup region from meta.
/*
* Lock directly on key, though it may be an index table. This will just prevent a table
* from getting rebuilt too often.
*/
final boolean wasLocked = (rowLock != null);
try {
if (!wasLocked) {
rowLock = acquireLock(region, key, null);
}
PTable table =
getTableFromCache(cacheKey, clientTimeStamp, clientVersion, skipAddingIndexes,
skipAddingParentColumns, lockedAncestorTable);
// We only cache the latest, so we'll end up building the table with every call if the
// client connection has specified an SCN.
// TODO: If we indicate to the client that we're returning an older version, but there's a
// newer version available, the client
// can safely not call this, since we only allow modifications to the latest.
if (table != null && table.getTimeStamp() < clientTimeStamp) {
// Table on client is up-to-date with table on server, so just return
if (isTableDeleted(table)) {
return null;
}
return table;
}
// Query for the latest table first, since it's not cached
table =
buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
(blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
return table;
}
// Otherwise, query for an older version of the table - it won't be cached
table =
buildTable(key, cacheKey, region, clientTimeStamp, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return table;
} finally {
if (!wasLocked && rowLock!=null) rowLock.release();
}
}
#location 63
#vulnerability type NULL_DEREFERENCE | #fixed code
private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName,
long clientTimeStamp, RowLock rowLock, int clientVersion, boolean skipAddingIndexes,
boolean skipAddingParentColumns, PTable lockedAncestorTable) throws IOException, SQLException {
Region region = env.getRegion();
final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
// if this region doesn't contain the metadata rows look up the table by using PhoenixRuntime.getTable
if (!region.getRegionInfo().containsRow(key)) {
Properties props = new Properties();
if (tenantId != null) {
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toString(tenantId));
}
if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) {
props.setProperty("CurrentSCN", Long.toString(clientTimeStamp));
}
try (PhoenixConnection connection =
QueryUtil.getConnectionOnServer(props, env.getConfiguration())
.unwrap(PhoenixConnection.class)) {
ConnectionQueryServices queryServices = connection.getQueryServices();
MetaDataMutationResult result =
queryServices.getTable(PNameFactory.newName(tenantId), schemaName,
tableName, HConstants.LATEST_TIMESTAMP, clientTimeStamp,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return result.getTable();
} catch (ClassNotFoundException e) {
}
}
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
// Ask Lars about the expense of this call - if we don't take the lock, we still won't get
// partial results
// get the co-processor environment
// TODO: check that key is within region.getStartKey() and region.getEndKey()
// and return special code to force client to lookup region from meta.
/*
* Lock directly on key, though it may be an index table. This will just prevent a table
* from getting rebuilt too often.
*/
final boolean wasLocked = (rowLock != null);
try {
if (!wasLocked) {
rowLock = acquireLock(region, key, null);
}
PTable table =
getTableFromCache(cacheKey, clientTimeStamp, clientVersion, skipAddingIndexes,
skipAddingParentColumns, lockedAncestorTable);
table = modifyIndexStateForOldClient(clientVersion, table);
// We only cache the latest, so we'll end up building the table with every call if the
// client connection has specified an SCN.
// TODO: If we indicate to the client that we're returning an older version, but there's a
// newer version available, the client
// can safely not call this, since we only allow modifications to the latest.
if (table != null && table.getTimeStamp() < clientTimeStamp) {
// Table on client is up-to-date with table on server, so just return
if (isTableDeleted(table)) {
return null;
}
return table;
}
// Query for the latest table first, since it's not cached
table =
buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
(blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
return table;
}
// Otherwise, query for an older version of the table - it won't be cached
table =
buildTable(key, cacheKey, region, clientTimeStamp, clientVersion,
skipAddingIndexes, skipAddingParentColumns, lockedAncestorTable);
return table;
} finally {
if (!wasLocked && rowLock!=null) rowLock.release();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public void close() throws SQLException {
SQLException sqlE = null;
try {
// Attempt to return any unused sequences.
returnAllSequences(this.sequenceMap);
} catch (SQLException e) {
sqlE = e;
} finally {
try {
// Clear any client-side caches.
statsManager.clearStats();
} catch (SQLException e) {
if (sqlE == null) {
sqlE = e;
} else {
sqlE.setNextException(e);
}
} finally {
try {
childServices.clear();
latestMetaData = null;
connection.close();
} catch (IOException e) {
if (sqlE == null) {
sqlE = ServerUtil.parseServerException(e);
} else {
sqlE.setNextException(ServerUtil.parseServerException(e));
}
} finally {
try {
super.close();
} catch (SQLException e) {
if (sqlE == null) {
sqlE = e;
} else {
sqlE.setNextException(e);
}
} finally {
if (sqlE != null) {
throw sqlE;
}
}
}
}
}
}
#location 6
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Override
public void close() throws SQLException {
if (closed) {
return;
}
synchronized (this) {
if (closed) {
return;
}
closed = true;
SQLException sqlE = null;
try {
// Attempt to return any unused sequences.
if (connection != null) returnAllSequences(this.sequenceMap);
} catch (SQLException e) {
sqlE = e;
} finally {
try {
// Clear any client-side caches.
statsManager.clearStats();
} catch (SQLException e) {
if (sqlE == null) {
sqlE = e;
} else {
sqlE.setNextException(e);
}
} finally {
try {
childServices.clear();
latestMetaData = null;
if (connection != null) connection.close();
} catch (IOException e) {
if (sqlE == null) {
sqlE = ServerUtil.parseServerException(e);
} else {
sqlE.setNextException(ServerUtil.parseServerException(e));
}
} finally {
try {
super.close();
} catch (SQLException e) {
if (sqlE == null) {
sqlE = e;
} else {
sqlE.setNextException(e);
}
} finally {
if (sqlE != null) {
throw sqlE;
}
}
}
}
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
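#note
The patch makes close() idempotent with a double-checked closed flag, so concurrent callers cannot run the teardown (sequence return, cache clearing, connection close) twice. An AtomicBoolean expresses the same guard without the synchronized block; a sketch only, since the patch itself uses the flag-plus-synchronized form:

    private final AtomicBoolean closed = new AtomicBoolean(false); // java.util.concurrent.atomic
    @Override
    public void close() throws SQLException {
        if (!closed.compareAndSet(false, true)) {
            return; // another thread already ran (or is running) the teardown
        }
        // single-shot teardown goes here
    }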
#vulnerable code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
boolean isChildTypeUnknown = false;
Expression arrayElemChild = null;
PDataType arrayElemDataType = children.get(0).getDataType();
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
PDataType childType = child.getDataType();
if (childType == null) {
isChildTypeUnknown = true;
} else if (arrayElemDataType == null) {
arrayElemDataType = childType;
isChildTypeUnknown = true;
arrayElemChild = child;
} else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
continue;
} else if (arrayElemDataType.isCoercibleTo(childType)) {
arrayElemChild = child;
arrayElemDataType = childType;
} else {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
.setMessage(
"Case expressions must have common type: " + arrayElemDataType
+ " cannot be coerced to " + childType).build().buildException();
}
}
// If we found an "unknown" child type and the return type is a number
// make the return type be the most general number type of DECIMAL.
if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
arrayElemDataType = PDataType.DECIMAL;
}
final PDataType theArrayElemDataType = arrayElemDataType;
for (int i = 0; i < node.getChildren().size(); i++) {
ParseNode childNode = node.getChildren().get(i);
if (childNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode)childNode,
arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
new DelegateDatum(arrayElemChild) {
@Override
public PDataType getDataType() {
return theArrayElemDataType;
}
});
}
}
ImmutableBytesWritable ptr = context.getTempPtr();
Object[] elements = new Object[children.size()];
if (node.isStateless()) {
boolean isDeterministic = true;
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
isDeterministic &= child.isDeterministic();
child.evaluate(null, ptr);
Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
}
Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
return LiteralExpression.newConstant(value,
PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), isDeterministic);
}
ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
return wrapGroupByExpression(arrayExpression);
}
#location 62
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
boolean isChildTypeUnknown = false;
Expression arrayElemChild = null;
PDataType arrayElemDataType = children.get(0).getDataType();
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
PDataType childType = child.getDataType();
if (childType == null) {
isChildTypeUnknown = true;
} else if (arrayElemDataType == null) {
arrayElemDataType = childType;
isChildTypeUnknown = true;
arrayElemChild = child;
} else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
continue;
} else if (arrayElemDataType.isCoercibleTo(childType)) {
arrayElemChild = child;
arrayElemDataType = childType;
} else {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
.setMessage(
"Case expressions must have common type: " + arrayElemDataType
+ " cannot be coerced to " + childType).build().buildException();
}
}
// If we found an "unknown" child type and the return type is a number
// make the return type be the most general number type of DECIMAL.
if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
arrayElemDataType = PDataType.DECIMAL;
}
final PDataType theArrayElemDataType = arrayElemDataType;
for (int i = 0; i < node.getChildren().size(); i++) {
ParseNode childNode = node.getChildren().get(i);
if (childNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode)childNode,
arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
new DelegateDatum(arrayElemChild) {
@Override
public PDataType getDataType() {
return theArrayElemDataType;
}
});
}
}
ImmutableBytesWritable ptr = context.getTempPtr();
Object[] elements = new Object[children.size()];
ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
if (ExpressionUtil.isConstant(arrayExpression)) {
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
child.evaluate(null, ptr);
Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
}
Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
return LiteralExpression.newConstant(value,
PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), true);
}
return wrapGroupByExpression(arrayExpression);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public boolean next(List<Cell> results) throws IOException {
if (indexRowKey != null &&
singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
byte[] rowCountBytes =
PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
results.add(aggKeyValue);
return false;
}
Cell lastCell = null;
int rowCount = 0;
region.startRegionOperation();
RegionScanner localScanner = null;
try {
byte[] uuidValue = ServerCacheClient.generateId();
localScanner = getLocalScanner();
if (localScanner == null) {
return false;
}
synchronized (localScanner) {
if (!shouldVerify()) {
skipped = true;
return false;
}
do {
List<Cell> row = new ArrayList<Cell>();
hasMore = localScanner.nextRaw(row);
if (!row.isEmpty()) {
lastCell = row.get(0); // lastCell is any cell from the last visited row
Put put = null;
Delete del = null;
for (Cell cell : row) {
if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
continue;
}
if (put == null) {
put = new Put(CellUtil.cloneRow(cell));
}
put.add(cell);
} else {
if (del == null) {
del = new Delete(CellUtil.cloneRow(cell));
}
del.addDeleteMarker(cell);
}
}
if (put == null && del == null) {
continue;
}
// Always add the put first and then delete for a given row. This simplifies the logic in
// IndexRegionObserver
if (put != null) {
mutations.add(put);
}
if (del != null) {
mutations.add(del);
}
if (!verify) {
if (put != null) {
setMutationAttributes(put, uuidValue);
}
if (del != null) {
setMutationAttributes(del, uuidValue);
}
uuidValue = commitIfReady(uuidValue, mutations);
} else {
byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
prepareIndexMutations(put, del);
dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
}
rowCount++;
}
} while (hasMore && rowCount < pageSizeInRows);
if (!mutations.isEmpty()) {
if (verify) {
verifyAndOrRebuildIndex();
} else {
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
}
}
}
} catch (Throwable e) {
LOGGER.error("Exception in IndexRebuildRegionScanner for region "
+ region.getRegionInfo().getRegionNameAsString(), e);
throw e;
} finally {
region.closeRegionOperation();
mutations.clear();
if (verify) {
dataKeyToMutationMap.clear();
indexKeyToMutationMap.clear();
}
if (localScanner!=null && localScanner!=innerScanner) {
localScanner.close();
}
}
if (indexRowKey != null) {
rowCount = singleRowRebuildReturnCode;
}
if (minTimestamp != 0) {
nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
}
byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
final Cell aggKeyValue;
if (lastCell == null) {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
} else {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
}
results.add(aggKeyValue);
return hasMore || hasMoreIncr;
}
#location 117
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Override
public boolean next(List<Cell> results) throws IOException {
if (indexRowKey != null &&
singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
byte[] rowCountBytes =
PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
results.add(aggKeyValue);
return false;
}
Cell lastCell = null;
int rowCount = 0;
region.startRegionOperation();
RegionScanner localScanner = null;
try {
byte[] uuidValue = ServerCacheClient.generateId();
localScanner = getLocalScanner();
if (localScanner == null) {
return false;
}
synchronized (localScanner) {
if (!shouldVerify()) {
skipped = true;
return false;
}
do {
/**
 If the region is closing and there are a large number of rows being verified/rebuilt with IndexTool,
 not having this check will impact/delay the region closing -- affecting availability,
 as this method holds the read lock on the region.
 */
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
List<Cell> row = new ArrayList<Cell>();
hasMore = localScanner.nextRaw(row);
if (!row.isEmpty()) {
lastCell = row.get(0); // lastCell is any cell from the last visited row
Put put = null;
Delete del = null;
for (Cell cell : row) {
if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
continue;
}
if (put == null) {
put = new Put(CellUtil.cloneRow(cell));
}
put.add(cell);
} else {
if (del == null) {
del = new Delete(CellUtil.cloneRow(cell));
}
del.addDeleteMarker(cell);
}
}
if (put == null && del == null) {
continue;
}
// Always add the put first and then delete for a given row. This simplifies the logic in
// IndexRegionObserver
if (put != null) {
mutations.add(put);
}
if (del != null) {
mutations.add(del);
}
if (!verify) {
if (put != null) {
setMutationAttributes(put, uuidValue);
}
if (del != null) {
setMutationAttributes(del, uuidValue);
}
uuidValue = commitIfReady(uuidValue, mutations);
} else {
byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
prepareIndexMutations(put, del);
dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
}
rowCount++;
}
} while (hasMore && rowCount < pageSizeInRows);
if (!mutations.isEmpty()) {
if (verify) {
verifyAndOrRebuildIndex();
} else {
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
}
}
}
} catch (Throwable e) {
LOGGER.error("Exception in IndexRebuildRegionScanner for region "
+ region.getRegionInfo().getRegionNameAsString(), e);
throw e;
} finally {
region.closeRegionOperation();
mutations.clear();
if (verify) {
dataKeyToMutationMap.clear();
indexKeyToMutationMap.clear();
}
if (localScanner!=null && localScanner!=innerScanner) {
localScanner.close();
}
}
if (indexRowKey != null) {
rowCount = singleRowRebuildReturnCode;
}
if (minTimestamp != 0) {
nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
}
byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
final Cell aggKeyValue;
if (lastCell == null) {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
} else {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
}
results.add(aggKeyValue);
return hasMore || hasMoreIncr;
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public int executeStatements(Reader reader, List<Object> binds, PrintStream out) throws IOException, SQLException {
int bindsOffset = 0;
int nStatements = 0;
PhoenixStatementParser parser = new PhoenixStatementParser(reader);
try {
while (true) {
PhoenixPreparedStatement stmt = new PhoenixPreparedStatement(this, parser);
ParameterMetaData paramMetaData = stmt.getParameterMetaData();
for (int i = 0; i < paramMetaData.getParameterCount(); i++) {
stmt.setObject(i+1, binds.get(bindsOffset+i));
}
long start = System.currentTimeMillis();
boolean isQuery = stmt.execute();
if (isQuery) {
ResultSet rs = stmt.getResultSet();
if (!rs.next()) {
if (out != null) {
out.println("no rows selected");
}
} else {
int columnCount = 0;
if (out != null) {
ResultSetMetaData md = rs.getMetaData();
columnCount = md.getColumnCount();
for (int i = 1; i <= columnCount; i++) {
int displayWidth = md.getColumnDisplaySize(i);
String label = md.getColumnLabel(i);
if (md.isSigned(i)) {
out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padStart(label, displayWidth, ' '));
out.print(' ');
} else {
out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padEnd(md.getColumnLabel(i), displayWidth, ' '));
out.print(' ');
}
}
out.println();
for (int i = 1; i <= columnCount; i++) {
int displayWidth = md.getColumnDisplaySize(i);
out.print(Strings.padStart("", displayWidth,'-'));
out.print(' ');
}
out.println();
}
do {
if (out != null) {
ResultSetMetaData md = rs.getMetaData();
for (int i = 1; i <= columnCount; i++) {
int displayWidth = md.getColumnDisplaySize(i);
String value = rs.getString(i);
String valueString = value == null ? QueryConstants.NULL_DISPLAY_TEXT : value;
if (md.isSigned(i)) {
out.print(Strings.padStart(valueString, displayWidth, ' '));
} else {
out.print(Strings.padEnd(valueString, displayWidth, ' '));
}
out.print(' ');
}
out.println();
}
} while (rs.next());
}
} else if (out != null){
int updateCount = stmt.getUpdateCount();
if (updateCount >= 0) {
out.println((updateCount == 0 ? "no" : updateCount) + (updateCount == 1 ? " row " : " rows ") + stmt.getUpdateOperation().toString());
}
}
bindsOffset += paramMetaData.getParameterCount();
double elapsedDuration = ((System.currentTimeMillis() - start) / 1000.0);
out.println("Time: " + elapsedDuration + " sec(s)\n");
nStatements++;
}
} catch (EOFException e) {
}
return nStatements;
}
#location 62
#vulnerability type RESOURCE_LEAK | #fixed code
public int executeStatements(Reader reader, List<Object> binds, PrintStream out) throws IOException, SQLException {
int bindsOffset = 0;
int nStatements = 0;
PhoenixStatementParser parser = new PhoenixStatementParser(reader);
try {
while (true) {
PhoenixPreparedStatement stmt = new PhoenixPreparedStatement(this, parser);
this.statements.add(stmt);
ParameterMetaData paramMetaData = stmt.getParameterMetaData();
for (int i = 0; i < paramMetaData.getParameterCount(); i++) {
stmt.setObject(i+1, binds.get(bindsOffset+i));
}
long start = System.currentTimeMillis();
boolean isQuery = stmt.execute();
if (isQuery) {
ResultSet rs = stmt.getResultSet();
if (!rs.next()) {
if (out != null) {
out.println("no rows selected");
}
} else {
int columnCount = 0;
if (out != null) {
ResultSetMetaData md = rs.getMetaData();
columnCount = md.getColumnCount();
for (int i = 1; i <= columnCount; i++) {
int displayWidth = md.getColumnDisplaySize(i);
String label = md.getColumnLabel(i);
if (md.isSigned(i)) {
out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padStart(label, displayWidth, ' '));
out.print(' ');
} else {
out.print(displayWidth < label.length() ? label.substring(0,displayWidth) : Strings.padEnd(md.getColumnLabel(i), displayWidth, ' '));
out.print(' ');
}
}
out.println();
for (int i = 1; i <= columnCount; i++) {
int displayWidth = md.getColumnDisplaySize(i);
out.print(Strings.padStart("", displayWidth,'-'));
out.print(' ');
}
out.println();
}
do {
if (out != null) {
ResultSetMetaData md = rs.getMetaData();
for (int i = 1; i <= columnCount; i++) {
int displayWidth = md.getColumnDisplaySize(i);
String value = rs.getString(i);
String valueString = value == null ? QueryConstants.NULL_DISPLAY_TEXT : value;
if (md.isSigned(i)) {
out.print(Strings.padStart(valueString, displayWidth, ' '));
} else {
out.print(Strings.padEnd(valueString, displayWidth, ' '));
}
out.print(' ');
}
out.println();
}
} while (rs.next());
}
} else if (out != null){
int updateCount = stmt.getUpdateCount();
if (updateCount >= 0) {
out.println((updateCount == 0 ? "no" : updateCount) + (updateCount == 1 ? " row " : " rows ") + stmt.getUpdateOperation().toString());
}
}
bindsOffset += paramMetaData.getParameterCount();
double elapsedDuration = ((System.currentTimeMillis() - start) / 1000.0);
out.println("Time: " + elapsedDuration + " sec(s)\n");
nStatements++;
}
} catch (EOFException e) {
}
return nStatements;
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException,
InterruptedException, TimeoutException {
HBaseAdmin admin = new HBaseAdmin(config);
if (!allowOnlineTableSchemaUpdate()) {
admin.disableTable(tableName);
admin.modifyTable(tableName, newDesc);
admin.enableTable(tableName);
} else {
admin.modifyTable(tableName, newDesc);
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
}
#location 11
#vulnerability type RESOURCE_LEAK | #fixed code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException,
InterruptedException, TimeoutException {
try (HBaseAdmin admin = new HBaseAdmin(config)) {
if (!allowOnlineTableSchemaUpdate()) {
admin.disableTable(tableName);
admin.modifyTable(tableName, newDesc);
admin.enableTable(tableName);
} else {
admin.modifyTable(tableName, newDesc);
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
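#note
Here the leak is the HBaseAdmin itself: the vulnerable version constructs it and never closes it, so its underlying connection leaks on every call and on every exception path through disable/modify/enable. Since HBaseAdmin is Closeable, the try-with-resources block the patch introduces guarantees the close on all of those paths.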
#vulnerable code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
//now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
Get tenantGet = getGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantGet = getGet(catalogTable, DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
WAL.Entry nonTenantEntry = getEntry(systemCatalogTableName, nonTenantGet);
WAL.Entry tenantEntry = getEntry(systemCatalogTableName, tenantGet);
//verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
Assert.assertNull(filter.filter(nonTenantEntry));
WAL.Entry filteredTenantEntry = filter.filter(tenantEntry);
Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
Assert.assertEquals(tenantEntry.getEdit().size(),
filter.filter(tenantEntry).getEdit().size());
//now check that a WAL.Entry with cells from both a tenant and a non-tenant
//catalog row only allow the tenant cells through
WALEdit comboEdit = new WALEdit();
comboEdit.getCells().addAll(nonTenantEntry.getEdit().getCells());
comboEdit.getCells().addAll(tenantEntry.getEdit().getCells());
WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
Assert.assertEquals(tenantEntry.getEdit().size() + nonTenantEntry.getEdit().size()
, comboEntry.getEdit().size());
Assert.assertEquals(tenantEntry.getEdit().size(),
filter.filter(comboEntry).getEdit().size());
}
#location 18
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
//now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
Get tenantViewGet = getTenantViewGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantViewGet = getTenantViewGet(catalogTable,
DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
Get tenantLinkGet = getParentChildLinkGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantLinkGet = getParentChildLinkGet(catalogTable,
DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
WAL.Entry nonTenantViewEntry = getEntry(systemCatalogTableName, nonTenantViewGet);
WAL.Entry tenantViewEntry = getEntry(systemCatalogTableName, tenantViewGet);
WAL.Entry nonTenantLinkEntry = getEntry(systemCatalogTableName, nonTenantLinkGet);
WAL.Entry tenantLinkEntry = getEntry(systemCatalogTableName, tenantLinkGet);
//verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
Assert.assertNull(filter.filter(nonTenantViewEntry));
WAL.Entry filteredTenantEntry = filter.filter(tenantViewEntry);
Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
Assert.assertEquals(tenantViewEntry.getEdit().size(),
filter.filter(tenantViewEntry).getEdit().size());
//now check that a WAL.Entry with cells from both a tenant and a non-tenant
//catalog row only allow the tenant cells through
WALEdit comboEdit = new WALEdit();
comboEdit.getCells().addAll(nonTenantViewEntry.getEdit().getCells());
comboEdit.getCells().addAll(tenantViewEntry.getEdit().getCells());
WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
Assert.assertEquals(tenantViewEntry.getEdit().size() + nonTenantViewEntry.getEdit().size()
, comboEntry.getEdit().size());
Assert.assertEquals(tenantViewEntry.getEdit().size(),
filter.filter(comboEntry).getEdit().size());
//now check that the parent-child links (which have the tenant_id of the view's parent,
// but are a part of the view's metadata) are migrated in the tenant case
// but not the non-tenant. The view's tenant_id is in the System.Catalog.COLUMN_NAME field
Assert.assertNull("Non-tenant parent-child link was not filtered " +
"when it should be!", filter.filter(nonTenantLinkEntry));
Assert.assertNotNull("Tenant parent-child link was filtered when it should not be!",
filter.filter(tenantLinkEntry));
Assert.assertEquals(tenantLinkEntry.getEdit().size(),
filter.filter(tenantLinkEntry).getEdit().size());
//add the parent-child link to the tenant view WAL entry,
//since they'll usually be together and they both need to
//be replicated
tenantViewEntry.getEdit().getCells().addAll(tenantLinkEntry.getEdit().getCells());
Assert.assertEquals(tenantViewEntry.getEdit().size(), tenantViewEntry.getEdit().size());
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testSelectUpsertWithNewClient() throws Exception {
checkForPreConditions();
// Insert data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
executeQueriesWithCurrentVersion(QUERY);
assertTrue(compareOutput(CREATE_ADD, QUERY));
// Insert more data with new client and read with old client
executeQueriesWithCurrentVersion(ADD_DATA);
executeQueryWithClientVersion(compatibleClientVersion, QUERY_MORE);
assertTrue(compareOutput(ADD_DATA, QUERY_MORE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Test
public void testSelectUpsertWithNewClient() throws Exception {
// Insert data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
executeQueriesWithCurrentVersion(QUERY);
assertExpectedOutput(CREATE_ADD, QUERY);
// Insert more data with new client and read with old client
executeQueriesWithCurrentVersion(ADD_DATA);
executeQueryWithClientVersion(compatibleClientVersion, QUERY_MORE);
assertExpectedOutput(ADD_DATA, QUERY_MORE);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
final String dataTable = generateUniqueName();
final String index = "IDX_" + dataTable;
// create the table and ensure its empty
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(url, props);
conn.createStatement()
.execute("CREATE TABLE " + dataTable + " (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
// create the index and ensure its empty as well
conn.createStatement().execute("CREATE INDEX " + index + " ON " + dataTable + " (v1)");
conn = DriverManager.getConnection(getUrl(), props);
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTable + " VALUES(?,?,?)");
conn.setAutoCommit(false);
for (int i = 0; i < 100; i++) {
stmt.setInt(1, i);
stmt.setString(2, "v1" + i);
stmt.setString(3, "v2" + i);
stmt.execute();
}
conn.commit();
int numUpsertSelectRunners = 5;
ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
// run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
// run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
for (int i = 0; i < 100; i += 25) {
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
}
int received = 0;
while (received < futures.size()) {
Future<Boolean> resultFuture = completionService.take();
Boolean result = resultFuture.get();
received++;
assertTrue(result);
}
exec.shutdownNow();
conn.close();
}
#location 8
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
try (Connection conn = driver.connect(url, props)) {
int numUpsertSelectRunners = 5;
ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
// run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
// run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
for (int i = 0; i < 100; i += 25) {
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
}
int received = 0;
while (received < futures.size()) {
Future<Boolean> resultFuture = completionService.take();
Boolean result = resultFuture.get();
received++;
assertTrue(result);
}
exec.shutdownNow();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
protected static void setupTxManager() throws SQLException, IOException {
TransactionFactory.getTransactionFactory().getTransactionContext().setupTxManager(config, getUrl());
}
#location 2
#vulnerability type NULL_DEREFERENCE | #fixed code
protected static void setupTxManager() throws SQLException, IOException {
TransactionFactory.getTransactionProvider().getTransactionContext().setupTxManager(config, getUrl());
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testExternalTxContext() throws Exception {
ResultSet rs;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.setAutoCommit(false);
String fullTableName = generateUniqueName();
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
Statement stmt = conn.createStatement();
stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
conn.commit();
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
PhoenixTransactionContext txContext =
TransactionFactory.getTransactionFactory().getTransactionContext(pconn);
PhoenixTransactionalTable txTable =
TransactionFactory.getTransactionFactory().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
Put put = new Put(Bytes.toBytes("z"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
// New connection should not see data as it hasn't been committed yet
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
// Use new connection to create a row with a conflict
Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Use TM APIs directly to finish (i.e. commit) the transaction
txContext.commit();
// Confirm that attempt to commit row with conflict fails
try {
connWithConflict.commit();
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
}
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
}
// Repeat the same as above, but this time abort the transaction
txContext =
TransactionFactory.getTransactionFactory().getTransactionContext(pconn);
txTable =
TransactionFactory.getTransactionFactory().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
put = new Put(Bytes.toBytes("j"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(5,rs.getInt(1));
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
// Use TM APIs directly to abort (i.e. rollback) the transaction
txContext.abort();
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Should succeed since conflicting row was aborted
connWithConflict.commit();
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
}
// Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
// written to hide it.
Result result = htable.get(new Get(Bytes.toBytes("j")));
assertTrue(result.isEmpty());
}
#location 27
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testExternalTxContext() throws Exception {
ResultSet rs;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.setAutoCommit(false);
String fullTableName = generateUniqueName();
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
Statement stmt = conn.createStatement();
stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
conn.commit();
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
PhoenixTransactionContext txContext =
TransactionFactory.getTransactionProvider().getTransactionContext(pconn);
PhoenixTransactionalTable txTable =
TransactionFactory.getTransactionProvider().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
Put put = new Put(Bytes.toBytes("z"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
// New connection should not see data as it hasn't been committed yet
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(1,rs.getInt(1));
}
// Use new connection to create a row with a conflict
Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Use TM APIs directly to finish (i.e. commit) the transaction
txContext.commit();
// Confirm that attempt to commit row with conflict fails
try {
connWithConflict.commit();
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
}
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
}
// Repeat the same as above, but this time abort the transaction
txContext =
TransactionFactory.getTransactionProvider().getTransactionContext(pconn);
txTable =
TransactionFactory.getTransactionProvider().getTransactionalTable(txContext, htable);
txContext.begin();
// Use HBase APIs to add a new row
put = new Put(Bytes.toBytes("j"));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
txTable.put(put);
// Use Phoenix APIs to add new row (sharing the transaction context)
pconn.setTransactionContext(txContext);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
// Existing connection should see data even though it hasn't been committed yet
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(5,rs.getInt(1));
connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
// Use TM APIs directly to abort (i.e. rollback) the transaction
txContext.abort();
rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(3,rs.getInt(1));
// Should succeed since conflicting row was aborted
connWithConflict.commit();
// New connection should now see data as it has been committed
try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
assertTrue(rs.next());
assertEquals(4,rs.getInt(1));
}
// Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
// written to hide it.
Result result = htable.get(new Get(Bytes.toBytes("j")));
assertTrue(result.isEmpty());
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testUpsertDeleteWithNewClient() throws Exception {
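        // Flagged THREAD_SAFETY_VIOLATION (location 3): the precondition check on the next line
        // is the reported site; the fixed version simply removes the call.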
checkForPreConditions();
        // Insert data with the new client and read with the old client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertTrue(compareOutput(CREATE_ADD, QUERY));
// Deletes with the new client
executeQueriesWithCurrentVersion(ADD_DELETE);
executeQueriesWithCurrentVersion(QUERY_ADD_DELETE);
assertTrue(compareOutput(ADD_DELETE, QUERY_ADD_DELETE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Test
public void testUpsertDeleteWithNewClient() throws Exception {
        // Insert data with the new client and read with the old client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertExpectedOutput(CREATE_ADD, QUERY);
// Deletes with the new client
executeQueriesWithCurrentVersion(ADD_DELETE);
executeQueriesWithCurrentVersion(QUERY_ADD_DELETE);
assertExpectedOutput(ADD_DELETE, QUERY_ADD_DELETE);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
boolean isChildTypeUnknown = false;
Expression arrayElemChild = null;
PDataType arrayElemDataType = children.get(0).getDataType();
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
PDataType childType = child.getDataType();
if (childType == null) {
isChildTypeUnknown = true;
} else if (arrayElemDataType == null) {
arrayElemDataType = childType;
isChildTypeUnknown = true;
arrayElemChild = child;
} else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
continue;
} else if (arrayElemDataType.isCoercibleTo(childType)) {
arrayElemChild = child;
arrayElemDataType = childType;
} else {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
.setMessage(
"Case expressions must have common type: " + arrayElemDataType
+ " cannot be coerced to " + childType).build().buildException();
}
}
// If we found an "unknown" child type and the return type is a number
// make the return type be the most general number type of DECIMAL.
if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
arrayElemDataType = PDataType.DECIMAL;
}
final PDataType theArrayElemDataType = arrayElemDataType;
for (int i = 0; i < node.getChildren().size(); i++) {
ParseNode childNode = node.getChildren().get(i);
if (childNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode)childNode,
arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
new DelegateDatum(arrayElemChild) {
@Override
public PDataType getDataType() {
return theArrayElemDataType;
}
});
}
}
ImmutableBytesWritable ptr = context.getTempPtr();
Object[] elements = new Object[children.size()];
if (node.isStateless()) {
boolean isDeterministic = true;
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
isDeterministic &= child.isDeterministic();
child.evaluate(null, ptr);
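                // Flagged NULL_DEREFERENCE: arrayElemDataType may still be null here (e.g. when
                // every child's type is unknown), so the toObject() call below can throw.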
Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
}
Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
return LiteralExpression.newConstant(value,
PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), isDeterministic);
}
ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
return wrapGroupByExpression(arrayExpression);
}
#location 57
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public Expression visitLeave(ArrayConstructorNode node, List<Expression> children) throws SQLException {
boolean isChildTypeUnknown = false;
Expression arrayElemChild = null;
PDataType arrayElemDataType = children.get(0).getDataType();
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
PDataType childType = child.getDataType();
if (childType == null) {
isChildTypeUnknown = true;
} else if (arrayElemDataType == null) {
arrayElemDataType = childType;
isChildTypeUnknown = true;
arrayElemChild = child;
} else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) {
continue;
} else if (arrayElemDataType.isCoercibleTo(childType)) {
arrayElemChild = child;
arrayElemDataType = childType;
} else {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CONVERT_TYPE)
.setMessage(
"Case expressions must have common type: " + arrayElemDataType
+ " cannot be coerced to " + childType).build().buildException();
}
}
// If we found an "unknown" child type and the return type is a number
// make the return type be the most general number type of DECIMAL.
if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo(PDataType.DECIMAL)) {
arrayElemDataType = PDataType.DECIMAL;
}
final PDataType theArrayElemDataType = arrayElemDataType;
for (int i = 0; i < node.getChildren().size(); i++) {
ParseNode childNode = node.getChildren().get(i);
if (childNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode)childNode,
arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild :
new DelegateDatum(arrayElemChild) {
@Override
public PDataType getDataType() {
return theArrayElemDataType;
}
});
}
}
ImmutableBytesWritable ptr = context.getTempPtr();
Object[] elements = new Object[children.size()];
ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType);
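        // Building the expression first lets ExpressionUtil.isConstant() gate the constant-folding
        // path, replacing the isStateless()/isDeterministic bookkeeping of the vulnerable version.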
if (ExpressionUtil.isConstant(arrayExpression)) {
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
child.evaluate(null, ptr);
Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.isDeterministic()).getValue();
}
Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
return LiteralExpression.newConstant(value,
PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), true);
}
return wrapGroupByExpression(arrayExpression);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testIndexRebuildTask() throws Throwable {
String baseTable = generateUniqueName();
Connection conn = null;
Connection viewConn = null;
try {
conn = DriverManager.getConnection(getUrl());
conn.setAutoCommit(false);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);
            viewConn = DriverManager.getConnection(getUrl(), props);
String ddlFormat =
"CREATE TABLE IF NOT EXISTS " + baseTable + " ("
+ " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
conn.createStatement().execute(generateDDL(ddlFormat));
conn.commit();
// Create a view
String viewName = generateUniqueName();
String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
viewConn.createStatement().execute(viewDDL);
// Create index
String indexName = generateUniqueName();
String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);
viewConn.createStatement().execute(idxSDDL);
// Insert rows
int numOfValues = 1000;
for (int i=0; i < numOfValues; i++){
viewConn.createStatement().execute(
String.format("UPSERT INTO %s VALUES('%s', '%s', '%s')", viewName, String.valueOf(i), "y",
"z"));
}
viewConn.commit();
String data = "{IndexName:" + indexName + "}";
// Run IndexRebuildTask
TaskRegionObserver.SelfHealingTask task =
new TaskRegionObserver.SelfHealingTask(
TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
// Add a task to System.Task to build indexes
Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
TENANT1, null, viewName,
PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
task.run();
String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
int count = getUtility().countRows(queryServices.getTable(Bytes.toBytes(viewIndexTableName)));
assertTrue(count == numOfValues);
// Remove index contents and try again
Admin admin = queryServices.getAdmin();
TableName tableName = TableName.valueOf(viewIndexTableName);
admin.disableTable(tableName);
admin.truncateTable(tableName, false);
data = "{IndexName:" + indexName + ", DisableBefore:true}";
// Add a new task (update status to created) to System.Task to rebuild indexes
Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
TENANT1, null, viewName,
PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
task.run();
            Table systemHTable = queryServices.getTable(Bytes.toBytes("SYSTEM." + PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE));
count = getUtility().countRows(systemHTable);
assertEquals(1, count);
// Check task status and other column values.
waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, PTable.TaskStatus.COMPLETED);
// See that index is rebuilt and confirm index has rows
            Table htable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
count = getUtility().countRows(htable);
assertEquals(numOfValues, count);
} finally {
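            // Flagged NULL_DEREFERENCE: conn is dereferenced on the next line before the null check
            // below it, so a failed getConnection() surfaces here as an NPE.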
conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME);
conn.commit();
if (conn != null) {
conn.close();
}
if (viewConn != null) {
viewConn.close();
}
}
}
#location 86
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testIndexRebuildTask() throws Throwable {
String baseTable = generateUniqueName();
String viewName = generateUniqueName();
Connection conn = null;
Connection tenantConn = null;
try {
conn = DriverManager.getConnection(getUrl());
conn.setAutoCommit(false);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);
            tenantConn = DriverManager.getConnection(getUrl(), props);
String ddlFormat =
"CREATE TABLE IF NOT EXISTS " + baseTable + " ("
+ " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
conn.createStatement().execute(generateDDL(ddlFormat));
conn.commit();
// Create a view
String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
tenantConn.createStatement().execute(viewDDL);
// Create index
String indexName = generateUniqueName();
String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);
tenantConn.createStatement().execute(idxSDDL);
// Insert rows
int numOfValues = 1000;
for (int i=0; i < numOfValues; i++){
tenantConn.createStatement().execute(
String.format("UPSERT INTO %s VALUES('%s', '%s', '%s')", viewName, String.valueOf(i), "y",
"z"));
}
tenantConn.commit();
waitForIndexRebuild(conn, indexName, PIndexState.ACTIVE);
String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
Table indexHTable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
int count = getUtility().countRows(indexHTable);
assertEquals(numOfValues, count);
// Alter to Unusable makes the index status inactive.
// If I Alter to DISABLE, it fails to in Index tool while setting state to active due to Invalid transition.
tenantConn.createStatement().execute(
String.format("ALTER INDEX %s ON %s UNUSABLE", indexName, viewName));
tenantConn.commit();
// Remove index contents and try again
Admin admin = queryServices.getAdmin();
TableName tableName = TableName.valueOf(viewIndexTableName);
admin.disableTable(tableName);
admin.truncateTable(tableName, false);
count = getUtility().countRows(indexHTable);
assertEquals(0, count);
String data = "{IndexName:" + indexName + ", DisableBefore: true}";
// Run IndexRebuildTask
TaskRegionObserver.SelfHealingTask task =
new TaskRegionObserver.SelfHealingTask(
TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
TENANT1, null, viewName,
PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
task.run();
// Check task status and other column values.
waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, viewName, PTable.TaskStatus.COMPLETED);
// See that index is rebuilt and confirm index has rows
count = getUtility().countRows(indexHTable);
assertEquals(numOfValues, count);
} finally {
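            // Cleanup now runs inside the null check, and the DELETE is scoped to this test's task row.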
if (conn != null) {
conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME
+ " WHERE TABLE_NAME ='" + viewName + "'");
conn.commit();
conn.close();
}
if (tenantConn != null) {
tenantConn.close();
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException,
InterruptedException, TimeoutException {
HBaseAdmin admin = new HBaseAdmin(config);
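        // Flagged RESOURCE_LEAK: this HBaseAdmin is never closed, so its connection leaks
        // (particularly when modifyTable throws); the fix wraps it in try-with-resources.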
if (!allowOnlineTableSchemaUpdate()) {
admin.disableTable(tableName);
admin.modifyTable(tableName, newDesc);
admin.enableTable(tableName);
} else {
admin.modifyTable(tableName, newDesc);
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
}
#location 8
#vulnerability type RESOURCE_LEAK | #fixed code
@Override
public void modifyTable(byte[] tableName, HTableDescriptor newDesc) throws IOException,
InterruptedException, TimeoutException {
try (HBaseAdmin admin = new HBaseAdmin(config)) {
if (!allowOnlineTableSchemaUpdate()) {
admin.disableTable(tableName);
admin.modifyTable(tableName, newDesc);
admin.enableTable(tableName);
} else {
admin.modifyTable(tableName, newDesc);
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
protected void reduce(TableRowkeyPair key, Iterable<ImmutableBytesWritable> values,
Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue>.Context context)
throws IOException, InterruptedException {
TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
int tableIndex = tableNames.indexOf(key.getTableName());
List<Pair<byte[], byte[]>> columns = columnIndexes.get(tableIndex);
for (ImmutableBytesWritable aggregatedArray : values) {
DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get()));
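            // Flagged RESOURCE_LEAK: closeQuietly() at the bottom of this loop body only runs on
            // the happy path; an exception thrown while reading leaks the stream.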
while (input.available() != 0) {
int index = WritableUtils.readVInt(input);
Pair<byte[], byte[]> pair = columns.get(index);
byte type = input.readByte();
ImmutableBytesWritable value = null;
int len = WritableUtils.readVInt(input);
if (len > 0) {
byte[] array = new byte[len];
input.read(array);
value = new ImmutableBytesWritable(array);
}
KeyValue kv;
KeyValue.Type kvType = KeyValue.Type.codeToType(type);
switch (kvType) {
case Put: // not null value
kv = builder.buildPut(key.getRowkey(),
new ImmutableBytesWritable(pair.getFirst()),
new ImmutableBytesWritable(pair.getSecond()), value);
break;
case DeleteColumn: // null value
kv = builder.buildDeleteColumns(key.getRowkey(),
new ImmutableBytesWritable(pair.getFirst()),
new ImmutableBytesWritable(pair.getSecond()));
break;
default:
throw new IOException("Unsupported KeyValue type " + kvType);
}
map.add(kv);
}
KeyValue empty = builder.buildPut(key.getRowkey(),
emptyFamilyName.get(tableIndex),
QueryConstants.EMPTY_COLUMN_BYTES_PTR, ByteUtil.EMPTY_BYTE_ARRAY_PTR);
map.add(empty);
Closeables.closeQuietly(input);
}
context.setStatus("Read " + map.getClass());
int index = 0;
for (KeyValue kv : map) {
context.write(key, kv);
if (++index % 100 == 0) context.setStatus("Wrote " + index);
}
}
#location 20
#vulnerability type RESOURCE_LEAK | #fixed code
@Override
protected void reduce(TableRowkeyPair key, Iterable<ImmutableBytesWritable> values,
Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue>.Context context)
throws IOException, InterruptedException {
TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
ImmutableBytesWritable rowKey = key.getRowkey();
for (ImmutableBytesWritable aggregatedArray : values) {
DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get()));
while (input.available() != 0) {
byte type = input.readByte();
int index = WritableUtils.readVInt(input);
ImmutableBytesWritable family;
ImmutableBytesWritable name;
ImmutableBytesWritable value = QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR;
if (index == -1) {
family = emptyFamilyName.get(key.getTableName());
name = QueryConstants.EMPTY_COLUMN_BYTES_PTR;
} else {
Pair<byte[], byte[]> pair = columnIndexes.get(index);
if(pair.getFirst() != null) {
family = new ImmutableBytesWritable(pair.getFirst());
} else {
family = emptyFamilyName.get(key.getTableName());
}
name = new ImmutableBytesWritable(pair.getSecond());
}
int len = WritableUtils.readVInt(input);
if (len > 0) {
byte[] array = new byte[len];
input.read(array);
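                    // NOTE: read() is not guaranteed to fill the whole array; readFully() would be
                    // the defensive choice here.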
value = new ImmutableBytesWritable(array);
}
KeyValue kv;
KeyValue.Type kvType = KeyValue.Type.codeToType(type);
switch (kvType) {
case Put: // not null value
kv = builder.buildPut(key.getRowkey(), family, name, value);
break;
case DeleteColumn: // null value
kv = builder.buildDeleteColumns(key.getRowkey(), family, name);
break;
default:
throw new IOException("Unsupported KeyValue type " + kvType);
}
map.add(kv);
}
Closeables.closeQuietly(input);
}
context.setStatus("Read " + map.getClass());
int index = 0;
for (KeyValue kv : map) {
context.write(key, kv);
if (++index % 100 == 0) context.setStatus("Wrote " + index);
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
//now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
Get tenantGet = getGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantGet = getGet(catalogTable, DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
WAL.Entry nonTenantEntry = getEntry(systemCatalogTableName, nonTenantGet);
WAL.Entry tenantEntry = getEntry(systemCatalogTableName, tenantGet);
//verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
Assert.assertNull(filter.filter(nonTenantEntry));
WAL.Entry filteredTenantEntry = filter.filter(tenantEntry);
Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
Assert.assertEquals(tenantEntry.getEdit().size(),
filter.filter(tenantEntry).getEdit().size());
//now check that a WAL.Entry with cells from both a tenant and a non-tenant
//catalog row only allow the tenant cells through
WALEdit comboEdit = new WALEdit();
comboEdit.getCells().addAll(nonTenantEntry.getEdit().getCells());
comboEdit.getCells().addAll(tenantEntry.getEdit().getCells());
WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
Assert.assertEquals(tenantEntry.getEdit().size() + nonTenantEntry.getEdit().size()
, comboEntry.getEdit().size());
Assert.assertEquals(tenantEntry.getEdit().size(),
filter.filter(comboEntry).getEdit().size());
}
#location 30
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testSystemCatalogWALEntryFilter() throws Exception {
//now create WAL.Entry objects that refer to cells in those view rows in System.Catalog
Get tenantViewGet = getTenantViewGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantViewGet = getTenantViewGet(catalogTable,
DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
Get tenantLinkGet = getParentChildLinkGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
Get nonTenantLinkGet = getParentChildLinkGet(catalogTable,
DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
WAL.Entry nonTenantViewEntry = getEntry(systemCatalogTableName, nonTenantViewGet);
WAL.Entry tenantViewEntry = getEntry(systemCatalogTableName, tenantViewGet);
WAL.Entry nonTenantLinkEntry = getEntry(systemCatalogTableName, nonTenantLinkGet);
WAL.Entry tenantLinkEntry = getEntry(systemCatalogTableName, tenantLinkGet);
//verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
Assert.assertNull(filter.filter(nonTenantViewEntry));
WAL.Entry filteredTenantEntry = filter.filter(tenantViewEntry);
Assert.assertNotNull("Tenant view was filtered when it shouldn't be!", filteredTenantEntry);
Assert.assertEquals(tenantViewEntry.getEdit().size(),
filter.filter(tenantViewEntry).getEdit().size());
//now check that a WAL.Entry with cells from both a tenant and a non-tenant
//catalog row only allow the tenant cells through
WALEdit comboEdit = new WALEdit();
comboEdit.getCells().addAll(nonTenantViewEntry.getEdit().getCells());
comboEdit.getCells().addAll(tenantViewEntry.getEdit().getCells());
WAL.Entry comboEntry = new WAL.Entry(walKey, comboEdit);
Assert.assertEquals(tenantViewEntry.getEdit().size() + nonTenantViewEntry.getEdit().size()
, comboEntry.getEdit().size());
Assert.assertEquals(tenantViewEntry.getEdit().size(),
filter.filter(comboEntry).getEdit().size());
//now check that the parent-child links (which have the tenant_id of the view's parent,
// but are a part of the view's metadata) are migrated in the tenant case
// but not the non-tenant. The view's tenant_id is in th System.Catalog.COLUMN_NAME field
Assert.assertNull("Non-tenant parent-child link was not filtered " +
"when it should be!", filter.filter(nonTenantLinkEntry));
Assert.assertNotNull("Tenant parent-child link was filtered when it should not be!",
filter.filter(tenantLinkEntry));
Assert.assertEquals(tenantLinkEntry.getEdit().size(),
filter.filter(tenantLinkEntry).getEdit().size());
//add the parent-child link to the tenant view WAL entry,
//since they'll usually be together and they both need to
//be replicated
tenantViewEntry.getEdit().getCells().addAll(tenantLinkEntry.getEdit().getCells());
Assert.assertEquals(tenantViewEntry.getEdit().size(), tenantViewEntry.getEdit().size());
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
private static Object[] coerceToEqualLength(PDataType baseType, Object[] elements) {
if (elements == null || elements.length == 0) {
return elements;
}
Object element = elements[0];
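        // Flagged NULL_DEREFERENCE: getMaxLength() returns an Integer that may be null; auto-unboxing
        // it into the int on the next line then throws. The fix keeps the boxed Integer and null-checks it.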
int maxLength = baseType.getMaxLength(element);
boolean resizeElements = false;
for (int i = 1; i < elements.length; i++) {
int length = baseType.getMaxLength(elements[i]);
if (length > maxLength) {
maxLength = length;
resizeElements = true;
} else if (length < maxLength) {
resizeElements = true;
}
}
if (!resizeElements) {
return elements;
}
return coerceToNewLength(baseType, elements, maxLength);
}
#location 6
#vulnerability type NULL_DEREFERENCE | #fixed code
private static Object[] coerceToEqualLength(PDataType baseType, Object[] elements) {
if (elements == null || elements.length == 0) {
return elements;
}
int maxLength = 0;
boolean resizeElements = false;
for (int i = 0; i < elements.length; i++) {
Integer length = baseType.getMaxLength(elements[i]);
if (length != null) {
if (maxLength == 0){
maxLength = length;
continue;
}
if (length > maxLength) {
maxLength = length;
resizeElements = true;
} else if (length < maxLength) {
resizeElements = true;
}
} else {
resizeElements = true;
}
}
if (!resizeElements) {
return elements;
}
return coerceToNewLength(baseType, elements, maxLength);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates, int nCFs) {
int nDeleteCF = 0;
int nDeleteVersionCF = 0;
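        // Flagged NULL_DEREFERENCE in the loop below: the getTransactionFactory().getTransactionContext()
        // chain may yield a null context before getFamilyDeleteMarker() is called; the fix resolves
        // the context through getTransactionProvider() instead.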
for (Cell kv : pendingUpdates) {
if (kv.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode()) {
nDeleteVersionCF++;
}
else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
            // Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have been transformed by TransactionProcessor
|| (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionFactory().getTransactionContext().getFamilyDeleteMarker()) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
nDeleteCF++;
}
}
// This is what a delete looks like on the server side for mutable indexing...
// Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not
DeleteType deleteType = null;
if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) {
deleteType = DeleteType.SINGLE_VERSION;
} else {
int nDelete = nDeleteCF + nDeleteVersionCF;
if (nDelete>0 && nDelete >= nCFs) {
deleteType = DeleteType.ALL_VERSIONS;
}
}
return deleteType;
}
#location 10
#vulnerability type NULL_DEREFERENCE | #fixed code
private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates, int nCFs) {
int nDeleteCF = 0;
int nDeleteVersionCF = 0;
for (Cell kv : pendingUpdates) {
if (kv.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode()) {
nDeleteVersionCF++;
}
else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
            // Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have been transformed by TransactionProcessor
|| (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionProvider().getTransactionContext().getFamilyDeleteMarker()) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
nDeleteCF++;
}
}
// This is what a delete looks like on the server side for mutable indexing...
// Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not
DeleteType deleteType = null;
if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) {
deleteType = DeleteType.SINGLE_VERSION;
} else {
int nDelete = nDeleteCF + nDeleteVersionCF;
if (nDelete>0 && nDelete >= nCFs) {
deleteType = DeleteType.ALL_VERSIONS;
}
}
return deleteType;
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testIndexQos() throws Exception {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(getUrl(), props);
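        // Flagged NULL_DEREFERENCE: Driver.connect() may return null for a URL it does not accept,
        // so the conn.createStatement() calls below can NPE.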
try {
// create the table
conn.createStatement().execute(
"CREATE TABLE " + dataTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
// create the index
conn.createStatement().execute(
"CREATE INDEX " + indexName + " ON " + dataTableFullName + " (v1) INCLUDE (v2)");
ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableFullName + " VALUES(?,?,?)");
stmt.setString(1, "k1");
stmt.setString(2, "v1");
stmt.setString(3, "v2");
stmt.execute();
conn.commit();
// run select query that should use the index
String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the query does a range scan on the index table
ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableFullName + " ['v1']", QueryUtil.getExplainPlan(rs));
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
// drop index table
conn.createStatement().execute(
"DROP INDEX " + indexName + " ON " + dataTableFullName );
// create a data table with the same name as the index table
conn.createStatement().execute(
"CREATE TABLE " + indexTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
// upsert one row to the table (which has the same table name as the previous index table)
stmt = conn.prepareStatement("UPSERT INTO " + indexTableFullName + " VALUES(?,?,?)");
stmt.setString(1, "k1");
stmt.setString(2, "v1");
stmt.setString(3, "v2");
stmt.execute();
conn.commit();
// run select query on the new table
selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
            // verify that the index queue is used only once (for the first upsert)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
TestPhoenixIndexRpcSchedulerFactory.reset();
conn.createStatement().execute(
"CREATE INDEX " + indexName + "_1 ON " + dataTableFullName + " (v1) INCLUDE (v2)");
            // verify that the index queue is used exactly once (during the Upsert Select run on the server to build the index)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
}
finally {
conn.close();
}
}
#location 7
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testIndexQos() throws Exception {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(getUrl(), props);
try {
// create the table
createTable(conn, dataTableFullName);
// create the index
createIndex(conn, indexName);
ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
upsertRow(conn, dataTableFullName);
// run select query that should use the index
String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
PreparedStatement stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the query does a range scan on the index table
ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexTableFullName + " ['v1']", QueryUtil.getExplainPlan(rs));
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
// drop index table
conn.createStatement().execute(
"DROP INDEX " + indexName + " ON " + dataTableFullName );
// create a data table with the same name as the index table
createTable(conn, indexTableFullName);
// upsert one row to the table (which has the same table name as the previous index table)
upsertRow(conn, indexTableFullName);
// run select query on the new table
selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
stmt = conn.prepareStatement(selectSql);
stmt.setString(1, "v1");
// verify that the correct results are returned
rs = stmt.executeQuery();
assertTrue(rs.next());
assertEquals("k1", rs.getString(1));
assertEquals("v2", rs.getString(2));
assertFalse(rs.next());
// verify that that index queue is used only once (for the first upsert)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
TestPhoenixIndexRpcSchedulerFactory.reset();
createIndex(conn, indexName + "_1");
            // verify that the index queue is used exactly once (during the Upsert Select run on the server to build the index)
Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
}
finally {
conn.close();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
RegionCoprocessorEnvironment env = c.getEnvironment();
Region region = env.getRegion();
long ts = scan.getTimeRange().getMax();
boolean localIndexScan = ScanUtil.isLocalIndex(scan);
if (ScanUtil.isAnalyzeTable(scan)) {
byte[] gp_width_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
byte[] gp_per_region_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
// Let this throw, as this scan is being done for the sole purpose of collecting stats
StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
env, region.getRegionInfo().getTable().getNameAsString(), ts,
gp_width_bytes, gp_per_region_bytes);
return collectStats(s, statsCollector, region, scan, env.getConfiguration());
}
int offsetToBe = 0;
if (localIndexScan) {
/*
* For local indexes, we need to set an offset on row key expressions to skip
* the region start key.
*/
offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
region.getRegionInfo().getEndKey().length;
ScanUtil.setRowKeyOffset(scan, offsetToBe);
}
final int offset = offsetToBe;
PTable projectedTable = null;
PTable writeToTable = null;
byte[][] values = null;
byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
if (isDescRowKeyOrderUpgrade) {
logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
projectedTable = deserializeTable(descRowKeyTableBytes);
try {
writeToTable = PTableImpl.makePTable(projectedTable, true);
} catch (SQLException e) {
ServerUtil.throwIOException("Upgrade failed", e); // Impossible
}
values = new byte[projectedTable.getPKColumns().size()][];
}
byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
RegionScanner theScanner = s;
byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
List<Expression> selectExpressions = null;
byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
boolean isUpsert = false;
boolean isDelete = false;
byte[] deleteCQ = null;
byte[] deleteCF = null;
byte[] emptyCF = null;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
if (upsertSelectTable != null) {
isUpsert = true;
projectedTable = deserializeTable(upsertSelectTable);
selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
values = new byte[projectedTable.getPKColumns().size()][];
} else {
byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
if (!isDelete) {
deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
}
emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
}
TupleProjector tupleProjector = null;
byte[][] viewConstants = null;
ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
}
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
theScanner =
getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
}
if (j != null) {
theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
}
int batchSize = 0;
List<Mutation> mutations = Collections.emptyList();
boolean needToWrite = false;
Configuration conf = c.getEnvironment().getConfiguration();
long flushSize = region.getTableDesc().getMemStoreFlushSize();
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
/**
         * Upper bound of memstore size allowed for the region. Updates will be blocked until the
         * flush happens if the memstore reaches this threshold.
*/
final long blockingMemStoreSize = flushSize * (
conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
needToWrite = true;
// TODO: size better
mutations = Lists.newArrayListWithExpectedSize(1024);
batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
}
Aggregators aggregators = ServerAggregators.deserialize(
scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
Aggregator[] rowAggregators = aggregators.getAggregators();
boolean hasMore;
boolean hasAny = false;
MultiKeyValueTuple result = new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
long rowCount = 0;
final RegionScanner innerScanner = theScanner;
boolean acquiredLock = false;
try {
if(needToWrite) {
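                // Flagged THREAD_SAFETY_VIOLATION: scansReferenceCount is updated under `lock` here
                // and in the finally block, but the flag suggests the counter is not consistently
                // synchronized everywhere it is accessed.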
synchronized (lock) {
scansReferenceCount++;
}
}
region.startRegionOperation();
acquiredLock = true;
synchronized (innerScanner) {
do {
List<Cell> results = new ArrayList<Cell>();
// Results are potentially returned even when the return value of s.next is false
// since this is an indication of whether or not there are more values after the
// ones returned
hasMore = innerScanner.nextRaw(results);
if (!results.isEmpty()) {
rowCount++;
result.setKeyValues(results);
try {
if (isDescRowKeyOrderUpgrade) {
Arrays.fill(values, null);
Cell firstKV = results.get(0);
RowKeySchema schema = projectedTable.getRowKeySchema();
int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
for (int i = 0; i < schema.getFieldCount(); i++) {
Boolean hasValue = schema.next(ptr, i, maxOffset);
if (hasValue == null) {
break;
}
Field field = schema.getField(i);
if (field.getSortOrder() == SortOrder.DESC) {
// Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
if (field.getDataType().isArrayType()) {
field.getDataType().coerceBytes(ptr, null, field.getDataType(),
field.getMaxLength(), field.getScale(), field.getSortOrder(),
field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
}
// Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
// Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
} else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
ptr.set(invertedBytes);
}
} else if (field.getDataType() == PBinary.INSTANCE) {
// Remove trailing space characters so that the setValues call below will replace them
// with the correct zero byte character. Note this is somewhat dangerous as these
// could be legit, but I don't know what the alternative is.
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
}
values[i] = ptr.copyBytes();
}
writeToTable.newKey(ptr, values);
if (Bytes.compareTo(
firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
continue;
}
byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
if (offset > 0) { // for local indexes (prepend region start key)
byte[] newRowWithOffset = new byte[offset + newRow.length];
                            System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
newRow = newRowWithOffset;
}
byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
for (Cell cell : results) {
// Copy existing cell but with new row key
Cell newCell = new KeyValue(newRow, 0, newRow.length,
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
case Put:
// If Put, point delete old Put
Delete del = new Delete(oldRow);
del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
mutations.add(del);
Put put = new Put(newRow);
put.add(newCell);
mutations.add(put);
break;
case Delete:
case DeleteColumn:
case DeleteFamily:
case DeleteFamilyVersion:
Delete delete = new Delete(newRow);
delete.addDeleteMarker(newCell);
mutations.add(delete);
break;
}
}
} else if (buildLocalIndex) {
for (IndexMaintainer maintainer : indexMaintainers) {
if (!results.isEmpty()) {
result.getKey(ptr);
ValueGetter valueGetter =
maintainer.createGetterFromKeyValues(
ImmutableBytesPtr.copyBytesIfNecessary(ptr),
results);
Put put = maintainer.buildUpdateMutation(kvBuilder,
valueGetter, ptr, results.get(0).getTimestamp(),
env.getRegion().getRegionInfo().getStartKey(),
env.getRegion().getRegionInfo().getEndKey());
indexMutations.add(put);
}
}
result.setKeyValues(results);
} else if (isDelete) {
// FIXME: the version of the Delete constructor without the lock
// args was introduced in 0.94.4, thus if we try to use it here
// we can no longer use the 0.94.2 version of the client.
Cell firstKV = results.get(0);
Delete delete = new Delete(firstKV.getRowArray(),
                        firstKV.getRowOffset(), firstKV.getRowLength(), ts);
mutations.add(delete);
                    // force tephra to ignore this delete
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
} else if (isUpsert) {
Arrays.fill(values, null);
int i = 0;
List<PColumn> projectedColumns = projectedTable.getColumns();
for (; i < projectedTable.getPKColumns().size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
values[i] = ptr.copyBytes();
// If SortOrder from expression in SELECT doesn't match the
// column being projected into then invert the bits.
if (expression.getSortOrder() !=
projectedColumns.get(i).getSortOrder()) {
SortOrder.invert(values[i], 0, values[i], 0,
values[i].length);
}
}
}
projectedTable.newKey(ptr, values);
PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
for (; i < projectedColumns.size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
PColumn column = projectedColumns.get(i);
Object value = expression.getDataType()
.toObject(ptr, column.getSortOrder());
// We are guaranteed that the two column will have the
// same type.
if (!column.getDataType().isSizeCompatible(ptr, value,
column.getDataType(), expression.getMaxLength(),
expression.getScale(), column.getMaxLength(),
column.getScale())) {
throw new DataExceedsCapacityException(
column.getDataType(), column.getMaxLength(),
column.getScale());
}
column.getDataType().coerceBytes(ptr, value,
expression.getDataType(), expression.getMaxLength(),
expression.getScale(), expression.getSortOrder(),
column.getMaxLength(), column.getScale(),
column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
row.setValue(column, bytes);
}
}
for (Mutation mutation : row.toRowMutations()) {
mutations.add(mutation);
}
for (i = 0; i < selectExpressions.size(); i++) {
selectExpressions.get(i).reset();
}
} else if (deleteCF != null && deleteCQ != null) {
// No need to search for delete column, since we project only it
// if no empty key value is being set
if (emptyCF == null ||
result.getValue(deleteCF, deleteCQ) != null) {
Delete delete = new Delete(results.get(0).getRowArray(),
results.get(0).getRowOffset(),
results.get(0).getRowLength());
delete.deleteColumns(deleteCF, deleteCQ, ts);
                            // force tephra to ignore this delete
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
mutations.add(delete);
}
}
if (emptyCF != null) {
/*
* If we've specified an emptyCF, then we need to insert an empty
* key value "retroactively" for any key value that is visible at
* the timestamp that the DDL was issued. Key values that are not
* visible at this timestamp will not ever be projected up to
* scans past this timestamp, so don't need to be considered.
* We insert one empty key value per row per timestamp.
*/
Set<Long> timeStamps =
Sets.newHashSetWithExpectedSize(results.size());
for (Cell kv : results) {
long kvts = kv.getTimestamp();
if (!timeStamps.contains(kvts)) {
Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength());
put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
}
}
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!mutations.isEmpty() && batchSize > 0 &&
mutations.size() % batchSize == 0) {
commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
mutations.clear();
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!indexMutations.isEmpty() && batchSize > 0 &&
indexMutations.size() % batchSize == 0) {
commitBatch(region, indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
} catch (ConstraintViolationException e) {
// Log and ignore in count
logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
region.getRegionInfo().getRegionNameAsString() + " with values " +
SchemaUtil.toString(values),
ScanUtil.getCustomAnnotations(scan)), e);
continue;
}
aggregators.aggregate(rowAggregators, result);
hasAny = true;
}
} while (hasMore);
if (!mutations.isEmpty()) {
commitBatch(region,mutations, indexUUID, blockingMemStoreSize);
}
if (!indexMutations.isEmpty()) {
commitBatch(region,indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
}
} finally {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount--;
}
}
try {
innerScanner.close();
} finally {
if (acquiredLock) region.closeRegionOperation();
}
}
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
}
final boolean hadAny = hasAny;
KeyValue keyValue = null;
if (hadAny) {
byte[] value = aggregators.toBytes(rowAggregators);
keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
}
final KeyValue aggKeyValue = keyValue;
RegionScanner scanner = new BaseRegionScanner(innerScanner) {
private boolean done = !hadAny;
@Override
public boolean isFilterDone() {
return done;
}
@Override
public boolean next(List<Cell> results) throws IOException {
if (done) return false;
done = true;
results.add(aggKeyValue);
return false;
}
@Override
public long getMaxResultSize() {
return scan.getMaxResultSize();
}
};
return scanner;
}
#location 404
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
RegionCoprocessorEnvironment env = c.getEnvironment();
Region region = env.getRegion();
long ts = scan.getTimeRange().getMax();
boolean localIndexScan = ScanUtil.isLocalIndex(scan);
if (ScanUtil.isAnalyzeTable(scan)) {
byte[] gp_width_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
byte[] gp_per_region_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
// Let this throw, as this scan is being done for the sole purpose of collecting stats
StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
env, region.getRegionInfo().getTable().getNameAsString(), ts,
gp_width_bytes, gp_per_region_bytes);
return collectStats(s, statsCollector, region, scan, env.getConfiguration());
}
int offsetToBe = 0;
if (localIndexScan) {
/*
* For local indexes, we need to set an offset on row key expressions to skip
* the region start key.
*/
offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
region.getRegionInfo().getEndKey().length;
ScanUtil.setRowKeyOffset(scan, offsetToBe);
}
final int offset = offsetToBe;
PTable projectedTable = null;
PTable writeToTable = null;
byte[][] values = null;
byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
if (isDescRowKeyOrderUpgrade) {
logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
projectedTable = deserializeTable(descRowKeyTableBytes);
try {
writeToTable = PTableImpl.makePTable(projectedTable, true);
} catch (SQLException e) {
ServerUtil.throwIOException("Upgrade failed", e); // Impossible
}
values = new byte[projectedTable.getPKColumns().size()][];
}
byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
RegionScanner theScanner = s;
byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
List<Expression> selectExpressions = null;
byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
boolean isUpsert = false;
boolean isDelete = false;
byte[] deleteCQ = null;
byte[] deleteCF = null;
byte[] emptyCF = null;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
if (upsertSelectTable != null) {
isUpsert = true;
projectedTable = deserializeTable(upsertSelectTable);
selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
values = new byte[projectedTable.getPKColumns().size()][];
} else {
byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
if (!isDelete) {
deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
}
emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
}
TupleProjector tupleProjector = null;
byte[][] viewConstants = null;
ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
}
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
theScanner =
getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
}
if (j != null) {
theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
}
int batchSize = 0;
List<Mutation> mutations = Collections.emptyList();
boolean needToWrite = false;
Configuration conf = c.getEnvironment().getConfiguration();
long flushSize = region.getTableDesc().getMemStoreFlushSize();
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
/**
         * Slow down the writes if the memstore size is more than
         * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
         * bytes. This avoids a flush storm to HDFS for cases like index building, where reads and
         * writes happen to all the table regions in the server.
*/
final long blockingMemStoreSize = flushSize * (
conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
needToWrite = true;
// TODO: size better
mutations = Lists.newArrayListWithExpectedSize(1024);
batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
}
Aggregators aggregators = ServerAggregators.deserialize(
scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
Aggregator[] rowAggregators = aggregators.getAggregators();
boolean hasMore;
boolean hasAny = false;
MultiKeyValueTuple result = new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
long rowCount = 0;
final RegionScanner innerScanner = theScanner;
boolean acquiredLock = false;
try {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount++;
}
}
region.startRegionOperation();
acquiredLock = true;
synchronized (innerScanner) {
do {
List<Cell> results = new ArrayList<Cell>();
                    // Results are potentially returned even when nextRaw returns false, since
                    // the return value only indicates whether there are more rows after the
                    // ones returned.
hasMore = innerScanner.nextRaw(results);
if (!results.isEmpty()) {
rowCount++;
result.setKeyValues(results);
try {
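                            // Row-key order upgrade: rebuild the row key with corrected
                            // DESC encodings; if it changed, delete the cells under the
                            // old key and re-write them under the new one.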
if (isDescRowKeyOrderUpgrade) {
Arrays.fill(values, null);
Cell firstKV = results.get(0);
RowKeySchema schema = projectedTable.getRowKeySchema();
int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
for (int i = 0; i < schema.getFieldCount(); i++) {
Boolean hasValue = schema.next(ptr, i, maxOffset);
if (hasValue == null) {
break;
}
Field field = schema.getField(i);
if (field.getSortOrder() == SortOrder.DESC) {
// Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
if (field.getDataType().isArrayType()) {
field.getDataType().coerceBytes(ptr, null, field.getDataType(),
field.getMaxLength(), field.getScale(), field.getSortOrder(),
field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
}
// Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
// Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
} else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
ptr.set(invertedBytes);
}
} else if (field.getDataType() == PBinary.INSTANCE) {
// Remove trailing space characters so that the setValues call below will replace them
// with the correct zero byte character. Note this is somewhat dangerous as these
// could be legit, but I don't know what the alternative is.
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
}
values[i] = ptr.copyBytes();
}
writeToTable.newKey(ptr, values);
if (Bytes.compareTo(
firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
continue;
}
byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
if (offset > 0) { // for local indexes (prepend region start key)
byte[] newRowWithOffset = new byte[offset + newRow.length];
                                System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
newRow = newRowWithOffset;
}
byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
for (Cell cell : results) {
// Copy existing cell but with new row key
Cell newCell = new KeyValue(newRow, 0, newRow.length,
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
case Put:
// If Put, point delete old Put
Delete del = new Delete(oldRow);
del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
mutations.add(del);
Put put = new Put(newRow);
put.add(newCell);
mutations.add(put);
break;
case Delete:
case DeleteColumn:
case DeleteFamily:
case DeleteFamilyVersion:
Delete delete = new Delete(newRow);
delete.addDeleteMarker(newCell);
mutations.add(delete);
break;
}
}
} else if (buildLocalIndex) {
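                                // Local index build: derive an index row from the scanned
                                // data row via each index maintainer and queue it as a Put.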
for (IndexMaintainer maintainer : indexMaintainers) {
if (!results.isEmpty()) {
result.getKey(ptr);
ValueGetter valueGetter =
maintainer.createGetterFromKeyValues(
ImmutableBytesPtr.copyBytesIfNecessary(ptr),
results);
Put put = maintainer.buildUpdateMutation(kvBuilder,
valueGetter, ptr, results.get(0).getTimestamp(),
env.getRegion().getRegionInfo().getStartKey(),
env.getRegion().getRegionInfo().getEndKey());
indexMutations.add(put);
}
}
result.setKeyValues(results);
} else if (isDelete) {
// FIXME: the version of the Delete constructor without the lock
// args was introduced in 0.94.4, thus if we try to use it here
// we can no longer use the 0.94.2 version of the client.
Cell firstKV = results.get(0);
Delete delete = new Delete(firstKV.getRowArray(),
firstKV.getRowOffset(), firstKV.getRowLength(),ts);
mutations.add(delete);
                                // force Tephra to ignore these deletes
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
} else if (isUpsert) {
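                                // Server-side UPSERT SELECT: evaluate the PK expressions to
                                // form the target row key, then set the remaining projected
                                // columns on the new row.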
Arrays.fill(values, null);
int i = 0;
List<PColumn> projectedColumns = projectedTable.getColumns();
for (; i < projectedTable.getPKColumns().size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
values[i] = ptr.copyBytes();
// If SortOrder from expression in SELECT doesn't match the
// column being projected into then invert the bits.
if (expression.getSortOrder() !=
projectedColumns.get(i).getSortOrder()) {
SortOrder.invert(values[i], 0, values[i], 0,
values[i].length);
}
}
}
projectedTable.newKey(ptr, values);
PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
for (; i < projectedColumns.size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
PColumn column = projectedColumns.get(i);
Object value = expression.getDataType()
.toObject(ptr, column.getSortOrder());
                                // We are guaranteed that the two columns will have the
                                // same type.
if (!column.getDataType().isSizeCompatible(ptr, value,
column.getDataType(), expression.getMaxLength(),
expression.getScale(), column.getMaxLength(),
column.getScale())) {
throw new DataExceedsCapacityException(
column.getDataType(), column.getMaxLength(),
column.getScale());
}
column.getDataType().coerceBytes(ptr, value,
expression.getDataType(), expression.getMaxLength(),
expression.getScale(), expression.getSortOrder(),
column.getMaxLength(), column.getScale(),
column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
row.setValue(column, bytes);
}
}
for (Mutation mutation : row.toRowMutations()) {
mutations.add(mutation);
}
for (i = 0; i < selectExpressions.size(); i++) {
selectExpressions.get(i).reset();
}
} else if (deleteCF != null && deleteCQ != null) {
                                // No need to search for the delete column, since we project only
                                // that column when no empty key value is being set
if (emptyCF == null ||
result.getValue(deleteCF, deleteCQ) != null) {
Delete delete = new Delete(results.get(0).getRowArray(),
results.get(0).getRowOffset(),
results.get(0).getRowLength());
delete.deleteColumns(deleteCF, deleteCQ, ts);
                                    // force Tephra to ignore these deletes
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
mutations.add(delete);
}
}
if (emptyCF != null) {
                            /*
                             * If we've specified an emptyCF, then we need to insert an empty
                             * key value "retroactively" for any key value that is visible at
                             * the timestamp at which the DDL was issued. Key values that are
                             * not visible at this timestamp will never be projected up to
                             * scans past this timestamp, so they don't need to be considered.
                             * We insert one empty key value per row per timestamp.
                             */
Set<Long> timeStamps =
Sets.newHashSetWithExpectedSize(results.size());
for (Cell kv : results) {
long kvts = kv.getTimestamp();
if (!timeStamps.contains(kvts)) {
Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength());
put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
}
}
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!mutations.isEmpty() && batchSize > 0 &&
mutations.size() % batchSize == 0) {
commitBatch(region, mutations, indexUUID, blockingMemStoreSize);
mutations.clear();
}
// Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
if (!indexMutations.isEmpty() && batchSize > 0 &&
indexMutations.size() % batchSize == 0) {
commitBatch(region, indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
} catch (ConstraintViolationException e) {
// Log and ignore in count
logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
region.getRegionInfo().getRegionNameAsString() + " with values " +
SchemaUtil.toString(values),
ScanUtil.getCustomAnnotations(scan)), e);
continue;
}
aggregators.aggregate(rowAggregators, result);
hasAny = true;
}
} while (hasMore);
if (!mutations.isEmpty()) {
commitBatch(region,mutations, indexUUID, blockingMemStoreSize);
}
if (!indexMutations.isEmpty()) {
commitBatch(region,indexMutations, null, blockingMemStoreSize);
indexMutations.clear();
}
}
} finally {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount--;
}
}
try {
innerScanner.close();
} finally {
if (acquiredLock) region.closeRegionOperation();
}
}
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
}
final boolean hadAny = hasAny;
KeyValue keyValue = null;
if (hadAny) {
byte[] value = aggregators.toBytes(rowAggregators);
keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
}
final KeyValue aggKeyValue = keyValue;
RegionScanner scanner = new BaseRegionScanner(innerScanner) {
private boolean done = !hadAny;
@Override
public boolean isFilterDone() {
return done;
}
@Override
public boolean next(List<Cell> results) throws IOException {
if (done) return false;
done = true;
results.add(aggKeyValue);
return false;
}
@Override
public long getMaxResultSize() {
return scan.getMaxResultSize();
}
};
return scanner;
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testUpsertDeleteWithOldClient() throws Exception {
checkForPreConditions();
// Insert data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
executeQueriesWithCurrentVersion(QUERY);
assertTrue(compareOutput(CREATE_ADD, QUERY));
// Deletes with the old client
executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE);
executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE);
assertTrue(compareOutput(ADD_DELETE, QUERY_ADD_DELETE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Test
public void testUpsertDeleteWithOldClient() throws Exception {
// Insert data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
executeQueriesWithCurrentVersion(QUERY);
assertExpectedOutput(CREATE_ADD, QUERY);
// Deletes with the old client
executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE);
executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE);
assertExpectedOutput(ADD_DELETE, QUERY_ADD_DELETE);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public boolean isFilterDone() { return hasMore; }
#location 2
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Override
public boolean isFilterDone() { return false; } | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@SuppressWarnings("unchecked")
protected QueryPlan compileJoinQuery(StatementContext context, List<Object> binds, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List<OrderByNode> orderBy) throws SQLException {
byte[] emptyByteArray = new byte[0];
List<JoinSpec> joinSpecs = joinTable.getJoinSpecs();
if (joinSpecs.isEmpty()) {
Table table = joinTable.getTable();
SelectStatement subquery = table.getAsSubquery(orderBy);
if (!table.isSubselect()) {
context.setCurrentTable(table.getTableRef());
PTable projectedTable = table.createProjectedTable(!projectPKColumns, context);
TupleProjector.serializeProjectorIntoScan(context.getScan(), new TupleProjector(projectedTable));
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
table.projectColumns(context.getScan());
return compileSingleQuery(context, subquery, binds, asSubquery, !asSubquery);
}
QueryPlan plan = compileSubquery(subquery, false);
PTable projectedTable = table.createProjectedTable(plan.getProjector());
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()), table.compilePostFilterExpression(context));
}
boolean[] starJoinVector;
if (!this.useSortMergeJoin && (starJoinVector = joinTable.getStarJoinVector()) != null) {
Table table = joinTable.getTable();
PTable initialProjectedTable;
TableRef tableRef;
SelectStatement query;
TupleProjector tupleProjector;
if (!table.isSubselect()) {
context.setCurrentTable(table.getTableRef());
initialProjectedTable = table.createProjectedTable(!projectPKColumns, context);
tableRef = table.getTableRef();
table.projectColumns(context.getScan());
query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery);
tupleProjector = new TupleProjector(initialProjectedTable);
} else {
SelectStatement subquery = table.getAsSubquery(orderBy);
QueryPlan plan = compileSubquery(subquery, false);
initialProjectedTable = table.createProjectedTable(plan.getProjector());
tableRef = plan.getTableRef();
context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
tupleProjector = new TupleProjector(plan.getProjector());
}
context.setCurrentTable(tableRef);
PTable projectedTable = initialProjectedTable;
int count = joinSpecs.size();
ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count];
List<Expression>[] joinExpressions = new List[count];
JoinType[] joinTypes = new JoinType[count];
PTable[] tables = new PTable[count];
int[] fieldPositions = new int[count];
HashSubPlan[] subPlans = new HashSubPlan[count];
fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size();
for (int i = 0; i < count; i++) {
JoinSpec joinSpec = joinSpecs.get(i);
Scan subScan = ScanUtil.newScan(originalScan);
StatementContext subContext = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
QueryPlan joinPlan = compileJoinQuery(subContext, binds, joinSpec.getJoinTable(), true, true, null);
boolean hasPostReference = joinSpec.getJoinTable().hasPostReference();
if (hasPostReference) {
tables[i] = subContext.getResolver().getTables().get(0).getTable();
projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType());
} else {
tables[i] = null;
}
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), query.getUdfParseNodes()));
joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder
Pair<List<Expression>, List<Expression>> joinConditions = joinSpec.compileJoinConditions(context, subContext, true);
joinExpressions[i] = joinConditions.getFirst();
List<Expression> hashExpressions = joinConditions.getSecond();
Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
boolean optimized = getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), tableRef, joinSpec.getType(), joinExpressions[i], hashExpressions);
Expression keyRangeLhsExpression = keyRangeExpressions.getFirst();
Expression keyRangeRhsExpression = keyRangeExpressions.getSecond();
joinTypes[i] = joinSpec.getType();
if (i < count - 1) {
fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
}
subPlans[i] = new HashSubPlan(i, joinPlan, optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression);
}
TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
QueryPlan plan = compileSingleQuery(context, query, binds, asSubquery, !asSubquery && joinTable.isAllLeftJoin());
Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, table);
Integer limit = null;
if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
limit = plan.getLimit();
}
HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit);
return HashJoinPlan.create(joinTable.getStatement(), plan, joinInfo, subPlans);
}
JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
JoinType type = lastJoinSpec.getType();
if (!this.useSortMergeJoin
&& (type == JoinType.Right || type == JoinType.Inner)
&& lastJoinSpec.getJoinTable().getJoinSpecs().isEmpty()
&& lastJoinSpec.getJoinTable().getTable().isFlat()) {
JoinTable rhsJoinTable = lastJoinSpec.getJoinTable();
Table rhsTable = rhsJoinTable.getTable();
JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
Scan subScan = ScanUtil.newScan(originalScan);
StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, true, null);
PTable rhsProjTable;
TableRef rhsTableRef;
SelectStatement rhs;
TupleProjector tupleProjector;
if (!rhsTable.isSubselect()) {
context.setCurrentTable(rhsTable.getTableRef());
rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context);
rhsTableRef = rhsTable.getTableRef();
rhsTable.projectColumns(context.getScan());
rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery);
tupleProjector = new TupleProjector(rhsProjTable);
} else {
SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
QueryPlan plan = compileSubquery(subquery, false);
rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
rhsTableRef = plan.getTableRef();
context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
tupleProjector = new TupleProjector(plan.getProjector());
}
context.setCurrentTable(rhsTableRef);
context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable, context.getConnection(), rhs.getUdfParseNodes()));
ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[] {new ImmutableBytesPtr(emptyByteArray)};
Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(lhsCtx, context, true);
List<Expression> joinExpressions = joinConditions.getSecond();
List<Expression> hashExpressions = joinConditions.getFirst();
boolean needsMerge = lhsJoin.hasPostReference();
PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null;
int fieldPosition = needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0;
PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, type == JoinType.Right ? JoinType.Left : type) : rhsProjTable;
TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), rhs.getUdfParseNodes()));
QueryPlan rhsPlan = compileSingleQuery(context, rhs, binds, asSubquery, !asSubquery && type == JoinType.Right);
Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, rhsTable);
Integer limit = null;
if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
limit = rhsPlan.getLimit();
}
HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, new List[] {joinExpressions}, new JoinType[] {type == JoinType.Right ? JoinType.Left : type}, new boolean[] {true}, new PTable[] {lhsTable}, new int[] {fieldPosition}, postJoinFilterExpression, limit);
Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), rhsTableRef, type, joinExpressions, hashExpressions);
return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo, new HashSubPlan[] {new HashSubPlan(0, lhsPlan, hashExpressions, false, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond())});
}
JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
JoinTable rhsJoin = lastJoinSpec.getJoinTable();
if (type == JoinType.Right) {
JoinTable temp = lhsJoin;
lhsJoin = rhsJoin;
rhsJoin = temp;
}
List<EqualParseNode> joinConditionNodes = lastJoinSpec.getOnConditions();
List<OrderByNode> lhsOrderBy = Lists.<OrderByNode> newArrayListWithExpectedSize(joinConditionNodes.size());
List<OrderByNode> rhsOrderBy = Lists.<OrderByNode> newArrayListWithExpectedSize(joinConditionNodes.size());
for (EqualParseNode condition : joinConditionNodes) {
lhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true));
rhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true));
}
Scan lhsScan = ScanUtil.newScan(originalScan);
StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), lhsScan, new SequenceManager(statement));
boolean preserveRowkey = !projectPKColumns && type != JoinType.Full;
QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, !preserveRowkey, lhsOrderBy);
PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable();
boolean isInRowKeyOrder = preserveRowkey && lhsPlan.getOrderBy().getOrderByExpressions().isEmpty();
Scan rhsScan = ScanUtil.newScan(originalScan);
StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), rhsScan, new SequenceManager(statement));
QueryPlan rhsPlan = compileJoinQuery(rhsCtx, binds, rhsJoin, true, true, rhsOrderBy);
PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable();
Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, type == JoinType.Right ? lhsCtx : rhsCtx, false);
List<Expression> lhsKeyExpressions = type == JoinType.Right ? joinConditions.getSecond() : joinConditions.getFirst();
List<Expression> rhsKeyExpressions = type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond();
boolean needsMerge = rhsJoin.hasPostReference();
int fieldPosition = needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0;
PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable;
ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), new HashMap<String,UDFParseNode>(1));
TableRef tableRef = resolver.getTables().get(0);
StatementContext subCtx = new StatementContext(statement, resolver, ScanUtil.newScan(originalScan), new SequenceManager(statement));
subCtx.setCurrentTable(tableRef);
QueryPlan innerPlan = new SortMergeJoinPlan(subCtx, joinTable.getStatement(), tableRef, type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan, lhsKeyExpressions, rhsKeyExpressions, projectedTable, lhsProjTable, needsMerge ? rhsProjTable : null, fieldPosition, lastJoinSpec.isSingleValueOnly());
context.setCurrentTable(tableRef);
context.setResolver(resolver);
TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString()));
ParseNode where = joinTable.getPostFiltersCombined();
SelectStatement select = asSubquery ? NODE_FACTORY.select(from, joinTable.getStatement().getHint(), false, Collections.<AliasedNode> emptyList(), where, null, null, orderBy, null, 0, false, joinTable.getStatement().hasSequence(), Collections.<SelectStatement>emptyList(), joinTable.getStatement().getUdfParseNodes())
: NODE_FACTORY.select(joinTable.getStatement(), from, where);
return compileSingleFlatQuery(context, select, binds, asSubquery, false, innerPlan, null, isInRowKeyOrder);
}
#location 169
#vulnerability type NULL_DEREFERENCE | #fixed code
@SuppressWarnings("unchecked")
protected QueryPlan compileJoinQuery(StatementContext context, List<Object> binds, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List<OrderByNode> orderBy) throws SQLException {
byte[] emptyByteArray = new byte[0];
List<JoinSpec> joinSpecs = joinTable.getJoinSpecs();
if (joinSpecs.isEmpty()) {
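            // Base case: no joins left, so compile the single table (or subselect) as a
            // flat projected scan.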
Table table = joinTable.getTable();
SelectStatement subquery = table.getAsSubquery(orderBy);
if (!table.isSubselect()) {
context.setCurrentTable(table.getTableRef());
PTable projectedTable = table.createProjectedTable(!projectPKColumns, context);
TupleProjector projector = new TupleProjector(projectedTable);
TupleProjector.serializeProjectorIntoScan(context.getScan(), projector);
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
table.projectColumns(context.getScan());
return compileSingleFlatQuery(context, subquery, binds, asSubquery, !asSubquery, null, projectPKColumns ? projector : null, true);
}
QueryPlan plan = compileSubquery(subquery, false);
PTable projectedTable = table.createProjectedTable(plan.getProjector());
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes()));
return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()), table.compilePostFilterExpression(context));
}
boolean[] starJoinVector;
if (!this.useSortMergeJoin && (starJoinVector = joinTable.getStarJoinVector()) != null) {
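            // Star-join strategy: build one hash sub-plan per RHS join table and probe
            // them all while scanning the central LHS table once.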
Table table = joinTable.getTable();
PTable initialProjectedTable;
TableRef tableRef;
SelectStatement query;
TupleProjector tupleProjector;
if (!table.isSubselect()) {
context.setCurrentTable(table.getTableRef());
initialProjectedTable = table.createProjectedTable(!projectPKColumns, context);
tableRef = table.getTableRef();
table.projectColumns(context.getScan());
query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery);
tupleProjector = new TupleProjector(initialProjectedTable);
} else {
SelectStatement subquery = table.getAsSubquery(orderBy);
QueryPlan plan = compileSubquery(subquery, false);
initialProjectedTable = table.createProjectedTable(plan.getProjector());
tableRef = plan.getTableRef();
context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
tupleProjector = new TupleProjector(plan.getProjector());
}
context.setCurrentTable(tableRef);
PTable projectedTable = initialProjectedTable;
int count = joinSpecs.size();
ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count];
List<Expression>[] joinExpressions = new List[count];
JoinType[] joinTypes = new JoinType[count];
PTable[] tables = new PTable[count];
int[] fieldPositions = new int[count];
HashSubPlan[] subPlans = new HashSubPlan[count];
fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size();
for (int i = 0; i < count; i++) {
JoinSpec joinSpec = joinSpecs.get(i);
Scan subScan = ScanUtil.newScan(originalScan);
StatementContext subContext = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
QueryPlan joinPlan = compileJoinQuery(subContext, binds, joinSpec.getJoinTable(), true, true, null);
boolean hasPostReference = joinSpec.getJoinTable().hasPostReference();
if (hasPostReference) {
tables[i] = subContext.getResolver().getTables().get(0).getTable();
projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType());
} else {
tables[i] = null;
}
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), query.getUdfParseNodes()));
joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder
Pair<List<Expression>, List<Expression>> joinConditions = joinSpec.compileJoinConditions(context, subContext, true);
joinExpressions[i] = joinConditions.getFirst();
List<Expression> hashExpressions = joinConditions.getSecond();
Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
boolean optimized = getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), tableRef, joinSpec.getType(), joinExpressions[i], hashExpressions);
Expression keyRangeLhsExpression = keyRangeExpressions.getFirst();
Expression keyRangeRhsExpression = keyRangeExpressions.getSecond();
joinTypes[i] = joinSpec.getType();
if (i < count - 1) {
fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
}
subPlans[i] = new HashSubPlan(i, joinPlan, optimized ? null : hashExpressions, joinSpec.isSingleValueOnly(), keyRangeLhsExpression, keyRangeRhsExpression);
}
TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
QueryPlan plan = compileSingleFlatQuery(context, query, binds, asSubquery, !asSubquery && joinTable.isAllLeftJoin(), null, !table.isSubselect() && projectPKColumns ? tupleProjector : null, true);
Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, table);
Integer limit = null;
if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
limit = plan.getLimit();
}
HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit);
return HashJoinPlan.create(joinTable.getStatement(), plan, joinInfo, subPlans);
}
JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
JoinType type = lastJoinSpec.getType();
if (!this.useSortMergeJoin
&& (type == JoinType.Right || type == JoinType.Inner)
&& lastJoinSpec.getJoinTable().getJoinSpecs().isEmpty()
&& lastJoinSpec.getJoinTable().getTable().isFlat()) {
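            // The last join is a flat INNER/RIGHT join, so hash the LHS side instead and
            // scan the RHS, flipping RIGHT to LEFT to keep the retained side correct.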
JoinTable rhsJoinTable = lastJoinSpec.getJoinTable();
Table rhsTable = rhsJoinTable.getTable();
JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
Scan subScan = ScanUtil.newScan(originalScan);
StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, true, null);
PTable rhsProjTable;
TableRef rhsTableRef;
SelectStatement rhs;
TupleProjector tupleProjector;
if (!rhsTable.isSubselect()) {
context.setCurrentTable(rhsTable.getTableRef());
rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context);
rhsTableRef = rhsTable.getTableRef();
rhsTable.projectColumns(context.getScan());
rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery);
tupleProjector = new TupleProjector(rhsProjTable);
} else {
SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
QueryPlan plan = compileSubquery(subquery, false);
rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
rhsTableRef = plan.getTableRef();
context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
tupleProjector = new TupleProjector(plan.getProjector());
}
context.setCurrentTable(rhsTableRef);
context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable, context.getConnection(), rhs.getUdfParseNodes()));
ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[] {new ImmutableBytesPtr(emptyByteArray)};
Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(lhsCtx, context, true);
List<Expression> joinExpressions = joinConditions.getSecond();
List<Expression> hashExpressions = joinConditions.getFirst();
boolean needsMerge = lhsJoin.hasPostReference();
PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null;
int fieldPosition = needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0;
PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, type == JoinType.Right ? JoinType.Left : type) : rhsProjTable;
TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector);
context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), rhs.getUdfParseNodes()));
QueryPlan rhsPlan = compileSingleFlatQuery(context, rhs, binds, asSubquery, !asSubquery && type == JoinType.Right, null, !rhsTable.isSubselect() && projectPKColumns ? tupleProjector : null, true);
Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context, rhsTable);
Integer limit = null;
if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) {
limit = rhsPlan.getLimit();
}
HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, new List[] {joinExpressions}, new JoinType[] {type == JoinType.Right ? JoinType.Left : type}, new boolean[] {true}, new PTable[] {lhsTable}, new int[] {fieldPosition}, postJoinFilterExpression, limit);
Pair<Expression, Expression> keyRangeExpressions = new Pair<Expression, Expression>(null, null);
getKeyExpressionCombinations(keyRangeExpressions, context, joinTable.getStatement(), rhsTableRef, type, joinExpressions, hashExpressions);
return HashJoinPlan.create(joinTable.getStatement(), rhsPlan, joinInfo, new HashSubPlan[] {new HashSubPlan(0, lhsPlan, hashExpressions, false, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond())});
}
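        // Otherwise fall back to a sort-merge join: compile both sides ordered by their
        // join keys and merge them in a SortMergeJoinPlan.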
JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
JoinTable rhsJoin = lastJoinSpec.getJoinTable();
if (type == JoinType.Right) {
JoinTable temp = lhsJoin;
lhsJoin = rhsJoin;
rhsJoin = temp;
}
List<EqualParseNode> joinConditionNodes = lastJoinSpec.getOnConditions();
List<OrderByNode> lhsOrderBy = Lists.<OrderByNode> newArrayListWithExpectedSize(joinConditionNodes.size());
List<OrderByNode> rhsOrderBy = Lists.<OrderByNode> newArrayListWithExpectedSize(joinConditionNodes.size());
for (EqualParseNode condition : joinConditionNodes) {
lhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true));
rhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true));
}
Scan lhsScan = ScanUtil.newScan(originalScan);
StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), lhsScan, new SequenceManager(statement));
boolean preserveRowkey = !projectPKColumns && type != JoinType.Full;
QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true, !preserveRowkey, lhsOrderBy);
PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable();
boolean isInRowKeyOrder = preserveRowkey && lhsPlan.getOrderBy().getOrderByExpressions().isEmpty();
Scan rhsScan = ScanUtil.newScan(originalScan);
StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), rhsScan, new SequenceManager(statement));
QueryPlan rhsPlan = compileJoinQuery(rhsCtx, binds, rhsJoin, true, true, rhsOrderBy);
PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable();
Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, type == JoinType.Right ? lhsCtx : rhsCtx, false);
List<Expression> lhsKeyExpressions = type == JoinType.Right ? joinConditions.getSecond() : joinConditions.getFirst();
List<Expression> rhsKeyExpressions = type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond();
boolean needsMerge = rhsJoin.hasPostReference();
int fieldPosition = needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0;
PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable;
ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), new HashMap<String,UDFParseNode>(1));
TableRef tableRef = resolver.getTables().get(0);
StatementContext subCtx = new StatementContext(statement, resolver, ScanUtil.newScan(originalScan), new SequenceManager(statement));
subCtx.setCurrentTable(tableRef);
QueryPlan innerPlan = new SortMergeJoinPlan(subCtx, joinTable.getStatement(), tableRef, type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan, lhsKeyExpressions, rhsKeyExpressions, projectedTable, lhsProjTable, needsMerge ? rhsProjTable : null, fieldPosition, lastJoinSpec.isSingleValueOnly());
context.setCurrentTable(tableRef);
context.setResolver(resolver);
TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString()));
ParseNode where = joinTable.getPostFiltersCombined();
SelectStatement select = asSubquery ? NODE_FACTORY.select(from, joinTable.getStatement().getHint(), false, Collections.<AliasedNode> emptyList(), where, null, null, orderBy, null, 0, false, joinTable.getStatement().hasSequence(), Collections.<SelectStatement>emptyList(), joinTable.getStatement().getUdfParseNodes())
: NODE_FACTORY.select(joinTable.getStatement(), from, where);
return compileSingleFlatQuery(context, select, binds, asSubquery, false, innerPlan, null, isInRowKeyOrder);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testDeleteRowFromTableWithImmutableIndex() throws SQLException {
Connection con = null;
try {
boolean autoCommit = false;
con = DriverManager.getConnection(getUrl());
con.setAutoCommit(autoCommit);
Statement stm = con.createStatement();
stm.execute("CREATE TABLE IF NOT EXISTS web_stats (" +
"HOST CHAR(2) NOT NULL," +
"DOMAIN VARCHAR NOT NULL, " +
"FEATURE VARCHAR NOT NULL, " +
"DATE DATE NOT NULL, \n" +
"USAGE.CORE BIGINT," +
"USAGE.DB BIGINT," +
"STATS.ACTIVE_VISITOR INTEGER " +
"CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) IMMUTABLE_ROWS=true");
stm.execute("CREATE INDEX web_stats_idx ON web_stats (DATE, FEATURE)");
stm.close();
Date date = new Date(0);
PreparedStatement psInsert = con
.prepareStatement("UPSERT INTO web_stats(HOST, DOMAIN, FEATURE, DATE, CORE, DB, ACTIVE_VISITOR) VALUES(?,?, ? , ?, ?, ?, ?)");
psInsert.setString(1, "AA");
psInsert.setString(2, "BB");
psInsert.setString(3, "CC");
psInsert.setDate(4, date);
psInsert.setLong(5, 1L);
psInsert.setLong(6, 2L);
psInsert.setLong(7, 3);
psInsert.execute();
psInsert.close();
if (!autoCommit) {
con.commit();
}
psInsert = con.prepareStatement("DELETE FROM web_stats WHERE (HOST, DOMAIN, FEATURE, DATE) = (?,?,?,?)");
psInsert.setString(1, "AA");
psInsert.setString(2, "BB");
psInsert.setString(3, "CC");
psInsert.setDate(4, date);
psInsert.execute();
if (!autoCommit) {
con.commit();
}
ResultSet rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM web_stats");
assertTrue(rs.next());
assertEquals(0, rs.getLong(1));
rs = con.createStatement().executeQuery("SELECT count(*) FROM web_stats_idx");
assertTrue(rs.next());
assertEquals(0, rs.getLong(1));
} finally {
try {
con.close();
} catch (Exception ex) {
}
}
}
#location 58
#vulnerability type NULL_DEREFERENCE | #fixed code
@Test
public void testDeleteRowFromTableWithImmutableIndex() throws SQLException {
testDeleteRowFromTableWithImmutableIndex(false);
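        // The fix factors the body into a parameterized overload (not shown in this row),
        // presumably so both auto-commit variants share one resource-safe implementation.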
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void writeMetrics() throws Exception {
Connection conn = getConnectionWithoutTracing();
String tableName = generateUniqueName();
TraceSpanReceiver traceSpanReceiver = new TraceSpanReceiver();
latch = new CountDownLatch(1);
testTraceWriter = new TestTraceWriter(tableName, defaultTracingThreadPoolForTest, defaultTracingBatchSizeForTest);
// create a simple metrics record
long traceid = 987654;
String description = "Some generic trace";
long spanid = 10;
long parentid = 11;
long startTime = 12;
long endTime = 13;
String processid = "Some process";
String annotation = "test annotation for a span";
Span span = createNewSpan(traceid, parentid, spanid, description, startTime, endTime,
processid, annotation);
traceSpanReceiver.getSpanQueue().add(span);
assertTrue("Span never committed to table", latch.await(30, TimeUnit.SECONDS));
        // make sure we only get the expected stat entry (matching the trace id); otherwise
        // we could get the stats for the update as well
TraceReader reader = new TraceReader(conn, tableName);
Collection<TraceHolder> traces = reader.readAll(10);
assertEquals("Wrong number of traces in the tracing table", 1, traces.size());
// validate trace
TraceHolder trace = traces.iterator().next();
// we are just going to get an orphan span b/c we don't send in a parent
assertEquals("Didn't get expected orphaned spans!" + trace.orphans, 1, trace.orphans.size());
assertEquals(traceid, trace.traceid);
SpanInfo spanInfo = trace.orphans.get(0);
assertEquals(description, spanInfo.description);
assertEquals(parentid, spanInfo.getParentIdForTesting());
assertEquals(startTime, spanInfo.start);
assertEquals(endTime, spanInfo.end);
assertEquals("Wrong number of tags", 0, spanInfo.tagCount);
assertEquals("Wrong number of annotations", 1, spanInfo.annotationCount);
}
#location 23
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void writeMetrics() throws Exception {
Connection conn = getConnectionWithoutTracing();
latch = new CountDownLatch(1);
testTraceWriter.start();
// create a simple metrics record
long traceid = 987654;
String description = "Some generic trace";
long spanid = 10;
long parentid = 11;
long startTime = 12;
long endTime = 13;
String processid = "Some process";
String annotation = "test annotation for a span";
Span span = createNewSpan(traceid, parentid, spanid, description, startTime, endTime,
processid, annotation);
Tracer.getInstance().deliver(span);
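        // Hand the span to the shared Tracer; the writer started above drains the span
        // queue and commits it to the tracing table, which releases the latch.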
assertTrue("Span never committed to table", latch.await(30, TimeUnit.SECONDS));
        // make sure we only get the expected stat entry (matching the trace id); otherwise
        // we could get the stats for the update as well
TraceReader reader = new TraceReader(conn, tracingTableName);
Collection<TraceHolder> traces = reader.readAll(10);
assertEquals("Wrong number of traces in the tracing table", 1, traces.size());
// validate trace
TraceHolder trace = traces.iterator().next();
// we are just going to get an orphan span b/c we don't send in a parent
assertEquals("Didn't get expected orphaned spans!" + trace.orphans, 1, trace.orphans.size());
assertEquals(traceid, trace.traceid);
SpanInfo spanInfo = trace.orphans.get(0);
assertEquals(description, spanInfo.description);
assertEquals(parentid, spanInfo.getParentIdForTesting());
assertEquals(startTime, spanInfo.start);
assertEquals(endTime, spanInfo.end);
assertEquals("Wrong number of tags", 0, spanInfo.tagCount);
assertEquals("Wrong number of annotations", 1, spanInfo.annotationCount);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public boolean next(List<Cell> results) throws IOException {
if (indexRowKey != null &&
singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
byte[] rowCountBytes =
PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
results.add(aggKeyValue);
return false;
}
Cell lastCell = null;
int rowCount = 0;
region.startRegionOperation();
RegionScanner localScanner = null;
try {
byte[] uuidValue = ServerCacheClient.generateId();
localScanner = getLocalScanner();
if (localScanner == null) {
return false;
}
synchronized (localScanner) {
if (!shouldVerify()) {
skipped = true;
return false;
}
do {
List<Cell> row = new ArrayList<Cell>();
hasMore = localScanner.nextRaw(row);
if (!row.isEmpty()) {
lastCell = row.get(0); // lastCell is any cell from the last visited row
Put put = null;
Delete del = null;
for (Cell cell : row) {
if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
continue;
}
if (put == null) {
put = new Put(CellUtil.cloneRow(cell));
}
put.add(cell);
} else {
if (del == null) {
del = new Delete(CellUtil.cloneRow(cell));
}
del.addDeleteMarker(cell);
}
}
if (put == null && del == null) {
continue;
}
// Always add the put first and then delete for a given row. This simplifies the logic in
// IndexRegionObserver
if (put != null) {
mutations.add(put);
}
if (del != null) {
mutations.add(del);
}
if (!verify) {
if (put != null) {
setMutationAttributes(put, uuidValue);
}
if (del != null) {
setMutationAttributes(del, uuidValue);
}
uuidValue = commitIfReady(uuidValue, mutations);
} else {
byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
prepareIndexMutations(put, del);
dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
}
rowCount++;
}
} while (hasMore && rowCount < pageSizeInRows);
if (!mutations.isEmpty()) {
if (verify) {
verifyAndOrRebuildIndex();
} else {
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
}
}
}
} catch (Throwable e) {
LOGGER.error("Exception in IndexRebuildRegionScanner for region "
+ region.getRegionInfo().getRegionNameAsString(), e);
throw e;
} finally {
region.closeRegionOperation();
mutations.clear();
if (verify) {
dataKeyToMutationMap.clear();
indexKeyToMutationMap.clear();
}
if (localScanner!=null && localScanner!=innerScanner) {
localScanner.close();
}
}
if (indexRowKey != null) {
rowCount = singleRowRebuildReturnCode;
}
if (minTimestamp != 0) {
nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
}
byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
final Cell aggKeyValue;
if (lastCell == null) {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
} else {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
}
results.add(aggKeyValue);
return hasMore || hasMoreIncr;
}
#location 94
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Override
public boolean next(List<Cell> results) throws IOException {
if (indexRowKey != null &&
singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
byte[] rowCountBytes =
PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
results.add(aggKeyValue);
return false;
}
Cell lastCell = null;
int rowCount = 0;
region.startRegionOperation();
RegionScanner localScanner = null;
try {
byte[] uuidValue = ServerCacheClient.generateId();
localScanner = getLocalScanner();
if (localScanner == null) {
return false;
}
synchronized (localScanner) {
if (!shouldVerify()) {
skipped = true;
return false;
}
do {
                    /*
                     * If the region is closing and a large number of rows is being verified or
                     * rebuilt with IndexTool, skipping this check would delay the region close,
                     * hurting availability, since this method holds the read lock on the region.
                     */
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
List<Cell> row = new ArrayList<Cell>();
hasMore = localScanner.nextRaw(row);
if (!row.isEmpty()) {
lastCell = row.get(0); // lastCell is any cell from the last visited row
Put put = null;
Delete del = null;
for (Cell cell : row) {
if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
continue;
}
if (put == null) {
put = new Put(CellUtil.cloneRow(cell));
}
put.add(cell);
} else {
if (del == null) {
del = new Delete(CellUtil.cloneRow(cell));
}
del.addDeleteMarker(cell);
}
}
if (put == null && del == null) {
continue;
}
// Always add the put first and then delete for a given row. This simplifies the logic in
// IndexRegionObserver
if (put != null) {
mutations.add(put);
}
if (del != null) {
mutations.add(del);
}
if (!verify) {
if (put != null) {
setMutationAttributes(put, uuidValue);
}
if (del != null) {
setMutationAttributes(del, uuidValue);
}
uuidValue = commitIfReady(uuidValue, mutations);
} else {
byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
prepareIndexMutations(put, del);
dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
}
rowCount++;
}
} while (hasMore && rowCount < pageSizeInRows);
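                // Flush the completed page: either verify (and optionally rebuild) the
                // collected index rows, or commit the rebuilt mutations directly.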
if (!mutations.isEmpty()) {
if (verify) {
verifyAndOrRebuildIndex();
} else {
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
}
}
}
} catch (Throwable e) {
LOGGER.error("Exception in IndexRebuildRegionScanner for region "
+ region.getRegionInfo().getRegionNameAsString(), e);
throw e;
} finally {
region.closeRegionOperation();
mutations.clear();
if (verify) {
dataKeyToMutationMap.clear();
indexKeyToMutationMap.clear();
}
if (localScanner!=null && localScanner!=innerScanner) {
localScanner.close();
}
}
if (indexRowKey != null) {
rowCount = singleRowRebuildReturnCode;
}
if (minTimestamp != 0) {
nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
}
byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
final Cell aggKeyValue;
if (lastCell == null) {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
} else {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
}
results.add(aggKeyValue);
return hasMore || hasMoreIncr;
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public static long convertToNanoseconds(long serverTimeStamp) {
return serverTimeStamp * TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
}
#location 2
#vulnerability type NULL_DEREFERENCE | #fixed code
public static long convertToNanoseconds(long serverTimeStamp) {
return serverTimeStamp * TransactionFactory.getTransactionProvider().getTransactionContext().getMaxTransactionsPerSecond();
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testDeleteViewIndexSequences() throws Exception {
createBaseTable(tableName, false, null, null);
Connection conn1 = getConnection();
Connection conn2 = getConnection();
conn1.createStatement().execute("CREATE VIEW " + VIEW_NAME + " AS SELECT * FROM " + tableName);
conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + VIEW_NAME + " (v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
conn1.createStatement().execute("DROP VIEW " + VIEW_NAME);
conn1.createStatement().execute("DROP TABLE "+ tableName);
admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
ResultSet rs = conn2.createStatement().executeQuery("SELECT "
+ PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
+ PhoenixDatabaseMetaData.SEQUENCE_NAME
+ " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
assertFalse("View index sequences should be deleted.", rs.next());
}
#location 9
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void testDeleteViewIndexSequences() throws Exception {
createBaseTable(tableName, false, null, null);
Connection conn1 = getConnection();
Connection conn2 = getConnection();
String viewName = schemaName + "." + VIEW_NAME;
conn1.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName);
conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + viewName + " (v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_schema like '%"
+ schemaName + "%'";
ResultSet rs = conn1.prepareStatement(query).executeQuery();
assertTrue(rs.next());
assertEquals(MetaDataUtil.getViewIndexSequenceSchemaName(PNameFactory.newName(tableName), isNamespaceMapped),
rs.getString("sequence_schema"));
assertEquals(MetaDataUtil.getViewIndexSequenceName(PNameFactory.newName(tableName), null, isNamespaceMapped),
rs.getString("sequence_name"));
assertEquals(-32767, rs.getInt("current_value"));
assertEquals(1, rs.getInt("increment_by"));
assertFalse(rs.next());
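        // With the sequence verified to exist, drop the view and table and confirm that
        // both the physical view-index table and its sequence are cleaned up.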
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
conn1.createStatement().execute("DROP VIEW " + viewName);
conn1.createStatement().execute("DROP TABLE "+ tableName);
admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
rs = conn2.createStatement().executeQuery("SELECT "
+ PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
+ PhoenixDatabaseMetaData.SEQUENCE_NAME
+ " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
assertFalse("View index sequences should be deleted.", rs.next());
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
int prefixKeyLength =
prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length
: regionEndKey.length) : 0;
TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
DataOutput output = new DataOutputStream(stream);
try {
// For local indexes, we must prepend the row key with the start region key
if (prependRegionStartKey) {
if (regionStartKey.length == 0) {
output.write(new byte[prefixKeyLength]);
} else {
output.write(regionStartKey);
}
}
if (isIndexSalted) {
output.write(0); // will be set at end to index salt byte
}
// The dataRowKeySchema includes the salt byte field,
// so we must adjust for that here.
int dataPosOffset = isDataTableSalted ? 1 : 0 ;
BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
// Skip data table salt byte
int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
if (viewIndexId != null) {
output.write(viewIndexId);
}
if (isMultiTenant) {
dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength()==0, dataRowKeySchema.getField(dataPosOffset)));
}
dataPosOffset++;
}
// Write index row key
for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
Boolean hasValue=dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
// Ignore view constants from the data table, as these
// don't need to appear in the index (as they're the
// same for all rows in this index)
if (!viewConstantColumnBitSet.get(i)) {
int pos = rowKeyMetaData.getIndexPkPosition(i-dataPosOffset);
if (Boolean.TRUE.equals(hasValue)) {
dataRowKeyLocator[0][pos] = ptr.getOffset();
dataRowKeyLocator[1][pos] = ptr.getLength();
} else {
dataRowKeyLocator[0][pos] = 0;
dataRowKeyLocator[1][pos] = 0;
}
}
}
BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
Iterator<Expression> expressionIterator = indexedExpressions.iterator();
for (int i = 0; i < nIndexedColumns; i++) {
PDataType dataColumnType;
boolean isNullable;
SortOrder dataSortOrder;
if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
Expression expression = expressionIterator.next();
dataColumnType = expression.getDataType();
dataSortOrder = expression.getSortOrder();
isNullable = expression.isNullable();
expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
}
else {
Field field = dataRowKeySchema.getField(dataPkPosition[i]);
dataColumnType = field.getDataType();
ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
dataSortOrder = field.getSortOrder();
isNullable = field.isNullable();
}
boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
} else {
if (!isBytesComparable) {
indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
}
if (isDataColumnInverted != isIndexColumnDesc) {
writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
} else {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
}
}
if (!indexColumnType.isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
}
}
int length = stream.size();
int minLength = length - maxTrailingNulls;
byte[] indexRowKey = stream.getBuffer();
// Remove trailing nulls
while (length > minLength && indexRowKey[length-1] == QueryConstants.SEPARATOR_BYTE) {
length--;
}
if (isIndexSalted) {
// Set salt byte
byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length-SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
indexRowKey[0] = saltByte;
}
return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
} finally {
try {
stream.close();
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
}
}
}
#location 114
#vulnerability type NULL_DEREFERENCE
#fixed code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
int prefixKeyLength =
prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length
: regionEndKey.length) : 0;
TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
DataOutput output = new DataOutputStream(stream);
try {
// For local indexes, we must prepend the row key with the start region key
if (prependRegionStartKey) {
if (regionStartKey.length == 0) {
output.write(new byte[prefixKeyLength]);
} else {
output.write(regionStartKey);
}
}
if (isIndexSalted) {
output.write(0); // will be set at end to index salt byte
}
// The dataRowKeySchema includes the salt byte field,
// so we must adjust for that here.
int dataPosOffset = isDataTableSalted ? 1 : 0 ;
BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
// Skip data table salt byte
int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
if (viewIndexId != null) {
output.write(viewIndexId);
}
if (isMultiTenant) {
dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength()==0, dataRowKeySchema.getField(dataPosOffset)));
}
dataPosOffset++;
}
// Write index row key
for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
Boolean hasValue=dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
// Ignore view constants from the data table, as these
// don't need to appear in the index (as they're the
// same for all rows in this index)
if (!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()) {
int pos = rowKeyMetaData.getIndexPkPosition(i-dataPosOffset);
if (Boolean.TRUE.equals(hasValue)) {
dataRowKeyLocator[0][pos] = ptr.getOffset();
dataRowKeyLocator[1][pos] = ptr.getLength();
} else {
dataRowKeyLocator[0][pos] = 0;
dataRowKeyLocator[1][pos] = 0;
}
}
}
BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
Iterator<Expression> expressionIterator = indexedExpressions.iterator();
for (int i = 0; i < nIndexedColumns; i++) {
PDataType dataColumnType;
boolean isNullable;
SortOrder dataSortOrder;
if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
Expression expression = expressionIterator.next();
dataColumnType = expression.getDataType();
dataSortOrder = expression.getSortOrder();
isNullable = expression.isNullable();
expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
}
else {
Field field = dataRowKeySchema.getField(dataPkPosition[i]);
dataColumnType = field.getDataType();
ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
dataSortOrder = field.getSortOrder();
isNullable = field.isNullable();
}
boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
} else {
if (!isBytesComparable) {
indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
}
if (isDataColumnInverted != isIndexColumnDesc) {
writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
} else {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
}
}
if (!indexColumnType.isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
}
}
int length = stream.size();
int minLength = length - maxTrailingNulls;
byte[] indexRowKey = stream.getBuffer();
// Remove trailing nulls
while (length > minLength && indexRowKey[length-1] == QueryConstants.SEPARATOR_BYTE) {
length--;
}
if (isIndexSalted) {
// Set salt byte
byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length-SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
indexRowKey[0] = saltByte;
}
return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
} finally {
try {
stream.close();
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
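#note
The before/after bodies differ in exactly one condition: `!viewConstantColumnBitSet.get(i)` becomes `!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()`. In the flagged version, when the index is declared on the base table itself, primary-key positions marked as view constants were skipped, so their `dataRowKeyLocator` slots were never assigned and the later `ptr.set(...)` lookups could not be proven safe for those columns -- a plausible reading of the NULL_DEREFERENCE report. The reported #location (114) is an offset into the snippet and falls well after the one-line fix; these bug-type names match Facebook Infer's, and Infer traces typically flag the dereference site rather than the cause.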
#vulnerable code
@Test
public void testCSVCommonsUpsert_WithArray() throws Exception {
CSVParser parser = null;
PhoenixConnection conn = null;
try {
// Create table
String statements = "CREATE TABLE IF NOT EXISTS ARRAY_TABLE "
+ "(ID BIGINT NOT NULL PRIMARY KEY, VALARRAY INTEGER ARRAY);";
conn = DriverManager.getConnection(getUrl()).unwrap(
PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn,
new StringReader(statements), null);
// Upsert CSV file
CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "ARRAY_TABLE",
null, true, ',', '"', null, "!");
csvUtil.upsert(
new StringReader("ID,VALARRAY\n"
+ "1,2!3!4\n"));
// Compare Phoenix ResultSet with CSV file content
PreparedStatement statement = conn
.prepareStatement("SELECT ID, VALARRAY FROM ARRAY_TABLE");
ResultSet phoenixResultSet = statement.executeQuery();
assertTrue(phoenixResultSet.next());
assertEquals(1L, phoenixResultSet.getLong(1));
assertEquals(
PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Integer[]{2, 3, 4}),
phoenixResultSet.getArray(2));
assertFalse(phoenixResultSet.next());
} finally {
if (parser != null)
parser.close();
if (conn != null)
conn.close();
}
}
#location 18
#vulnerability type NULL_DEREFERENCE
#fixed code
@Test
public void testCSVCommonsUpsert_WithArray() throws Exception {
CSVParser parser = null;
PhoenixConnection conn = null;
try {
// Create table
String statements = "CREATE TABLE IF NOT EXISTS ARRAY_TABLE "
+ "(ID BIGINT NOT NULL PRIMARY KEY, VALARRAY INTEGER ARRAY);";
conn = DriverManager.getConnection(getUrl()).unwrap(
PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn,
new StringReader(statements), null);
// Upsert CSV file
CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "ARRAY_TABLE",
ImmutableList.<String>of(), true, ',', '"', null, "!");
csvUtil.upsert(
new StringReader("ID,VALARRAY\n"
+ "1,2!3!4\n"));
// Compare Phoenix ResultSet with CSV file content
PreparedStatement statement = conn
.prepareStatement("SELECT ID, VALARRAY FROM ARRAY_TABLE");
ResultSet phoenixResultSet = statement.executeQuery();
assertTrue(phoenixResultSet.next());
assertEquals(1L, phoenixResultSet.getLong(1));
assertEquals(
PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Integer[]{2, 3, 4}),
phoenixResultSet.getArray(2));
assertFalse(phoenixResultSet.next());
} finally {
if (parser != null)
parser.close();
if (conn != null)
conn.close();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
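#note
The fix replaces the null column-list argument to CSVCommonsLoader with `ImmutableList.<String>of()`. An explicit empty collection lets downstream code iterate or size the list without a null check. A minimal self-contained sketch of the idiom, assuming only that the loader consumes the list; names are illustrative, not CSVCommonsLoader internals:

import java.util.Collections;
import java.util.List;

public class EmptyListOverNull {
    static int countSelected(List<String> columns) {
        return columns.size(); // throws NPE if callers are allowed to pass null
    }

    public static void main(String[] args) {
        List<String> none = Collections.emptyList(); // stand-in for ImmutableList.<String>of()
        System.out.println(countSelected(none));     // 0, no null check required downstream
    }
}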
#vulnerable code
private String makeQueryString(JobConf jobConf, String tableName, List<String>
readColumnList, String whereClause, String queryTemplate, String hints, Map<String,
TypeInfo> columnTypeMap) throws IOException {
StringBuilder sql = new StringBuilder();
List<String> conditionColumnList = buildWhereClause(jobConf, sql, whereClause, columnTypeMap);
readColumnList = replaceColumns(jobConf, readColumnList);
if (conditionColumnList.size() > 0) {
addConditionColumnToReadColumn(readColumnList, conditionColumnList);
readColumnList = ColumnMappingUtils.quoteColumns(readColumnList);
sql.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
tableName));
} else {
readColumnList = ColumnMappingUtils.quoteColumns(readColumnList);
sql.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
tableName));
}
if (LOG.isInfoEnabled()) {
LOG.info("Input query : " + sql.toString());
}
return sql.toString();
}
#location 10
#vulnerability type NULL_DEREFERENCE
#fixed code
private String makeQueryString(JobConf jobConf, String tableName, List<String>
readColumnList, String whereClause, String queryTemplate, String hints, Map<String,
TypeInfo> columnTypeMap) throws IOException {
StringBuilder sql = new StringBuilder();
List<String> conditionColumnList = buildWhereClause(jobConf, sql, whereClause, columnTypeMap);
readColumnList = replaceColumns(jobConf, readColumnList);
if (conditionColumnList.size() > 0) {
addConditionColumnToReadColumn(readColumnList, conditionColumnList);
sql.insert(0, queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
tableName));
} else {
sql.append(queryTemplate.replace("$HINT$", hints).replace("$COLUMN_LIST$",
getSelectColumns(jobConf, tableName, readColumnList)).replace("$TABLE_NAME$",
tableName));
}
if (LOG.isInfoEnabled()) {
LOG.info("Input query : " + sql.toString());
}
return sql.toString();
} | Below is the vulnerable code, please generate the patch based on the following information. |
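#note
Both `readColumnList = ColumnMappingUtils.quoteColumns(readColumnList);` re-assignments are deleted and nothing replaces them, so quoting is evidently left to `getSelectColumns(...)`. The flagged line (10) is where the re-quoted list is consumed while building the select list, which suggests the analyzer's trace was: `quoteColumns` may return null, and that null then flows into the column-list construction. That causal chain is inferred from the diff, not stated anywhere in the row.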
#vulnerable code
private void initTxServiceClient() {
txZKClientService = TransactionFactory.getTransactionFactory().getTransactionContext().setTransactionClient(config, props, connectionInfo);
}
#location 2
#vulnerability type NULL_DEREFERENCE
#fixed code
private void initTxServiceClient() {
txZKClientService = TransactionFactory.getTransactionProvider().getTransactionContext().setTransactionClient(config, props, connectionInfo);
} | Below is the vulnerable code, please generate the patch based on the following information. |
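#note
A one-token change: `TransactionFactory.getTransactionFactory()` becomes `TransactionFactory.getTransactionProvider()`. The finding implies the factory accessor can be null before initialization, so chaining `getTransactionContext()` onto it is unprovable-safe; the provider accessor apparently carries a non-null guarantee. The same swap recurs twice more below. A hypothetical fail-fast alternative, with every name assumed rather than taken from Phoenix:

import java.util.Objects;

public class FailFastProvider {
    static Object provider; // would be set during initialization elsewhere

    static Object requireProvider() {
        return Objects.requireNonNull(provider,
                "transaction provider not initialized; register one before use");
    }

    public static void main(String[] args) {
        try {
            requireProvider();
        } catch (NullPointerException e) {
            System.out.println(e.getMessage()); // fails fast with a diagnosis
        }
    }
}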
#vulnerable code
@Test
public void testUpsertWithOldClient() throws Exception {
checkForPreConditions();
// Insert data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
executeQueriesWithCurrentVersion(QUERY);
assertTrue(compareOutput(CREATE_ADD, QUERY));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
#fixed code
@Test
public void testUpsertWithOldClient() throws Exception {
// Insert data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
executeQueriesWithCurrentVersion(QUERY);
assertExpectedOutput(CREATE_ADD, QUERY);
} | Below is the vulnerable code, please generate the patch based on the following information. |
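#note
Two edits land here: the `checkForPreConditions()` call at the flagged line disappears, and `assertTrue(compareOutput(...))` becomes `assertExpectedOutput(...)`. A THREAD_SAFETY_VIOLATION on the precondition check most plausibly means it touched shared static state (client-version bookkeeping for these compatibility tests) without synchronization; the sibling *WithOldClient tests further down receive identical treatment.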
#vulnerable code
@Override
public boolean next(List<Cell> results) throws IOException {
if (indexRowKey != null &&
singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
byte[] rowCountBytes =
PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
results.add(aggKeyValue);
return false;
}
Cell lastCell = null;
int rowCount = 0;
region.startRegionOperation();
RegionScanner localScanner = null;
try {
byte[] uuidValue = ServerCacheClient.generateId();
localScanner = getLocalScanner();
if (localScanner == null) {
return false;
}
synchronized (localScanner) {
if (!shouldVerify()) {
skipped = true;
return false;
}
do {
List<Cell> row = new ArrayList<Cell>();
hasMore = localScanner.nextRaw(row);
if (!row.isEmpty()) {
lastCell = row.get(0); // lastCell is any cell from the last visited row
Put put = null;
Delete del = null;
for (Cell cell : row) {
if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
continue;
}
if (put == null) {
put = new Put(CellUtil.cloneRow(cell));
}
put.add(cell);
} else {
if (del == null) {
del = new Delete(CellUtil.cloneRow(cell));
}
del.addDeleteMarker(cell);
}
}
if (put == null && del == null) {
continue;
}
// Always add the put first and then delete for a given row. This simplifies the logic in
// IndexRegionObserver
if (put != null) {
mutations.add(put);
}
if (del != null) {
mutations.add(del);
}
if (!verify) {
if (put != null) {
setMutationAttributes(put, uuidValue);
}
if (del != null) {
setMutationAttributes(del, uuidValue);
}
uuidValue = commitIfReady(uuidValue, mutations);
} else {
byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
prepareIndexMutations(put, del);
dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
}
rowCount++;
}
} while (hasMore && rowCount < pageSizeInRows);
if (!mutations.isEmpty()) {
if (verify) {
verifyAndOrRebuildIndex();
} else {
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
}
}
}
} catch (Throwable e) {
LOGGER.error("Exception in IndexRebuildRegionScanner for region "
+ region.getRegionInfo().getRegionNameAsString(), e);
throw e;
} finally {
region.closeRegionOperation();
mutations.clear();
if (verify) {
dataKeyToMutationMap.clear();
indexKeyToMutationMap.clear();
}
if (localScanner!=null && localScanner!=innerScanner) {
localScanner.close();
}
}
if (indexRowKey != null) {
rowCount = singleRowRebuildReturnCode;
}
if (minTimestamp != 0) {
nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
}
byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
final Cell aggKeyValue;
if (lastCell == null) {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
} else {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
}
results.add(aggKeyValue);
return hasMore || hasMoreIncr;
}
#location 18
#vulnerability type THREAD_SAFETY_VIOLATION
#fixed code
@Override
public boolean next(List<Cell> results) throws IOException {
if (indexRowKey != null &&
singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) {
byte[] rowCountBytes =
PLong.INSTANCE.toBytes(Long.valueOf(singleRowRebuildReturnCode));
final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
results.add(aggKeyValue);
return false;
}
Cell lastCell = null;
int rowCount = 0;
region.startRegionOperation();
RegionScanner localScanner = null;
try {
byte[] uuidValue = ServerCacheClient.generateId();
localScanner = getLocalScanner();
if (localScanner == null) {
return false;
}
synchronized (localScanner) {
if (!shouldVerify()) {
skipped = true;
return false;
}
do {
/**
 If the region is closing and a large number of rows are being verified/rebuilt with IndexTool,
 not having this check would delay the region close -- affecting availability,
 as this method holds the read lock on the region.
 */
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
List<Cell> row = new ArrayList<Cell>();
hasMore = localScanner.nextRaw(row);
if (!row.isEmpty()) {
lastCell = row.get(0); // lastCell is any cell from the last visited row
Put put = null;
Delete del = null;
for (Cell cell : row) {
if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) {
continue;
}
if (put == null) {
put = new Put(CellUtil.cloneRow(cell));
}
put.add(cell);
} else {
if (del == null) {
del = new Delete(CellUtil.cloneRow(cell));
}
del.addDeleteMarker(cell);
}
}
if (put == null && del == null) {
continue;
}
// Always add the put first and then delete for a given row. This simplifies the logic in
// IndexRegionObserver
if (put != null) {
mutations.add(put);
}
if (del != null) {
mutations.add(del);
}
if (!verify) {
if (put != null) {
setMutationAttributes(put, uuidValue);
}
if (del != null) {
setMutationAttributes(del, uuidValue);
}
uuidValue = commitIfReady(uuidValue, mutations);
} else {
byte[] dataKey = (put != null) ? put.getRow() : del.getRow();
prepareIndexMutations(put, del);
dataKeyToMutationMap.put(dataKey, new Pair<Put, Delete>(put, del));
}
rowCount++;
}
} while (hasMore && rowCount < pageSizeInRows);
if (!mutations.isEmpty()) {
if (verify) {
verifyAndOrRebuildIndex();
} else {
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize);
}
}
}
} catch (Throwable e) {
LOGGER.error("Exception in IndexRebuildRegionScanner for region "
+ region.getRegionInfo().getRegionNameAsString(), e);
throw e;
} finally {
region.closeRegionOperation();
mutations.clear();
if (verify) {
dataKeyToMutationMap.clear();
indexKeyToMutationMap.clear();
}
if (localScanner!=null && localScanner!=innerScanner) {
localScanner.close();
}
}
if (indexRowKey != null) {
rowCount = singleRowRebuildReturnCode;
}
if (minTimestamp != 0) {
nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell));
}
byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
final Cell aggKeyValue;
if (lastCell == null) {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
} else {
aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
}
results.add(aggKeyValue);
return hasMore || hasMoreIncr;
} | Below is the vulnerable code, please generate the patch based on the following information. |
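#note
The substantive change is the new `ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting()` call at the top of the scan loop, plus its explanatory comment: the loop runs while holding the region read lock, so without a per-row check a close or split request could be starved for the whole verify/rebuild pass. (Both versions, incidentally, still dereference `lastCell` in the `minTimestamp != 0` branch even though it stays null when no rows were scanned.) A self-contained sketch of the cooperative-cancellation shape, with an AtomicBoolean standing in for the region-state check:

import java.util.concurrent.atomic.AtomicBoolean;

public class PollStopConditionLoop {
    static final AtomicBoolean regionClosing = new AtomicBoolean(false);

    static int scan(int totalRows) {
        int processed = 0;
        for (int row = 0; row < totalRows; row++) {
            if (regionClosing.get()) { // checked once per row, as in the patch
                throw new IllegalStateException("region closing; aborting after " + processed + " rows");
            }
            processed++; // ... process one row ...
        }
        return processed;
    }

    public static void main(String[] args) {
        regionClosing.set(true); // simulate a close request arriving mid-scan
        try {
            scan(1_000_000);
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}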
#vulnerable code
public DataValue getDataForRule(Scenario scenario, Column phxMetaColumn) throws Exception {
// TODO Make a Set of Rules that have already been applied so that we don't generate for every value
List<Scenario> scenarios = parser.getScenarios();
DataValue value = null;
if (scenarios.contains(scenario)) {
logger.debug("We found a correct Scenario");
// Assume the first rule map
Map<DataTypeMapping, List> ruleMap = modelList.get(0);
List<Column> ruleList = ruleMap.get(phxMetaColumn.getType());
// Make sure Column from Phoenix Metadata matches a rule column
if (ruleList.contains(phxMetaColumn)) {
// Generate some random data based on this rule
logger.debug("We found a correct column rule");
Column columnRule = getColumnForRule(ruleList, phxMetaColumn);
value = getDataValue(columnRule);
synchronized (value) {
// Add the prefix to the value if it exists.
if (columnRule.getPrefix() != null) {
value.setValue(columnRule.getPrefix() + value.getValue());
}
}
} else {
logger.warn("Attempted to apply rule to data, but could not find a rule to match type:"
+ phxMetaColumn.getType()
);
}
}
return value;
}
#location 16
#vulnerability type THREAD_SAFETY_VIOLATION
#fixed code
public DataValue getDataForRule(Scenario scenario, Column phxMetaColumn) throws Exception {
// TODO Make a Set of Rules that have already been applied so that we don't generate for every value
List<Scenario> scenarios = parser.getScenarios();
DataValue value = null;
if (scenarios.contains(scenario)) {
logger.debug("We found a correct Scenario");
// Assume the first rule map
Map<DataTypeMapping, List> ruleMap = modelList.get(0);
List<Column> ruleList = ruleMap.get(phxMetaColumn.getType());
// Make sure Column from Phoenix Metadata matches a rule column
if (ruleList.contains(phxMetaColumn)) {
// Generate some random data based on this rule
logger.debug("We found a correct column rule");
Column columnRule = getColumnForRule(ruleList, phxMetaColumn);
value = getDataValue(columnRule);
} else {
logger.warn("Attempted to apply rule to data, but could not find a rule to match type:"
+ phxMetaColumn.getType()
);
}
}
return value;
} | Below is the vulnerable code, please generate the patch based on the following information. |
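#note
The patch deletes the `synchronized (value)` block that prepended `columnRule.getPrefix()` to the freshly obtained value; the prefix handling presumably moved into `getDataValue`, since only the removal is visible here. Locking on the value excludes only threads that take that same monitor -- any code reading a shared or cached DataValue without it still races with the mutation, which is the standard reading of a THREAD_SAFETY_VIOLATION on a synchronized block. A minimal runnable illustration; Holder stands in for DataValue and all names are assumptions:

public class LockThatReadersSkip {
    static class Holder { String value; Holder(String v) { value = v; } }
    static final Holder shared = new Holder("v");

    public static void main(String[] args) throws InterruptedException {
        Thread writer = new Thread(() -> {
            synchronized (shared) {                  // guards only like-minded lockers
                shared.value = "prefix-" + shared.value;
            }
        });
        writer.start();
        String seen = shared.value;                  // reader takes no lock: data race
        writer.join();
        System.out.println(seen + " / " + shared.value); // may print "v / prefix-v"
    }
}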
#vulnerable code
@Test
public void testLocalIndexTableRegionSplitPolicyAndSplitKeys() throws Exception {
createBaseTable(DATA_TABLE_NAME, null,"('e','i','o')");
Connection conn1 = DriverManager.getConnection(getUrl());
Connection conn2 = DriverManager.getConnection(getUrl());
conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME + "(v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_FULL_NAME).next();
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
HTable userTable = new HTable(admin.getConfiguration(),TableName.valueOf(DATA_TABLE_NAME));
HTable indexTable = new HTable(admin.getConfiguration(),TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
assertArrayEquals("Both user table and index table should have same split keys.", userTable.getStartKeys(), indexTable.getStartKeys());
}
#location 13
#vulnerability type RESOURCE_LEAK
#fixed code
@Test
public void testLocalIndexTableRegionSplitPolicyAndSplitKeys() throws Exception {
createBaseTable(DATA_TABLE_NAME, null,"('e','i','o')");
Connection conn1 = DriverManager.getConnection(getUrl());
Connection conn2 = DriverManager.getConnection(getUrl());
conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME + "(v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_FULL_NAME).next();
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
try (HTable userTable = new HTable(admin.getConfiguration(),TableName.valueOf(DATA_TABLE_NAME))) {
try (HTable indexTable = new HTable(admin.getConfiguration(),TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)))) {
assertArrayEquals("Both user table and index table should have same split keys.", userTable.getStartKeys(), indexTable.getStartKeys());
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
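#note
Both `new HTable(...)` handles now live in try-with-resources; the flagged line is the first construction, which leaked whenever the assertion threw or the method simply returned. A self-contained sketch of the pattern using a stand-in AutoCloseable, so it runs without an HBase cluster:

import java.util.ArrayList;
import java.util.List;

public class CloseTablesOnAssertFailure {
    static final List<String> closed = new ArrayList<>();

    static class FakeTable implements AutoCloseable {
        final String name;
        FakeTable(String name) { this.name = name; }
        int startKeyCount() { return 1; }
        @Override public void close() { closed.add(name); }
    }

    public static void main(String[] args) {
        try (FakeTable user = new FakeTable("user");
             FakeTable index = new FakeTable("index")) {
            if (user.startKeyCount() != index.startKeyCount()) {
                throw new AssertionError("split keys differ"); // close() still runs
            }
        }
        System.out.println("closed in reverse order: " + closed); // [index, user]
    }
}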
#vulnerable code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
final String dataTable = generateUniqueName();
final String index = "IDX_" + dataTable;
// create the table and ensure its empty
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(url, props);
conn.createStatement()
.execute("CREATE TABLE " + dataTable + " (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
// create the index and ensure its empty as well
conn.createStatement().execute("CREATE INDEX " + index + " ON " + dataTable + " (v1)");
conn = DriverManager.getConnection(getUrl(), props);
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTable + " VALUES(?,?,?)");
conn.setAutoCommit(false);
for (int i = 0; i < 100; i++) {
stmt.setInt(1, i);
stmt.setString(2, "v1" + i);
stmt.setString(3, "v2" + i);
stmt.execute();
}
conn.commit();
int numUpsertSelectRunners = 5;
ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
// run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
// run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
for (int i = 0; i < 100; i += 25) {
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
}
int received = 0;
while (received < futures.size()) {
Future<Boolean> resultFuture = completionService.take();
Boolean result = resultFuture.get();
received++;
assertTrue(result);
}
exec.shutdownNow();
conn.close();
}
#location 8
#vulnerability type NULL_DEREFERENCE
#fixed code
@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
try (Connection conn = driver.connect(url, props)) {
int numUpsertSelectRunners = 5;
ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
// run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
// run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
for (int i = 0; i < 100; i += 25) {
futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i+25, 5)));
}
int received = 0;
while (received < futures.size()) {
Future<Boolean> resultFuture = completionService.take();
Boolean result = resultFuture.get();
received++;
assertTrue(result);
}
exec.shutdownNow();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
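#note
The fixed body references `dataTable` and `props` without defining them, so the DDL and hundred-row seed evidently moved to shared setup; the remaining change wraps the connection in try-with-resources. The flagged line in the original is the first use of `conn` after `driver.connect(url, props)`: JDBC's `Driver.connect` contract allows returning null when the driver does not accept the URL, so the unchecked dereference is exactly what a NULL_DEREFERENCE checker reports.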
#vulnerable code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
int prefixKeyLength =
prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length
: regionEndKey.length) : 0;
TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
DataOutput output = new DataOutputStream(stream);
try {
// For local indexes, we must prepend the row key with the start region key
if (prependRegionStartKey) {
if (regionStartKey.length == 0) {
output.write(new byte[prefixKeyLength]);
} else {
output.write(regionStartKey);
}
}
if (isIndexSalted) {
output.write(0); // will be set at end to index salt byte
}
// The dataRowKeySchema includes the salt byte field,
// so we must adjust for that here.
int dataPosOffset = isDataTableSalted ? 1 : 0 ;
BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
// Skip data table salt byte
int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
if (viewIndexId != null) {
output.write(viewIndexId);
}
if (isMultiTenant) {
dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength()==0, dataRowKeySchema.getField(dataPosOffset)));
}
dataPosOffset++;
}
// Write index row key
for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
Boolean hasValue=dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
// Ignore view constants from the data table, as these
// don't need to appear in the index (as they're the
// same for all rows in this index)
if (!viewConstantColumnBitSet.get(i)) {
int pos = rowKeyMetaData.getIndexPkPosition(i-dataPosOffset);
if (Boolean.TRUE.equals(hasValue)) {
dataRowKeyLocator[0][pos] = ptr.getOffset();
dataRowKeyLocator[1][pos] = ptr.getLength();
} else {
dataRowKeyLocator[0][pos] = 0;
dataRowKeyLocator[1][pos] = 0;
}
}
}
BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
Iterator<Expression> expressionIterator = indexedExpressions.iterator();
for (int i = 0; i < nIndexedColumns; i++) {
PDataType dataColumnType;
boolean isNullable;
SortOrder dataSortOrder;
if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
Expression expression = expressionIterator.next();
dataColumnType = expression.getDataType();
dataSortOrder = expression.getSortOrder();
isNullable = expression.isNullable();
expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
}
else {
Field field = dataRowKeySchema.getField(dataPkPosition[i]);
dataColumnType = field.getDataType();
ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
dataSortOrder = field.getSortOrder();
isNullable = field.isNullable();
}
boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
} else {
if (!isBytesComparable) {
indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
}
if (isDataColumnInverted != isIndexColumnDesc) {
writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
} else {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
}
}
if (!indexColumnType.isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
}
}
int length = stream.size();
int minLength = length - maxTrailingNulls;
byte[] indexRowKey = stream.getBuffer();
// Remove trailing nulls
while (length > minLength && indexRowKey[length-1] == QueryConstants.SEPARATOR_BYTE) {
length--;
}
if (isIndexSalted) {
// Set salt byte
byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length-SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
indexRowKey[0] = saltByte;
}
return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
} finally {
try {
stream.close();
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
}
}
}
#location 106
#vulnerability type NULL_DEREFERENCE
#fixed code
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
int prefixKeyLength =
prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length
: regionEndKey.length) : 0;
TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
DataOutput output = new DataOutputStream(stream);
try {
// For local indexes, we must prepend the row key with the start region key
if (prependRegionStartKey) {
if (regionStartKey.length == 0) {
output.write(new byte[prefixKeyLength]);
} else {
output.write(regionStartKey);
}
}
if (isIndexSalted) {
output.write(0); // will be set at end to index salt byte
}
// The dataRowKeySchema includes the salt byte field,
// so we must adjust for that here.
int dataPosOffset = isDataTableSalted ? 1 : 0 ;
BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
// Skip data table salt byte
int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
if (viewIndexId != null) {
output.write(viewIndexId);
}
if (isMultiTenant) {
dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength()==0, dataRowKeySchema.getField(dataPosOffset)));
}
dataPosOffset++;
}
// Write index row key
for (int i = dataPosOffset; i < indexDataColumnCount; i++) {
Boolean hasValue=dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
// Ignore view constants from the data table, as these
// don't need to appear in the index (as they're the
// same for all rows in this index)
if (!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()) {
int pos = rowKeyMetaData.getIndexPkPosition(i-dataPosOffset);
if (Boolean.TRUE.equals(hasValue)) {
dataRowKeyLocator[0][pos] = ptr.getOffset();
dataRowKeyLocator[1][pos] = ptr.getLength();
} else {
dataRowKeyLocator[0][pos] = 0;
dataRowKeyLocator[1][pos] = 0;
}
}
}
BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
Iterator<Expression> expressionIterator = indexedExpressions.iterator();
for (int i = 0; i < nIndexedColumns; i++) {
PDataType dataColumnType;
boolean isNullable;
SortOrder dataSortOrder;
if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
Expression expression = expressionIterator.next();
dataColumnType = expression.getDataType();
dataSortOrder = expression.getSortOrder();
isNullable = expression.isNullable();
expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
}
else {
Field field = dataRowKeySchema.getField(dataPkPosition[i]);
dataColumnType = field.getDataType();
ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
dataSortOrder = field.getSortOrder();
isNullable = field.isNullable();
}
boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
} else {
if (!isBytesComparable) {
indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
}
if (isDataColumnInverted != isIndexColumnDesc) {
writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
} else {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
}
}
if (!indexColumnType.isFixedWidth()) {
output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
}
}
int length = stream.size();
int minLength = length - maxTrailingNulls;
byte[] indexRowKey = stream.getBuffer();
// Remove trailing nulls
while (length > minLength && indexRowKey[length-1] == QueryConstants.SEPARATOR_BYTE) {
length--;
}
if (isIndexSalted) {
// Set salt byte
byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length-SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
indexRowKey[0] = saltByte;
}
return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
} finally {
try {
stream.close();
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
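#note
This row duplicates the buildRowKey pair above -- byte-for-byte identical before/after bodies and the same one-condition fix -- differing only in the reported #location (106 vs. 114): two findings in one method, both resolved by the same patch.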
#vulnerable code
@Test
public void testIndexHalfStoreFileReader() throws Exception {
Connection conn1 = getConnection();
ConnectionQueryServices connectionQueryServices = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES);
HBaseAdmin admin = connectionQueryServices.getAdmin();
String tableName = "TBL_" + generateUniqueName();
String indexName = "IDX_" + generateUniqueName();
try {
dropTable(admin, conn1);
createBaseTable(conn1, tableName, "('e')");
conn1.createStatement().execute("CREATE "+(localIndex?"LOCAL":"")+" INDEX " + indexName + " ON " + tableName + "(v1)" + (localIndex?"":" SPLIT ON ('e')"));
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('b',1,2,4,'z')");
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('f',1,2,3,'z')");
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('j',2,4,2,'a')");
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('q',3,1,1,'c')");
conn1.commit();
String query = "SELECT count(*) FROM " + tableName +" where v1<='z'";
ResultSet rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(4, rs.getInt(1));
TableName indexTable = TableName.valueOf(localIndex?tableName: indexName);
admin.flush(indexTable);
boolean merged = false;
HTableInterface table = connectionQueryServices.getTable(indexTable.getName());
// merge regions until 1 left
long numRegions = 0;
while (true) {
rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(4, rs.getInt(1)); //TODO this returns 5 sometimes instead of 4, duplicate results?
try {
List<HRegionInfo> indexRegions = admin.getTableRegions(indexTable);
numRegions = indexRegions.size();
if (numRegions==1) {
break;
}
if(!merged) {
List<HRegionInfo> regions =
admin.getTableRegions(indexTable);
Log.info("Merging: " + regions.size());
admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
regions.get(1).getEncodedNameAsBytes(), false);
merged = true;
Threads.sleep(10000);
}
} catch (Exception ex) {
Log.info(ex);
}
long waitStartTime = System.currentTimeMillis();
// wait until merge happened
while (System.currentTimeMillis() - waitStartTime < 10000) {
List<HRegionInfo> regions = admin.getTableRegions(indexTable);
Log.info("Waiting:" + regions.size());
if (regions.size() < numRegions) {
break;
}
Threads.sleep(1000);
}
SnapshotTestingUtils.waitForTableToBeOnline(BaseTest.getUtility(), indexTable);
assertTrue("Index table should be online ", admin.isTableAvailable(indexTable));
}
} finally {
dropTable(admin, conn1);
}
}
#location 9
#vulnerability type RESOURCE_LEAK
#fixed code
@Test
public void testIndexHalfStoreFileReader() throws Exception {
Connection conn1 = getConnection();
ConnectionQueryServices connectionQueryServices = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES);
HBaseAdmin admin = connectionQueryServices.getAdmin();
String tableName = "TBL_" + generateUniqueName();
String indexName = "IDX_" + generateUniqueName();
createBaseTable(conn1, tableName, "('e')");
conn1.createStatement().execute("CREATE "+(localIndex?"LOCAL":"")+" INDEX " + indexName + " ON " + tableName + "(v1)" + (localIndex?"":" SPLIT ON ('e')"));
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('b',1,2,4,'z')");
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('f',1,2,3,'z')");
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('j',2,4,2,'a')");
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('q',3,1,1,'c')");
conn1.commit();
String query = "SELECT count(*) FROM " + tableName +" where v1<='z'";
ResultSet rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(4, rs.getInt(1));
TableName indexTable = TableName.valueOf(localIndex?tableName: indexName);
admin.flush(indexTable);
boolean merged = false;
HTableInterface table = connectionQueryServices.getTable(indexTable.getName());
// merge regions until 1 left
long numRegions = 0;
while (true) {
rs = conn1.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(4, rs.getInt(1)); //TODO this returns 5 sometimes instead of 4, duplicate results?
try {
List<HRegionInfo> indexRegions = admin.getTableRegions(indexTable);
numRegions = indexRegions.size();
if (numRegions==1) {
break;
}
if(!merged) {
List<HRegionInfo> regions =
admin.getTableRegions(indexTable);
Log.info("Merging: " + regions.size());
admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
regions.get(1).getEncodedNameAsBytes(), false);
merged = true;
Threads.sleep(10000);
}
} catch (Exception ex) {
Log.info(ex);
}
long waitStartTime = System.currentTimeMillis();
// wait until merge happened
while (System.currentTimeMillis() - waitStartTime < 10000) {
List<HRegionInfo> regions = admin.getTableRegions(indexTable);
Log.info("Waiting:" + regions.size());
if (regions.size() < numRegions) {
break;
}
Threads.sleep(1000);
}
SnapshotTestingUtils.waitForTableToBeOnline(BaseTest.getUtility(), indexTable);
assertTrue("Index table should be online ", admin.isTableAvailable(indexTable));
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
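#note
The patch flattens the test by removing the try/finally and all `dropTable(admin, conn1)` calls. As a RESOURCE_LEAK fix it is indirect: `conn1`, `admin`, and the never-used `HTableInterface table` still look unclosed afterward, so the finding at the flagged line presumably hinged on a path through the removed wrapper rather than on the handles themselves. Read it as the minimal diff that resolved the report, not a full cleanup.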
#vulnerable code
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{
KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
KeyValue keyToSeek = kv;
KeyValue splitKeyValue = KeyValueUtil.createKeyValueFromKey(reader.getSplitkey());
if (reader.isTop()) {
if(getComparator().compare(kv, splitKeyValue) < 0){
if(!isSeek && realSeekDone()) {
return true;
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
return seekOrReseekToProperKey(isSeek, keyToSeek);
} else {
if (getComparator().compare(kv, splitKeyValue) >= 0) {
close();
return false;
}
if(!isSeek && reader.getRegionInfo().getStartKey().length == 0 && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
}
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
#location 12
#vulnerability type NULL_DEREFERENCE
#fixed code
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{
Cell keyToSeek = cell;
KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey());
if (reader.isTop()) {
if(this.comparator.compare(cell, splitKeyValue, true) < 0){
if(!isSeek && realSeekDone()) {
return true;
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
keyToSeek = getKeyPresentInHFiles(cell);
return seekOrReseekToProperKey(isSeek, keyToSeek);
} else {
if (this.comparator.compare(cell, splitKeyValue, true) >= 0) {
close();
return false;
}
if(!isSeek && reader.getRegionInfo().getStartKey().length == 0 && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
keyToSeek = getKeyPresentInHFiles(cell);
}
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
} | Below is the vulnerable code, please generate the patch based on the following information. |
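#note
The culprit is the conversion step: `PhoenixKeyValueUtil.maybeCopyCell(cell)` can -- as the name warns -- return null, and the original immediately calls `kv.getRowArray()` and feeds `kv` to the comparator. The fix stops converting altogether: it compares the incoming Cell against a `KeyValue.KeyOnlyKeyValue` via `this.comparator.compare(cell, splitKeyValue, true)` and passes the Cell through unchanged. A self-contained sketch of the bug shape and the direct-use fix; maybeCopy is a stand-in for maybeCopyCell:

public class SkipNullableConverter {
    static String maybeCopy(String s) {
        return s.isEmpty() ? null : s; // "maybe" in the name is a warning sign
    }

    static int lengthViaConverter(String s) {
        return maybeCopy(s).length();  // NPE whenever maybeCopy declines to copy
    }

    static int lengthDirect(String s) {
        return s.length();             // the fixed seekOrReseek works on its input directly
    }

    public static void main(String[] args) {
        System.out.println(lengthDirect(""));      // 0
        try {
            lengthViaConverter("");
        } catch (NullPointerException e) {
            System.out.println("converter path threw NPE");
        }
    }
}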
#vulnerable code
@Test(timeout=300000)
public void testWriteFailureDisablesIndex() throws Exception {
String query;
ResultSet rs;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = driver.connect(url, props);
conn.setAutoCommit(false);
conn.createStatement().execute(
"CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
rs = conn.createStatement().executeQuery(query);
assertFalse(rs.next());
conn.createStatement().execute(
"CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
rs = conn.createStatement().executeQuery(query);
assertFalse(rs.next());
// Verify the metadata for index is correct.
rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
new String[] { PTableType.INDEX.toString() });
assertTrue(rs.next());
assertEquals(INDEX_TABLE_NAME, rs.getString(3));
assertEquals(PIndexState.ACTIVE.toString(), rs.getString("INDEX_STATE"));
assertFalse(rs.next());
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
stmt.setString(1, "a");
stmt.setString(2, "x");
stmt.setString(3, "1");
stmt.execute();
conn.commit();
TableName indexTable = TableName.valueOf(INDEX_TABLE_FULL_NAME);
HBaseAdmin admin = this.util.getHBaseAdmin();
HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
try{
admin.disableTable(indexTable);
admin.deleteTable(indexTable);
} catch (TableNotFoundException ignore) {}
stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
stmt.setString(1, "a2");
stmt.setString(2, "x2");
stmt.setString(3, "2");
stmt.execute();
try {
conn.commit();
} catch (SQLException e) {}
// Verify the metadata for index is correct.
rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
new String[] { PTableType.INDEX.toString() });
assertTrue(rs.next());
assertEquals(INDEX_TABLE_NAME, rs.getString(3));
assertEquals(PIndexState.DISABLE.toString(), rs.getString("INDEX_STATE"));
assertFalse(rs.next());
// Verify UPSERT on data table still work after index is disabled
stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
stmt.setString(1, "a3");
stmt.setString(2, "x3");
stmt.setString(3, "3");
stmt.execute();
conn.commit();
query = "SELECT v2 FROM " + DATA_TABLE_FULL_NAME + " where v1='x3'";
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
assertTrue(QueryUtil.getExplainPlan(rs).contains("CLIENT PARALLEL 1-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME));
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
// recreate index table
admin.createTable(indexTableDesc);
do {
Thread.sleep(15 * 1000); // sleep 15 secs
rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
new String[] { PTableType.INDEX.toString() });
assertTrue(rs.next());
if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
break;
}
} while(true);
// verify index table has data
query = "SELECT count(1) FROM " + INDEX_TABLE_FULL_NAME;
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
// using 2 here because we only partially build index from where we failed and the oldest
// index row has been deleted when we dropped the index table during test.
assertEquals(2, rs.getInt(1));
}
#location 8
#vulnerability type NULL_DEREFERENCE
#fixed code
@Test(timeout=300000)
public void testWriteFailureDisablesIndex() throws Exception {
testWriteFailureDisablesIndex(false);
} | Below is the vulnerable code, please generate the patch based on the following information. |
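#note
The body is extracted into a parameterized helper, `testWriteFailureDisablesIndex(boolean)`, invoked here with false (a sibling test presumably passes true). The flagged line in the original was `driver.connect(url, props)` -- the same nullable `Driver.connect` pattern noted for the concurrent upsert-select test -- and the refactor relocates that code rather than adding a null check in place.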
#vulnerable code
@Override
public void batchStarted(MiniBatchOperationInProgress<Mutation> miniBatchOp, IndexMetaData context) throws IOException {
// The entire purpose of this method impl is to get the existing rows for the
// table rows being indexed into the block cache, as the index maintenance code
// does a point scan per row.
List<IndexMaintainer> indexMaintainers = ((PhoenixIndexMetaData)context).getIndexMaintainers();
List<KeyRange> keys = Lists.newArrayListWithExpectedSize(miniBatchOp.size());
Map<ImmutableBytesWritable, IndexMaintainer> maintainers =
new HashMap<ImmutableBytesWritable, IndexMaintainer>();
ImmutableBytesWritable indexTableName = new ImmutableBytesWritable();
for (int i = 0; i < miniBatchOp.size(); i++) {
Mutation m = miniBatchOp.getOperation(i);
keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
for(IndexMaintainer indexMaintainer: indexMaintainers) {
if (indexMaintainer.isImmutableRows()) continue;
indexTableName.set(indexMaintainer.getIndexTableName());
if (maintainers.get(indexTableName) != null) continue;
maintainers.put(indexTableName, indexMaintainer);
}
}
if (maintainers.isEmpty()) return;
Scan scan = IndexManagementUtil.newLocalStateScan(new ArrayList<IndexMaintainer>(maintainers.values()));
ScanRanges scanRanges = ScanRanges.createPointLookup(keys);
scanRanges.initializeScan(scan);
scan.setFilter(new SkipScanFilter(scanRanges.getSkipScanFilter(),true));
Region region = env.getRegion();
RegionScanner scanner = region.getScanner(scan);
// Run through the scanner using internal nextRaw method
region.startRegionOperation();
try {
synchronized (scanner) {
boolean hasMore;
do {
List<Cell> results = Lists.newArrayList();
// Results are potentially returned even when the return value of s.next is
// false since this is an indication of whether or not there are more values
// after the ones returned
hasMore = scanner.nextRaw(results);
} while (hasMore);
}
} finally {
try {
scanner.close();
} finally {
region.closeRegionOperation();
}
}
}
#location 28
#vulnerability type THREAD_SAFETY_VIOLATION
#fixed code
@Override
public void batchStarted(MiniBatchOperationInProgress<Mutation> miniBatchOp, IndexMetaData context) throws IOException {
} | Below is the vulnerable code, please generate the patch based on the following information. |
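#note
Here the fix is deletion: the method that pre-warmed the block cache -- building a skip-scan over every row in the batch and draining a RegionScanner under `startRegionOperation()` with a `synchronized (scanner)` block at the flagged line -- is emptied. Rather than prove the priming pass thread-safe, the patch drops the optimization; the per-row index-maintenance scans it accelerated still function, just cold.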
#vulnerable code
@Test
public void testCSVCommonsUpsert_WithTimestamp() throws Exception {
CSVParser parser = null;
PhoenixConnection conn = null;
try {
// Create table
String statements = "CREATE TABLE IF NOT EXISTS TS_TABLE "
+ "(ID BIGINT NOT NULL PRIMARY KEY, TS TIMESTAMP);";
conn = DriverManager.getConnection(getUrl()).unwrap(
PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn,
new StringReader(statements), null);
// Upsert CSV file
CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "TS_TABLE",
null, true, ',', '"', null, "!");
csvUtil.upsert(
new StringReader("ID,TS\n"
+ "1,1970-01-01 00:00:10\n"
+ "2,1970-01-01 00:00:10.123\n"));
// Compare Phoenix ResultSet with CSV file content
PreparedStatement statement = conn
.prepareStatement("SELECT ID, TS FROM TS_TABLE ORDER BY ID");
ResultSet phoenixResultSet = statement.executeQuery();
assertTrue(phoenixResultSet.next());
assertEquals(1L, phoenixResultSet.getLong(1));
assertEquals(10000L, phoenixResultSet.getTimestamp(2).getTime());
assertTrue(phoenixResultSet.next());
assertEquals(2L, phoenixResultSet.getLong(1));
assertEquals(10123L, phoenixResultSet.getTimestamp(2).getTime());
assertFalse(phoenixResultSet.next());
} finally {
if (parser != null)
parser.close();
if (conn != null)
conn.close();
}
}
#location 18
#vulnerability type NULL_DEREFERENCE
#fixed code
@Test
public void testCSVCommonsUpsert_WithTimestamp() throws Exception {
CSVParser parser = null;
PhoenixConnection conn = null;
try {
// Create table
String statements = "CREATE TABLE IF NOT EXISTS TS_TABLE "
+ "(ID BIGINT NOT NULL PRIMARY KEY, TS TIMESTAMP);";
conn = DriverManager.getConnection(getUrl()).unwrap(
PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn,
new StringReader(statements), null);
// Upsert CSV file
CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "TS_TABLE",
ImmutableList.<String>of(), true, ',', '"', null, "!");
csvUtil.upsert(
new StringReader("ID,TS\n"
+ "1,1970-01-01 00:00:10\n"
+ "2,1970-01-01 00:00:10.123\n"));
// Compare Phoenix ResultSet with CSV file content
PreparedStatement statement = conn
.prepareStatement("SELECT ID, TS FROM TS_TABLE ORDER BY ID");
ResultSet phoenixResultSet = statement.executeQuery();
assertTrue(phoenixResultSet.next());
assertEquals(1L, phoenixResultSet.getLong(1));
assertEquals(10000L, phoenixResultSet.getTimestamp(2).getTime());
assertTrue(phoenixResultSet.next());
assertEquals(2L, phoenixResultSet.getLong(1));
assertEquals(10123L, phoenixResultSet.getTimestamp(2).getTime());
assertFalse(phoenixResultSet.next());
} finally {
if (parser != null)
parser.close();
if (conn != null)
conn.close();
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
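#note
Identical remedy to the array-typed CSV pair earlier: the null column-list argument becomes `ImmutableList.<String>of()`, removing the nullable value the checker traced into CSVCommonsLoader.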
#vulnerable code
@Test
public void testLocalIndexTableRegionSplitPolicyAndSplitKeys() throws Exception {
createBaseTable(DATA_TABLE_NAME, null,"('e','i','o')");
Connection conn1 = DriverManager.getConnection(getUrl());
Connection conn2 = DriverManager.getConnection(getUrl());
conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME + "(v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_FULL_NAME).next();
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
HTable userTable = new HTable(admin.getConfiguration(),TableName.valueOf(DATA_TABLE_NAME));
HTable indexTable = new HTable(admin.getConfiguration(),TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
assertArrayEquals("Both user table and index table should have same split keys.", userTable.getStartKeys(), indexTable.getStartKeys());
}
#location 13
#vulnerability type RESOURCE_LEAK
#fixed code
@Test
public void testLocalIndexTableRegionSplitPolicyAndSplitKeys() throws Exception {
createBaseTable(DATA_TABLE_NAME, null,"('e','i','o')");
Connection conn1 = DriverManager.getConnection(getUrl());
Connection conn2 = DriverManager.getConnection(getUrl());
conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME + "(v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_FULL_NAME).next();
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)));
assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
try (HTable userTable = new HTable(admin.getConfiguration(),TableName.valueOf(DATA_TABLE_NAME))) {
try (HTable indexTable = new HTable(admin.getConfiguration(),TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME)))) {
assertArrayEquals("Both user table and index table should have same split keys.", userTable.getStartKeys(), indexTable.getStartKeys());
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
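#note
The repair above is the standard try-with-resources pattern: each HTable handle is scoped to a block that closes it on every exit path. A minimal, self-contained illustration of the same RESOURCE_LEAK shape and its fix, using only java.io and illustrative names rather than HBase API:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class LeakSketch {
    // Leaky shape: if readLine() throws, the reader is never closed.
    static String firstLineLeaky(String path) throws IOException {
        BufferedReader reader = new BufferedReader(new FileReader(path));
        return reader.readLine(); // no close() on any path
    }

    // Repaired shape: try-with-resources closes the reader on every path,
    // mirroring the nested try blocks around the two HTables above.
    static String firstLineSafe(String path) throws IOException {
        try (BufferedReader reader = new BufferedReader(new FileReader(path))) {
            return reader.readLine();
        }
    }
}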
#vulnerable code
public static long convertToMilliseconds(long serverTimeStamp) {
return serverTimeStamp / TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
}
#location 2
#vulnerability type NULL_DEREFERENCE | #fixed code
public static long convertToMilliseconds(long serverTimeStamp) {
return serverTimeStamp / TransactionFactory.getTransactionProvider().getTransactionContext().getMaxTransactionsPerSecond();
} | Below is the vulnerable code, please generate the patch based on the following information. |
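#note
This pair, and the tearDownTxManager, setTxnConfigs, and getWallClockTimeFromCellTimeStamp pairs below, are all the same one-line substitution: the chained call goes through getTransactionProvider() instead of getTransactionFactory(), presumably because the latter could answer null. A generic sketch of that NULL_DEREFERENCE shape and a defensive repair (all names here are hypothetical, not the Phoenix API):

import java.util.Objects;

public class ProviderSketch {
    interface Context { long maxTransactionsPerSecond(); }

    static Context context; // may legitimately be null before initialization

    // Vulnerable shape: dereferences a possibly-null lookup result.
    static long convertLeaky(long ts) {
        return ts / context.maxTransactionsPerSecond(); // NPE if uninitialized
    }

    // Repaired shape: resolve through an accessor that fails fast with a
    // clear message instead of surfacing a bare NullPointerException.
    static Context requireContext() {
        return Objects.requireNonNull(context, "transaction context not initialized");
    }

    static long convertSafe(long ts) {
        return ts / requireContext().maxTransactionsPerSecond();
    }
}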
#vulnerable code
@Test
public void testSelectUpsertWithOldClient() throws Exception {
checkForPreConditions();
// Insert data with new client and read with old client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertTrue(compareOutput(CREATE_ADD, QUERY));
// Insert more data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, ADD_DATA);
executeQueriesWithCurrentVersion(QUERY_MORE);
assertTrue(compareOutput(ADD_DATA, QUERY_MORE));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Test
public void testSelectUpsertWithOldClient() throws Exception {
// Insert data with new client and read with old client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertExpectedOutput(CREATE_ADD, QUERY);
// Insert more data with old client and read with new client
executeQueryWithClientVersion(compatibleClientVersion, ADD_DATA);
executeQueriesWithCurrentVersion(QUERY_MORE);
assertExpectedOutput(ADD_DATA, QUERY_MORE);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
private static void tearDownTxManager() throws SQLException {
TransactionFactory.getTransactionFactory().getTransactionContext().tearDownTxManager();
}
#location 2
#vulnerability type NULL_DEREFERENCE | #fixed code
private static void tearDownTxManager() throws SQLException {
TransactionFactory.getTransactionProvider().getTransactionContext().tearDownTxManager();
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void testSelectWithOldClient() throws Exception {
checkForPreConditions();
// Insert data with new client and read with old client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertTrue(compareOutput(CREATE_ADD, QUERY));
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Test
public void testSelectWithOldClient() throws Exception {
// Insert data with new client and read with old client
executeQueriesWithCurrentVersion(CREATE_ADD);
executeQueryWithClientVersion(compatibleClientVersion, QUERY);
assertExpectedOutput(CREATE_ADD, QUERY);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
protected static void setTxnConfigs() throws IOException {
TransactionFactory.getTransactionFactory().getTransactionContext().setTxnConfigs(config, tmpFolder.newFolder().getAbsolutePath(), DEFAULT_TXN_TIMEOUT_SECONDS);
}
#location 2
#vulnerability type NULL_DEREFERENCE | #fixed code
protected static void setTxnConfigs() throws IOException {
TransactionFactory.getTransactionProvider().getTransactionContext().setTxnConfigs(config, tmpFolder.newFolder().getAbsolutePath(), DEFAULT_TXN_TIMEOUT_SECONDS);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public boolean seekToPreviousRow(Cell key) throws IOException {
KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key);
if (reader.isTop()) {
Optional<Cell> firstKey = reader.getFirstKey();
// This will be null when the file is empty, in which case we cannot seekBefore to
// any key
if (firstKey.isPresent()) {
return false;
}
byte[] fk = PhoenixKeyValueUtil.maybeCopyCell(firstKey.get()).getKey();
if (getComparator().compare(kv, firstKey.get()) <= 0) {
return super.seekToPreviousRow(key);
}
KeyValue replacedKey = getKeyPresentInHFiles(kv.getRowArray());
boolean seekToPreviousRow = super.seekToPreviousRow(replacedKey);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
}
return seekToPreviousRow;
} else {
// The equals sign isn't strictly necessary; it's just here to be consistent with
// seekTo
KeyValue splitKeyValue = KeyValueUtil.createKeyValueFromKey(reader.getSplitkey());
if (getComparator().compare(kv, splitKeyValue) >= 0) {
boolean seekToPreviousRow = super.seekToPreviousRow(kv);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
}
return seekToPreviousRow;
}
}
boolean seekToPreviousRow = super.seekToPreviousRow(kv);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
}
return seekToPreviousRow;
}
#location 15
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public boolean seekToPreviousRow(Cell key) throws IOException {
KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key);
if (reader.isTop()) {
Optional<Cell> firstKey = reader.getFirstKey();
// This will be null when the file is empty, in which case we cannot seekBefore to
// any key
if (firstKey.isPresent()) {
return false;
}
if (this.comparator.compare(kv, firstKey.get(), true) <= 0) {
return super.seekToPreviousRow(key);
}
Cell replacedKey = getKeyPresentInHFiles(kv);
boolean seekToPreviousRow = super.seekToPreviousRow(replacedKey);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
}
return seekToPreviousRow;
} else {
// The equals sign isn't strictly necessary; it's just here to be consistent with
// seekTo
KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey());
if (this.comparator.compare(kv, splitKeyValue, true) >= 0) {
boolean seekToPreviousRow = super.seekToPreviousRow(kv);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
}
return seekToPreviousRow;
}
}
boolean seekToPreviousRow = super.seekToPreviousRow(kv);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
}
return seekToPreviousRow;
} | Below is the vulnerable code, please generate the patch based on the following information. |
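#note
Both versions keep the early return under if (firstKey.isPresent()), even though the adjacent comment describes bailing out when the file is empty; taken at face value, that guard means the later firstKey.get() only executes when the Optional is empty. Whether or not that reading matches the surrounding codebase, the hazard it describes is common enough to state in miniature:

import java.util.Optional;

public class OptionalSketch {
    // Hazardous shape: inverted guard, so get() runs exactly when the
    // Optional is empty and throws NoSuchElementException.
    static int firstLengthLeaky(Optional<byte[]> firstKey) {
        if (firstKey.isPresent()) {
            return -1; // meant to be the "empty file" early exit
        }
        return firstKey.get().length; // always fails when reached
    }

    // Conventional shape: bail out on absence, then use the value.
    static int firstLengthSafe(Optional<byte[]> firstKey) {
        if (!firstKey.isPresent()) {
            return -1;
        }
        return firstKey.get().length;
    }
}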
#vulnerable code
public static long getWallClockTimeFromCellTimeStamp(long tsOfCell) {
return TransactionFactory.getTransactionFactory().getTransactionContext().isPreExistingVersion(tsOfCell) ? tsOfCell : TransactionUtil.convertToMilliseconds(tsOfCell);
}
#location 2
#vulnerability type NULL_DEREFERENCE | #fixed code
public static long getWallClockTimeFromCellTimeStamp(long tsOfCell) {
return TransactionFactory.getTransactionProvider().getTransactionContext().isPreExistingVersion(tsOfCell) ? tsOfCell : TransactionUtil.convertToMilliseconds(tsOfCell);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public void batchStarted(MiniBatchOperationInProgress<Mutation> miniBatchOp, IndexMetaData context) throws IOException {
// The entire purpose of this method impl is to get the existing rows for the
// table rows being indexed into the block cache, as the index maintenance code
// does a point scan per row.
List<IndexMaintainer> indexMaintainers = ((PhoenixIndexMetaData)context).getIndexMaintainers();
List<KeyRange> keys = Lists.newArrayListWithExpectedSize(miniBatchOp.size());
Map<ImmutableBytesWritable, IndexMaintainer> maintainers =
new HashMap<ImmutableBytesWritable, IndexMaintainer>();
ImmutableBytesWritable indexTableName = new ImmutableBytesWritable();
for (int i = 0; i < miniBatchOp.size(); i++) {
Mutation m = miniBatchOp.getOperation(i);
keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
for(IndexMaintainer indexMaintainer: indexMaintainers) {
if (indexMaintainer.isImmutableRows()) continue;
indexTableName.set(indexMaintainer.getIndexTableName());
if (maintainers.get(indexTableName) != null) continue;
maintainers.put(indexTableName, indexMaintainer);
}
}
if (maintainers.isEmpty()) return;
Scan scan = IndexManagementUtil.newLocalStateScan(new ArrayList<IndexMaintainer>(maintainers.values()));
ScanRanges scanRanges = ScanRanges.createPointLookup(keys);
scanRanges.initializeScan(scan);
scan.setFilter(new SkipScanFilter(scanRanges.getSkipScanFilter(),true));
Region region = env.getRegion();
RegionScanner scanner = region.getScanner(scan);
// Run through the scanner using internal nextRaw method
region.startRegionOperation();
try {
synchronized (scanner) {
boolean hasMore;
do {
List<Cell> results = Lists.newArrayList();
// Results are potentially returned even when the return value of s.next is
// false since this is an indication of whether or not there are more values
// after the ones returned
hasMore = scanner.nextRaw(results);
} while (hasMore);
}
} finally {
try {
scanner.close();
} finally {
region.closeRegionOperation();
}
}
}
#location 25
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Override
public void batchStarted(MiniBatchOperationInProgress<Mutation> miniBatchOp, IndexMetaData context) throws IOException {
} | Below is the vulnerable code, please generate the patch based on the following information. |
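#note
The fix here does not make the cache-warming scan safe — it deletes it, leaving batchStarted() a no-op. Independent of that decision, the vulnerable version did model the right acquire/release discipline around region operations, with the release in finally; a generic sketch of that discipline using java.util.concurrent locks (not HBase API):

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class RegionOpSketch {
    private final ReadWriteLock regionLock = new ReentrantReadWriteLock();

    // Analogue of startRegionOperation()/closeRegionOperation(): the
    // release sits in finally, so an exception mid-scan can never strand
    // the lock.
    int scanAll(Iterable<int[]> rows) {
        regionLock.readLock().lock();
        try {
            int count = 0;
            for (int[] row : rows) {
                count += row.length; // stand-in for per-row scan work
            }
            return count;
        } finally {
            regionLock.readLock().unlock();
        }
    }
}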
#vulnerable code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException, SQLException {
RegionCoprocessorEnvironment env = c.getEnvironment();
Region region = env.getRegion();
long ts = scan.getTimeRange().getMax();
boolean localIndexScan = ScanUtil.isLocalIndex(scan);
if (ScanUtil.isAnalyzeTable(scan)) {
byte[] gp_width_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
byte[] gp_per_region_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
// Let this throw, as this scan is being done for the sole purpose of collecting stats
StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
env, region.getRegionInfo().getTable().getNameAsString(), ts,
gp_width_bytes, gp_per_region_bytes);
return collectStats(s, statsCollector, region, scan, env.getConfiguration());
} else if (ScanUtil.isIndexRebuild(scan)) { return rebuildIndices(s, region, scan, env.getConfiguration()); }
int offsetToBe = 0;
if (localIndexScan) {
/*
* For local indexes, we need to set an offset on row key expressions to skip
* the region start key.
*/
offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
region.getRegionInfo().getEndKey().length;
ScanUtil.setRowKeyOffset(scan, offsetToBe);
}
final int offset = offsetToBe;
PTable projectedTable = null;
PTable writeToTable = null;
byte[][] values = null;
byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
if (isDescRowKeyOrderUpgrade) {
logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
projectedTable = deserializeTable(descRowKeyTableBytes);
try {
writeToTable = PTableImpl.makePTable(projectedTable, true);
} catch (SQLException e) {
ServerUtil.throwIOException("Upgrade failed", e); // Impossible
}
values = new byte[projectedTable.getPKColumns().size()][];
}
byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
RegionScanner theScanner = s;
byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
List<Expression> selectExpressions = null;
byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
boolean isUpsert = false;
boolean isDelete = false;
byte[] deleteCQ = null;
byte[] deleteCF = null;
byte[] emptyCF = null;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
if (upsertSelectTable != null) {
isUpsert = true;
projectedTable = deserializeTable(upsertSelectTable);
selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
values = new byte[projectedTable.getPKColumns().size()][];
} else {
byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
if (!isDelete) {
deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
}
emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
}
TupleProjector tupleProjector = null;
byte[][] viewConstants = null;
ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
}
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
theScanner =
getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
}
if (j != null) {
theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
}
int batchSize = 0;
long batchSizeBytes = 0L;
List<Mutation> mutations = Collections.emptyList();
boolean needToWrite = false;
Configuration conf = c.getEnvironment().getConfiguration();
long flushSize = region.getTableDesc().getMemStoreFlushSize();
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
/**
* Slow down the writes if the memstore size is more than
* (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
* bytes. This avoids a flush storm to hdfs for cases like index building, where reads and
* writes happen to all the table regions in the server.
*/
final long blockingMemStoreSize = flushSize * (
conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
needToWrite = true;
// TODO: size better
mutations = Lists.newArrayListWithExpectedSize(1024);
batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
batchSizeBytes = env.getConfiguration().getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
}
Aggregators aggregators = ServerAggregators.deserialize(
scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
Aggregator[] rowAggregators = aggregators.getAggregators();
boolean hasMore;
boolean hasAny = false;
MultiKeyValueTuple result = new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
long rowCount = 0;
final RegionScanner innerScanner = theScanner;
byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
boolean acquiredLock = false;
try {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount++;
}
}
region.startRegionOperation();
acquiredLock = true;
synchronized (innerScanner) {
do {
List<Cell> results = new ArrayList<Cell>();
// Results are potentially returned even when the return value of s.next is false
// since this is an indication of whether or not there are more values after the
// ones returned
hasMore = innerScanner.nextRaw(results);
if (!results.isEmpty()) {
rowCount++;
result.setKeyValues(results);
if (isDescRowKeyOrderUpgrade) {
Arrays.fill(values, null);
Cell firstKV = results.get(0);
RowKeySchema schema = projectedTable.getRowKeySchema();
int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
for (int i = 0; i < schema.getFieldCount(); i++) {
Boolean hasValue = schema.next(ptr, i, maxOffset);
if (hasValue == null) {
break;
}
Field field = schema.getField(i);
if (field.getSortOrder() == SortOrder.DESC) {
// Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
if (field.getDataType().isArrayType()) {
field.getDataType().coerceBytes(ptr, null, field.getDataType(),
field.getMaxLength(), field.getScale(), field.getSortOrder(),
field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
}
// Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
// Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
} else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
ptr.set(invertedBytes);
}
} else if (field.getDataType() == PBinary.INSTANCE) {
// Remove trailing space characters so that the setValues call below will replace them
// with the correct zero byte character. Note this is somewhat dangerous as these
// could be legit, but I don't know what the alternative is.
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
}
values[i] = ptr.copyBytes();
}
writeToTable.newKey(ptr, values);
if (Bytes.compareTo(
firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
continue;
}
byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
if (offset > 0) { // for local indexes (prepend region start key)
byte[] newRowWithOffset = new byte[offset + newRow.length];
System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
newRow = newRowWithOffset;
}
byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
for (Cell cell : results) {
// Copy existing cell but with new row key
Cell newCell = new KeyValue(newRow, 0, newRow.length,
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
case Put:
// If Put, point delete old Put
Delete del = new Delete(oldRow);
del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
mutations.add(del);
Put put = new Put(newRow);
put.add(newCell);
mutations.add(put);
break;
case Delete:
case DeleteColumn:
case DeleteFamily:
case DeleteFamilyVersion:
Delete delete = new Delete(newRow);
delete.addDeleteMarker(newCell);
mutations.add(delete);
break;
}
}
} else if (buildLocalIndex) {
for (IndexMaintainer maintainer : indexMaintainers) {
if (!results.isEmpty()) {
result.getKey(ptr);
ValueGetter valueGetter =
maintainer.createGetterFromKeyValues(
ImmutableBytesPtr.copyBytesIfNecessary(ptr),
results);
Put put = maintainer.buildUpdateMutation(kvBuilder,
valueGetter, ptr, results.get(0).getTimestamp(),
env.getRegion().getRegionInfo().getStartKey(),
env.getRegion().getRegionInfo().getEndKey());
indexMutations.add(put);
}
}
result.setKeyValues(results);
} else if (isDelete) {
// FIXME: the version of the Delete constructor without the lock
// args was introduced in 0.94.4, thus if we try to use it here
// we can no longer use the 0.94.2 version of the client.
Cell firstKV = results.get(0);
Delete delete = new Delete(firstKV.getRowArray(),
firstKV.getRowOffset(), firstKV.getRowLength(),ts);
mutations.add(delete);
// force tephra to ignore this delete
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
} else if (isUpsert) {
Arrays.fill(values, null);
int i = 0;
List<PColumn> projectedColumns = projectedTable.getColumns();
for (; i < projectedTable.getPKColumns().size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
values[i] = ptr.copyBytes();
// If SortOrder from expression in SELECT doesn't match the
// column being projected into then invert the bits.
if (expression.getSortOrder() !=
projectedColumns.get(i).getSortOrder()) {
SortOrder.invert(values[i], 0, values[i], 0,
values[i].length);
}
}
}
projectedTable.newKey(ptr, values);
PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
for (; i < projectedColumns.size(); i++) {
Expression expression = selectExpressions.get(i);
if (expression.evaluate(result, ptr)) {
PColumn column = projectedColumns.get(i);
if (!column.getDataType().isSizeCompatible(ptr, null,
expression.getDataType(), expression.getSortOrder(),
expression.getMaxLength(), expression.getScale(),
column.getMaxLength(), column.getScale())) {
throw new DataExceedsCapacityException(
column.getDataType(), column.getMaxLength(),
column.getScale(), column.getName().getString(), ptr);
}
column.getDataType().coerceBytes(ptr, null,
expression.getDataType(), expression.getMaxLength(),
expression.getScale(), expression.getSortOrder(),
column.getMaxLength(), column.getScale(),
column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
row.setValue(column, bytes);
}
}
for (Mutation mutation : row.toRowMutations()) {
mutations.add(mutation);
}
for (i = 0; i < selectExpressions.size(); i++) {
selectExpressions.get(i).reset();
}
} else if (deleteCF != null && deleteCQ != null) {
// No need to search for delete column, since we project only it
// if no empty key value is being set
if (emptyCF == null ||
result.getValue(deleteCF, deleteCQ) != null) {
Delete delete = new Delete(results.get(0).getRowArray(),
results.get(0).getRowOffset(),
results.get(0).getRowLength());
delete.deleteColumns(deleteCF, deleteCQ, ts);
// force tephra to ignore this delete
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
mutations.add(delete);
}
}
if (emptyCF != null) {
/*
* If we've specified an emptyCF, then we need to insert an empty
* key value "retroactively" for any key value that is visible at
* the timestamp that the DDL was issued. Key values that are not
* visible at this timestamp will not ever be projected up to
* scans past this timestamp, so don't need to be considered.
* We insert one empty key value per row per timestamp.
*/
Set<Long> timeStamps =
Sets.newHashSetWithExpectedSize(results.size());
for (Cell kv : results) {
long kvts = kv.getTimestamp();
if (!timeStamps.contains(kvts)) {
Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength());
put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
}
}
// Commit in batches based on UPSERT_BATCH_SIZE_BYTES_ATTRIB in config
List<List<Mutation>> batchMutationList =
MutationState.getMutationBatchList(batchSize, batchSizeBytes, mutations);
for (List<Mutation> batchMutations : batchMutationList) {
commitBatch(region, batchMutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr,
txState);
batchMutations.clear();
}
mutations.clear();
// Commit in batches based on UPSERT_BATCH_SIZE_BYTES_ATTRIB in config
List<List<Mutation>> batchIndexMutationList =
MutationState.getMutationBatchList(batchSize, batchSizeBytes, indexMutations);
for (List<Mutation> batchIndexMutations : batchIndexMutationList) {
commitBatch(region, batchIndexMutations, null, blockingMemStoreSize, null, txState);
batchIndexMutations.clear();
}
indexMutations.clear();
}
aggregators.aggregate(rowAggregators, result);
hasAny = true;
}
} while (hasMore);
if (!mutations.isEmpty()) {
commitBatch(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState);
}
if (!indexMutations.isEmpty()) {
commitBatch(region, indexMutations, null, blockingMemStoreSize, indexMaintainersPtr, txState);
indexMutations.clear();
}
}
} finally {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount--;
}
}
try {
innerScanner.close();
} finally {
if (acquiredLock) region.closeRegionOperation();
}
}
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
}
final boolean hadAny = hasAny;
KeyValue keyValue = null;
if (hadAny) {
byte[] value = aggregators.toBytes(rowAggregators);
keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
}
final KeyValue aggKeyValue = keyValue;
RegionScanner scanner = new BaseRegionScanner(innerScanner) {
private boolean done = !hadAny;
@Override
public boolean isFilterDone() {
return done;
}
@Override
public boolean next(List<Cell> results) throws IOException {
if (done) return false;
done = true;
results.add(aggKeyValue);
return false;
}
@Override
public long getMaxResultSize() {
return scan.getMaxResultSize();
}
};
return scanner;
}
#location 200
#vulnerability type THREAD_SAFETY_VIOLATION | #fixed code
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException, SQLException {
RegionCoprocessorEnvironment env = c.getEnvironment();
Region region = env.getRegion();
long ts = scan.getTimeRange().getMax();
boolean localIndexScan = ScanUtil.isLocalIndex(scan);
if (ScanUtil.isAnalyzeTable(scan)) {
byte[] gp_width_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
byte[] gp_per_region_bytes =
scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
// Let this throw, as this scan is being done for the sole purpose of collecting stats
StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(
env, region.getRegionInfo().getTable().getNameAsString(), ts,
gp_width_bytes, gp_per_region_bytes);
return collectStats(s, statsCollector, region, scan, env.getConfiguration());
} else if (ScanUtil.isIndexRebuild(scan)) { return rebuildIndices(s, region, scan, env.getConfiguration()); }
int offsetToBe = 0;
if (localIndexScan) {
/*
* For local indexes, we need to set an offset on row key expressions to skip
* the region start key.
*/
offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
region.getRegionInfo().getEndKey().length;
ScanUtil.setRowKeyOffset(scan, offsetToBe);
}
final int offset = offsetToBe;
PTable projectedTable = null;
PTable writeToTable = null;
byte[][] values = null;
byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
if (isDescRowKeyOrderUpgrade) {
logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
projectedTable = deserializeTable(descRowKeyTableBytes);
try {
writeToTable = PTableImpl.makePTable(projectedTable, true);
} catch (SQLException e) {
ServerUtil.throwIOException("Upgrade failed", e); // Impossible
}
values = new byte[projectedTable.getPKColumns().size()][];
}
byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
RegionScanner theScanner = s;
byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
List<Expression> selectExpressions = null;
byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
boolean isUpsert = false;
boolean isDelete = false;
byte[] deleteCQ = null;
byte[] deleteCF = null;
byte[] emptyCF = null;
HTable targetHTable = null;
boolean areMutationInSameRegion = true;
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
if (upsertSelectTable != null) {
isUpsert = true;
projectedTable = deserializeTable(upsertSelectTable);
targetHTable = new HTable(env.getConfiguration(), projectedTable.getPhysicalName().getBytes());
selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
values = new byte[projectedTable.getPKColumns().size()][];
areMutationInSameRegion = Bytes.compareTo(targetHTable.getTableName(),
region.getTableDesc().getTableName().getName()) == 0
&& !isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
} else {
byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
if (!isDelete) {
deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
}
emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
}
TupleProjector tupleProjector = null;
byte[][] viewConstants = null;
ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
}
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
theScanner =
getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
}
if (j != null) {
theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env);
}
int batchSize = 0;
long batchSizeBytes = 0L;
List<Mutation> mutations = Collections.emptyList();
boolean needToWrite = false;
Configuration conf = c.getEnvironment().getConfiguration();
long flushSize = region.getTableDesc().getMemStoreFlushSize();
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
/**
* Slow down the writes if the memstore size is more than
* (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
* bytes. This avoids a flush storm to hdfs for cases like index building, where reads and
* writes happen to all the table regions in the server.
*/
final long blockingMemStoreSize = flushSize * (
conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
needToWrite = true;
// TODO: size better
mutations = Lists.newArrayListWithExpectedSize(1024);
batchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
batchSizeBytes = env.getConfiguration().getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
}
Aggregators aggregators = ServerAggregators.deserialize(
scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
Aggregator[] rowAggregators = aggregators.getAggregators();
boolean hasMore;
boolean hasAny = false;
MultiKeyValueTuple result = new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
long rowCount = 0;
final RegionScanner innerScanner = theScanner;
byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
boolean acquiredLock = false;
try {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount++;
}
}
region.startRegionOperation();
acquiredLock = true;
synchronized (innerScanner) {
do {
List<Cell> results = new ArrayList<Cell>();
// Results are potentially returned even when the return value of s.next is false
// since this is an indication of whether or not there are more values after the
// ones returned
hasMore = innerScanner.nextRaw(results);
if (!results.isEmpty()) {
rowCount++;
result.setKeyValues(results);
if (isDescRowKeyOrderUpgrade) {
Arrays.fill(values, null);
Cell firstKV = results.get(0);
RowKeySchema schema = projectedTable.getRowKeySchema();
int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
for (int i = 0; i < schema.getFieldCount(); i++) {
Boolean hasValue = schema.next(ptr, i, maxOffset);
if (hasValue == null) {
break;
}
Field field = schema.getField(i);
if (field.getSortOrder() == SortOrder.DESC) {
// Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
if (field.getDataType().isArrayType()) {
field.getDataType().coerceBytes(ptr, null, field.getDataType(),
field.getMaxLength(), field.getScale(), field.getSortOrder(),
field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
}
// Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
// Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
} else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
ptr.set(invertedBytes);
}
} else if (field.getDataType() == PBinary.INSTANCE) {
// Remove trailing space characters so that the setValues call below will replace them
// with the correct zero byte character. Note this is somewhat dangerous as these
// could be legit, but I don't know what the alternative is.
int len = ptr.getLength();
while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
len--;
}
ptr.set(ptr.get(), ptr.getOffset(), len);
}
values[i] = ptr.copyBytes();
}
writeToTable.newKey(ptr, values);
if (Bytes.compareTo(
firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(),
ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
continue;
}
byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
if (offset > 0) { // for local indexes (prepend region start key)
byte[] newRowWithOffset = new byte[offset + newRow.length];
System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
newRow = newRowWithOffset;
}
byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
for (Cell cell : results) {
// Copy existing cell but with new row key
Cell newCell = new KeyValue(newRow, 0, newRow.length,
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
case Put:
// If Put, point delete old Put
Delete del = new Delete(oldRow);
del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
mutations.add(del);
Put put = new Put(newRow);
put.add(newCell);
mutations.add(put);
break;
case Delete:
case DeleteColumn:
case DeleteFamily:
case DeleteFamilyVersion:
Delete delete = new Delete(newRow);
delete.addDeleteMarker(newCell);
mutations.add(delete);
break;
}
}
} else if (buildLocalIndex) {
for (IndexMaintainer maintainer : indexMaintainers) {
if (!results.isEmpty()) {
result.getKey(ptr);
ValueGetter valueGetter =
maintainer.createGetterFromKeyValues(
ImmutableBytesPtr.copyBytesIfNecessary(ptr),
results);
Put put = maintainer.buildUpdateMutation(kvBuilder,
valueGetter, ptr, results.get(0).getTimestamp(),
env.getRegion().getRegionInfo().getStartKey(),
env.getRegion().getRegionInfo().getEndKey());
indexMutations.add(put);
}
}
result.setKeyValues(results);
} else if (isDelete) {
// FIXME: the version of the Delete constructor without the lock
// args was introduced in 0.94.4, thus if we try to use it here
// we can no longer use the 0.94.2 version of the client.
Cell firstKV = results.get(0);
Delete delete = new Delete(firstKV.getRowArray(),
firstKV.getRowOffset(), firstKV.getRowLength(),ts);
mutations.add(delete);
// force tephra to ignore this delete
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
} else if (isUpsert) {
Arrays.fill(values, null);
int bucketNumOffset = 0;
if (projectedTable.getBucketNum() != null) {
values[0] = new byte[] { 0 };
bucketNumOffset = 1;
}
int i = bucketNumOffset;
List<PColumn> projectedColumns = projectedTable.getColumns();
for (; i < projectedTable.getPKColumns().size(); i++) {
Expression expression = selectExpressions.get(i - bucketNumOffset);
if (expression.evaluate(result, ptr)) {
values[i] = ptr.copyBytes();
// If SortOrder from expression in SELECT doesn't match the
// column being projected into then invert the bits.
if (expression.getSortOrder() !=
projectedColumns.get(i).getSortOrder()) {
SortOrder.invert(values[i], 0, values[i], 0,
values[i].length);
}
}else{
values[i] = ByteUtil.EMPTY_BYTE_ARRAY;
}
}
projectedTable.newKey(ptr, values);
PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
for (; i < projectedColumns.size(); i++) {
Expression expression = selectExpressions.get(i - bucketNumOffset);
if (expression.evaluate(result, ptr)) {
PColumn column = projectedColumns.get(i);
if (!column.getDataType().isSizeCompatible(ptr, null,
expression.getDataType(), expression.getSortOrder(),
expression.getMaxLength(), expression.getScale(),
column.getMaxLength(), column.getScale())) {
throw new DataExceedsCapacityException(
column.getDataType(), column.getMaxLength(),
column.getScale(), column.getName().getString(), ptr);
}
column.getDataType().coerceBytes(ptr, null,
expression.getDataType(), expression.getMaxLength(),
expression.getScale(), expression.getSortOrder(),
column.getMaxLength(), column.getScale(),
column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
row.setValue(column, bytes);
}
}
for (Mutation mutation : row.toRowMutations()) {
mutations.add(mutation);
}
for (i = 0; i < selectExpressions.size(); i++) {
selectExpressions.get(i).reset();
}
} else if (deleteCF != null && deleteCQ != null) {
// No need to search for delete column, since we project only it
// if no empty key value is being set
if (emptyCF == null ||
result.getValue(deleteCF, deleteCQ) != null) {
Delete delete = new Delete(results.get(0).getRowArray(),
results.get(0).getRowOffset(),
results.get(0).getRowLength());
delete.deleteColumns(deleteCF, deleteCQ, ts);
// force tephra to ignore this delete
delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
mutations.add(delete);
}
}
if (emptyCF != null) {
/*
* If we've specified an emptyCF, then we need to insert an empty
* key value "retroactively" for any key value that is visible at
* the timestamp that the DDL was issued. Key values that are not
* visible at this timestamp will not ever be projected up to
* scans past this timestamp, so don't need to be considered.
* We insert one empty key value per row per timestamp.
*/
Set<Long> timeStamps =
Sets.newHashSetWithExpectedSize(results.size());
for (Cell kv : results) {
long kvts = kv.getTimestamp();
if (!timeStamps.contains(kvts)) {
Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength());
put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
}
}
// Commit in batches based on UPSERT_BATCH_SIZE_BYTES_ATTRIB in config
List<List<Mutation>> batchMutationList =
MutationState.getMutationBatchList(batchSize, batchSizeBytes, mutations);
for (List<Mutation> batchMutations : batchMutationList) {
commit(region, batchMutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr,
txState, areMutationInSameRegion, targetHTable);
batchMutations.clear();
}
mutations.clear();
// Commit in batches based on UPSERT_BATCH_SIZE_BYTES_ATTRIB in config
List<List<Mutation>> batchIndexMutationList =
MutationState.getMutationBatchList(batchSize, batchSizeBytes, indexMutations);
for (List<Mutation> batchIndexMutations : batchIndexMutationList) {
commitBatch(region, batchIndexMutations, null, blockingMemStoreSize, null, txState);
batchIndexMutations.clear();
}
indexMutations.clear();
}
aggregators.aggregate(rowAggregators, result);
hasAny = true;
}
} while (hasMore);
if (!mutations.isEmpty()) {
commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState,
areMutationInSameRegion, targetHTable);
mutations.clear();
}
if (!indexMutations.isEmpty()) {
commitBatch(region, indexMutations, null, blockingMemStoreSize, indexMaintainersPtr, txState);
indexMutations.clear();
}
}
} finally {
if(needToWrite) {
synchronized (lock) {
scansReferenceCount--;
}
}
if (targetHTable != null) {
targetHTable.close();
}
try {
innerScanner.close();
} finally {
if (acquiredLock) region.closeRegionOperation();
}
}
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
}
final boolean hadAny = hasAny;
KeyValue keyValue = null;
if (hadAny) {
byte[] value = aggregators.toBytes(rowAggregators);
keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
}
final KeyValue aggKeyValue = keyValue;
RegionScanner scanner = new BaseRegionScanner(innerScanner) {
private boolean done = !hadAny;
@Override
public boolean isFilterDone() {
return done;
}
@Override
public boolean next(List<Cell> results) throws IOException {
if (done) return false;
done = true;
results.add(aggKeyValue);
return false;
}
@Override
public long getMaxResultSize() {
return scan.getMaxResultSize();
}
};
return scanner;
} | Below is the vulnerable code, please generate the patch based on the following information. |
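#note
Among the many changes in this pair, two are easy to restate in isolation: the fixed version closes the newly introduced targetHTable in the finally block, and both versions guard the scansReferenceCount counter with synchronized (lock), pairing the increment with a decrement in finally. A distilled sketch of that gating pattern (an AtomicInteger would serve equally well):

public class ScanGateSketch {
    private final Object lock = new Object();
    private int scansReferenceCount; // guarded by lock

    // The decrement lives in finally, so a failing scan body cannot leave
    // the gate permanently open.
    void runGatedScan(Runnable scanBody) {
        synchronized (lock) {
            scansReferenceCount++;
        }
        try {
            scanBody.run();
        } finally {
            synchronized (lock) {
                scansReferenceCount--;
            }
        }
    }

    boolean hasActiveScans() {
        synchronized (lock) {
            return scansReferenceCount > 0;
        }
    }
}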
#vulnerable code
@Override
public int newKey(ImmutableBytesWritable key, byte[][] values) {
int nValues = values.length;
while (nValues > 0 && (values[nValues-1] == null || values[nValues-1].length == 0)) {
nValues--;
}
int i = 0;
TrustedByteArrayOutputStream os = new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
try {
Integer bucketNum = this.getBucketNum();
if (bucketNum != null) {
// Write place holder for salt byte
i++;
os.write(QueryConstants.SEPARATOR_BYTE_ARRAY);
}
List<PColumn> columns = getPKColumns();
int nColumns = columns.size();
PDataType type = null;
while (i < nValues && i < nColumns) {
// Separate variable length column values in key with zero byte
if (type != null && !type.isFixedWidth()) {
os.write(SEPARATOR_BYTE);
}
PColumn column = columns.get(i);
type = column.getDataType();
// This will throw if the value is null and the type doesn't allow null
byte[] byteValue = values[i++];
if (byteValue == null) {
byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
}
// An empty byte array return value means null. Do this,
// since a type may have multiple representations of null.
// For example, VARCHAR treats both null and an empty string
// as null. This way we don't need to leak that part of the
// implementation outside of PDataType by checking the value
// here.
if (byteValue.length == 0 && !column.isNullable()) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
}
Integer maxLength = column.getMaxLength();
if (maxLength != null && type.isFixedWidth() && byteValue.length <= maxLength) {
byteValue = StringUtil.padChar(byteValue, maxLength);
} else if (maxLength != null && byteValue.length > maxLength) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " bytes (" + SchemaUtil.toString(type, byteValue) + ")");
}
os.write(byteValue, 0, byteValue.length);
}
// If some non null pk values aren't set, then throw
if (i < nColumns) {
PColumn column = columns.get(i);
type = column.getDataType();
if (type.isFixedWidth() || !column.isNullable()) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
}
}
byte[] buf = os.getBuffer();
int size = os.size();
if (bucketNum != null) {
buf[0] = SaltingUtil.getSaltingByte(buf, 1, size-1, bucketNum);
}
key.set(buf,0,size);
return i;
} finally {
try {
os.close();
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
}
}
}
#location 59
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public int newKey(ImmutableBytesWritable key, byte[][] values) {
int nValues = values.length;
while (nValues > 0 && (values[nValues-1] == null || values[nValues-1].length == 0)) {
nValues--;
}
int i = 0;
TrustedByteArrayOutputStream os = new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
try {
Integer bucketNum = this.getBucketNum();
if (bucketNum != null) {
// Write place holder for salt byte
i++;
os.write(QueryConstants.SEPARATOR_BYTE_ARRAY);
}
List<PColumn> columns = getPKColumns();
int nColumns = columns.size();
PDataType type = null;
while (i < nValues && i < nColumns) {
// Separate variable length column values in key with zero byte
if (type != null && !type.isFixedWidth()) {
os.write(SEPARATOR_BYTE);
}
PColumn column = columns.get(i);
type = column.getDataType();
// This will throw if the value is null and the type doesn't allow null
byte[] byteValue = values[i++];
if (byteValue == null) {
byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
}
// An empty byte array return value means null. Do this,
// since a type may have multiple representations of null.
// For example, VARCHAR treats both null and an empty string
// as null. This way we don't need to leak that part of the
// implementation outside of PDataType by checking the value
// here.
if (byteValue.length == 0 && !column.isNullable()) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
}
Integer maxLength = column.getMaxLength();
if (maxLength != null && type.isFixedWidth() && byteValue.length <= maxLength) {
byteValue = StringUtil.padChar(byteValue, maxLength);
} else if (maxLength != null && byteValue.length > maxLength) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " bytes (" + SchemaUtil.toString(type, byteValue) + ")");
}
os.write(byteValue, 0, byteValue.length);
}
// If some non null pk values aren't set, then throw
if (i < nColumns) {
PColumn column = columns.get(i);
type = column.getDataType();
if (type.isFixedWidth() || !column.isNullable()) {
throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
}
}
if (nValues == 0) {
throw new ConstraintViolationException("Primary key may not be null ("+ name.getString() + ")");
}
byte[] buf = os.getBuffer();
int size = os.size();
if (bucketNum != null) {
buf[0] = SaltingUtil.getSaltingByte(buf, 1, size-1, bucketNum);
}
key.set(buf,0,size);
return i;
} finally {
try {
os.close();
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
}
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
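#note
The substantive change in this pair is a single guard: after trailing nulls and empty arrays are trimmed, an entirely-null composite key is rejected up front (nValues == 0) instead of falling through to serialize an empty key. The same guard in a self-contained form, with illustrative names and a deliberately simplified serialization:

public class KeyGuardSketch {
    static byte[] newKey(String tableName, byte[][] values) {
        int nValues = values.length;
        while (nValues > 0 && (values[nValues - 1] == null || values[nValues - 1].length == 0)) {
            nValues--;
        }
        if (nValues == 0) { // the added guard
            throw new IllegalStateException("Primary key may not be null (" + tableName + ")");
        }
        int total = 0;
        for (int i = 0; i < nValues; i++) {
            total += values[i] == null ? 0 : values[i].length;
        }
        byte[] key = new byte[total];
        int off = 0;
        for (int i = 0; i < nValues; i++) {
            if (values[i] != null) {
                System.arraycopy(values[i], 0, key, off, values[i].length);
                off += values[i].length;
            }
        }
        return key;
    }
}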
#vulnerable code
public byte[] getViewIndexIdFromIndexRowKey(ImmutableBytesWritable indexRowKeyPtr) {
assert(isLocalIndex);
RowKeySchema indexRowKeySchema = getIndexRowKeySchema();
// TODO add logic to skip region start key as well because we cannot find the region startkey in indexhalfstorefilereader.
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
TrustedByteArrayOutputStream stream =
new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes);
DataOutput output = new DataOutputStream(stream);
try {
int indexPosOffset = (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (isMultiTenant ? 1 : 0) + (viewIndexId == null ? 0 : 1);
Boolean hasValue =
indexRowKeySchema.iterator(indexRowKeyPtr, ptr, indexPosOffset);
if (Boolean.TRUE.equals(hasValue)) {
output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
}
int length = stream.size();
byte[] dataRowKey = stream.getBuffer();
return dataRowKey.length == length ? dataRowKey : Arrays.copyOf(dataRowKey, length);
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
} finally {
try {
stream.close();
} catch (IOException e) {
throw new RuntimeException(e); // Impossible
}
}
}
#location 18
#vulnerability type NULL_DEREFERENCE | #fixed code
public byte[] getViewIndexIdFromIndexRowKey(ImmutableBytesWritable indexRowKeyPtr) {
assert (isLocalIndex);
ImmutableBytesPtr ptr =
new ImmutableBytesPtr(indexRowKeyPtr.get(),( indexRowKeyPtr.getOffset()
+ (nIndexSaltBuckets > 0 ? 1 : 0)), viewIndexId.length);
return ptr.copyBytesIfNecessary();
} | Below is the vulnerable code, please generate the patch based on the following information. |
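#note
The fix stops re-walking the row key through the index schema and output streams, and instead slices the viewIndexId bytes directly at a known offset past the optional salt byte. That slicing move, in plain Java with illustrative parameters:

import java.util.Arrays;

public class SliceSketch {
    // Skip the salt byte(s), as the fix does, then copy exactly the
    // viewIndexId-sized region of the row key.
    static byte[] sliceViewIndexId(byte[] rowKey, int saltBytes, int viewIndexIdLength) {
        int offset = saltBytes;
        return Arrays.copyOfRange(rowKey, offset, offset + viewIndexIdLength);
    }
}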
#vulnerable code
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{
KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
KeyValue keyToSeek = kv;
KeyValue splitKeyValue = KeyValueUtil.createKeyValueFromKey(reader.getSplitkey());
if (reader.isTop()) {
if(getComparator().compare(kv, splitKeyValue) < 0){
if(!isSeek && realSeekDone()) {
return true;
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
return seekOrReseekToProperKey(isSeek, keyToSeek);
} else {
if (getComparator().compare(kv, splitKeyValue) >= 0) {
close();
return false;
}
if(!isSeek && reader.getRegionInfo().getStartKey().length == 0 && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
}
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
#location 12
#vulnerability type NULL_DEREFERENCE | #fixed code
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{
Cell keyToSeek = cell;
KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey());
if (reader.isTop()) {
if(this.comparator.compare(cell, splitKeyValue, true) < 0){
if(!isSeek && realSeekDone()) {
return true;
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
keyToSeek = getKeyPresentInHFiles(cell);
return seekOrReseekToProperKey(isSeek, keyToSeek);
} else {
if (this.comparator.compare(cell, splitKeyValue, true) >= 0) {
close();
return false;
}
if(!isSeek && reader.getRegionInfo().getStartKey().length == 0 && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
keyToSeek = getKeyPresentInHFiles(cell);
}
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
} | Below is the vulnerable code, please generate the patch based on the following information. |
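#note
Like the seekToPreviousRow pair above, the fix drops the PhoenixKeyValueUtil.maybeCopyCell materialization and compares the incoming Cell against the split key with a comparator. The general move — compare in place rather than copying first — in plain Java:

import java.util.Comparator;

public class CompareInPlaceSketch {
    static final Comparator<byte[]> LEXICOGRAPHIC = (a, b) -> {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int d = (a[i] & 0xff) - (b[i] & 0xff);
            if (d != 0) {
                return d;
            }
        }
        return a.length - b.length;
    };

    // No defensive copies are made on either side of the comparison.
    static boolean beforeSplit(byte[] key, byte[] splitKey) {
        return LEXICOGRAPHIC.compare(key, splitKey) < 0;
    }
}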
#vulnerable code
@Override
public void lookup()
{
ClassGroup group = this.getInstructions().getCode().getAttributes().getClassFile().getGroup();
ClassFile otherClass = group.findClass(method.getClassEntry().getName());
if (otherClass == null)
return; // not our class
// when I recompile classes I can see the class of invokevirtuals methods change, get all methods
//List<net.runelite.deob.Method> list = new ArrayList<>();
//findMethodFromClass(new HashSet<>(), list, otherClass);
net.runelite.deob.Method m = otherClass.findMethodDeep(method.getNameAndType());
if (m == null)
{
return;
}
myMethods = Renamer.getVirutalMethods(m);
}
#location 4
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public void lookup()
{
myMethods = lookupMethods();
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public void run(ClassGroup one, ClassGroup two)
{
eone = new Execution(one);
eone.populateInitialMethods();
List<Method> initial1 = eone.getInitialMethods().stream().sorted((m1, m2) -> m1.getName().compareTo(m2.getName())).collect(Collectors.toList());
eone.run();
etwo = new Execution(two);
etwo.populateInitialMethods();
List<Method> initial2 = etwo.getInitialMethods().stream().sorted((m1, m2) -> m1.getName().compareTo(m2.getName())).collect(Collectors.toList());
etwo.run();
assert initial1.size() == initial2.size();
for (int i = 0; i < initial1.size(); ++i)
{
Method m1 = initial1.get(i), m2 = initial2.get(i);
objMap.put(m1, m2);
}
// process(
// initial1.get(0).getMethod(),
// initial2.get(0).getMethod()
// );
// processed.add(initial1.get(0).getMethod());
process(
one.findClass("class143").findMethod("run"),
two.findClass("class143").findMethod("run")
);
// processed.add(one.findClass("client").findMethod("init"));
// for (;;)
// {
// Optional next = objMap.keySet().stream()
// .filter(m -> !processed.contains(m))
// .findAny();
// if (!next.isPresent())
// break;
//
// Method m = (Method) next.get();
// Method m2 = (Method) objMap.get(m);
//
// System.out.println("Scanning " + m.getName() + " -> " + m2.getName());
// process(m, m2);
// processed.add(m);
// }
for (Entry<Object, Object> e : objMap.entrySet())
{
Method m1 = (Method) e.getKey();
Method m2 = (Method) e.getValue();
System.out.println("FINAL " + m1.getMethods().getClassFile().getName() + "." + m1.getName() + " -> " + m2.getMethods().getClassFile().getName() + "." + m2.getName());
}
System.out.println("done");
}
#location 28
#vulnerability type NULL_DEREFERENCE | #fixed code
public void run(ClassGroup one, ClassGroup two)
{
eone = new Execution(one);
eone.populateInitialMethods();
List<Method> initial1 = eone.getInitialMethods().stream().sorted((m1, m2) -> m1.getName().compareTo(m2.getName())).collect(Collectors.toList());
eone.run();
etwo = new Execution(two);
etwo.populateInitialMethods();
List<Method> initial2 = etwo.getInitialMethods().stream().sorted((m1, m2) -> m1.getName().compareTo(m2.getName())).collect(Collectors.toList());
etwo.run();
assert initial1.size() == initial2.size();
for (int i = 0; i < initial1.size(); ++i)
{
Method m1 = initial1.get(i), m2 = initial2.get(i);
assert m1.getName().equals(m2.getName());
objMap.put(m1, m2);
}
// process(
// initial1.get(0).getMethod(),
// initial2.get(0).getMethod()
// );
// processed.add(initial1.get(0).getMethod());
// process(
// one.findClass("class143").findMethod("run"),
// two.findClass("class143").findMethod("run")
// );
// processed.add(one.findClass("client").findMethod("init"));
for (;;)
{
Optional next = objMap.keySet().stream()
.filter(m -> !processed.contains(m))
.findAny();
if (!next.isPresent())
break;
Method m = (Method) next.get();
Method m2 = (Method) objMap.get(m);
System.out.println("Scanning " + m.getName() + " -> " + m2.getName());
process(m, m2);
processed.add(m);
}
for (Entry<Object, Object> e : objMap.entrySet())
{
Method m1 = (Method) e.getKey();
Method m2 = (Method) e.getValue();
System.out.println("FINAL " + m1.getMethods().getClassFile().getName() + "." + m1.getName() + " -> " + m2.getMethods().getClassFile().getName() + "." + m2.getName());
}
System.out.println("done count " + objMap.size());
} | Below is the vulnerable code, please generate the patch based on the following information. |
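
What changed here: the vulnerable version dereferences the hardcoded one.findClass("class143") lookup (the #location 28 failure when that class is missing from either jar) and leaves the generic worklist commented out; the fix drops the hardcoded class and re-enables the worklist over objMap, plus an assert documenting that initial methods pair up by name. One detail worth noting is that findAny() re-queries the key set on every pass, so pairs that process() adds to objMap are picked up without a ConcurrentModificationException. An explicit queue avoids rescanning the whole key set each pass; this is a sketch of the idea only, since process() would also need to enqueue the pairs it discovers, which the code above does not do:

Deque<Method> worklist = new ArrayDeque<>(initial1);
Set<Method> processed = new HashSet<>();
while (!worklist.isEmpty())
{
	Method m = worklist.pop();
	if (!processed.add(m))
	{
		continue; // already scanned
	}
	process(m, (Method) objMap.get(m)); // process() would push new discoveries here
}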
#vulnerable code
@Override
public void renameField(net.runelite.deob.Field f, Field newField)
{
Class clazz = field.getClassEntry();
NameAndType nat = field.getNameAndType();
ClassFile cf = this.getInstructions().getCode().getAttributes().getClassFile().getGroup().findClass(clazz.getName());
if (cf == null)
return;
net.runelite.deob.Field f2 = cf.findFieldDeep(nat);
assert f2 != null;
if (f2 == f)
field = newField;
}
#location 7
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public void renameField(net.runelite.deob.Field f, Field newField)
{
net.runelite.deob.Field f2 = getMyField();
if (f2 == f)
field = newField;
} | Below is the vulnerable code, please generate the patch based on the following information. |
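
This entry and the later buildInstructionGraph and renameField entries all replace the same copy-pasted findClass/findFieldDeep chain with one getMyField() helper. Its body is not shown in any of them; inferred from the vulnerable versions, it would look roughly like the following, a hypothetical reconstruction rather than the actual source:

private net.runelite.deob.Field getMyField()
{
	Class clazz = field.getClassEntry();
	ClassFile cf = this.getInstructions().getCode().getAttributes().getClassFile().getGroup().findClass(clazz.getName());
	if (cf == null)
	{
		return null; // field belongs to a class outside this group
	}
	return cf.findFieldDeep(field.getNameAndType());
}

Centralizing the lookup also drops the assert f2 != null: findFieldDeep may now return null, and the f2 == f comparison in each caller tolerates that.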
#vulnerable code
@Test
public void test1() throws IOException
{
File file = folder.newFile();
Store store = new Store();
DataFile df = new DataFile(store, 42, file);
int sector = df.write(3, ByteBuffer.wrap("test".getBytes()));
ByteBuffer buf = df.read(3, sector, 4);
String str = new String(buf.array());
Assert.assertEquals("test", str);
file.delete();
}
#location 8
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void test1() throws IOException
{
File file = folder.newFile();
Store store = new Store(folder.getRoot());
DataFile df = new DataFile(store, 42, file);
int sector = df.write(3, ByteBuffer.wrap("test".getBytes()));
ByteBuffer buf = df.read(3, sector, 4);
String str = new String(buf.array());
Assert.assertEquals("test", str);
file.delete();
} | Below is the vulnerable code, please generate the patch based on the following information. |
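
The RESOURCE_LEAK here is the Store and DataFile pair that is never closed; note that the listed fix only changes the Store constructor, so the handles still leak if the assertion fails. Assuming Store and DataFile implement AutoCloseable (their definitions are not shown in this entry), a try-with-resources variant releases both deterministically; the same shape applies to the test2 entry further down:

@Test
public void test1() throws IOException
{
	File file = folder.newFile();
	try (Store store = new Store(folder.getRoot());
		DataFile df = new DataFile(store, 42, file))
	{
		int sector = df.write(3, ByteBuffer.wrap("test".getBytes()));
		ByteBuffer buf = df.read(3, sector, 4);
		Assert.assertEquals("test", new String(buf.array()));
	}
	file.delete();
}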
#vulnerable code
@Override
public void buildInstructionGraph()
{
Class clazz = field.getClassEntry();
NameAndType nat = field.getNameAndType();
ClassFile cf = this.getInstructions().getCode().getAttributes().getClassFile().getGroup().findClass(clazz.getName());
if (cf == null)
return;
net.runelite.deob.Field f = cf.findFieldDeep(nat);
assert f != null;
f.addReference(this);
}
#location 7
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public void buildInstructionGraph()
{
net.runelite.deob.Field f = getMyField();
if (f != null)
f.addReference(this);
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public void run(ClassGroup one, ClassGroup two)
{
eone = new Execution(one);
eone.populateInitialMethods();
eone.run();
etwo = new Execution(two);
etwo.populateInitialMethods();
etwo.run();
process(
one.findClass("client").findMethod("init"),
two.findClass("client").findMethod("init")
);
System.out.println("done");
}
#location 12
#vulnerability type NULL_DEREFERENCE | #fixed code
public void run(ClassGroup one, ClassGroup two)
{
eone = new Execution(one);
eone.populateInitialMethods();
List<Method> initial1 = eone.getInitialMethods().stream().sorted((m1, m2) -> m1.getName().compareTo(m2.getName())).collect(Collectors.toList());
eone.run();
etwo = new Execution(two);
etwo.populateInitialMethods();
List<Method> initial2 = etwo.getInitialMethods().stream().sorted((m1, m2) -> m1.getName().compareTo(m2.getName())).collect(Collectors.toList());
etwo.run();
assert initial1.size() == initial2.size();
for (int i = 0; i < initial1.size(); ++i)
{
Method m1 = initial1.get(i), m2 = initial2.get(i);
objMap.put(m1, m2);
}
// process(
// initial1.get(0).getMethod(),
// initial2.get(0).getMethod()
// );
// processed.add(initial1.get(0).getMethod());
process(
one.findClass("class143").findMethod("run"),
two.findClass("class143").findMethod("run")
);
// processed.add(one.findClass("client").findMethod("init"));
// for (;;)
// {
// Optional next = objMap.keySet().stream()
// .filter(m -> !processed.contains(m))
// .findAny();
// if (!next.isPresent())
// break;
//
// Method m = (Method) next.get();
// Method m2 = (Method) objMap.get(m);
//
// System.out.println("Scanning " + m.getName() + " -> " + m2.getName());
// process(m, m2);
// processed.add(m);
// }
for (Entry<Object, Object> e : objMap.entrySet())
{
Method m1 = (Method) e.getKey();
Method m2 = (Method) e.getValue();
System.out.println("FINAL " + m1.getMethods().getClassFile().getName() + "." + m1.getName() + " -> " + m2.getMethods().getClassFile().getName() + "." + m2.getName());
}
System.out.println("done");
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Override
public void renameField(net.runelite.deob.Field f, Field newField)
{
Class clazz = field.getClassEntry();
NameAndType nat = field.getNameAndType();
ClassFile cf = this.getInstructions().getCode().getAttributes().getClassFile().getGroup().findClass(clazz.getName());
if (cf == null)
return;
net.runelite.deob.Field f2 = cf.findFieldDeep(nat);
assert f2 != null;
if (f2 == f)
{
field = newField;
}
}
#location 7
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public void renameField(net.runelite.deob.Field f, Field newField)
{
net.runelite.deob.Field f2 = getMyField();
if (f2 == f)
{
field = newField;
}
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
@Test
public void test2() throws IOException
{
byte[] b = new byte[1024];
for (int i = 0; i < 1024; ++i) b[i] = (byte) i;
File file = folder.newFile();
Store store = new Store(folder.getRoot());
DataFile df = new DataFile(store, file);
int sector = df.write(42, 0x1FFFF, ByteBuffer.wrap(b));
byte[] buf = df.read(42, 0x1FFFF, sector, b.length);
Assert.assertArrayEquals(b, buf);
file.delete();
}
#location 11
#vulnerability type RESOURCE_LEAK | #fixed code
@Test
public void test2() throws IOException
{
byte[] b = new byte[1024];
for (int i = 0; i < 1024; ++i)
b[i] = (byte) i;
File file = folder.newFile();
Store store = new Store(folder.getRoot());
DataFile df = new DataFile(store, file);
DataFileWriteResult res = df.write(42, 0x1FFFF, ByteBuffer.wrap(b), 0, 0);
DataFileReadResult res2 = df.read(42, 0x1FFFF, res.sector, res.compressedLength);
byte[] buf = res2.data;
Assert.assertArrayEquals(b, buf);
file.delete();
} | Below is the vulnerable code, please generate the patch based on the following information. |
#vulnerable code
public NameMappings run(ClassGroup one, ClassGroup two)
{
Execution eone = new Execution(one);
eone.setBuildGraph(true);
eone.populateInitialMethods();
eone.run();
Execution etwo = new Execution(two);
etwo.setBuildGraph(true);
etwo.populateInitialMethods();
etwo.run();
g1 = eone.getGraph();
g2 = etwo.getGraph();
System.out.println(eone.getGraph());
System.out.println(etwo.getGraph());
for (int i = 0; i < 250; ++i)
//for (int i = 0; i < Math.min(one.getClasses().size(), two.getClasses().size()); ++i)
{
ClassFile c1 = one.findClass("class" + i);
ClassFile c2 = two.findClass("class" + i);
if (c1 == null || c2 == null)
continue;
//Map m1 = this.find(c1);
//Map m2 = this.find(c2);
// mapClassMethods(m1, m2);
mapDeobfuscatedMethods(c1, c2);
}
ClassFile cf1 = one.findClass("client"), cf2 = two.findClass("client");
mapDeobfuscatedMethods(cf1, cf2);
//List<Field> fl1 = getClientFields(one, eone);
//List<Field> fl2 = getClientFields(two, etwo);
// for (int i = 0; i < Math.min(fl1.size(), fl2.size()); ++i)
// {
// Field f1 = fl1.get(i), f2 = fl2.get(i);
//
// Vertex v1 = g1.getVertexFor(f1);
// Vertex v2 = g2.getVertexFor(f2);
//
// v1.is(v2);
// v2.is(v1);
//
// System.out.println(fname(f1) + " is " + fname(f2));
// }
System.out.println("g1 verticies " + g1.getVerticies().size() + " reachable " + g1.reachableVerticiesFromSolvedVerticies().size());
Set<Vertex> reachable = g1.reachableVerticiesFromSolvedVerticies();
for (Vertex v : g1.getVerticies())
if (!reachable.contains(v))
{
System.out.println("unreachable " + v);
}
for (;;)
{
int before = g1.solved(null);
System.out.println("Before " + before);
solve();
g1.getVerticies().forEach(v -> v.finish());
//g2
int after = g1.solved(null);
System.out.println("After " + after);
if (before == after)
break;
}
g1.check();
g2.check();
System.out.println("methods " +g1.solved(VertexType.METHOD));
System.out.println("f " +g1.solved(VertexType.FIELD));
Vertex stored = null;
for (Vertex v : g1.getVerticies())
{
if (v.getOther() == null)
continue;
if (!v.toString().equals("Vertex{object=class0.<init>()V}"))
continue;
assert stored == null;
stored = v;
for (Edge e : v.getEdges())
{
if (e.getTo().getOther() == null)
{
System.out.println("Edge " + e + " on vertex " + v + " is unsolved");
}
}
}
// NameMappings col = buildCollisionMap(one, two);
// rename(col, two);
//
// NameMappings mappings = buildMappings(one, two); // two -> one
//
// show(mappings);
System.out.println("Solved methods "+ g1.solved(VertexType.METHOD) + ", total " + g1.getVerticies().size());
//rename(mappings, two);
try
{
JarUtil.saveJar(two, new File("/Users/adam/w/rs/07/adamout.jar"));
}
catch (IOException ex)
{
Logger.getLogger(Rename2.class.getName()).log(Level.SEVERE, null, ex);
}
return null;
}
#location 37
#vulnerability type NULL_DEREFERENCE | #fixed code
public NameMappings run(ClassGroup one, ClassGroup two)
{
Execution eone = new Execution(one);
eone.setBuildGraph(true);
eone.populateInitialMethods();
eone.run();
Execution etwo = new Execution(two);
etwo.setBuildGraph(true);
etwo.populateInitialMethods();
etwo.run();
g1 = eone.getGraph();
g2 = etwo.getGraph();
System.out.println(eone.getGraph());
System.out.println(etwo.getGraph());
for (int i = 0; i < 250; ++i)
{
ClassFile c1 = one.findClass("class" + i);
ClassFile c2 = two.findClass("class" + i);
if (c1 == null || c2 == null)
continue;
//Map m1 = this.find(c1);
//Map m2 = this.find(c2);
// mapClassMethods(m1, m2);
mapDeobfuscatedMethods(c1, c2);
}
ClassFile cf1 = one.findClass("client"), cf2 = two.findClass("client");
mapDeobfuscatedMethods(cf1, cf2);
// List<Field> fl1 = getClientFields(one, eone);
// List<Field> fl2 = getClientFields(two, etwo);
//
// for (int i = 0; i < Math.min(fl1.size(), fl2.size()); ++i)
// {
// Field f1 = fl1.get(i), f2 = fl2.get(i);
//
// Vertex v1 = g1.getVertexFor(f1);
// Vertex v2 = g2.getVertexFor(f2);
//
// v1.is(v2);
// v2.is(v1);
//
// System.out.println(fname(f1) + " is " + fname(f2));
// }
System.out.println("g1 verticies " + g1.getVerticies().size() + " reachable " + g1.reachableVerticiesFromSolvedVerticies().size());
Set<Vertex> reachable = g1.reachableVerticiesFromSolvedVerticies();
for (Vertex v : g1.getVerticies())
if (!reachable.contains(v))
{
System.out.println("unreachable " + v);
}
for (;;)
{
int before = g1.solved(null);
System.out.println("Before " + before);
solve();
g1.getVerticies().forEach(v -> v.finish());
//g2
int after = g1.solved(null);
System.out.println("After " + after);
if (before == after)
break;
}
g1.check();
g2.check();
System.out.println("methods " +g1.solved(VertexType.METHOD));
System.out.println("f " +g1.solved(VertexType.FIELD));
Vertex stored = null;
for (Vertex v : g1.getVerticies())
{
if (v.getOther() == null)
continue;
if (!v.toString().equals("Vertex{object=class0.<init>()V}"))
continue;
assert stored == null;
stored = v;
for (Edge e : v.getEdges())
{
if (e.getTo().getOther() == null)
{
System.out.println("Edge " + e + " on vertex " + v + " is unsolved");
}
}
}
// NameMappings col = buildCollisionMap(one, two);
// rename(col, two);
//
// NameMappings mappings = buildMappings(one, two); // two -> one
//
// show(mappings);
System.out.println("Solved methods "+ g1.solved(VertexType.METHOD) + ", solved fields " + g1.solved(VertexType.FIELD) + ", total " + g1.getVerticies().size());
//rename(mappings, two);
try
{
JarUtil.saveJar(two, new File("/Users/adam/w/rs/07/adamout.jar"));
}
catch (IOException ex)
{
Logger.getLogger(Rename2.class.getName()).log(Level.SEVERE, null, ex);
}
return null;
} | Below is the vulnerable code, please generate the patch based on the following information. |
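
Buried in this large method is a clean progress-driven fixed point: each pass records g1.solved(null), runs solve(), finishes the vertices, and stops once a full pass solves nothing new. Since solved() counts solved vertices, a measure that is bounded by the vertex count and presumably never decreases, the loop must terminate. Under that monotonicity assumption it compresses to the following equivalent sketch (not a suggested change to the entry itself):

int before;
do
{
	before = g1.solved(null);
	solve();
	g1.getVerticies().forEach(v -> v.finish());
}
while (g1.solved(null) > before);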
#vulnerable code
@Override
public List<net.runelite.deob.Method> getMethods()
{
ClassGroup group = this.getInstructions().getCode().getAttributes().getClassFile().getGroup();
ClassFile otherClass = group.findClass(method.getClassEntry().getName());
if (otherClass == null)
return new ArrayList<>(); // not our class
// look up this method in this class and anything that inherits from it
List<net.runelite.deob.Method> list = new ArrayList<>();
findMethodFromClass(list, otherClass);
return list;
}
#location 4
#vulnerability type NULL_DEREFERENCE | #fixed code
@Override
public List<net.runelite.deob.Method> getMethods()
{
return myMethods != null ? myMethods : Arrays.asList();
} | Below is the vulnerable code, please generate the patch based on the following information. |
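
Together with the lookup() entry near the top of this section, this completes a two-phase pattern: all the null-prone resolution happens once in lookup(), and the accessor only serves the cached result with an empty-list fallback. As one class it would read as below; the myMethods field declaration is filled in as an assumption, since neither entry shows it:

private List<net.runelite.deob.Method> myMethods;

@Override
public void lookup()
{
	myMethods = lookupMethods(); // may stay null when the class is not ours
}

@Override
public List<net.runelite.deob.Method> getMethods()
{
	return myMethods != null ? myMethods : Arrays.asList();
}

Collections.emptyList() would state the fallback more idiomatically than Arrays.asList(), though both yield an empty list that rejects additions.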
#vulnerable code
private ParallelExecutorMapping mapMethods(ClassGroup one, ClassGroup two)
{
MethodSignatureMapper msm = new MethodSignatureMapper();
msm.map(one, two);
List<ParallelExecutorMapping> pmes = new ArrayList<>();
for (Method m : msm.getMap().keySet())
{
Collection<Method> methods = msm.getMap().get(m);
ExecutionMapper em = new ExecutionMapper(m, methods);
ParallelExecutorMapping mapping = em.run();
mapping.map(mapping.m1, mapping.m2);
pmes.add(mapping);
}
ParallelExecutorMapping finalm = new ParallelExecutorMapping(one, two);
for (ParallelExecutorMapping pme : pmes)
finalm.merge(pme);
return finalm;
}
#location 15
#vulnerability type NULL_DEREFERENCE | #fixed code
private ParallelExecutorMapping mapMethods(ClassGroup one, ClassGroup two)
{
MethodSignatureMapper msm = new MethodSignatureMapper();
msm.map(one, two);
List<ParallelExecutorMapping> pmes = new ArrayList<>();
for (Method m : msm.getMap().keySet())
{
Collection<Method> methods = msm.getMap().get(m);
ExecutionMapper em = new ExecutionMapper(m, methods);
ParallelExecutorMapping mapping = em.run();
if (mapping == null)
continue;
mapping.map(mapping.m1, mapping.m2);
pmes.add(mapping);
}
ParallelExecutorMapping finalm = new ParallelExecutorMapping(one, two);
for (ParallelExecutorMapping pme : pmes)
finalm.merge(pme);
return finalm;
} | Below is the vulnerable code, please generate the patch based on the following information. |
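
The patch is the if (mapping == null) continue; guard: ExecutionMapper.run() can legitimately find no acceptable counterpart, and the vulnerable version called mapping.map(...) on that null unconditionally. The same filtering reads naturally as a stream; a sketch only, assuming run() is safe to invoke per element, and replacing just the collection loop (the merge into finalm is unchanged):

List<ParallelExecutorMapping> pmes = msm.getMap().keySet().stream()
	.map(m -> new ExecutionMapper(m, msm.getMap().get(m)).run())
	.filter(Objects::nonNull)
	.collect(Collectors.toList());
pmes.forEach(mapping -> mapping.map(mapping.m1, mapping.m2));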