input
stringlengths 205
73.3k
| output
stringlengths 64
73.2k
| instruction
stringclasses 1
value |
---|---|---|
#vulnerable code
@Test
public void test() {
JedisPoolConfig config = new JedisPoolConfig();
// 设置空间连接
config.setMaxIdle(20);
config.setMaxWaitMillis(1000);
JedisPool pool = new JedisPool(config, "27.126.180.210", 6379);
System.out.println(pool.getResource());
Jedis jedis = pool.getResource();
jedis.set("name", "陈杰");
System.out.println(jedis.get("name"));
}
#location 12
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Test
public void test() {
JedisPoolConfig config = new JedisPoolConfig();
// 设置空间连接
config.setMaxIdle(20);
config.setMaxWaitMillis(1000);
// JedisPool pool = new JedisPool(config, "27.126.180.210", 6379);
// System.out.println(pool.getResource());
// Jedis jedis = pool.getResource();
// jedis.set("name", "陈杰");
// System.out.println(jedis.get("name"));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public File saveAs(String path) throws IOException, InterruptedException {
Runtime rt = Runtime.getRuntime();
String command = this.commandWithParameters() + Symbol.separator + path;
Process proc = rt.exec(command);
if(htmlFromString) {
OutputStream stdin = proc.getOutputStream();
stdin.write(htmlInput.getBytes());
stdin.close();
}
proc.waitFor();
if(proc.exitValue() != 0) {
throw new RuntimeException("Process (" + command + ") exited with status code " + proc.exitValue());
}
return new File(path);
}
#location 16
#vulnerability type RESOURCE_LEAK
|
#fixed code
public File saveAs(String path) throws IOException, InterruptedException {
byte[] pdf = this.getPDF();
File file = new File(path);
BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(new FileOutputStream(file));
bufferedOutputStream.write(pdf);
bufferedOutputStream.flush();
bufferedOutputStream.close();
return file;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public byte[] getPDF() throws IOException, InterruptedException {
Runtime runtime = Runtime.getRuntime();
if(htmlFromString && !this.params.contains(new Param("-"))) {
this.addParam(new Param("-"));
}
String command = this.commandWithParameters() + Symbol.separator + "-";
Process process = runtime.exec(command);
if(htmlFromString) {
OutputStream stdInStream = process.getOutputStream();
stdInStream.write(htmlInput.getBytes());
stdInStream.close();
}
InputStream stdOutStream = process.getInputStream();
InputStream stdErrStream = process.getErrorStream();
process.waitFor();
ByteArrayOutputStream stdOut = new ByteArrayOutputStream();
ByteArrayOutputStream stdErr = new ByteArrayOutputStream();
for(int i = 0; i < stdOutStream.available(); i++) {
stdOut.write((char) stdOutStream.read());
}
stdOutStream.close();
for(int i = 0; i < stdErrStream.available(); i++) {
stdErr.write((char) stdErrStream.read());
}
stdErrStream.close();
if(process.exitValue() != 0) {
throw new RuntimeException("Process (" + command + ") exited with status code " + process.exitValue() + ":\n"+new String(stdErr.toByteArray()));
}
return stdOut.toByteArray();
}
#location 33
#vulnerability type RESOURCE_LEAK
|
#fixed code
public byte[] getPDF() throws IOException, InterruptedException {
return getPDF(STDOUT);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public File saveAs(String path) throws IOException, InterruptedException {
Runtime rt = Runtime.getRuntime();
String command = this.commandWithParameters() + Symbol.separator + path;
Process proc = rt.exec(command);
if(htmlFromString) {
OutputStream stdin = proc.getOutputStream();
stdin.write(htmlInput.getBytes());
stdin.close();
}
proc.waitFor();
if(proc.exitValue() != 0) {
throw new RuntimeException("Process (" + command + ") exited with status code " + proc.exitValue());
}
return new File(path);
}
#location 13
#vulnerability type RESOURCE_LEAK
|
#fixed code
public File saveAs(String path) throws IOException, InterruptedException {
byte[] pdf = this.getPDF();
File file = new File(path);
BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(new FileOutputStream(file));
bufferedOutputStream.write(pdf);
bufferedOutputStream.flush();
bufferedOutputStream.close();
return file;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public byte[] getPDF() throws IOException, InterruptedException {
Runtime runtime = Runtime.getRuntime();
if(htmlFromString && !this.params.contains(new Param("-"))) {
this.addParam(new Param("-"));
}
String command = this.commandWithParameters() + Symbol.separator + "-";
Process process = runtime.exec(command);
if(htmlFromString) {
OutputStream stdInStream = process.getOutputStream();
stdInStream.write(htmlInput.getBytes());
stdInStream.close();
}
InputStream stdOutStream = process.getInputStream();
InputStream stdErrStream = process.getErrorStream();
process.waitFor();
ByteArrayOutputStream stdOut = new ByteArrayOutputStream();
ByteArrayOutputStream stdErr = new ByteArrayOutputStream();
for(int i = 0; i < stdOutStream.available(); i++) {
stdOut.write((char) stdOutStream.read());
}
stdOutStream.close();
for(int i = 0; i < stdErrStream.available(); i++) {
stdErr.write((char) stdErrStream.read());
}
stdErrStream.close();
if(process.exitValue() != 0) {
throw new RuntimeException("Process (" + command + ") exited with status code " + process.exitValue() + ":\n"+new String(stdErr.toByteArray()));
}
return stdOut.toByteArray();
}
#location 30
#vulnerability type RESOURCE_LEAK
|
#fixed code
public byte[] getPDF() throws IOException, InterruptedException {
return getPDF(STDOUT);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public byte[] getPDF() throws IOException, InterruptedException {
Runtime runtime = Runtime.getRuntime();
Process process = runtime.exec(getCommandAsArray());
for (Page page : pages) {
if (page.getType().equals(PageType.htmlAsString)) {
OutputStream stdInStream = process.getOutputStream();
stdInStream.write(page.getSource().getBytes("UTF-8"));
stdInStream.close();
}
}
StreamEater outputStreamEater = new StreamEater(process.getInputStream());
outputStreamEater.start();
StreamEater errorStreamEater = new StreamEater(process.getErrorStream());
errorStreamEater.start();
outputStreamEater.join();
errorStreamEater.join();
process.waitFor();
if (process.exitValue() != 0) {
throw new RuntimeException("Process (" + getCommand() + ") exited with status code " + process.exitValue() + ":\n" + new String(errorStreamEater.getBytes()));
}
if (outputStreamEater.getError() != null) {
throw outputStreamEater.getError();
}
if (errorStreamEater.getError() != null) {
throw errorStreamEater.getError();
}
return outputStreamEater.getBytes();
}
#location 35
#vulnerability type RESOURCE_LEAK
|
#fixed code
public byte[] getPDF() throws IOException, InterruptedException {
Runtime runtime = Runtime.getRuntime();
Process process = runtime.exec(getCommandAsArray());
StreamEater outputStreamEater = new StreamEater(process.getInputStream());
outputStreamEater.start();
StreamEater errorStreamEater = new StreamEater(process.getErrorStream());
errorStreamEater.start();
outputStreamEater.join();
errorStreamEater.join();
process.waitFor();
if (process.exitValue() != 0) {
throw new RuntimeException("Process (" + getCommand() + ") exited with status code " + process.exitValue() + ":\n" + new String(errorStreamEater.getBytes()));
}
if (outputStreamEater.getError() != null) {
throw outputStreamEater.getError();
}
if (errorStreamEater.getError() != null) {
throw errorStreamEater.getError();
}
return outputStreamEater.getBytes();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public File saveAs(String path) throws IOException, InterruptedException {
Runtime rt = Runtime.getRuntime();
String command = this.commandWithParameters() + Symbol.separator + path;
Process proc = rt.exec(command);
if(htmlFromString) {
OutputStream stdin = proc.getOutputStream();
stdin.write(htmlInput.getBytes());
stdin.close();
}
proc.waitFor();
if(proc.exitValue() != 0) {
throw new RuntimeException("Process (" + command + ") exited with status code " + proc.exitValue());
}
return new File(path);
}
#location 16
#vulnerability type RESOURCE_LEAK
|
#fixed code
public File saveAs(String path) throws IOException, InterruptedException {
byte[] pdf = this.getPDF();
File file = new File(path);
BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(new FileOutputStream(file));
bufferedOutputStream.write(pdf);
bufferedOutputStream.flush();
bufferedOutputStream.close();
return file;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testFieldPopulator()
{
Album funeral = DataGenerator.funeral();
ResultTraverser traverser = new ResultTraverser();
populatorRegistry.register( new AlbumFieldPopulator() );
YogaRequestContext requestContext = new YogaRequestContext( "test",
new DummyHttpServletRequest(), new DummyHttpServletResponse(), new HrefListener( this.populatorRegistry ) );
Map<String, Object> objectTree = doTraverse( funeral, ":", traverser, requestContext );
Assert.assertEquals( "/album/" + funeral.getId() + ".test", objectTree.get( "href" ) );
}
#location 12
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testFieldPopulator()
{
Album funeral = DataGenerator.funeral();
ResultTraverser traverser = new ResultTraverser();
populatorRegistry.register( new AlbumFieldPopulator() );
YogaRequestContext requestContext = new YogaRequestContext( "test", new GDataSelectorParser(),
new DummyHttpServletRequest(), new DummyHttpServletResponse(), new HrefListener( this.populatorRegistry ) );
Map<String, Object> objectTree = doTraverse( funeral, ":", traverser, requestContext );
Assert.assertEquals( "/album/" + funeral.getId() + ".test", objectTree.get( "href" ) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testSelectUnsupportedField()
{
Album chasingProphecy = DataGenerator.chasingProphecy();
ResultTraverser traverser = new ResultTraverser();
populatorRegistry.register( new AlbumFieldPopulator() );
Map<String,Object> objectTree = doTraverse( chasingProphecy, ":(id,title,year,artist)", traverser );
Assert.assertEquals( 3, objectTree.size() );
Assert.assertEquals( chasingProphecy.getId(), objectTree.get( "id" ) );
Assert.assertEquals( chasingProphecy.getTitle(), objectTree.get( "title" ) );
Map<String,Object> eighthDay = (Map<String, Object>) objectTree.get( "artist" );
Assert.assertEquals( DataGenerator.eigthDay().getName(), eighthDay.get( "name" ) );
}
#location 9
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testSelectUnsupportedField()
{
Album chasingProphecy = DataGenerator.chasingProphecy();
ResultTraverser traverser = new ResultTraverser();
populatorRegistry.register( new AlbumFieldPopulator() );
Map<String,Object> objectTree = doTraverse( chasingProphecy, "id,title,year,artist", traverser );
Assert.assertEquals( 3, objectTree.size() );
Assert.assertEquals( chasingProphecy.getId(), objectTree.get( "id" ) );
Assert.assertEquals( chasingProphecy.getTitle(), objectTree.get( "title" ) );
Map<String,Object> eighthDay = (Map<String, Object>) objectTree.get( "artist" );
Assert.assertEquals( DataGenerator.eigthDay().getName(), eighthDay.get( "name" ) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testComplexCoreFields()
{
User carter = DataGenerator.carter();
carter.getFavoriteArtists().add( DataGenerator.neutralMilkHotel() );
carter.getFavoriteArtists().add( DataGenerator.arcadeFire() );
ResultTraverser traverser = new ResultTraverser();
MapSelector selector = new MapSelector();
selector.register( User.class, "id", "favoriteArtists" );
Map<String, Object> objectTree = doTraverse( carter, traverser, _simpleContext, new CompositeSelector( selector, new CoreSelector() ) );
Assert.assertTrue( objectTree.size() >= 2 );
Assert.assertEquals( carter.getId(), objectTree.get( "id" ) );
Assert.assertEquals( carter.getName(), objectTree.get( "name" ) );
List<Map<String, Object>> favoriteArtists = getList( objectTree, "favoriteArtists" );
Assert.assertNotNull( favoriteArtists );
Assert.assertEquals( 2, favoriteArtists.size() );
Map<String, Object> neutralMap = findItem( favoriteArtists, "name", "Neutral Milk Hotel" );
Assert.assertEquals( DataGenerator.neutralMilkHotel().getId(), neutralMap.get( "id" ) );
Map<String, Object> arcadeMap = findItem( favoriteArtists, "name", "Arcade Fire" );
Assert.assertEquals( DataGenerator.arcadeFire().getId(), arcadeMap.get( "id" ) );
}
#location 20
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testComplexCoreFields()
{
User carter = DataGenerator.carter();
carter.getFavoriteArtists().add( DataGenerator.neutralMilkHotel() );
carter.getFavoriteArtists().add( DataGenerator.arcadeFire() );
ResultTraverser traverser = new ResultTraverser();
traverser.getFieldPopulatorRegistry().register( new UserFieldPopulatorWithArtistCoreField() );
Map<String, Object> objectTree = doTraverse( carter, ":", traverser, _simpleContext );
Assert.assertEquals( 3, objectTree.size() );
Assert.assertEquals( carter.getId(), objectTree.get( "id" ) );
Assert.assertEquals( carter.getName(), objectTree.get( "name" ) );
List<Map<String, Object>> favoriteArtists = getList( objectTree, "favoriteArtists" );
Assert.assertNotNull( favoriteArtists );
Assert.assertEquals( 2, favoriteArtists.size() );
Map<String, Object> neutralMap = findItem( favoriteArtists, "name", "Neutral Milk Hotel" );
Assert.assertEquals( DataGenerator.neutralMilkHotel().getId(), neutralMap.get( "id" ) );
Map<String, Object> arcadeMap = findItem( favoriteArtists, "name", "Arcade Fire" );
Assert.assertEquals( DataGenerator.arcadeFire().getId(), arcadeMap.get( "id" ) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@SuppressWarnings("unchecked")
@Test
// Add the MetadataLinkListener to the listener chain. The output will render an href to view the metadata
// for the album object.
public void testMetadataHref()
{
String prefixUrl = "/metadata/";
String fileExtension = "test";
Album signOfTheTimes = DataGenerator.signOfTheTimes();
DefaultMetaDataRegistry service = new DefaultMetaDataRegistry();
service.setRootMetaDataUrl( prefixUrl );
service.setCoreSelector( new CoreSelector( populatorRegistry ) );
Map<String,Class<?>> typeMappings = new HashMap<String, Class<?>>();
typeMappings.put( "album", Album.class );
service.setTypeMappings( typeMappings );
MetadataLinkListener metadataLinkListener = new MetadataLinkListener();
metadataLinkListener.setMetaDataRegistry( service );
ResultTraverser traverser = new ResultTraverser();
YogaRequestContext requestContext = new YogaRequestContext( fileExtension,
new DummyHttpServletRequest(), new DummyHttpServletResponse(), metadataLinkListener );
Map<String, Object> objectTree = doTraverse( signOfTheTimes, ":", traverser, requestContext );
Map<String,String> metadataMap = (Map<String,String>) objectTree.get( "metadata" );
String metadataHref = prefixUrl + "album." + fileExtension;
Assert.assertEquals( metadataHref, metadataMap.get( "href" ) );
}
#location 27
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@SuppressWarnings("unchecked")
@Test
// Add the MetadataLinkListener to the listener chain. The output will render an href to view the metadata
// for the album object.
public void testMetadataHref()
{
String prefixUrl = "/metadata/";
String fileExtension = "test";
Album signOfTheTimes = DataGenerator.signOfTheTimes();
DefaultMetaDataRegistry service = new DefaultMetaDataRegistry();
service.setRootMetaDataUrl( prefixUrl );
service.setCoreSelector( new CoreSelector( populatorRegistry ) );
Map<String,Class<?>> typeMappings = new HashMap<String, Class<?>>();
typeMappings.put( "album", Album.class );
service.setTypeMappings( typeMappings );
MetadataLinkListener metadataLinkListener = new MetadataLinkListener();
metadataLinkListener.setMetaDataRegistry( service );
ResultTraverser traverser = new ResultTraverser();
YogaRequestContext requestContext = new YogaRequestContext( fileExtension, new GDataSelectorParser(),
new DummyHttpServletRequest(), new DummyHttpServletResponse(), metadataLinkListener );
Map<String, Object> objectTree = doTraverse( signOfTheTimes, "", traverser, requestContext );
Map<String,String> metadataMap = (Map<String,String>) objectTree.get( "metadata" );
String metadataHref = prefixUrl + "album." + fileExtension;
Assert.assertEquals( metadataHref, metadataMap.get( "href" ) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void init() throws IOException
{
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream( "sampledb.sql" );
BufferedReader reader = new BufferedReader( new InputStreamReader( is ) );
String line = null;
while ( ( line = reader.readLine() ) != null )
{
String type = line.replaceFirst( "INSERT INTO ([^(]+).*", "$1" );
String[] values = line.replaceFirst( ".*VALUES\\((.*)\\)", "$1" ).split( ", " );
if ("User".equalsIgnoreCase( type ))
{
newUser( toLong( values[ 0 ] ), toStr( values[ 1 ] ) );
}
else if ("Friend".equalsIgnoreCase( type ))
{
newFriend( toLong( values[ 0 ] ), toLong( values[ 1 ] ) );
}
else if ("Artist".equalsIgnoreCase( type ))
{
newArtist( toLong( values[ 0 ] ), toStr( values[ 1 ] ) );
}
else if ("Fan".equalsIgnoreCase( type ))
{
newFan( toLong( values[ 0 ] ), toLong( values[ 1 ] ) );
}
else if ("Album".equalsIgnoreCase( type ))
{
newAlbum( toLong( values[ 0 ] ), toStr( values[ 1 ] ), toLong( values[ 2 ] ), new Integer( values[ 3 ].trim() ) );
}
else if ("Song".equalsIgnoreCase( type ))
{
newSong( toLong( values[ 0 ] ), toStr( values[ 1 ] ), toLong( values[ 2 ] ), toLong( values[ 3 ] ) );
}
}
is.close();
}
#location 13
#vulnerability type RESOURCE_LEAK
|
#fixed code
public void init()
{
try
{
init( Thread.currentThread().getContextClassLoader().getResourceAsStream( "sampledb.sql" ) );
}
catch ( Exception e )
{
throw new RuntimeException( e );
}
// this doesn't seem to work yet. It reads a partial line and throws up
// try
// {
// init( new GZIPInputStream( new URL( remoteData ).openStream() ) );
// }
// catch ( Exception e )
// {
// e.printStackTrace();
// }
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testAnnotatedModel()
{
User solomon = DataGenerator.solomon();
ResultTraverser traverser = new ResultTraverser();
YogaRequestContext requestContext = new YogaRequestContext( "test",
new DummyHttpServletRequest(), new DummyHttpServletResponse(), new HrefListener() );
Map<String, Object> objectTree = doTraverse( solomon, ":", traverser, requestContext );
Assert.assertEquals( "/user/" + solomon.getId() + ".test", objectTree.get( "href" ) );
}
#location 11
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testAnnotatedModel()
{
User solomon = DataGenerator.solomon();
ResultTraverser traverser = new ResultTraverser();
YogaRequestContext requestContext = new YogaRequestContext( "test", new GDataSelectorParser(),
new DummyHttpServletRequest(), new DummyHttpServletResponse(), new HrefListener() );
Map<String, Object> objectTree = doTraverse( solomon, ":", traverser, requestContext );
Assert.assertEquals( "/user/" + solomon.getId() + ".test", objectTree.get( "href" ) );
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testView() throws IOException {
PosixFileAttributeView view = service.getFileAttributeView(
fileSupplier(), PosixFileAttributeView.class);
assertNotNull(view);
ASSERT.that(view.name()).is("posix");
ASSERT.that(view.getOwner()).is(createUserPrincipal("user"));
PosixFileAttributes attrs = view.readAttributes();
ASSERT.that(attrs.fileKey()).is(0L);
ASSERT.that(attrs.owner()).is(createUserPrincipal("user"));
ASSERT.that(attrs.group()).is(createGroupPrincipal("group"));
ASSERT.that(attrs.permissions()).is(PosixFilePermissions.fromString("rw-r--r--"));
FileTime time = FileTime.fromMillis(0L);
view.setTimes(time, time, time);
assertContainsAll(file, ImmutableMap.<String, Object>of(
"posix:creationTime", time, "posix:lastAccessTime", time, "posix:lastModifiedTime", time));
view.setOwner(createUserPrincipal("root"));
ASSERT.that(view.getOwner()).is(createUserPrincipal("root"));
ASSERT.that(file.getAttribute("owner:owner")).is(createUserPrincipal("root"));
view.setGroup(createGroupPrincipal("root"));
ASSERT.that(view.readAttributes().group()).is(createGroupPrincipal("root"));
ASSERT.that(file.getAttribute("posix:group")).is(createGroupPrincipal("root"));
view.setPermissions(PosixFilePermissions.fromString("rwx------"));
ASSERT.that(view.readAttributes().permissions())
.is(PosixFilePermissions.fromString("rwx------"));
ASSERT.that(file.getAttribute("posix:permissions"))
.is(PosixFilePermissions.fromString("rwx------"));
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testView() throws IOException {
store.setAttribute("owner:owner", createUserPrincipal("user"));
PosixFileAttributeView view = provider.getView(attributeStoreSupplier());
assertNotNull(view);
ASSERT.that(view.name()).is("posix");
ASSERT.that(view.getOwner()).is(createUserPrincipal("user"));
PosixFileAttributes attrs = view.readAttributes();
ASSERT.that(attrs.fileKey()).is(0L);
ASSERT.that(attrs.owner()).is(createUserPrincipal("user"));
ASSERT.that(attrs.group()).is(createGroupPrincipal("group"));
ASSERT.that(attrs.permissions()).is(PosixFilePermissions.fromString("rw-r--r--"));
view.setOwner(createUserPrincipal("root"));
ASSERT.that(view.getOwner()).is(createUserPrincipal("root"));
ASSERT.that(store.getAttribute("owner:owner")).is(createUserPrincipal("root"));
view.setGroup(createGroupPrincipal("root"));
ASSERT.that(view.readAttributes().group()).is(createGroupPrincipal("root"));
ASSERT.that(store.getAttribute("posix:group")).is(createGroupPrincipal("root"));
view.setPermissions(PosixFilePermissions.fromString("rwx------"));
ASSERT.that(view.readAttributes().permissions())
.is(PosixFilePermissions.fromString("rwx------"));
ASSERT.that(store.getAttribute("posix:permissions"))
.is(PosixFilePermissions.fromString("rwx------"));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public JimfsFileSystem newFileSystem(URI uri, Map<String, ?> env) {
checkArgument(uri.getScheme().equalsIgnoreCase(SCHEME),
"uri (%s) scheme must be '%s'", uri, SCHEME);
checkArgument(env.get(CONFIG_KEY) instanceof JimfsConfiguration,
"env map (%s) must contain key '%s' mapped to an instance of JimfsConfiguration",
env, CONFIG_KEY);
JimfsConfiguration config = (JimfsConfiguration) env.get(CONFIG_KEY);
JimfsFileSystem fileSystem = new JimfsFileSystem(this, uri, config);
if (fileSystems.putIfAbsent(uri, fileSystem) != null) {
throw new FileSystemAlreadyExistsException(uri.toString());
}
return fileSystem;
}
#location 9
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Override
public JimfsFileSystem newFileSystem(URI uri, Map<String, ?> env) throws IOException {
checkArgument(uri.getScheme().equalsIgnoreCase(SCHEME),
"uri (%s) scheme must be '%s'", uri, SCHEME);
checkArgument(env.get(CONFIG_KEY) instanceof JimfsConfiguration,
"env map (%s) must contain key '%s' mapped to an instance of JimfsConfiguration",
env, CONFIG_KEY);
JimfsConfiguration config = (JimfsConfiguration) env.get(CONFIG_KEY);
JimfsFileSystem fileSystem = FileSystemInitializer.createFileSystem(this, uri, config);
if (fileSystems.putIfAbsent(uri, fileSystem) != null) {
throw new FileSystemAlreadyExistsException(uri.toString());
}
return fileSystem;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testView() throws IOException {
AclFileAttributeView view =
service.getFileAttributeView(fileSupplier(), AclFileAttributeView.class);
assertNotNull(view);
ASSERT.that(view.name()).is("acl");
ASSERT.that(view.getAcl()).is(defaultAcl);
ASSERT.that(view.getOwner()).is(USER);
view.setAcl(ImmutableList.<AclEntry>of());
view.setOwner(FOO);
ASSERT.that(view.getAcl()).is(ImmutableList.<AclEntry>of());
ASSERT.that(view.getOwner()).is(FOO);
ASSERT.that(file.getAttribute("acl:acl")).is(ImmutableList.<AclEntry>of());
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testView() throws IOException {
AclFileAttributeView view = provider.getView(attributeStoreSupplier());
assertNotNull(view);
ASSERT.that(view.name()).is("acl");
ASSERT.that(view.getAcl()).is(defaultAcl);
view.setAcl(ImmutableList.<AclEntry>of());
view.setOwner(FOO);
ASSERT.that(view.getAcl()).is(ImmutableList.<AclEntry>of());
ASSERT.that(view.getOwner()).is(FOO);
ASSERT.that(store.getAttribute("acl:acl")).is(ImmutableList.<AclEntry>of());
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testClosedChannel() throws IOException, InterruptedException {
RegularFile file = regularFile(15);
ExecutorService executor = Executors.newSingleThreadExecutor();
try {
JimfsAsynchronousFileChannel channel = channel(file, executor, READ, WRITE);
channel.close();
assertClosed(channel.read(ByteBuffer.allocate(10), 0));
assertClosed(channel.write(ByteBuffer.allocate(10), 15));
assertClosed(channel.lock());
assertClosed(channel.lock(0, 10, true));
} finally {
executor.shutdown();
}
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Test
public void testClosedChannel() throws Throwable {
RegularFile file = regularFile(15);
ExecutorService executor = Executors.newSingleThreadExecutor();
try {
JimfsAsynchronousFileChannel channel = channel(file, executor, READ, WRITE);
channel.close();
assertClosed(channel.read(ByteBuffer.allocate(10), 0));
assertClosed(channel.write(ByteBuffer.allocate(10), 15));
assertClosed(channel.lock());
assertClosed(channel.lock(0, 10, true));
} finally {
executor.shutdown();
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testView() throws IOException {
UserDefinedFileAttributeView view =
service.getFileAttributeView(fileSupplier(), UserDefinedFileAttributeView.class);
assertNotNull(view);
ASSERT.that(view.name()).is("user");
ASSERT.that(view.list()).isEmpty();
byte[] b1 = {0, 1, 2};
byte[] b2 = {0, 1, 2, 3, 4};
view.write("b1", ByteBuffer.wrap(b1));
view.write("b2", ByteBuffer.wrap(b2));
ASSERT.that(view.list()).has().allOf("b1", "b2");
ASSERT.that(service.readAttributes(file, "user:*").keySet())
.has().allOf("b1", "b2");
ASSERT.that(view.size("b1")).is(3);
ASSERT.that(view.size("b2")).is(5);
ByteBuffer buf1 = ByteBuffer.allocate(view.size("b1"));
ByteBuffer buf2 = ByteBuffer.allocate(view.size("b2"));
view.read("b1", buf1);
view.read("b2", buf2);
ASSERT.that(Arrays.equals(b1, buf1.array())).isTrue();
ASSERT.that(Arrays.equals(b2, buf2.array())).isTrue();
view.delete("b2");
ASSERT.that(view.list()).has().exactly("b1");
ASSERT.that(service.readAttributes(file, "user:*").keySet())
.has().exactly("b1");
try {
view.size("b2");
fail();
} catch (IllegalArgumentException expected) {
ASSERT.that(expected.getMessage()).contains("not set");
}
try {
view.read("b2", ByteBuffer.allocate(10));
fail();
} catch (IllegalArgumentException expected) {
ASSERT.that(expected.getMessage()).contains("not set");
}
view.write("b1", ByteBuffer.wrap(b2));
ASSERT.that(view.size("b1")).is(5);
view.delete("b2"); // succeeds
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testView() throws IOException {
UserDefinedFileAttributeView view = provider.getView(attributeStoreSupplier());
assertNotNull(view);
ASSERT.that(view.name()).is("user");
ASSERT.that(view.list()).isEmpty();
byte[] b1 = {0, 1, 2};
byte[] b2 = {0, 1, 2, 3, 4};
view.write("b1", ByteBuffer.wrap(b1));
view.write("b2", ByteBuffer.wrap(b2));
ASSERT.that(view.list()).has().allOf("b1", "b2");
ASSERT.that(store.getAttributeKeys()).has().exactly("user:b1", "user:b2");
ASSERT.that(view.size("b1")).is(3);
ASSERT.that(view.size("b2")).is(5);
ByteBuffer buf1 = ByteBuffer.allocate(view.size("b1"));
ByteBuffer buf2 = ByteBuffer.allocate(view.size("b2"));
view.read("b1", buf1);
view.read("b2", buf2);
ASSERT.that(Arrays.equals(b1, buf1.array())).isTrue();
ASSERT.that(Arrays.equals(b2, buf2.array())).isTrue();
view.delete("b2");
ASSERT.that(view.list()).has().exactly("b1");
ASSERT.that(store.getAttributeKeys()).has().exactly("user:b1");
try {
view.size("b2");
fail();
} catch (IllegalArgumentException expected) {
ASSERT.that(expected.getMessage()).contains("not set");
}
try {
view.read("b2", ByteBuffer.allocate(10));
fail();
} catch (IllegalArgumentException expected) {
ASSERT.that(expected.getMessage()).contains("not set");
}
view.write("b1", ByteBuffer.wrap(b2));
ASSERT.that(view.size("b1")).is(5);
view.delete("b2"); // succeeds
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static FileSystem newFileSystem(String name) {
String os = System.getProperty("os.name");
Configuration config;
if (os.contains("Windows")) {
config = Configuration.windows();
} else if (os.contains("OS X")) {
config = Configuration.osX();
} else {
config = Configuration.unix();
}
return newFileSystem(name, config);
}
#location 5
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public static FileSystem newFileSystem(String name) {
return newFileSystem(name, Configuration.forCurrentPlatform());
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testAsynchronousClose() throws IOException, InterruptedException {
RegularFile file = regularFile(10);
final FileChannel channel = channel(file, READ, WRITE);
file.writeLock().lock(); // ensure all operations on the channel will block
ExecutorService executor = Executors.newCachedThreadPool();
List<Future<?>> futures = queueAllBlockingOperations(channel, executor);
// ensure time for operations to start blocking
Uninterruptibles.sleepUninterruptibly(10, MILLISECONDS);
channel.close();
for (Future<?> future : futures) {
try {
future.get();
fail();
} catch (ExecutionException expected) {
assertTrue(expected.getCause() instanceof AsynchronousCloseException);
}
}
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Test
public void testAsynchronousClose() throws Exception {
RegularFile file = regularFile(10);
final FileChannel channel = channel(file, READ, WRITE);
file.writeLock().lock(); // ensure all operations on the channel will block
ExecutorService executor = Executors.newCachedThreadPool();
CountDownLatch latch = new CountDownLatch(BLOCKING_OP_COUNT);
List<Future<?>> futures = queueAllBlockingOperations(channel, executor, latch);
// wait for all the threads to have started running
latch.await();
// then ensure time for operations to start blocking
Uninterruptibles.sleepUninterruptibly(20, MILLISECONDS);
// close channel on this thread
channel.close();
// the blocking operations are running on different threads, so they all get
// AsynchronousCloseException
for (Future<?> future : futures) {
try {
future.get();
fail();
} catch (ExecutionException expected) {
assertThat(expected.getCause()).named("blocking thread exception")
.isA(AsynchronousCloseException.class);
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public WatchService newWatchService() throws IOException {
return new PollingWatchService(defaultView, pathService, fileStore.state());
}
#location 3
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Override
public WatchService newWatchService() throws IOException {
return watchServiceConfig.newWatchService(defaultView, pathService);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public JimfsPath toRealPath(
JimfsPath path, PathService pathService, Set<? super LinkOption> options) throws IOException {
checkNotNull(path);
checkNotNull(options);
store.readLock().lock();
try {
DirectoryEntry entry = lookUp(path, options)
.requireExists(path);
List<Name> names = new ArrayList<>();
names.add(entry.name());
while (!entry.file().isRootDirectory()) {
// entryInParent(), though @Nullable, won't return null here. The only way to get a null
// entry is to look up a file relative to a SecureDirectoryStream that is open against a
// deleted directory. toRealPath doesn't do this: it looks up a file relative to a Path,
// not a SecureDirectoryStream.
entry = entry.directory().entryInParent();
names.add(entry.name());
}
// names are ordered last to first in the list, so get the reverse view
List<Name> reversed = Lists.reverse(names);
Name root = reversed.remove(0);
return pathService.createPath(root, reversed);
} finally {
store.readLock().unlock();
}
}
#location 12
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public JimfsPath toRealPath(
JimfsPath path, PathService pathService, Set<? super LinkOption> options) throws IOException {
checkNotNull(path);
checkNotNull(options);
store.readLock().lock();
try {
DirectoryEntry entry = lookUp(path, options)
.requireExists(path);
List<Name> names = new ArrayList<>();
names.add(entry.name());
while (!entry.file().isRootDirectory()) {
entry = entry.directory().entryInParent();
names.add(entry.name());
}
// names are ordered last to first in the list, so get the reverse view
List<Name> reversed = Lists.reverse(names);
Name root = reversed.remove(0);
return pathService.createPath(root, reversed);
} finally {
store.readLock().unlock();
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testCloseByInterrupt() throws IOException, InterruptedException {
RegularFile file = regularFile(10);
final FileChannel channel = channel(file, READ, WRITE);
file.writeLock().lock(); // ensure all operations on the channel will block
ExecutorService executor = Executors.newCachedThreadPool();
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Throwable> interruptException = new AtomicReference<>();
// This thread, being the first to run, will be blocking on the interruptible lock (the byte
// file's write lock) and as such will be interrupted properly... the other threads will be
// blocked on the lock that guards the position field and the specification that only one method
// on the channel will be in progress at a time. That lock is not interruptible, so we must
// interrupt this thread.
Thread thread = new Thread(new Runnable() {
@Override
public void run() {
try {
channel.write(ByteBuffer.allocate(20));
latch.countDown();
} catch (Throwable e) {
interruptException.set(e);
latch.countDown();
}
}
});
thread.start();
// ensure time for thread to start blocking on the write lock
Uninterruptibles.sleepUninterruptibly(5, MILLISECONDS);
List<Future<?>> futures = queueAllBlockingOperations(channel, executor);
// ensure time for operations to start blocking
Uninterruptibles.sleepUninterruptibly(10, MILLISECONDS);
// interrupting this blocking thread closes the channel and makes all the other threads
// throw AsynchronousCloseException... the operation on this thread should throw
// ClosedByInterruptException
thread.interrupt();
latch.await();
assertTrue(interruptException.get() instanceof ClosedByInterruptException);
for (Future<?> future : futures) {
try {
future.get();
fail();
} catch (ExecutionException expected) {
assertTrue(expected.getCause() instanceof AsynchronousCloseException);
}
}
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Test
public void testCloseByInterrupt() throws Exception {
RegularFile file = regularFile(10);
final FileChannel channel = channel(file, READ, WRITE);
file.writeLock().lock(); // ensure all operations on the channel will block
ExecutorService executor = Executors.newCachedThreadPool();
final CountDownLatch threadStartLatch = new CountDownLatch(1);
final SettableFuture<Throwable> interruptException = SettableFuture.create();
// This thread, being the first to run, will be blocking on the interruptible lock (the byte
// file's write lock) and as such will be interrupted properly... the other threads will be
// blocked on the lock that guards the position field and the specification that only one method
// on the channel will be in progress at a time. That lock is not interruptible, so we must
// interrupt this thread.
Thread thread = new Thread(new Runnable() {
@Override
public void run() {
threadStartLatch.countDown();
try {
channel.write(ByteBuffer.allocate(20));
interruptException.set(null);
} catch (Throwable e) {
interruptException.set(e);
}
}
});
thread.start();
// let the thread start running
threadStartLatch.await();
// then ensure time for thread to start blocking on the write lock
Uninterruptibles.sleepUninterruptibly(10, MILLISECONDS);
CountDownLatch blockingStartLatch = new CountDownLatch(BLOCKING_OP_COUNT);
List<Future<?>> futures = queueAllBlockingOperations(channel, executor, blockingStartLatch);
// wait for all blocking threads to start
blockingStartLatch.await();
// then ensure time for the operations to start blocking
Uninterruptibles.sleepUninterruptibly(20, MILLISECONDS);
// interrupting this blocking thread closes the channel and makes all the other threads
// throw AsynchronousCloseException... the operation on this thread should throw
// ClosedByInterruptException
thread.interrupt();
// get the exception that caused the interrupted operation to terminate
assertThat(interruptException.get(200, MILLISECONDS))
.named("interrupted thread exception")
.isA(ClosedByInterruptException.class);
// check that each other thread got AsynchronousCloseException (since the interrupt, on a
// different thread, closed the channel)
for (Future<?> future : futures) {
try {
future.get();
fail();
} catch (ExecutionException expected) {
assertThat(expected.getCause()).named("blocking thread exception")
.isA(AsynchronousCloseException.class);
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testView() throws IOException {
DosFileAttributeView view =
service.getFileAttributeView(fileSupplier(), DosFileAttributeView.class);
assertNotNull(view);
ASSERT.that(view.name()).is("dos");
DosFileAttributes attrs = view.readAttributes();
ASSERT.that(attrs.isHidden()).isFalse();
ASSERT.that(attrs.isArchive()).isFalse();
ASSERT.that(attrs.isReadOnly()).isFalse();
ASSERT.that(attrs.isSystem()).isFalse();
view.setArchive(true);
view.setReadOnly(true);
view.setHidden(true);
view.setSystem(false);
ASSERT.that(attrs.isHidden()).isFalse();
ASSERT.that(attrs.isArchive()).isFalse();
ASSERT.that(attrs.isReadOnly()).isFalse();
attrs = view.readAttributes();
ASSERT.that(attrs.isHidden()).isTrue();
ASSERT.that(attrs.isArchive()).isTrue();
ASSERT.that(attrs.isReadOnly()).isTrue();
ASSERT.that(attrs.isSystem()).isFalse();
view.setTimes(FileTime.fromMillis(0L), null, null);
ASSERT.that(view.readAttributes().lastModifiedTime())
.is(FileTime.fromMillis(0L));
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testView() throws IOException {
DosFileAttributeView view = provider.getView(attributeStoreSupplier());
assertNotNull(view);
ASSERT.that(view.name()).is("dos");
DosFileAttributes attrs = view.readAttributes();
ASSERT.that(attrs.isHidden()).isFalse();
ASSERT.that(attrs.isArchive()).isFalse();
ASSERT.that(attrs.isReadOnly()).isFalse();
ASSERT.that(attrs.isSystem()).isFalse();
view.setArchive(true);
view.setReadOnly(true);
view.setHidden(true);
view.setSystem(false);
ASSERT.that(attrs.isHidden()).isFalse();
ASSERT.that(attrs.isArchive()).isFalse();
ASSERT.that(attrs.isReadOnly()).isFalse();
attrs = view.readAttributes();
ASSERT.that(attrs.isHidden()).isTrue();
ASSERT.that(attrs.isArchive()).isTrue();
ASSERT.that(attrs.isReadOnly()).isTrue();
ASSERT.that(attrs.isSystem()).isFalse();
view.setTimes(FileTime.fromMillis(0L), null, null);
ASSERT.that(view.readAttributes().lastModifiedTime())
.is(FileTime.fromMillis(0L));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static void main(String[] args) {
int concurrents = Integer.parseInt(properties.getProperty("concurrents"));
int runtime = Integer.parseInt(properties.getProperty("runtime"));
String classname = properties.getProperty("classname");
String params = properties.getProperty("params");
isMultiClient = Boolean.parseBoolean(properties.getProperty("isMultiClient"));
if (args.length == 5) {
concurrents = Integer.parseInt(args[0]);
runtime = Integer.parseInt(args[1]);
classname = args[2];
params = args[3];
isMultiClient = Boolean.parseBoolean(args[4]);
}
ApplicationContext applicationContext = new ClassPathXmlApplicationContext(new String[]{"classpath*:motan-benchmark-client.xml"});
benchmarkService = (BenchmarkService) applicationContext.getBean("motanBenchmarkReferer");
new MotanBenchmarkClient().start(concurrents, runtime, classname, params);
}
#location 16
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static void main(String[] args) {
loadProperties();
int concurrents = Integer.parseInt(properties.getProperty("concurrents"));
int runtime = Integer.parseInt(properties.getProperty("runtime"));
String classname = properties.getProperty("classname");
String params = properties.getProperty("params");
isMultiClient = Boolean.parseBoolean(properties.getProperty("isMultiClient"));
if (args.length == 5) {
concurrents = Integer.parseInt(args[0]);
runtime = Integer.parseInt(args[1]);
classname = args[2];
params = args[3];
isMultiClient = Boolean.parseBoolean(args[4]);
}
ApplicationContext applicationContext = new ClassPathXmlApplicationContext(
new String[]{"classpath*:motan-benchmark-client.xml"});
benchmarkService = (BenchmarkService) applicationContext.getBean("motanBenchmarkReferer");
new MotanBenchmarkClient().start(concurrents, runtime, classname, params);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private void register(List<URL> registryUrls, URL serviceUrl) {
for (URL url : registryUrls) {
// 根据check参数的设置,register失败可能会抛异常,上层应该知晓
RegistryFactory registryFactory = ExtensionLoader.getExtensionLoader(RegistryFactory.class).getExtension(url.getProtocol());
Registry registry = registryFactory.getRegistry(url);
registry.register(serviceUrl);
}
}
#location 5
#vulnerability type NULL_DEREFERENCE
|
#fixed code
private void register(List<URL> registryUrls, URL serviceUrl) {
for (URL url : registryUrls) {
// 根据check参数的设置,register失败可能会抛异常,上层应该知晓
RegistryFactory registryFactory = ExtensionLoader.getExtensionLoader(RegistryFactory.class).getExtension(url.getProtocol());
if (registryFactory == null) {
throw new MotanFrameworkException(new MotanErrorMsg(500, MotanErrorMsgConstant.FRAMEWORK_REGISTER_ERROR_CODE,
"register error! Could not find extension for registry protocol:" + url.getProtocol()
+ ", make sure registry module for " + url.getProtocol() + " is in classpath!"));
}
Registry registry = registryFactory.getRegistry(url);
registry.register(serviceUrl);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Before
public void setup() throws DBException {
orientDBClient = new OrientDBClient();
Properties p = new Properties();
// TODO: Extract the property names into final variables in OrientDBClient
p.setProperty("orientdb.url", TEST_DB_URL);
orientDBClient.setProperties(p);
orientDBClient.init();
orientDBDictionary = orientDBClient.getDB().getDictionary();
}
#location 11
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Before
public void setup() throws DBException {
orientDBClient = new OrientDBClient();
Properties p = new Properties();
// TODO: Extract the property names into final variables in OrientDBClient
p.setProperty("orientdb.url", TEST_DB_URL);
orientDBClient.setProperties(p);
orientDBClient.init();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void cleanup() throws DBException
{
// Get the measurements instance as this is the only client that should
// count clean up time like an update since autoflush is off.
Measurements _measurements = Measurements.getMeasurements();
try {
long st=System.nanoTime();
if (_hTable != null) {
_hTable.flushCommits();
}
if (_hConn != null) {
_hConn.close();
}
long en=System.nanoTime();
_measurements.measure("UPDATE", (int)((en-st)/1000));
} catch (IOException e) {
throw new DBException(e);
}
}
#location 9
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public void cleanup() throws DBException
{
// Get the measurements instance as this is the only client that should
// count clean up time like an update since autoflush is off.
Measurements _measurements = Measurements.getMeasurements();
try {
long st=System.nanoTime();
if (_hTable != null) {
_hTable.flushCommits();
}
synchronized(THREAD_COUNT) {
int threadCount = THREAD_COUNT.decrementAndGet();
if (threadCount <= 0 && _hConn != null) {
_hConn.close();
}
}
long en=System.nanoTime();
_measurements.measure("UPDATE", (int)((en-st)/1000));
} catch (IOException e) {
throw new DBException(e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public int read(String table, String key, Set<String> fields,
HashMap<String, ByteIterator> result) {
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document query = new Document("_id", key);
FindIterable<Document> findIterable = collection
.withReadPreference(readPreference)
.find(query);
Document queryResult = null;
if (fields != null) {
Document projection = new Document();
for (String field : fields) {
projection.put(field, INCLUDE);
}
findIterable.projection(projection);
}
queryResult = findIterable.first();
if (queryResult != null) {
fillMap(result, queryResult);
}
return queryResult != null ? 0 : 1;
}
catch (Exception e) {
System.err.println(e.toString());
return 1;
}
}
#location 10
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public int read(String table, String key, Set<String> fields,
HashMap<String, ByteIterator> result) {
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document query = new Document("_id", key);
FindIterable<Document> findIterable = collection.find(query);
Document queryResult = null;
if (fields != null) {
Document projection = new Document();
for (String field : fields) {
projection.put(field, INCLUDE);
}
findIterable.projection(projection);
}
queryResult = findIterable.first();
if (queryResult != null) {
fillMap(result, queryResult);
}
return queryResult != null ? 0 : 1;
}
catch (Exception e) {
System.err.println(e.toString());
return 1;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public int update(String table, String key,
HashMap<String, ByteIterator> values) {
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document query = new Document("_id", key);
Document fieldsToSet = new Document();
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
fieldsToSet.put(entry.getKey(), entry.getValue().toArray());
}
Document update = new Document("$set", fieldsToSet);
UpdateResult result = collection.withWriteConcern(writeConcern)
.updateOne(query, update);
if (result.wasAcknowledged() && result.getMatchedCount() == 0) {
System.err.println("Nothing updated for key " + key);
return 1;
}
return 0;
}
catch (Exception e) {
System.err.println(e.toString());
return 1;
}
}
#location 15
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public int update(String table, String key,
HashMap<String, ByteIterator> values) {
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document query = new Document("_id", key);
Document fieldsToSet = new Document();
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
fieldsToSet.put(entry.getKey(), entry.getValue().toArray());
}
Document update = new Document("$set", fieldsToSet);
UpdateResult result = collection.updateOne(query, update);
if (result.wasAcknowledged() && result.getMatchedCount() == 0) {
System.err.println("Nothing updated for key " + key);
return 1;
}
return 0;
}
catch (Exception e) {
System.err.println(e.toString());
return 1;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static RemoteCacheManager getInstance(Properties props){
if(cacheManager == null){
synchronized (RemoteCacheManager.class) {
cacheManager = new RemoteCacheManager(props);
}
}
return cacheManager;
}
#location 7
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public static RemoteCacheManager getInstance(Properties props){
RemoteCacheManager result = cacheManager;
if(result == null){
synchronized (RemoteCacheManagerHolder.class) {
result = cacheManager;
if (result == null) {
cacheManager = result = new RemoteCacheManager(props);
}
}
}
return result;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void init() throws DBException
{
if ( (getProperties().getProperty("debug")!=null) &&
(getProperties().getProperty("debug").compareTo("true")==0) )
{
_debug=true;
}
if (getProperties().containsKey("clientbuffering"))
{
_clientSideBuffering = Boolean.parseBoolean(getProperties().getProperty("clientbuffering"));
}
if (getProperties().containsKey("writebuffersize"))
{
_writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize"));
}
if ("false".equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
_usePageFilter = false;
}
_columnFamily = getProperties().getProperty("columnfamily");
if (_columnFamily == null)
{
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
_columnFamilyBytes = Bytes.toBytes(_columnFamily);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
try
{
HTable ht = new HTable(config, table);
ht.getTableDescriptor();
}
catch (IOException e)
{
throw new DBException(e);
}
}
#location 36
#vulnerability type RESOURCE_LEAK
|
#fixed code
public void init() throws DBException
{
if ( (getProperties().getProperty("debug")!=null) &&
(getProperties().getProperty("debug").compareTo("true")==0) )
{
_debug=true;
}
if (getProperties().containsKey("clientbuffering"))
{
_clientSideBuffering = Boolean.parseBoolean(getProperties().getProperty("clientbuffering"));
}
if (getProperties().containsKey("writebuffersize"))
{
_writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize"));
}
if ("false".equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
_usePageFilter = false;
}
if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
config.set("hadoop.security.authentication", "Kerberos");
UserGroupInformation.setConfiguration(config);
}
if ( (getProperties().getProperty("principal")!=null) && (getProperties().getProperty("keytab")!=null) ){
try {
UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), getProperties().getProperty("keytab"));
} catch (IOException e) {
System.err.println("Keytab file is not readable or not found");
throw new DBException(e);
}
}
try {
_hConn = HConnectionManager.createConnection(config);
} catch (IOException e) {
System.err.println("Connection to HBase was not successful");
throw new DBException(e);
}
_columnFamily = getProperties().getProperty("columnfamily");
if (_columnFamily == null)
{
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
_columnFamilyBytes = Bytes.toBytes(_columnFamily);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
try
{
HTableInterface ht = _hConn.getTable(table);
ht.getTableDescriptor();
}
catch (IOException e)
{
throw new DBException(e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
try {
Statement stmt;
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
stmt = selectBuilder.from(table);
// The statement builder is not setup right for tokens.
// So, we need to build it manually.
String initialStmt = stmt.toString();
StringBuilder scanStmt = new StringBuilder();
scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1));
scanStmt.append(" WHERE ");
scanStmt.append(QueryBuilder.token(YCSB_KEY));
scanStmt.append(" >= ");
scanStmt.append("token('");
scanStmt.append(startkey);
scanStmt.append("')");
scanStmt.append(" LIMIT ");
scanStmt.append(recordcount);
stmt = new SimpleStatement(scanStmt.toString());
stmt.setConsistencyLevel(readConsistencyLevel);
if (debug) {
System.out.println(stmt.toString());
}
if (trace) {
stmt.enableTracing();
}
ResultSet rs = session.execute(stmt);
HashMap<String, ByteIterator> tuple;
while (!rs.isExhausted()) {
Row row = rs.one();
tuple = new HashMap<String, ByteIterator>();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
tuple.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
tuple.put(def.getName(), null);
}
}
result.add(tuple);
}
return Status.OK;
} catch (Exception e) {
e.printStackTrace();
System.out.println("Error scanning with startkey: " + startkey);
return Status.ERROR;
}
}
#location 37
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
try {
PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields);
// Prepare statement on demand
if (stmt == null) {
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
Select selectStmt = selectBuilder.from(table);
// The statement builder is not setup right for tokens.
// So, we need to build it manually.
String initialStmt = selectStmt.toString();
StringBuilder scanStmt = new StringBuilder();
scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1));
scanStmt.append(" WHERE ");
scanStmt.append(QueryBuilder.token(YCSB_KEY));
scanStmt.append(" >= ");
scanStmt.append("token(");
scanStmt.append(QueryBuilder.bindMarker());
scanStmt.append(")");
scanStmt.append(" LIMIT ");
scanStmt.append(QueryBuilder.bindMarker());
stmt = session.prepare(scanStmt.toString());
stmt.setConsistencyLevel(readConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = (fields == null) ?
scanAllStmt.getAndSet(stmt) :
scanStmts.putIfAbsent(new HashSet(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
logger.debug(stmt.getQueryString());
logger.debug("startKey = {}, recordcount = {}", startkey, recordcount);
ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount)));
HashMap<String, ByteIterator> tuple;
while (!rs.isExhausted()) {
Row row = rs.one();
tuple = new HashMap<String, ByteIterator>();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
tuple.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
tuple.put(def.getName(), null);
}
}
result.add(tuple);
}
return Status.OK;
} catch (Exception e) {
logger.error(
MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e);
return Status.ERROR;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try {
Statement stmt;
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
stmt = selectBuilder.from(table).where(QueryBuilder.eq(YCSB_KEY, key))
.limit(1);
stmt.setConsistencyLevel(readConsistencyLevel);
if (debug) {
System.out.println(stmt.toString());
}
if (trace) {
stmt.enableTracing();
}
ResultSet rs = session.execute(stmt);
if (rs.isExhausted()) {
return Status.NOT_FOUND;
}
// Should be only 1 row
Row row = rs.one();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
result.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
result.put(def.getName(), null);
}
}
return Status.OK;
} catch (Exception e) {
e.printStackTrace();
System.out.println("Error reading key: " + key);
return Status.ERROR;
}
}
#location 21
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try {
PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields);
// Prepare statement on demand
if (stmt == null) {
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
stmt = session.prepare(selectBuilder.from(table)
.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))
.limit(1));
stmt.setConsistencyLevel(readConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = (fields == null) ?
readAllStmt.getAndSet(stmt) :
readStmts.putIfAbsent(new HashSet(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
logger.debug(stmt.getQueryString());
logger.debug("key = {}", key);
ResultSet rs = session.execute(stmt.bind(key));
if (rs.isExhausted()) {
return Status.NOT_FOUND;
}
// Should be only 1 row
Row row = rs.one();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
result.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
result.put(def.getName(), null);
}
}
return Status.OK;
} catch (Exception e) {
logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e);
return Status.ERROR;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public int scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
MongoCursor<Document> cursor = null;
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document scanRange = new Document("$gte", startkey);
Document query = new Document("_id", scanRange);
Document sort = new Document("_id", INCLUDE);
Document projection = null;
if (fields != null) {
projection = new Document();
for (String fieldName : fields) {
projection.put(fieldName, INCLUDE);
}
}
cursor = collection.withReadPreference(readPreference).find(query)
.projection(projection).sort(sort).limit(recordcount).iterator();
if (!cursor.hasNext()) {
System.err.println("Nothing found in scan for key " + startkey);
return 1;
}
while (cursor.hasNext()) {
HashMap<String, ByteIterator> resultMap = new HashMap<String, ByteIterator>();
Document obj = cursor.next();
fillMap(resultMap, obj);
result.add(resultMap);
}
return 0;
}
catch (Exception e) {
System.err.println(e.toString());
return 1;
}
finally {
if (cursor != null) {
cursor.close();
}
}
}
#location 20
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public int scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
MongoCursor<Document> cursor = null;
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document scanRange = new Document("$gte", startkey);
Document query = new Document("_id", scanRange);
Document sort = new Document("_id", INCLUDE);
Document projection = null;
if (fields != null) {
projection = new Document();
for (String fieldName : fields) {
projection.put(fieldName, INCLUDE);
}
}
cursor = collection.find(query)
.projection(projection).sort(sort).limit(recordcount).iterator();
if (!cursor.hasNext()) {
System.err.println("Nothing found in scan for key " + startkey);
return 1;
}
while (cursor.hasNext()) {
HashMap<String, ByteIterator> resultMap = new HashMap<String, ByteIterator>();
Document obj = cursor.next();
fillMap(resultMap, obj);
result.add(resultMap);
}
return 0;
}
catch (Exception e) {
System.err.println(e.toString());
return 1;
}
finally {
if (cursor != null) {
cursor.close();
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void init() throws DBException {
Properties props = getProperties();
String url = props.getProperty(URL_PROPERTY);
String user = props.getProperty(USER_PROPERTY, USER_PROPERTY_DEFAULT);
String password = props.getProperty(PASSWORD_PROPERTY, PASSWORD_PROPERTY_DEFAULT);
Boolean newdb = Boolean.parseBoolean(props.getProperty(NEWDB_PROPERTY, NEWDB_PROPERTY_DEFAULT));
String remoteStorageType = props.getProperty(STORAGE_TYPE_PROPERTY);
Boolean dotransactions = Boolean.parseBoolean(
props.getProperty(DO_TRANSACTIONS_PROPERTY, DO_TRANSACTIONS_PROPERTY_DEFAULT));
if (url == null) {
throw new DBException(String.format("Required property \"%s\" missing for OrientDBClient", URL_PROPERTY));
}
log.info("OrientDB loading database url = " + url);
// If using a remote database, use the OServerAdmin interface to connect
if (url.startsWith(OEngineRemote.NAME)) {
isRemote = true;
if (remoteStorageType == null) {
throw new DBException("When connecting to a remote OrientDB instance, " +
"specify a database storage type (plocal or memory) with " + STORAGE_TYPE_PROPERTY);
}
try {
OServerAdmin server = new OServerAdmin(url).connect(user, password);
if (server.existsDatabase()) {
if (newdb && !dotransactions) {
log.info("OrientDB dropping and recreating fresh db on remote server.");
server.dropDatabase(remoteStorageType);
server.createDatabase(server.getURL(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType);
}
} else {
log.info("OrientDB database not found, creating fresh db");
server.createDatabase(server.getURL(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType);
}
server.close();
db = new ODatabaseDocumentTx(url).open(user, password);
} catch (IOException | OException e) {
throw new DBException(String.format("Error interfacing with %s", url), e);
}
} else {
try {
db = new ODatabaseDocumentTx(url);
if (db.exists()) {
db.open(user, password);
if (newdb && !dotransactions) {
log.info("OrientDB dropping and recreating fresh db.");
db.drop();
db.create();
}
} else {
log.info("OrientDB database not found, creating fresh db");
db.create();
}
} catch (ODatabaseException e) {
throw new DBException(String.format("Error interfacing with %s", url), e);
}
}
log.info("OrientDB connection created with " + url);
dictionary = db.getMetadata().getIndexManager().getDictionary();
if (!db.getMetadata().getSchema().existsClass(CLASS)) {
db.getMetadata().getSchema().createClass(CLASS);
}
db.declareIntent(new OIntentMassiveInsert());
}
#location 42
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Override
public void init() throws DBException {
Properties props = getProperties();
String url = props.getProperty(URL_PROPERTY);
String user = props.getProperty(USER_PROPERTY, USER_PROPERTY_DEFAULT);
String password = props.getProperty(PASSWORD_PROPERTY, PASSWORD_PROPERTY_DEFAULT);
Boolean newdb = Boolean.parseBoolean(props.getProperty(NEWDB_PROPERTY, NEWDB_PROPERTY_DEFAULT));
String remoteStorageType = props.getProperty(STORAGE_TYPE_PROPERTY);
String intent = props.getProperty(INTENT_PROPERTY, INTENT_PROPERTY_DEFAULT);
Boolean dotransactions = Boolean.parseBoolean(
props.getProperty(DO_TRANSACTIONS_PROPERTY, DO_TRANSACTIONS_PROPERTY_DEFAULT));
if (url == null) {
throw new DBException(String.format("Required property \"%s\" missing for OrientDBClient", URL_PROPERTY));
}
log.info("OrientDB loading database url = " + url);
// If using a remote database, use the OServerAdmin interface to connect
if (url.startsWith(OEngineRemote.NAME)) {
isRemote = true;
if (remoteStorageType == null) {
throw new DBException("When connecting to a remote OrientDB instance, " +
"specify a database storage type (plocal or memory) with " + STORAGE_TYPE_PROPERTY);
}
try {
OServerAdmin server = new OServerAdmin(url).connect(user, password);
if (server.existsDatabase()) {
if (newdb && !dotransactions) {
log.info("OrientDB dropping and recreating fresh db on remote server.");
server.dropDatabase(remoteStorageType);
server.createDatabase(server.getURL(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType);
}
} else {
log.info("OrientDB database not found, creating fresh db");
server.createDatabase(server.getURL(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType);
}
server.close();
db = new ODatabaseDocumentTx(url).open(user, password);
} catch (IOException | OException e) {
throw new DBException(String.format("Error interfacing with %s", url), e);
}
} else {
try {
db = new ODatabaseDocumentTx(url);
if (db.exists()) {
db.open(user, password);
if (newdb && !dotransactions) {
log.info("OrientDB dropping and recreating fresh db.");
db.drop();
db.create();
}
} else {
log.info("OrientDB database not found, creating fresh db");
db.create();
}
} catch (ODatabaseException e) {
throw new DBException(String.format("Error interfacing with %s", url), e);
}
}
log.info("OrientDB connection created with " + url);
dictionary = db.getMetadata().getIndexManager().getDictionary();
if (!db.getMetadata().getSchema().existsClass(CLASS)) {
db.getMetadata().getSchema().createClass(CLASS);
}
if (intent.equals(ORIENTDB_MASSIVEINSERT)) {
log.info("Declaring intent of MassiveInsert.");
db.declareIntent(new OIntentMassiveInsert());
} else if (intent.equals(ORIENTDB_MASSIVEREAD)) {
log.info("Declaring intent of MassiveRead.");
db.declareIntent(new OIntentMassiveRead());
} else if (intent.equals(ORIENTDB_NOCACHE)) {
log.info("Declaring intent of NoCache.");
db.declareIntent(new OIntentNoCache());
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
this.currentTable = this.connection.getTable(tName);
// suggestions from
// http://ryantwopointoh.blogspot.com/2009/01/
// performance-of-hbase-importing.html
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = this.connection.getBufferedMutator(p);
}
}
#location 9
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
synchronized (CONNECTION_LOCK) {
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public Status delete(String table, String key) {
try {
Statement stmt;
stmt = QueryBuilder.delete().from(table)
.where(QueryBuilder.eq(YCSB_KEY, key));
stmt.setConsistencyLevel(writeConsistencyLevel);
if (debug) {
System.out.println(stmt.toString());
}
if (trace) {
stmt.enableTracing();
}
session.execute(stmt);
return Status.OK;
} catch (Exception e) {
e.printStackTrace();
System.out.println("Error deleting key: " + key);
}
return Status.ERROR;
}
#location 11
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public Status delete(String table, String key) {
try {
PreparedStatement stmt = deleteStmt.get();
// Prepare statement on demand
if (stmt == null) {
stmt = session.prepare(QueryBuilder.delete().from(table)
.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())));
stmt.setConsistencyLevel(writeConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = deleteStmt.getAndSet(stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
logger.debug(stmt.getQueryString());
logger.debug("key = {}", key);
session.execute(stmt.bind(key));
return Status.OK;
} catch (Exception e) {
logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e);
}
return Status.ERROR;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Before
public void setUp() throws Exception {
// check that this is Java 8+
int javaVersion = Integer.parseInt(System.getProperty("java.version").split("\\.")[1]);
Assume.assumeTrue(javaVersion >= 8);
session = cassandraUnit.getSession();
Properties p = new Properties();
p.setProperty("hosts", HOST);
p.setProperty("port", Integer.toString(PORT));
p.setProperty("table", TABLE);
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
client = new CassandraCQLClient();
client.setProperties(p);
client.init();
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Before
public void setUp() throws Exception {
session = cassandraUnit.getSession();
Properties p = new Properties();
p.setProperty("hosts", HOST);
p.setProperty("port", Integer.toString(PORT));
p.setProperty("table", TABLE);
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
client = new CassandraCQLClient();
client.setProperties(p);
client.init();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
protected net.spy.memcached.MemcachedClient createMemcachedClient()
throws Exception {
ConnectionFactoryBuilder connectionFactoryBuilder =
new ConnectionFactoryBuilder();
connectionFactoryBuilder.setReadBufferSize(Integer.parseInt(
getProperties().getProperty(READ_BUFFER_SIZE_PROPERTY,
DEFAULT_READ_BUFFER_SIZE)));
connectionFactoryBuilder.setOpTimeout(Integer.parseInt(
getProperties().getProperty(OP_TIMEOUT_PROPERTY, DEFAULT_OP_TIMEOUT)));
String failureString = getProperties().getProperty(FAILURE_MODE_PROPERTY);
connectionFactoryBuilder.setFailureMode(
failureString == null ? FAILURE_MODE_PROPERTY_DEFAULT
: FailureMode.valueOf(failureString));
// Note: this only works with IPv4 addresses due to its assumption of
// ":" being the separator of hostname/IP and port; this is not the case
// when dealing with IPv6 addresses.
//
// TODO(mbrukman): fix this.
List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
String[] hosts = getProperties().getProperty(HOSTS_PROPERTY).split(",");
for (String address : hosts) {
int colon = address.indexOf(":");
int port = DEFAULT_PORT;
String host = address;
if (colon != -1) {
port = Integer.parseInt(address.substring(colon + 1));
host = address.substring(0, colon);
}
addresses.add(new InetSocketAddress(host, port));
}
return new net.spy.memcached.MemcachedClient(
connectionFactoryBuilder.build(), addresses);
}
#location 24
#vulnerability type NULL_DEREFERENCE
|
#fixed code
protected net.spy.memcached.MemcachedClient createMemcachedClient()
throws Exception {
ConnectionFactoryBuilder connectionFactoryBuilder =
new ConnectionFactoryBuilder();
connectionFactoryBuilder.setReadBufferSize(Integer.parseInt(
getProperties().getProperty(READ_BUFFER_SIZE_PROPERTY,
DEFAULT_READ_BUFFER_SIZE)));
connectionFactoryBuilder.setOpTimeout(Integer.parseInt(
getProperties().getProperty(OP_TIMEOUT_PROPERTY, DEFAULT_OP_TIMEOUT)));
String protocolString = getProperties().getProperty(PROTOCOL_PROPERTY);
connectionFactoryBuilder.setProtocol(
protocolString == null ? DEFAULT_PROTOCOL
: ConnectionFactoryBuilder.Protocol.valueOf(protocolString.toUpperCase()));
String failureString = getProperties().getProperty(FAILURE_MODE_PROPERTY);
connectionFactoryBuilder.setFailureMode(
failureString == null ? FAILURE_MODE_PROPERTY_DEFAULT
: FailureMode.valueOf(failureString));
// Note: this only works with IPv4 addresses due to its assumption of
// ":" being the separator of hostname/IP and port; this is not the case
// when dealing with IPv6 addresses.
//
// TODO(mbrukman): fix this.
List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
String[] hosts = getProperties().getProperty(HOSTS_PROPERTY).split(",");
for (String address : hosts) {
int colon = address.indexOf(":");
int port = DEFAULT_PORT;
String host = address;
if (colon != -1) {
port = Integer.parseInt(address.substring(colon + 1));
host = address.substring(0, colon);
}
addresses.add(new InetSocketAddress(host, port));
}
return new net.spy.memcached.MemcachedClient(
connectionFactoryBuilder.build(), addresses);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public boolean doTransaction(DB db, Object threadstate) {
switch (operationchooser.nextString()) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
#location 3
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void cleanup() throws DBException {
// Get the measurements instance as this is the only client that should
// count clean up time like an update if client-side buffering is
// enabled.
Measurements measurements = Measurements.getMeasurements();
try {
long st = System.nanoTime();
if (bufferedMutator != null) {
bufferedMutator.close();
}
if (currentTable != null) {
currentTable.close();
}
long en = System.nanoTime();
final String type = clientSideBuffering ? "UPDATE" : "CLEANUP";
measurements.measure(type, (int) ((en - st) / 1000));
synchronized(threadCount) {
--threadCount;
if (threadCount <= 0 && connection != null) {
connection.close();
connection = null;
}
}
} catch (IOException e) {
throw new DBException(e);
}
}
#location 18
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void cleanup() throws DBException {
// Get the measurements instance as this is the only client that should
// count clean up time like an update if client-side buffering is
// enabled.
Measurements measurements = Measurements.getMeasurements();
try {
long st = System.nanoTime();
if (bufferedMutator != null) {
bufferedMutator.close();
}
if (currentTable != null) {
currentTable.close();
}
long en = System.nanoTime();
final String type = clientSideBuffering ? "UPDATE" : "CLEANUP";
measurements.measure(type, (int) ((en - st) / 1000));
threadCount.decrementAndGet();
if (threadCount.get() <= 0) {
// Means we are done so ok to shut down the Connection.
synchronized (CONNECTION_LOCK) {
if (connection != null) {
connection.close();
connection = null;
}
}
}
} catch (IOException e) {
throw new DBException(e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private Map<String, byte[]> convertToBytearrayMap(Map<String,ByteIterator> values) {
Map<String, byte[]> retVal = new HashMap<String, byte[]>();
for (String key : values.keySet()) {
retVal.put(key, values.get(key).toArray());
}
return retVal;
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
#fixed code
private Map<String, byte[]> convertToBytearrayMap(Map<String,ByteIterator> values) {
Map<String, byte[]> retVal = new HashMap<String, byte[]>();
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
retVal.put(entry.getKey(), entry.getValue().toArray());
}
return retVal;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public Status delete(String table, String key) {
if (debug) {
System.out.println("Doing delete for key: " + key);
}
setTable(table);
final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder()
.setRowKey(ByteString.copyFromUtf8(key))
.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
rowMutation.addMutationsBuilder().setDeleteFromRow(
DeleteFromRow.getDefaultInstance());
try {
if (clientSideBuffering) {
asyncExecutor.mutateRowAsync(rowMutation.build());
} else {
client.mutateRow(rowMutation.build());
}
return Status.OK;
} catch (ServiceException e) {
System.err.println("Failed to delete key: " + key + " " + e.getMessage());
return Status.ERROR;
} catch (InterruptedException e) {
System.err.println("Interrupted while delete key: " + key + " "
+ e.getMessage());
Thread.currentThread().interrupt();
return Status.ERROR; // never get here, but lets make the compiler happy
}
}
#location 17
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public Status delete(String table, String key) {
if (debug) {
System.out.println("Doing delete for key: " + key);
}
setTable(table);
final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder()
.setRowKey(ByteString.copyFromUtf8(key))
.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
rowMutation.addMutationsBuilder().setDeleteFromRow(
DeleteFromRow.getDefaultInstance());
try {
if (clientSideBuffering) {
bulkMutation.add(rowMutation.build());
} else {
client.mutateRow(rowMutation.build());
}
return Status.OK;
} catch (RuntimeException e) {
System.err.println("Failed to delete key: " + key + " " + e.getMessage());
return Status.ERROR;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void cleanup() throws DBException {
if (asyncExecutor != null) {
try {
asyncExecutor.flush();
} catch (IOException e) {
throw new DBException(e);
}
}
synchronized (threadCount) {
--threadCount;
if (threadCount <= 0) {
try {
session.close();
} catch (IOException e) {
throw new DBException(e);
}
}
}
}
#location 10
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void cleanup() throws DBException {
if (asyncExecutor != null) {
try {
asyncExecutor.flush();
} catch (IOException e) {
throw new DBException(e);
}
}
synchronized (CONFIG) {
--threadCount;
if (threadCount <= 0) {
try {
session.close();
} catch (IOException e) {
throw new DBException(e);
}
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void init() throws DBException {
if ("true"
.equals(getProperties().getProperty("clientbuffering", "false"))) {
this.clientSideBuffering = true;
}
if (getProperties().containsKey("writebuffersize")) {
writeBufferSize =
Long.parseLong(getProperties().getProperty("writebuffersize"));
}
if (getProperties().getProperty("durability") != null) {
this.durability =
Durability.valueOf(getProperties().getProperty("durability"));
}
if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
config.set("hadoop.security.authentication", "Kerberos");
UserGroupInformation.setConfiguration(config);
}
if ((getProperties().getProperty("principal")!=null)
&& (getProperties().getProperty("keytab")!=null)) {
try {
UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
getProperties().getProperty("keytab"));
} catch (IOException e) {
System.err.println("Keytab file is not readable or not found");
throw new DBException(e);
}
}
try {
synchronized(threadCount) {
++threadCount;
if (connection == null) {
connection = ConnectionFactory.createConnection(config);
}
}
} catch (java.io.IOException e) {
throw new DBException(e);
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
if ("false"
.equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
usePageFilter = false;
}
columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
try {
final TableName tName = TableName.valueOf(table);
connection.getTable(tName).getTableDescriptor();
} catch (IOException e) {
throw new DBException(e);
}
}
#location 34
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void init() throws DBException {
if ("true"
.equals(getProperties().getProperty("clientbuffering", "false"))) {
this.clientSideBuffering = true;
}
if (getProperties().containsKey("writebuffersize")) {
writeBufferSize =
Long.parseLong(getProperties().getProperty("writebuffersize"));
}
if (getProperties().getProperty("durability") != null) {
this.durability =
Durability.valueOf(getProperties().getProperty("durability"));
}
if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
config.set("hadoop.security.authentication", "Kerberos");
UserGroupInformation.setConfiguration(config);
}
if ((getProperties().getProperty("principal")!=null)
&& (getProperties().getProperty("keytab")!=null)) {
try {
UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
getProperties().getProperty("keytab"));
} catch (IOException e) {
System.err.println("Keytab file is not readable or not found");
throw new DBException(e);
}
}
try {
threadCount.getAndIncrement();
synchronized (CONNECTION_LOCK) {
if (connection == null) {
// Initialize if not set up already.
connection = ConnectionFactory.createConnection(config);
}
}
} catch (java.io.IOException e) {
throw new DBException(e);
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
if ("false"
.equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
usePageFilter = false;
}
columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
try {
final TableName tName = TableName.valueOf(table);
synchronized (CONNECTION_LOCK) {
connection.getTable(tName).getTableDescriptor();
}
} catch (IOException e) {
throw new DBException(e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
synchronized (CONNECTION_LOCK) {
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
}
#location 7
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
this.currentTable = this.connection.getTable(tName);
// suggestions from
// http://ryantwopointoh.blogspot.com/2009/01/
// performance-of-hbase-importing.html
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = this.connection.getBufferedMutator(p);
}
}
#location 7
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
synchronized (CONNECTION_LOCK) {
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void init() throws DBException {
// initialize OrientDB driver
Properties props = getProperties();
String url;
if (System.getProperty("os.name").toLowerCase().contains("win"))
url = props.getProperty("orientdb.url", "plocal:C:/temp/databases/ycsb");
else
url = props.getProperty("orientdb.url", "plocal:/temp/databases/ycsb");
String user = props.getProperty("orientdb.user", "admin");
String password = props.getProperty("orientdb.password", "admin");
Boolean newdb = Boolean.parseBoolean(props.getProperty("orientdb.newdb", "false"));
try {
System.out.println("OrientDB loading database url = " + url);
OGlobalConfiguration.STORAGE_KEEP_OPEN.setValue(false);
db = new ODatabaseDocumentTx(url);
if (db.exists()) {
db.open(user, password);
if (newdb) {
System.out.println("OrientDB drop and recreate fresh db");
db.drop();
db.create();
}
} else {
System.out.println("OrientDB database not found, create fresh db");
db.create();
}
System.out.println("OrientDB connection created with " + url);
dictionary = db.getMetadata().getIndexManager().getDictionary();
if (!db.getMetadata().getSchema().existsClass(CLASS))
db.getMetadata().getSchema().createClass(CLASS);
db.declareIntent(new OIntentMassiveInsert());
} catch (Exception e1) {
System.err.println("Could not initialize OrientDB connection pool for Loader: " + e1.toString());
e1.printStackTrace();
return;
}
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public void init() throws DBException {
Properties props = getProperties();
String url = props.getProperty(URL_PROPERTY);
String user = props.getProperty(USER_PROPERTY, USER_PROPERTY_DEFAULT);
String password = props.getProperty(PASSWORD_PROPERTY, PASSWORD_PROPERTY_DEFAULT);
Boolean newdb = Boolean.parseBoolean(props.getProperty(NEWDB_PROPERTY, NEWDB_PROPERTY_DEFAULT));
if (url == null) {
throw new DBException(String.format("Required property \"%s\" missing for OrientDBClient", URL_PROPERTY));
}
try {
System.out.println("OrientDB loading database url = " + url);
OGlobalConfiguration.STORAGE_KEEP_OPEN.setValue(false);
db = new ODatabaseDocumentTx(url);
if (db.exists()) {
db.open(user, password);
if (newdb) {
System.out.println("OrientDB drop and recreate fresh db");
db.drop();
db.create();
}
} else {
System.out.println("OrientDB database not found, create fresh db");
db.create();
}
System.out.println("OrientDB connection created with " + url);
dictionary = db.getMetadata().getIndexManager().getDictionary();
if (!db.getMetadata().getSchema().existsClass(CLASS))
db.getMetadata().getSchema().createClass(CLASS);
db.declareIntent(new OIntentMassiveInsert());
} catch (Exception e1) {
System.err.println("Could not initialize OrientDB connection pool for Loader: " + e1.toString());
e1.printStackTrace();
return;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void cleanup() throws DBException {
if (bulkMutation != null) {
try {
bulkMutation.flush();
} catch(RuntimeException e){
throw new DBException(e);
}
}
if (asyncExecutor != null) {
try {
asyncExecutor.flush();
} catch (IOException e) {
throw new DBException(e);
}
}
synchronized (CONFIG) {
--threadCount;
if (threadCount <= 0) {
try {
session.close();
} catch (IOException e) {
throw new DBException(e);
}
}
}
}
#location 12
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void cleanup() throws DBException {
if (bulkMutation != null) {
try {
bulkMutation.flush();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new DBException(e);
} catch(RuntimeException e){
throw new DBException(e);
}
}
synchronized (CONFIG) {
--threadCount;
if (threadCount <= 0) {
try {
session.close();
} catch (IOException e) {
throw new DBException(e);
}
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public Status update(String table, String key,
HashMap<String, ByteIterator> values) {
if (debug) {
System.out.println("Setting up put for key: " + key);
}
setTable(table);
final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder();
rowMutation.setRowKey(ByteString.copyFromUtf8(key));
rowMutation.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
for (final Entry<String, ByteIterator> entry : values.entrySet()) {
final Mutation.Builder mutationBuilder = rowMutation.addMutationsBuilder();
final SetCell.Builder setCellBuilder = mutationBuilder.getSetCellBuilder();
setCellBuilder.setFamilyNameBytes(ByteStringer.wrap(columnFamilyBytes));
setCellBuilder.setColumnQualifier(ByteStringer.wrap(entry.getKey().getBytes()));
setCellBuilder.setValue(ByteStringer.wrap(entry.getValue().toArray()));
// Bigtable uses a 1ms granularity
setCellBuilder.setTimestampMicros(System.currentTimeMillis() * 1000);
}
try {
if (clientSideBuffering) {
asyncExecutor.mutateRowAsync(rowMutation.build());
} else {
client.mutateRow(rowMutation.build());
}
return Status.OK;
} catch (ServiceException e) {
System.err.println("Failed to insert key: " + key + " " + e.getMessage());
return Status.ERROR;
} catch (InterruptedException e) {
System.err.println("Interrupted while inserting key: " + key + " "
+ e.getMessage());
Thread.currentThread().interrupt();
return Status.ERROR; // never get here, but lets make the compiler happy
}
}
#location 28
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public Status update(String table, String key,
HashMap<String, ByteIterator> values) {
if (debug) {
System.out.println("Setting up put for key: " + key);
}
setTable(table);
final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder();
rowMutation.setRowKey(ByteString.copyFromUtf8(key));
rowMutation.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
for (final Entry<String, ByteIterator> entry : values.entrySet()) {
final Mutation.Builder mutationBuilder = rowMutation.addMutationsBuilder();
final SetCell.Builder setCellBuilder = mutationBuilder.getSetCellBuilder();
setCellBuilder.setFamilyNameBytes(ByteStringer.wrap(columnFamilyBytes));
setCellBuilder.setColumnQualifier(ByteStringer.wrap(entry.getKey().getBytes()));
setCellBuilder.setValue(ByteStringer.wrap(entry.getValue().toArray()));
// Bigtable uses a 1ms granularity
setCellBuilder.setTimestampMicros(System.currentTimeMillis() * 1000);
}
try {
if (clientSideBuffering) {
bulkMutation.add(rowMutation.build());
} else {
client.mutateRow(rowMutation.build());
}
return Status.OK;
} catch (RuntimeException e) {
System.err.println("Failed to insert key: " + key + " " + e.getMessage());
return Status.ERROR;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void acknowledge(int value)
{
if (value > limit + WINDOW_SIZE) {
throw new RuntimeException("This should be a different exception.");
}
window[value % WINDOW_SIZE] = true;
if (lock.tryLock()) {
// move a contiguous sequence from the window
// over to the "limit" variable
try {
int index;
for (index = limit + 1; index <= value; ++index) {
int slot = index % WINDOW_SIZE;
if (!window[slot]) {
break;
}
window[slot] = false;
}
limit = index - 1;
} finally {
lock.unlock();
}
}
}
#location 3
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public void acknowledge(int value)
{
// read volatile variable to see other threads' changes
limit = limit;
if (value > limit + WINDOW_SIZE) {
throw new RuntimeException("Too many unacknowledged insertion keys.");
}
window[value % WINDOW_SIZE] = true;
if (lock.tryLock()) {
// move a contiguous sequence from the window
// over to the "limit" variable
try {
int index;
for (index = limit + 1; index <= value; ++index) {
int slot = index % WINDOW_SIZE;
if (!window[slot]) {
break;
}
window[slot] = false;
}
limit = index - 1;
} finally {
lock.unlock();
}
}
// write volatile variable to make other threads see changes
limit = limit;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void init() throws DBException {
Properties props = getProperties();
// Defaults the user can override if needed
CONFIG.set("google.bigtable.auth.service.account.enable", "true");
// make it easy on ourselves by copying all CLI properties into the config object.
final Iterator<Entry<Object, Object>> it = props.entrySet().iterator();
while (it.hasNext()) {
Entry<Object, Object> entry = it.next();
CONFIG.set((String)entry.getKey(), (String)entry.getValue());
}
clientSideBuffering = getProperties().getProperty(CLIENT_SIDE_BUFFERING, "false")
.equals("true") ? true : false;
System.err.println("Running Google Bigtable with Proto API" +
(clientSideBuffering ? " and client side buffering." : "."));
synchronized (threadCount) {
++threadCount;
if (session == null) {
try {
options = BigtableOptionsFactory.fromConfiguration(CONFIG);
session = new BigtableSession(options);
// important to instantiate the first client here, otherwise the
// other threads may receive an NPE from the options when they try
// to read the cluster name.
client = session.getDataClient();
} catch (IOException e) {
throw new DBException("Error loading options from config: ", e);
}
} else {
client = session.getDataClient();
}
if (clientSideBuffering) {
heapSizeManager = new HeapSizeManager(
Long.parseLong(
getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY,
Long.toString(AsyncExecutor.ASYNC_MUTATOR_MAX_MEMORY_DEFAULT))),
Integer.parseInt(
getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS,
Integer.toString(AsyncExecutor.MAX_INFLIGHT_RPCS_DEFAULT))));
asyncExecutor = new AsyncExecutor(client, heapSizeManager);
}
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
final String columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for Bigtable table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
}
#location 21
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void init() throws DBException {
Properties props = getProperties();
// Defaults the user can override if needed
CONFIG.set("google.bigtable.auth.service.account.enable", "true");
// make it easy on ourselves by copying all CLI properties into the config object.
final Iterator<Entry<Object, Object>> it = props.entrySet().iterator();
while (it.hasNext()) {
Entry<Object, Object> entry = it.next();
CONFIG.set((String)entry.getKey(), (String)entry.getValue());
}
clientSideBuffering = getProperties().getProperty(CLIENT_SIDE_BUFFERING, "false")
.equals("true") ? true : false;
System.err.println("Running Google Bigtable with Proto API" +
(clientSideBuffering ? " and client side buffering." : "."));
synchronized (CONFIG) {
++threadCount;
if (session == null) {
try {
options = BigtableOptionsFactory.fromConfiguration(CONFIG);
session = new BigtableSession(options);
// important to instantiate the first client here, otherwise the
// other threads may receive an NPE from the options when they try
// to read the cluster name.
client = session.getDataClient();
} catch (IOException e) {
throw new DBException("Error loading options from config: ", e);
}
} else {
client = session.getDataClient();
}
if (clientSideBuffering) {
heapSizeManager = new HeapSizeManager(
Long.parseLong(
getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY,
Long.toString(AsyncExecutor.ASYNC_MUTATOR_MAX_MEMORY_DEFAULT))),
Integer.parseInt(
getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS,
Integer.toString(AsyncExecutor.MAX_INFLIGHT_RPCS_DEFAULT))));
asyncExecutor = new AsyncExecutor(client, heapSizeManager);
}
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
final String columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for Bigtable table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void init() throws DBException {
if ("true"
.equals(getProperties().getProperty("clientbuffering", "false"))) {
this.clientSideBuffering = true;
}
if (getProperties().containsKey("writebuffersize")) {
writeBufferSize =
Long.parseLong(getProperties().getProperty("writebuffersize"));
}
if (getProperties().getProperty("durability") != null) {
this.durability =
Durability.valueOf(getProperties().getProperty("durability"));
}
if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
config.set("hadoop.security.authentication", "Kerberos");
UserGroupInformation.setConfiguration(config);
}
if ((getProperties().getProperty("principal")!=null)
&& (getProperties().getProperty("keytab")!=null)) {
try {
UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
getProperties().getProperty("keytab"));
} catch (IOException e) {
System.err.println("Keytab file is not readable or not found");
throw new DBException(e);
}
}
try {
synchronized(threadCount) {
++threadCount;
if (connection == null) {
connection = ConnectionFactory.createConnection(config);
}
}
} catch (java.io.IOException e) {
throw new DBException(e);
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
if ("false"
.equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
usePageFilter = false;
}
columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
try {
final TableName tName = TableName.valueOf(table);
connection.getTable(tName).getTableDescriptor();
} catch (IOException e) {
throw new DBException(e);
}
}
#location 67
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public void init() throws DBException {
if ("true"
.equals(getProperties().getProperty("clientbuffering", "false"))) {
this.clientSideBuffering = true;
}
if (getProperties().containsKey("writebuffersize")) {
writeBufferSize =
Long.parseLong(getProperties().getProperty("writebuffersize"));
}
if (getProperties().getProperty("durability") != null) {
this.durability =
Durability.valueOf(getProperties().getProperty("durability"));
}
if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
config.set("hadoop.security.authentication", "Kerberos");
UserGroupInformation.setConfiguration(config);
}
if ((getProperties().getProperty("principal")!=null)
&& (getProperties().getProperty("keytab")!=null)) {
try {
UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
getProperties().getProperty("keytab"));
} catch (IOException e) {
System.err.println("Keytab file is not readable or not found");
throw new DBException(e);
}
}
try {
threadCount.getAndIncrement();
synchronized (CONNECTION_LOCK) {
if (connection == null) {
// Initialize if not set up already.
connection = ConnectionFactory.createConnection(config);
}
}
} catch (java.io.IOException e) {
throw new DBException(e);
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
if ("false"
.equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
usePageFilter = false;
}
columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
try {
final TableName tName = TableName.valueOf(table);
synchronized (CONNECTION_LOCK) {
connection.getTable(tName).getTableDescriptor();
}
} catch (IOException e) {
throw new DBException(e);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Before
public void setup() throws DBException {
orientDBClient = new OrientDBClient();
Properties p = new Properties();
// TODO: Extract the property names into final variables in OrientDBClient
p.setProperty("orientdb.url", TEST_DB_URL);
orientDBClient.setProperties(p);
orientDBClient.init();
orientDBConnection = new ODatabaseDocumentTx(TEST_DB_URL).open("admin","admin");
orientDBDictionary = orientDBConnection.getMetadata().getIndexManager().getDictionary();
}
#location 11
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Before
public void setup() throws DBException {
orientDBClient = new OrientDBClient();
Properties p = new Properties();
// TODO: Extract the property names into final variables in OrientDBClient
p.setProperty("orientdb.url", TEST_DB_URL);
orientDBClient.setProperties(p);
orientDBClient.init();
orientDBDictionary = orientDBClient.db.getDictionary();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
String documentLink = getDocumentLink(this.databaseName, table, key);
Document document = getDocumentDefinition(key, values);
RequestOptions reqOptions = getRequestOptions(key);
if (reqOptions == null) {
reqOptions = new RequestOptions();
}
AccessCondition accessCondition = new AccessCondition();
accessCondition.setCondition(document.getETag());
accessCondition.setType(AccessConditionType.IfMatch);
reqOptions.setAccessCondition(accessCondition);
ResourceResponse<Document> updatedResource = null;
long startTime = System.nanoTime();
try {
updatedResource = AzureCosmosClient.client.replaceDocument(documentLink, document, reqOptions);
} catch (DocumentClientException e) {
if (!this.includeExceptionStackInLog) {
e = null;
}
LOGGER.error("Failed to update key {}", key, e);
return Status.ERROR;
} finally {
long elapsed = (System.nanoTime() - startTime) / NS_IN_US;
LOGGER.debug("Updated key {} in {}us - ActivityID: {}", key, elapsed,
updatedResource != null ? updatedResource.getActivityId() : NA_STRING);
}
return Status.OK;
}
#location 4
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
// Azure Cosmos does not have patch support. Until then we need to read
// the document, update in place, and then write back.
// This could actually be made more efficient by using a stored procedure
// and doing the read/modify write on the server side. Perhaps
// that will be a future improvement.
String documentLink = getDocumentLink(this.databaseName, table, key);
ResourceResponse<Document> updatedResource = null;
ResourceResponse<Document> readResouce = null;
RequestOptions reqOptions = null;
Document document = null;
try {
reqOptions = getRequestOptions(key);
readResouce = AzureCosmosClient.client.readDocument(documentLink, reqOptions);
document = readResouce.getResource();
} catch (DocumentClientException e) {
if (!this.includeExceptionStackInLog) {
e = null;
}
LOGGER.error("Failed to read key {} in collection {} in database {} during update operation",
key, table, this.databaseName, e);
return Status.ERROR;
}
// Update values
for (Entry<String, ByteIterator> entry : values.entrySet()) {
document.set(entry.getKey(), entry.getValue().toString());
}
AccessCondition accessCondition = new AccessCondition();
accessCondition.setCondition(document.getETag());
accessCondition.setType(AccessConditionType.IfMatch);
reqOptions.setAccessCondition(accessCondition);
try {
updatedResource = AzureCosmosClient.client.replaceDocument(documentLink, document, reqOptions);
} catch (DocumentClientException e) {
if (!this.includeExceptionStackInLog) {
e = null;
}
LOGGER.error("Failed to update key {}", key, e);
return Status.ERROR;
}
return Status.OK;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
this.currentTable = this.connection.getTable(tName);
// suggestions from
// http://ryantwopointoh.blogspot.com/2009/01/
// performance-of-hbase-importing.html
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = this.connection.getBufferedMutator(p);
}
}
#location 10
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
synchronized (CONNECTION_LOCK) {
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public boolean doTransaction(DB db, Object threadstate) {
switch (operationchooser.nextString()) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
#location 3
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public int insert(String table, String key,
HashMap<String, ByteIterator> values) {
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document toInsert = new Document("_id", key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
toInsert.put(entry.getKey(), entry.getValue().toArray());
}
bulkInserts.add(toInsert);
if (bulkInserts.size() == batchSize) {
collection.withWriteConcern(writeConcern)
.insertMany(bulkInserts, INSERT_MANY_OPTIONS);
bulkInserts.clear();
}
return 0;
}
catch (Exception e) {
System.err.println("Exception while trying bulk insert with "
+ bulkInserts.size());
e.printStackTrace();
return 1;
}
}
#location 14
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public int insert(String table, String key,
HashMap<String, ByteIterator> values) {
try {
MongoCollection<Document> collection = database
.getCollection(table);
Document toInsert = new Document("_id", key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
toInsert.put(entry.getKey(), entry.getValue().toArray());
}
bulkInserts.add(toInsert);
if (bulkInserts.size() == batchSize) {
collection.insertMany(bulkInserts, INSERT_MANY_OPTIONS);
bulkInserts.clear();
}
return 0;
}
catch (Exception e) {
System.err.println("Exception while trying bulk insert with "
+ bulkInserts.size());
e.printStackTrace();
return 1;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
synchronized (CONNECTION_LOCK) {
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
}
#location 5
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
List<Document> documents;
FeedResponse<Document> feedResponse = null;
try {
feedResponse = AzureCosmosClient.client.queryDocuments(getDocumentCollectionLink(this.databaseName, table),
new SqlQuerySpec(queryText,
new SqlParameterCollection(new SqlParameter("@recordcount", recordcount),
new SqlParameter("@startkey", startkey))),
getFeedOptions(startkey));
documents = feedResponse.getQueryIterable().toList();
} catch (Exception e) {
if (!this.includeExceptionStackInLog) {
e = null;
}
LOGGER.error("Failed to scan with startKey={}, recordCount={}", startkey, recordcount, e);
return Status.ERROR;
}
if (documents != null) {
for (Document document : documents) {
result.add(this.extractResult(document));
}
}
return Status.OK;
}
#location 11
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
List<Document> documents;
FeedResponse<Document> feedResponse = null;
try {
FeedOptions feedOptions = new FeedOptions();
feedOptions.setEnableCrossPartitionQuery(true);
feedOptions.setMaxDegreeOfParallelism(this.maxDegreeOfParallelismForQuery);
feedResponse = AzureCosmosClient.client.queryDocuments(getDocumentCollectionLink(this.databaseName, table),
new SqlQuerySpec("SELECT TOP @recordcount * FROM root r WHERE r.id >= @startkey",
new SqlParameterCollection(new SqlParameter("@recordcount", recordcount),
new SqlParameter("@startkey", startkey))),
feedOptions);
documents = feedResponse.getQueryIterable().toList();
} catch (Exception e) {
if (!this.includeExceptionStackInLog) {
e = null;
}
LOGGER.error("Failed to scan with startKey={}, recordCount={}", startkey, recordcount, e);
return Status.ERROR;
}
if (documents != null) {
for (Document document : documents) {
result.add(this.extractResult(document));
}
}
return Status.OK;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void writeTo( Writer writer, WriterConfig config ) throws IOException {
WritingBuffer buffer = new WritingBuffer( writer, 128 );
write( config == null ? new JsonWriter( buffer ) : config.createWriter( buffer ) );
buffer.flush();
}
#location 4
#vulnerability type RESOURCE_LEAK
|
#fixed code
public void writeTo( Writer writer, WriterConfig config ) throws IOException {
if( writer == null ) {
throw new NullPointerException( "writer is null" );
}
if( config == null ) {
throw new NullPointerException( "config is null" );
}
WritingBuffer buffer = new WritingBuffer( writer, 128 );
write( config.createWriter( buffer ) );
buffer.flush();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static void main(String[] args) throws IOException {
final InputReader in = new InputReader(System.in);
V = in.readInt();
final int Q = in.readInt();
adj = new ArrayList[V + 1];
for (int i = 1; i <= V; i++) {
adj[i] = new ArrayList<>();
}
val = new int[V + 1];
for (int i = 1; i <= V; i++) {
val[i] = in.readInt();
}
final Map<Integer, Integer> map = new HashMap<>();
for (int i = 1; i <= V; i++) {
if (!map.containsKey(val[i])) {
map.put(val[i], map.size());
}
val[i] = map.get(val[i]);
}
final int currVal[] = new int[V + 1];
System.arraycopy(val, 0, currVal, 0, V + 1);
final int edges = V - 1;
for (int i = 0; i < edges; i++) {
final int u = in.readInt();
final int v = in.readInt();
adj[u].add(v);
adj[v].add(u);
}
start = new int[V + 1];
end = new int[V + 1];
final int[] eulerTour = new int[2 * (V + 1)];
level = new int[V + 1];
marked = new boolean[V + 1];
DP = new int[log(V) + 1][V + 1];
parent = new int[V + 1];
final int block[] = new int[2 * (V + 1)];
dfs(1, 0, 0, eulerTour);
binaryLift();
int numberOfQueries = 0, numberOfUpdates = 0;
final Query queries[] = new Query[MAX];
final Update updates[] = new Update[MAX];
for (int i = 0; i < Q; i++) {
if (in.readInt() == 1) { // Query
final int u = in.readInt();
final int v = in.readInt();
final Query q;
if (end[u] < start[v]) // Cousin Nodes
{
q = new Query(end[u], start[v], numberOfUpdates, LCA(u, v), numberOfQueries);
} else if (start[u] > end[v]) {
q = new Query(end[v], start[u], numberOfUpdates, LCA(u, v), numberOfQueries);
} else // Ancestors
{
q = new Query(Math.min(start[u], start[v]),
Math.max(start[u], start[v]),
numberOfUpdates,
-1,
numberOfQueries);
}
queries[numberOfQueries++] = q;
} else {
final int idx = in.readInt();
int newVal = in.readInt();
if (!map.containsKey(newVal)) {
map.put(newVal, map.size());
}
newVal = map.get(newVal);
updates[numberOfUpdates++] = new Update(idx, newVal, currVal[idx]);
currVal[idx] = newVal;
}
}
final int BLOCK_SIZE = (int) (Math.pow(2 * V, 2.0 / 3.0) + 1);
for (int i = 0; i < block.length; i++) {
block[i] = i / BLOCK_SIZE;
}
Arrays.sort(queries, 0, numberOfQueries, (o1, o2) -> {
if (block[o1.L] != block[o2.L]) {
return block[o1.L] - block[o2.L];
} else if (block[o1.R] != block[o2.R]) {
return block[o1.R] - block[o2.R];
} else {
return o1.updatesTillNow - o2.updatesTillNow;
}
});
final int ans[] = new int[numberOfQueries];
int moLeft = -1, moRight = -1;
int currentUpdateCount = 0;
final int[] freq = new int[map.size()];
for (int i = 0; i < numberOfQueries; i++) {
final Query query = queries[i];
while (currentUpdateCount < query.updatesTillNow) {
final Update update = updates[currentUpdateCount];
update(update.idx, update.newVal, freq);
currentUpdateCount++;
}
while (currentUpdateCount > query.updatesTillNow) {
currentUpdateCount--;
final Update update = updates[currentUpdateCount];
update(update.idx, update.prevVal, freq);
}
while (moLeft < query.L - 1) {
moLeft++;
visit(eulerTour[moLeft], freq);
}
while (moLeft >= query.L) {
visit(eulerTour[moLeft], freq);
moLeft--;
}
while (moRight < query.R) {
moRight++;
visit(eulerTour[moRight], freq);
}
while (moRight > query.R) {
visit(eulerTour[moRight], freq);
moRight--;
}
if (query.LCA != -1) {
visit(query.LCA, freq);
}
ans[query.id] = distinctCount;
if (query.LCA != -1) {
visit(query.LCA, freq);
}
}
final StringBuilder stringBuilder = new StringBuilder();
for (final int a : ans) {
stringBuilder.append(a).append('\n');
}
System.out.println(stringBuilder);
}
#location 18
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public static void main(String[] args) throws IOException {
final InputReader in = new InputReader(System.in);
V = in.readInt();
final int Q = in.readInt();
adj = new ArrayList[V + 1];
for (int i = 1; i <= V; i++) {
adj[i] = new ArrayList<>();
}
vertices = new int[V + 1];
for (int i = 1; i <= V; i++) {
vertices[i] = in.readInt();
}
final Map<Integer, Integer> map = new HashMap<>();
for (int i = 1; i <= V; i++) {
map.putIfAbsent(vertices[i], map.size());
vertices[i] = map.get(vertices[i]);
}
final int verticesCopy[] = new int[V + 1];
System.arraycopy(vertices, 0, verticesCopy, 0, V + 1);
final int edges = V - 1;
for (int i = 0; i < edges; i++) {
final int u = in.readInt();
final int v = in.readInt();
adj[u].add(v);
adj[v].add(u);
}
start = new int[V + 1];
end = new int[V + 1];
final int[] eulerTour = new int[2 * (V + 1)];
level = new int[V + 1];
marked = new boolean[V + 1];
DP = new int[log(V) + 1][V + 1];
parent = new int[V + 1];
dfs(1, 0, 0, eulerTour);
findAncestorsAtEachLevel();
int numberOfQueries = 0, numberOfUpdates = 0;
final Query queries[] = new Query[Q];
final Update updates[] = new Update[Q];
for (int i = 0; i < Q; i++) {
if (in.readInt() == 1) {
final int u = in.readInt();
final int v = in.readInt();
final Query q;
if (start[v] > end[u]) {
q = new Query(end[u], start[v], numberOfUpdates, LCA(u, v), numberOfQueries);
} else if (start[u] > end[v]) {
q = new Query(end[v], start[u], numberOfUpdates, LCA(u, v), numberOfQueries);
} else {
q = new Query(Math.min(start[u], start[v]),
Math.max(start[u], start[v]),
numberOfUpdates,
-1,
numberOfQueries);
}
queries[numberOfQueries++] = q;
} else {
final int idx = in.readInt();
int newVal = in.readInt();
map.putIfAbsent(newVal, map.size());
newVal = map.get(newVal);
updates[numberOfUpdates++] = new Update(idx, newVal, verticesCopy[idx]);
verticesCopy[idx] = newVal;
}
}
final int BLOCK_SIZE = (int) (Math.pow(2 * V, 2.0 / 3.0) + 1);
Arrays.sort(queries, 0, numberOfQueries, (first, second) -> {
if (first.L / BLOCK_SIZE != second.L / BLOCK_SIZE) {
return first.L / BLOCK_SIZE - second.L / BLOCK_SIZE;
} else if (first.R / BLOCK_SIZE != second.R / BLOCK_SIZE) {
return first.R / BLOCK_SIZE - second.R / BLOCK_SIZE;
} else {
return first.updatesTillNow - second.updatesTillNow;
}
});
final int ans[] = new int[numberOfQueries];
int moLeft = -1, moRight = -1;
int currentUpdateCount = 0;
final int[] frequency = new int[map.size()];
for (int i = 0; i < numberOfQueries; i++) {
final Query query = queries[i];
while (currentUpdateCount < query.updatesTillNow) {
final Update update = updates[currentUpdateCount++];
update(update.idx, update.newValue, frequency);
}
while (currentUpdateCount > query.updatesTillNow) {
final Update update = updates[--currentUpdateCount];
update(update.idx, update.previousValue, frequency);
}
while (moLeft < query.L - 1) {
moLeft++;
visit(eulerTour[moLeft], frequency);
}
while (moLeft >= query.L) {
visit(eulerTour[moLeft], frequency);
moLeft--;
}
while (moRight < query.R) {
moRight++;
visit(eulerTour[moRight], frequency);
}
while (moRight > query.R) {
visit(eulerTour[moRight], frequency);
moRight--;
}
if (query.LCA != -1) {
visit(query.LCA, frequency);
}
ans[query.id] = distinctCount;
if (query.LCA != -1) {
visit(query.LCA, frequency);
}
}
final StringBuilder stringBuilder = new StringBuilder();
for (final int a : ans) {
stringBuilder.append(a).append('\n');
}
System.out.println(stringBuilder);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public long query(int end) {
end--;
final int blockIndex = end / sqrt;
final Map<Integer, Integer> map = new HashMap<>();
final int[][] elements = new int[sqrt][2];
int count = 0;
final int endIndex = end % sqrt;
for (int i = 0; i <= endIndex; i++) {
if (!map.containsKey(a[blockIndex * sqrt + i])) {
map.put(a[blockIndex * sqrt + i], count);
elements[count][0] = a[blockIndex * sqrt + i];
count++;
}
elements[map.get(a[blockIndex * sqrt + i])][1]++;
}
BigInteger result = blockIndex > 0 ? blocks[blockIndex - 1] : BigInteger.ONE;
for (final Map.Entry<Integer, Integer> entry : map.entrySet()) {
final int previous = blockIndex > 0 ? frequencies[blockIndex - 1][entry.getKey()] : 0;
result = result
.multiply(invPowers[previous])
.mod(modo)
.multiply(powers[previous + elements[entry.getValue()][1]])
.mod(modo);
}
return result.longValue();
}
#location 14
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public long query(int end) {
end--;
final int blockIndex = end / sqrt;
final int freq[] = new int[powers.length];
final int queue[] = new int[sqrt];
int count = 0;
final int endIndex = end % sqrt;
for (int i = 0; i <= endIndex; i++) {
if (freq[a[blockIndex * sqrt + i]] == 0) {
queue[count++] = a[blockIndex * sqrt + i];
}
freq[a[blockIndex * sqrt + i]]++;
}
long result = blockIndex > 0 ? blocks[blockIndex - 1] : 1;
for (int i = 0; i < count; i++) {
final int previous = blockIndex > 0 ? frequencies[blockIndex - 1][queue[i]] : 0;
result = ((((result
* invPowers[previous])
% mod)
* powers[previous + freq[queue[i]]])
% mod);
}
return result;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static void main(String[] args) throws IOException {
final InputReader in = new InputReader(System.in);
int qSZ = 0, uSZ = 0;
V = in.readInt();
int Q = in.readInt();
int E = V - 1;
Query queries[] = new Query[MAX];
Update updates[] = new Update[MAX];
map = new HashMap<>(); // Used to compress the keys
adj = new ArrayList[V + 1];
for (int i = 1; i <= V; i++) {
adj[i] = new ArrayList<>();
}
val = new int[V + 1];
for (int i = 1; i <= V; i++) {
val[i] = in.readInt();
}
for (int i = 1; i <= V; i++) {
if (!map.containsKey(val[i])) {
map.put(val[i], map.size());
}
val[i] = map.get(val[i]);
}
int currVal[] = new int[V + 1];
System.arraycopy(val, 0, currVal, 0, V + 1);
while (E-- > 0) {
final int u = in.readInt();
final int v = in.readInt();
adj[u].add(v);
adj[v].add(u);
}
start = new int[V + 1];
end = new int[V + 1];
eulerTour = new int[2 * (V + 1)];
level = new int[V + 1];
marked = new boolean[V + 1];
DP = new int[log(V) + 1][V + 1];
parent = new int[V + 1];
blockCache = new int[2 * (V + 1)];
dfs(1, 0, 0);
binaryLift();
while (Q-- > 0) {
if (in.readInt() == 1) { // Query
final int u = in.readInt();
final int v = in.readInt();
final Query q;
if (end[u] < start[v]) // Cousin Nodes
{
q = new Query(end[u], start[v], uSZ, LCA(u, v), qSZ);
} else if (start[u] > end[v]) {
q = new Query(end[v], start[u], uSZ, LCA(u, v), qSZ);
} else // Ancestors
{
q = new Query(Math.min(start[u], start[v]), Math.max(start[u], start[v]), uSZ, -1, qSZ);
}
queries[qSZ++] = q;
} else {
final int idx = in.readInt();
int newVal = in.readInt();
if (!map.containsKey(newVal)) {
map.put(newVal, map.size());
}
newVal = map.get(newVal);
updates[uSZ++] = new Update(idx, newVal, currVal[idx]);
currVal[idx] = newVal;
}
}
freq = new int[map.size()];
BLOCK_SIZE = (int) (Math.pow(2 * V, 2.0 / 3.0) + 1);
for (int i = 0; i < blockCache.length; i++) {
blockCache[i] = i / BLOCK_SIZE;
}
Arrays.sort(queries, 0, qSZ, new MoComparator());
final int ans[] = new int[qSZ];
int moLeft = -1, moRight = -1;
int currUpd = 0;
for (int i = 0; i < qSZ; i++) {
final Query q = queries[i];
while (currUpd < q.numUpdatesLess) {
final Update u = updates[currUpd];
update(u.idx, u.newVal);
currUpd++;
}
while (currUpd > q.numUpdatesLess) {
final Update u = updates[currUpd - 1];
update(u.idx, u.prevVal);
currUpd--;
}
while (moLeft < q.L - 1) {
moLeft++;
visit(eulerTour[moLeft]);
}
while (moLeft >= q.L) {
visit(eulerTour[moLeft]);
moLeft--;
}
while (moRight < q.R) {
moRight++;
visit(eulerTour[moRight]);
}
while (moRight > q.R) {
visit(eulerTour[moRight]);
moRight--;
}
if (q.LCA != -1) {
visit(q.LCA);
}
ans[q.id] = distinctCount;
if (q.LCA != -1) {
visit(q.LCA);
}
}
for (final int a : ans) {
System.out.println(a);
}
}
#location 23
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public static void main(String[] args) throws IOException {
final InputReader in = new InputReader(System.in);
V = in.readInt();
final int Q = in.readInt();
final Query queries[] = new Query[MAX];
final Update updates[] = new Update[MAX];
final Map<Integer, Integer> map = new HashMap<>(); // Used to compress the keys
adj = new ArrayList[V + 1];
for (int i = 1; i <= V; i++) {
adj[i] = new ArrayList<>();
}
val = new int[V + 1];
for (int i = 1; i <= V; i++) {
val[i] = in.readInt();
}
for (int i = 1; i <= V; i++) {
if (!map.containsKey(val[i])) {
map.put(val[i], map.size());
}
val[i] = map.get(val[i]);
}
final int currVal[] = new int[V + 1];
System.arraycopy(val, 0, currVal, 0, V + 1);
final int edges = V - 1;
for (int i = 0; i < edges; i++) {
final int u = in.readInt();
final int v = in.readInt();
adj[u].add(v);
adj[v].add(u);
}
start = new int[V + 1];
end = new int[V + 1];
eulerTour = new int[2 * (V + 1)];
level = new int[V + 1];
marked = new boolean[V + 1];
DP = new int[log(V) + 1][V + 1];
parent = new int[V + 1];
final int block[] = new int[2 * (V + 1)];
dfs(1, 0, 0);
binaryLift();
int numberOfQueries = 0, numberOfUpdates = 0;
for (int i = 0; i < Q; i++) {
if (in.readInt() == 1) { // Query
final int u = in.readInt();
final int v = in.readInt();
final Query q;
if (end[u] < start[v]) // Cousin Nodes
{
q = new Query(end[u], start[v], numberOfUpdates, LCA(u, v), numberOfQueries);
} else if (start[u] > end[v]) {
q = new Query(end[v], start[u], numberOfUpdates, LCA(u, v), numberOfQueries);
} else // Ancestors
{
q = new Query(Math.min(start[u], start[v]),
Math.max(start[u], start[v]),
numberOfUpdates,
-1,
numberOfQueries);
}
queries[numberOfQueries++] = q;
} else {
final int idx = in.readInt();
int newVal = in.readInt();
if (!map.containsKey(newVal)) {
map.put(newVal, map.size());
}
newVal = map.get(newVal);
updates[numberOfUpdates++] = new Update(idx, newVal, currVal[idx]);
currVal[idx] = newVal;
}
}
freq = new int[map.size()];
final int BLOCK_SIZE = (int) (Math.pow(2 * V, 2.0 / 3.0) + 1);
for (int i = 0; i < block.length; i++) {
block[i] = i / BLOCK_SIZE;
}
Arrays.sort(queries, 0, numberOfQueries, (o1, o2) -> {
if (block[o1.L] != block[o2.L]) {
return block[o1.L] - block[o2.L];
} else if (block[o1.R] != block[o2.R]) {
return block[o1.R] - block[o2.R];
} else {
return o1.updatesTillNow - o2.updatesTillNow;
}
});
final int ans[] = new int[numberOfQueries];
int moLeft = -1, moRight = -1;
int currentUpdateCount = 0;
for (int i = 0; i < numberOfQueries; i++) {
final Query query = queries[i];
while (currentUpdateCount < query.updatesTillNow) {
final Update update = updates[currentUpdateCount];
update(update.idx, update.newVal);
currentUpdateCount++;
}
while (currentUpdateCount > query.updatesTillNow) {
currentUpdateCount--;
final Update update = updates[currentUpdateCount];
update(update.idx, update.prevVal);
}
while (moLeft < query.L - 1) {
moLeft++;
visit(eulerTour[moLeft]);
}
while (moLeft >= query.L) {
visit(eulerTour[moLeft]);
moLeft--;
}
while (moRight < query.R) {
moRight++;
visit(eulerTour[moRight]);
}
while (moRight > query.R) {
visit(eulerTour[moRight]);
moRight--;
}
if (query.LCA != -1) {
visit(query.LCA);
}
ans[query.id] = distinctCount;
if (query.LCA != -1) {
visit(query.LCA);
}
}
final StringBuilder stringBuilder=new StringBuilder();
for (final int a : ans) {
stringBuilder.append(a).append('\n');
}
System.out.println(stringBuilder);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
@CacheEvict(value={"metaCaches","metaCache"},allEntries=true,beforeInvocation=true)
public void saveMeta(String type, String name, Integer mid) {
if (StringUtils.isNotBlank(type) && StringUtils.isNotBlank(name)){
MetaCond metaCond = new MetaCond();
metaCond.setName(name);
metaCond.setType(type);
List<MetaDomain> metas = metaDao.getMetasByCond(metaCond);
if (null == metas || metas.size() == 0){
MetaDomain metaDomain = new MetaDomain();
metaDomain.setName(name);
if (null != mid){
MetaDomain meta = metaDao.getMetaById(mid);
if (null != meta)
metaDomain.setMid(mid);
metaDao.updateMeta(metaDomain);
//更新原有的文章分类
contentService.updateCategory(meta.getName(), name);
} else {
metaDomain.setType(type);
metaDao.addMeta(metaDomain);
}
} else {
throw BusinessException.withErrorCode(ErrorConstant.Meta.META_IS_EXIST);
}
}
}
#location 19
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
@CacheEvict(value={"metaCaches","metaCache"},allEntries=true,beforeInvocation=true)
public void saveMeta(String type, String name, Integer mid) {
if (StringUtils.isNotBlank(type) && StringUtils.isNotBlank(name)){
MetaCond metaCond = new MetaCond();
metaCond.setName(name);
metaCond.setType(type);
List<MetaDomain> metas = metaDao.getMetasByCond(metaCond);
if (null == metas || metas.size() == 0){
MetaDomain metaDomain = new MetaDomain();
metaDomain.setName(name);
if (null != mid){
MetaDomain meta = metaDao.getMetaById(mid);
if (null != meta)
metaDomain.setMid(mid);
metaDao.updateMeta(metaDomain);
//更新原有的文章分类
if(meta !=null) {
contentService.updateCategory(meta.getName(), name);
}
} else {
metaDomain.setType(type);
metaDao.addMeta(metaDomain);
}
} else {
throw BusinessException.withErrorCode(ErrorConstant.Meta.META_IS_EXIST);
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public String indexDataset(Dataset dataset) {
logger.info("indexing dataset " + dataset.getId());
/**
* @todo should we use solrDocIdentifierDataset or
* IndexableObject.IndexableTypes.DATASET.getName() + "_" ?
*/
// String solrIdPublished = solrDocIdentifierDataset + dataset.getId();
String solrIdPublished = determinePublishedDatasetSolrDocId(dataset);
String solrIdDraftDataset = IndexableObject.IndexableTypes.DATASET.getName() + "_" + dataset.getId() + IndexableDataset.DatasetState.WORKING_COPY.getSuffix();
// String solrIdDeaccessioned = IndexableObject.IndexableTypes.DATASET.getName() + "_" + dataset.getId() + IndexableDataset.DatasetState.DEACCESSIONED.getSuffix();
String solrIdDeaccessioned = determineDeaccesionedDatasetId(dataset);
StringBuilder debug = new StringBuilder();
debug.append("\ndebug:\n");
int numReleasedVersions = 0;
List<DatasetVersion> versions = dataset.getVersions();
for (DatasetVersion datasetVersion : versions) {
Long versionDatabaseId = datasetVersion.getId();
String versionTitle = datasetVersion.getTitle();
String semanticVersion = datasetVersion.getSemanticVersion();
DatasetVersion.VersionState versionState = datasetVersion.getVersionState();
if (versionState.equals(DatasetVersion.VersionState.RELEASED)) {
/**
* @todo for performance, should just query this rather than
* iterating. Would need a new SQL query/method
*/
numReleasedVersions += 1;
}
debug.append("version found with database id " + versionDatabaseId + "\n");
debug.append("- title: " + versionTitle + "\n");
debug.append("- semanticVersion-VersionState: " + semanticVersion + "-" + versionState + "\n");
List<FileMetadata> fileMetadatas = datasetVersion.getFileMetadatas();
List<String> fileInfo = new ArrayList<>();
for (FileMetadata fileMetadata : fileMetadatas) {
fileInfo.add(fileMetadata.getDataFile().getId() + ":" + fileMetadata.getLabel());
}
int numFiles = 0;
if (fileMetadatas != null) {
numFiles = fileMetadatas.size();
}
debug.append("- files: " + numFiles + " " + fileInfo.toString() + "\n");
}
DatasetVersion latestVersion = dataset.getLatestVersion();
String latestVersionStateString = latestVersion.getVersionState().name();
DatasetVersion.VersionState latestVersionState = latestVersion.getVersionState();
DatasetVersion releasedVersion = dataset.getReleasedVersion();
if (releasedVersion.getVersionState().equals(DatasetVersion.VersionState.DEACCESSIONED)) {
logger.severe("WARNING: called dataset.getReleasedVersion() but version returned was deaccessioned. Look out for strange indexing results.");
}
Map<DatasetVersion.VersionState, Boolean> desiredCards = new LinkedHashMap<>();
/**
* @todo refactor all of this below and have a single method that takes
* the map of desired cards (which correspond to Solr documents) as one
* of the arguments and does all the operations necessary to achieve the
* desired state.
*/
StringBuilder results = new StringBuilder();
if (numReleasedVersions == 0) {
results.append("No published version, nothing will be indexed as ")
.append(solrIdPublished).append("\n");
if (latestVersionState.equals(DatasetVersion.VersionState.DRAFT)) {
desiredCards.put(DatasetVersion.VersionState.DRAFT, true);
IndexableDataset indexableDraftVersion = new IndexableDataset(latestVersion);
String indexDraftResult = addOrUpdateDataset(indexableDraftVersion);
results.append("The latest version is a working copy (latestVersionState: ")
.append(latestVersionStateString).append(") and indexing was attempted for ")
.append(solrIdDraftDataset).append(" (limited discoverability). Result: ")
.append(indexDraftResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, false);
String deleteDeaccessionedResult = removeDeaccessioned(dataset);
results.append("Draft exists, no need for deaccessioned version. Deletion attempted for ")
.append(solrIdDeaccessioned).append(" (and files). Result: ").append(deleteDeaccessionedResult);
desiredCards.put(DatasetVersion.VersionState.RELEASED, false);
/**
* @todo delete published?
*/
/**
* Desired state for existence of cards: {DRAFT=true,
* DEACCESSIONED=false, RELEASED=false}
*
* No published version, nothing will be indexed as dataset_17
*
* The latest version is a working copy (latestVersionState:
* DRAFT) and indexing was attempted for dataset_17_draft
* (limited discoverability). Result: indexed dataset 17 as
* dataset_17_draft. filesIndexed: [datafile_18_draft]
*
* Draft exists, no need for deaccessioned version. Deletion
* attempted for dataset_17_deaccessioned (and files). Result:
* Attempted to delete dataset_17_deaccessioned from Solr index.
* updateReponse was:
* {responseHeader={status=0,QTime=0}}Attempted to delete
* datafile_18_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else if (latestVersionState.equals(DatasetVersion.VersionState.DEACCESSIONED)) {
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, true);
IndexableDataset indexableDeaccessionedVersion = new IndexableDataset(latestVersion);
String indexDeaccessionedVersionResult = addOrUpdateDataset(indexableDeaccessionedVersion);
results.append("No draft version. Attempting to index as deaccessioned. Result: ").append(indexDeaccessionedVersionResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.RELEASED, false);
String deletePublishedResults = removePublished(dataset);
results.append("No published version. Attempting to delete traces of published version from index. Result: ").append(deletePublishedResults);
desiredCards.put(DatasetVersion.VersionState.DRAFT, false);
/**
* @todo delete drafts?
*/
/**
* Desired state for existence of cards: {DEACCESSIONED=true,
* RELEASED=false, DRAFT=false}
*
* No published version, nothing will be indexed as dataset_17
*
* No draft version. Attempting to index as deaccessioned.
* Result: indexed dataset 17 as dataset_17_deaccessioned.
* filesIndexed: []
*
* No published version. Attempting to delete traces of
* published version from index. Result: Attempted to delete
* dataset_17 from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=1}}Attempted to delete
* datafile_18 from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else {
return "No-op. Unexpected condition reached: No released version and latest version is neither draft nor deaccesioned";
}
} else if (numReleasedVersions > 0) {
results.append("Released versions found: ").append(numReleasedVersions)
.append(". Will attempt to index as ").append(solrIdPublished).append(" (discoverable by anonymous)\n");
if (latestVersionState.equals(DatasetVersion.VersionState.RELEASED)
|| latestVersionState.equals(DatasetVersion.VersionState.DEACCESSIONED)) {
desiredCards.put(DatasetVersion.VersionState.RELEASED, true);
IndexableDataset indexableReleasedVersion = new IndexableDataset(releasedVersion);
String indexReleasedVersionResult = addOrUpdateDataset(indexableReleasedVersion);
results.append("Attempted to index " + solrIdPublished).append(". Result: ").append(indexReleasedVersionResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.DRAFT, false);
List<String> solrDocIdsForDraftFilesToDelete = findSolrDocIdsForDraftFilesToDelete(dataset);
String deleteDraftDatasetVersionResult = removeSolrDocFromIndex(solrIdDraftDataset);
StringBuilder deleteDraftFilesResults = new StringBuilder();
for (String doomed : solrDocIdsForDraftFilesToDelete) {
String result = removeSolrDocFromIndex(doomed);
deleteDraftFilesResults.append(result);
}
results.append("The latest version is published. Attempting to delete drafts. Result: ")
.append(deleteDraftDatasetVersionResult).append(deleteDraftFilesResults).append("\n");
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, false);
String deleteDeaccessionedResult = removeDeaccessioned(dataset);
results.append("No need for deaccessioned version. Deletion attempted for ")
.append(solrIdDeaccessioned).append(". Result: ").append(deleteDeaccessionedResult);
/**
* Desired state for existence of cards: {RELEASED=true,
* DRAFT=false, DEACCESSIONED=false}
*
* Released versions found: 1. Will attempt to index as
* dataset_17 (discoverable by anonymous)
*
* Attempted to index dataset_17. Result: indexed dataset 17 as
* dataset_17. filesIndexed: [datafile_18]
*
* The latest version is published. Attempting to delete drafts.
* Result: Attempted to delete dataset_17_draft from Solr index.
* updateReponse was: {responseHeader={status=0,QTime=1}}
*
* No need for deaccessioned version. Deletion attempted for
* dataset_17_deaccessioned. Result: Attempted to delete
* dataset_17_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=1}}Attempted to delete
* datafile_18_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else if (latestVersionState.equals(DatasetVersion.VersionState.DRAFT)) {
IndexableDataset indexableDraftVersion = new IndexableDataset(latestVersion);
desiredCards.put(DatasetVersion.VersionState.DRAFT, true);
String indexDraftResult = addOrUpdateDataset(indexableDraftVersion);
results.append("The latest version is a working copy (latestVersionState: ")
.append(latestVersionStateString).append(") and will be indexed as ")
.append(solrIdDraftDataset).append(" (limited visibility). Result: ").append(indexDraftResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.RELEASED, true);
IndexableDataset indexableReleasedVersion = new IndexableDataset(releasedVersion);
String indexReleasedVersionResult = addOrUpdateDataset(indexableReleasedVersion);
results.append("There is a published version we will attempt to index. Result: ").append(indexReleasedVersionResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, false);
String deleteDeaccessionedResult = removeDeaccessioned(dataset);
results.append("No need for deaccessioned version. Deletion attempted for ")
.append(solrIdDeaccessioned).append(". Result: ").append(deleteDeaccessionedResult);
/**
* Desired state for existence of cards: {DRAFT=true,
* RELEASED=true, DEACCESSIONED=false}
*
* Released versions found: 1. Will attempt to index as
* dataset_17 (discoverable by anonymous)
*
* The latest version is a working copy (latestVersionState:
* DRAFT) and will be indexed as dataset_17_draft (limited
* visibility). Result: indexed dataset 17 as dataset_17_draft.
* filesIndexed: [datafile_18_draft]
*
* There is a published version we will attempt to index.
* Result: indexed dataset 17 as dataset_17. filesIndexed:
* [datafile_18]
*
* No need for deaccessioned version. Deletion attempted for
* dataset_17_deaccessioned. Result: Attempted to delete
* dataset_17_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=1}}Attempted to delete
* datafile_18_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else {
return "No-op. Unexpected condition reached: There is at least one published version but the latest version is neither published nor draft";
}
} else {
return "No-op. Unexpected condition reached: Negative number of released versions? Count was: " + numReleasedVersions;
}
}
#location 46
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public String indexDataset(Dataset dataset) {
logger.info("indexing dataset " + dataset.getId());
/**
* @todo should we use solrDocIdentifierDataset or
* IndexableObject.IndexableTypes.DATASET.getName() + "_" ?
*/
// String solrIdPublished = solrDocIdentifierDataset + dataset.getId();
String solrIdPublished = determinePublishedDatasetSolrDocId(dataset);
String solrIdDraftDataset = IndexableObject.IndexableTypes.DATASET.getName() + "_" + dataset.getId() + IndexableDataset.DatasetState.WORKING_COPY.getSuffix();
// String solrIdDeaccessioned = IndexableObject.IndexableTypes.DATASET.getName() + "_" + dataset.getId() + IndexableDataset.DatasetState.DEACCESSIONED.getSuffix();
String solrIdDeaccessioned = determineDeaccesionedDatasetId(dataset);
StringBuilder debug = new StringBuilder();
debug.append("\ndebug:\n");
int numReleasedVersions = 0;
List<DatasetVersion> versions = dataset.getVersions();
for (DatasetVersion datasetVersion : versions) {
Long versionDatabaseId = datasetVersion.getId();
String versionTitle = datasetVersion.getTitle();
String semanticVersion = datasetVersion.getSemanticVersion();
DatasetVersion.VersionState versionState = datasetVersion.getVersionState();
if (versionState.equals(DatasetVersion.VersionState.RELEASED)) {
/**
* @todo for performance, should just query this rather than
* iterating. Would need a new SQL query/method
*/
numReleasedVersions += 1;
}
debug.append("version found with database id " + versionDatabaseId + "\n");
debug.append("- title: " + versionTitle + "\n");
debug.append("- semanticVersion-VersionState: " + semanticVersion + "-" + versionState + "\n");
List<FileMetadata> fileMetadatas = datasetVersion.getFileMetadatas();
List<String> fileInfo = new ArrayList<>();
for (FileMetadata fileMetadata : fileMetadatas) {
fileInfo.add(fileMetadata.getDataFile().getId() + ":" + fileMetadata.getLabel());
}
int numFiles = 0;
if (fileMetadatas != null) {
numFiles = fileMetadatas.size();
}
debug.append("- files: " + numFiles + " " + fileInfo.toString() + "\n");
}
DatasetVersion latestVersion = dataset.getLatestVersion();
String latestVersionStateString = latestVersion.getVersionState().name();
DatasetVersion.VersionState latestVersionState = latestVersion.getVersionState();
DatasetVersion releasedVersion = dataset.getReleasedVersion();
if (releasedVersion != null) {
if (releasedVersion.getVersionState().equals(DatasetVersion.VersionState.DEACCESSIONED)) {
DatasetVersion lookupAttempt2 = releasedVersion.getMostRecentlyReleasedVersion();
String message = "WARNING: called dataset.getReleasedVersion() but version returned was deaccessioned (database id "
+ releasedVersion.getId()
+ "). (releasedVersion.getMostRecentlyReleasedVersion() returns database id "
+ lookupAttempt2.getId() + " so that method may be better?). Look out for strange indexing results.";
logger.severe(message);
debug.append(message);
}
}
Map<DatasetVersion.VersionState, Boolean> desiredCards = new LinkedHashMap<>();
/**
* @todo refactor all of this below and have a single method that takes
* the map of desired cards (which correspond to Solr documents) as one
* of the arguments and does all the operations necessary to achieve the
* desired state.
*/
StringBuilder results = new StringBuilder();
if (numReleasedVersions == 0) {
results.append("No published version, nothing will be indexed as ")
.append(solrIdPublished).append("\n");
if (latestVersionState.equals(DatasetVersion.VersionState.DRAFT)) {
desiredCards.put(DatasetVersion.VersionState.DRAFT, true);
IndexableDataset indexableDraftVersion = new IndexableDataset(latestVersion);
String indexDraftResult = addOrUpdateDataset(indexableDraftVersion);
results.append("The latest version is a working copy (latestVersionState: ")
.append(latestVersionStateString).append(") and indexing was attempted for ")
.append(solrIdDraftDataset).append(" (limited discoverability). Result: ")
.append(indexDraftResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, false);
String deleteDeaccessionedResult = removeDeaccessioned(dataset);
results.append("Draft exists, no need for deaccessioned version. Deletion attempted for ")
.append(solrIdDeaccessioned).append(" (and files). Result: ").append(deleteDeaccessionedResult);
desiredCards.put(DatasetVersion.VersionState.RELEASED, false);
/**
* @todo delete published?
*/
/**
* Desired state for existence of cards: {DRAFT=true,
* DEACCESSIONED=false, RELEASED=false}
*
* No published version, nothing will be indexed as dataset_17
*
* The latest version is a working copy (latestVersionState:
* DRAFT) and indexing was attempted for dataset_17_draft
* (limited discoverability). Result: indexed dataset 17 as
* dataset_17_draft. filesIndexed: [datafile_18_draft]
*
* Draft exists, no need for deaccessioned version. Deletion
* attempted for dataset_17_deaccessioned (and files). Result:
* Attempted to delete dataset_17_deaccessioned from Solr index.
* updateReponse was:
* {responseHeader={status=0,QTime=0}}Attempted to delete
* datafile_18_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else if (latestVersionState.equals(DatasetVersion.VersionState.DEACCESSIONED)) {
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, true);
IndexableDataset indexableDeaccessionedVersion = new IndexableDataset(latestVersion);
String indexDeaccessionedVersionResult = addOrUpdateDataset(indexableDeaccessionedVersion);
results.append("No draft version. Attempting to index as deaccessioned. Result: ").append(indexDeaccessionedVersionResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.RELEASED, false);
String deletePublishedResults = removePublished(dataset);
results.append("No published version. Attempting to delete traces of published version from index. Result: ").append(deletePublishedResults);
desiredCards.put(DatasetVersion.VersionState.DRAFT, false);
/**
* @todo delete drafts?
*/
/**
* Desired state for existence of cards: {DEACCESSIONED=true,
* RELEASED=false, DRAFT=false}
*
* No published version, nothing will be indexed as dataset_17
*
* No draft version. Attempting to index as deaccessioned.
* Result: indexed dataset 17 as dataset_17_deaccessioned.
* filesIndexed: []
*
* No published version. Attempting to delete traces of
* published version from index. Result: Attempted to delete
* dataset_17 from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=1}}Attempted to delete
* datafile_18 from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else {
return "No-op. Unexpected condition reached: No released version and latest version is neither draft nor deaccesioned";
}
} else if (numReleasedVersions > 0) {
results.append("Released versions found: ").append(numReleasedVersions)
.append(". Will attempt to index as ").append(solrIdPublished).append(" (discoverable by anonymous)\n");
if (latestVersionState.equals(DatasetVersion.VersionState.RELEASED)
|| latestVersionState.equals(DatasetVersion.VersionState.DEACCESSIONED)) {
desiredCards.put(DatasetVersion.VersionState.RELEASED, true);
IndexableDataset indexableReleasedVersion = new IndexableDataset(releasedVersion);
String indexReleasedVersionResult = addOrUpdateDataset(indexableReleasedVersion);
results.append("Attempted to index " + solrIdPublished).append(". Result: ").append(indexReleasedVersionResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.DRAFT, false);
List<String> solrDocIdsForDraftFilesToDelete = findSolrDocIdsForDraftFilesToDelete(dataset);
String deleteDraftDatasetVersionResult = removeSolrDocFromIndex(solrIdDraftDataset);
StringBuilder deleteDraftFilesResults = new StringBuilder();
for (String doomed : solrDocIdsForDraftFilesToDelete) {
String result = removeSolrDocFromIndex(doomed);
deleteDraftFilesResults.append(result);
}
results.append("The latest version is published. Attempting to delete drafts. Result: ")
.append(deleteDraftDatasetVersionResult).append(deleteDraftFilesResults).append("\n");
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, false);
String deleteDeaccessionedResult = removeDeaccessioned(dataset);
results.append("No need for deaccessioned version. Deletion attempted for ")
.append(solrIdDeaccessioned).append(". Result: ").append(deleteDeaccessionedResult);
/**
* Desired state for existence of cards: {RELEASED=true,
* DRAFT=false, DEACCESSIONED=false}
*
* Released versions found: 1. Will attempt to index as
* dataset_17 (discoverable by anonymous)
*
* Attempted to index dataset_17. Result: indexed dataset 17 as
* dataset_17. filesIndexed: [datafile_18]
*
* The latest version is published. Attempting to delete drafts.
* Result: Attempted to delete dataset_17_draft from Solr index.
* updateReponse was: {responseHeader={status=0,QTime=1}}
*
* No need for deaccessioned version. Deletion attempted for
* dataset_17_deaccessioned. Result: Attempted to delete
* dataset_17_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=1}}Attempted to delete
* datafile_18_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else if (latestVersionState.equals(DatasetVersion.VersionState.DRAFT)) {
IndexableDataset indexableDraftVersion = new IndexableDataset(latestVersion);
desiredCards.put(DatasetVersion.VersionState.DRAFT, true);
String indexDraftResult = addOrUpdateDataset(indexableDraftVersion);
results.append("The latest version is a working copy (latestVersionState: ")
.append(latestVersionStateString).append(") and will be indexed as ")
.append(solrIdDraftDataset).append(" (limited visibility). Result: ").append(indexDraftResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.RELEASED, true);
IndexableDataset indexableReleasedVersion = new IndexableDataset(releasedVersion);
String indexReleasedVersionResult = addOrUpdateDataset(indexableReleasedVersion);
results.append("There is a published version we will attempt to index. Result: ").append(indexReleasedVersionResult).append("\n");
desiredCards.put(DatasetVersion.VersionState.DEACCESSIONED, false);
String deleteDeaccessionedResult = removeDeaccessioned(dataset);
results.append("No need for deaccessioned version. Deletion attempted for ")
.append(solrIdDeaccessioned).append(". Result: ").append(deleteDeaccessionedResult);
/**
* Desired state for existence of cards: {DRAFT=true,
* RELEASED=true, DEACCESSIONED=false}
*
* Released versions found: 1. Will attempt to index as
* dataset_17 (discoverable by anonymous)
*
* The latest version is a working copy (latestVersionState:
* DRAFT) and will be indexed as dataset_17_draft (limited
* visibility). Result: indexed dataset 17 as dataset_17_draft.
* filesIndexed: [datafile_18_draft]
*
* There is a published version we will attempt to index.
* Result: indexed dataset 17 as dataset_17. filesIndexed:
* [datafile_18]
*
* No need for deaccessioned version. Deletion attempted for
* dataset_17_deaccessioned. Result: Attempted to delete
* dataset_17_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=1}}Attempted to delete
* datafile_18_deaccessioned from Solr index. updateReponse was:
* {responseHeader={status=0,QTime=0}}
*/
String result = getDesiredCardState(desiredCards) + results.toString() + debug.toString();
logger.info(result);
return result;
} else {
return "No-op. Unexpected condition reached: There is at least one published version but the latest version is neither published nor draft";
}
} else {
return "No-op. Unexpected condition reached: Negative number of released versions? Count was: " + numReleasedVersions;
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public String save() {
Command<Dataverse> cmd = null;
if ( editMode == EditMode.INFO ) {
dataverse.setOwner(ownerId != null ? dataverseService.find(ownerId) : null);
cmd = new CreateDataverseCommand(dataverse, session.getUser());
} else if ( editMode == EditMode.SETUP ) {
cmd = new UpdateDataverseCommand(dataverse, facets.getTarget(), session.getUser());
}
try {
dataverse = commandEngine.submit(cmd);
userNotificationService.sendNotification(session.getUser(), dataverse.getCreateDate(), Type.CREATEDV, dataverse.getId());
editMode = null;
} catch (CommandException ex) {
JH.addMessage(FacesMessage.SEVERITY_ERROR, ex.getMessage());
return null;
}
return "/dataverse.xhtml?id=" + dataverse.getId() +"&faces-redirect=true";
}
#location 13
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public String save() {
Command<Dataverse> cmd = null;
//TODO change to Create - for now the page is expecting INFO instead.
if (dataverse.getId() == null){
dataverse.setOwner(ownerId != null ? dataverseService.find(ownerId) : null);
cmd = new CreateDataverseCommand(dataverse, session.getUser());
} else {
cmd = new UpdateDataverseCommand(dataverse, facets.getTarget(), session.getUser());
}
try {
dataverse = commandEngine.submit(cmd);
userNotificationService.sendNotification(session.getUser(), dataverse.getCreateDate(), Type.CREATEDV, dataverse.getId());
editMode = null;
} catch (CommandException ex) {
JH.addMessage(FacesMessage.SEVERITY_ERROR, ex.getMessage());
return null;
}
return "/dataverse.xhtml?id=" + dataverse.getId() +"&faces-redirect=true";
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void init() {
if (dataset.getId() != null) { // view mode for a dataset
dataset = datasetService.find(dataset.getId());
editVersion = dataset.getLatestVersion();
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
ownerId = dataset.getOwner().getId();
} else if (ownerId != null) { // create mode for a new child dataset
editMode = EditMode.CREATE;
dataset.setOwner(dataverseService.find(ownerId));
dataset.setVersions(new ArrayList());
editVersion.setDataset(dataset);
editVersion.setFileMetadatas(new ArrayList());
editVersion.setDatasetFieldValues(null);
editVersion.setVersionState(VersionState.DRAFT);
editVersion.setDatasetFieldValues(editVersion.initDatasetFieldValues());
editVersion.setVersionNumber(new Long(1));
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
dataset.getVersions().add(editVersion);
} else {
throw new RuntimeException("On Dataset page without id or ownerid."); // improve error handling
}
setCitationFields(dataverseService.findCitationDatasetFieldsByDataverseId(ownerId));
setOtherMetadataFields(dataverseService.findOtherMetadataDatasetFieldsByDataverseId(ownerId));
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public void init() {
if (dataset.getId() != null) { // view mode for a dataset
dataset = datasetService.find(dataset.getId());
editVersion = dataset.getLatestVersion();
editVersion.setDatasetFieldValues(editVersion.initDatasetFieldValues());
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
ownerId = dataset.getOwner().getId();
} else if (ownerId != null) { // create mode for a new child dataset
editMode = EditMode.CREATE;
dataset.setOwner(dataverseService.find(ownerId));
dataset.setVersions(new ArrayList());
editVersion.setDataset(dataset);
editVersion.setFileMetadatas(new ArrayList());
editVersion.setDatasetFieldValues(null);
editVersion.setVersionState(VersionState.DRAFT);
editVersion.setDatasetFieldValues(editVersion.initDatasetFieldValues());
editVersion.setVersionNumber(new Long(1));
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
dataset.getVersions().add(editVersion);
} else {
throw new RuntimeException("On Dataset page without id or ownerid."); // improve error handling
}
setCitationFields(dataverseService.findCitationDatasetFieldsByDataverseId(ownerId));
setOtherMetadataFields(dataverseService.findOtherMetadataDatasetFieldsByDataverseId(ownerId));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void writeTo(DownloadInstance di, Class<?> clazz, Type type, Annotation[] annotation, MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, OutputStream outstream) throws IOException, WebApplicationException {
if (di.getDownloadInfo() != null && di.getDownloadInfo().getDataFile() != null) {
DataAccessRequest daReq = new DataAccessRequest();
DataFile sf = di.getDownloadInfo().getDataFile();
DataAccessObject accessObject = DataAccess.createDataAccessObject(sf, daReq);
if (accessObject != null) {
accessObject.open();
if (di.getConversionParam() != null) {
// Image Thumbnail conversion:
if (di.getConversionParam().equals("imageThumb")) {
accessObject = ImageThumbConverter.getImageThumb(sf, (FileAccessObject)accessObject);
}
/* No other download services are supported just yet.
else if (di.getConversionParam().equals("TermsOfUse")) {
accessObject = ExportTermsOfUse.export(sf.getStudy());
} else if (di.getConversionParam().equals("package")) {
if ("WithTermsOfUse".equals(di.getConversionParamValue())) {
accessObject = PackageWithTermsOfUse.repackage(sf, (FileAccessObject)accessObject);
}
}
*/
/* No special services for "Subsettable" files just yet:
if (sf.isTabularData()) {
if (di.getConversionParam().equals("noVarHeader")) {
accessObject.setNoVarHeader(Boolean.TRUE);
accessObject.setVarHeader(null);
} else if (di.getConversionParam().equals("fileFormat")) {
if ("original".equals(di.getConversionParamValue())) {
accessObject = StoredOriginalFile.retrieve(sf, (FileAccessObject)accessObject);
} else {
// Other format conversions:
String requestedMimeType = di.getServiceFormatType(di.getConversionParam(), di.getConversionParamValue());
if (requestedMimeType == null) {
// default mime type, in case real type is unknown;
// (this shouldn't happen in real life - but just in case):
requestedMimeType = "application/octet-stream";
}
accessObject =
DataFileConverter.performFormatConversion(
sf,
(FileAccessObject)accessObject,
di.getConversionParamValue(), requestedMimeType);
}
}
}
*/
if (accessObject == null) {
throw new WebApplicationException(Response.Status.SERVICE_UNAVAILABLE);
}
}
InputStream instream = accessObject.getInputStream();
if (instream != null) {
// headers:
String fileName = accessObject.getFileName();
String mimeType = accessObject.getMimeType();
// Provide both the "Content-disposition" and "Content-Type" headers,
// to satisfy the widest selection of browsers out there.
httpHeaders.add("Content-disposition", "attachment; filename=\"" + fileName + "\"");
httpHeaders.add("Content-Type", mimeType + "; name=\"" + fileName);
// (the httpHeaders map must be modified *before* writing any
// data in the output stream!
int bufsize;
byte [] bffr = new byte[4*8192];
// before writing out any bytes from the input stream, flush
// any extra content, such as the variable header for the
// subsettable files:
if (accessObject.getVarHeader() != null) {
outstream.write(accessObject.getVarHeader().getBytes());
}
while ((bufsize = instream.read(bffr)) != -1) {
outstream.write(bffr, 0, bufsize);
}
instream.close();
return;
}
}
}
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
#location 101
#vulnerability type RESOURCE_LEAK
|
#fixed code
@Override
public void writeTo(DownloadInstance di, Class<?> clazz, Type type, Annotation[] annotation, MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, OutputStream outstream) throws IOException, WebApplicationException {
if (di.getDownloadInfo() != null && di.getDownloadInfo().getDataFile() != null) {
DataAccessRequest daReq = new DataAccessRequest();
DataFile sf = di.getDownloadInfo().getDataFile();
DataAccessObject accessObject = DataAccess.createDataAccessObject(sf, daReq);
if (accessObject != null) {
accessObject.open();
if (di.getConversionParam() != null) {
// Image Thumbnail conversion:
if (di.getConversionParam().equals("imageThumb")) {
accessObject = ImageThumbConverter.getImageThumb(sf, (FileAccessObject)accessObject);
}
/* No other download services are supported just yet.
else if (di.getConversionParam().equals("TermsOfUse")) {
accessObject = ExportTermsOfUse.export(sf.getStudy());
} else if (di.getConversionParam().equals("package")) {
if ("WithTermsOfUse".equals(di.getConversionParamValue())) {
accessObject = PackageWithTermsOfUse.repackage(sf, (FileAccessObject)accessObject);
}
}
*/
if (sf.isTabularData()) {
if (di.getConversionParam().equals("noVarHeader")) {
accessObject.setNoVarHeader(Boolean.TRUE);
accessObject.setVarHeader(null);
} else if (di.getConversionParam().equals("fileFormat")) {
if ("original".equals(di.getConversionParamValue())) {
accessObject = StoredOriginalFile.retrieve(sf, (FileAccessObject)accessObject);
} /* else {
// Other format conversions:
String requestedMimeType = di.getServiceFormatType(di.getConversionParam(), di.getConversionParamValue());
if (requestedMimeType == null) {
// default mime type, in case real type is unknown;
// (this shouldn't happen in real life - but just in case):
requestedMimeType = "application/octet-stream";
}
accessObject =
DataFileConverter.performFormatConversion(
sf,
(FileAccessObject)accessObject,
di.getConversionParamValue(), requestedMimeType);
} */
}
}
if (accessObject == null) {
throw new WebApplicationException(Response.Status.SERVICE_UNAVAILABLE);
}
}
InputStream instream = accessObject.getInputStream();
if (instream != null) {
// headers:
String fileName = accessObject.getFileName();
String mimeType = accessObject.getMimeType();
// Provide both the "Content-disposition" and "Content-Type" headers,
// to satisfy the widest selection of browsers out there.
httpHeaders.add("Content-disposition", "attachment; filename=\"" + fileName + "\"");
httpHeaders.add("Content-Type", mimeType + "; name=\"" + fileName);
// (the httpHeaders map must be modified *before* writing any
// data in the output stream!
int bufsize;
byte [] bffr = new byte[4*8192];
// before writing out any bytes from the input stream, flush
// any extra content, such as the variable header for the
// subsettable files:
if (accessObject.getVarHeader() != null) {
outstream.write(accessObject.getVarHeader().getBytes());
}
while ((bufsize = instream.read(bffr)) != -1) {
outstream.write(bffr, 0, bufsize);
}
instream.close();
return;
}
}
}
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public String indexDataset(Dataset dataset) {
logger.info("indexing dataset " + dataset.getId());
Collection<SolrInputDocument> docs = new ArrayList<>();
List<String> dataversePathSegmentsAccumulator = new ArrayList<>();
// List<String> dataverseSegments = null;
// try {
// dataverseSegments = findPathSegments(dataset.getOwner(), dataversePathSegmentsAccumulator);
// } catch (Exception ex) {
// logger.info("failed to find dataverseSegments for dataversePaths for " + SearchFields.SUBTREE + ": " + ex);
// }
// List<String> dataversePaths = getDataversePathsFromSegments(dataverseSegments);
SolrInputDocument solrInputDocument = new SolrInputDocument();
solrInputDocument.addField(SearchFields.ID, "dataset_" + dataset.getId());
solrInputDocument.addField(SearchFields.ENTITY_ID, dataset.getId());
solrInputDocument.addField(SearchFields.TYPE, "datasets");
/**
* @todo: should we assign a dataset title to name like this?
*/
if (dataset.getLatestVersion() != null) {
if (dataset.getLatestVersion().getMetadata() != null) {
if (dataset.getLatestVersion().getMetadata().getAuthorsStr() != null) {
if (!dataset.getLatestVersion().getMetadata().getAuthorsStr().isEmpty()) {
solrInputDocument.addField(SearchFields.AUTHOR_STRING, dataset.getLatestVersion().getMetadata().getAuthorsStr());
} else {
logger.info("author string was empty");
}
} else {
logger.info("dataset.getLatestVersion().getMetadata().getAuthorsStr() was null");
}
if (dataset.getLatestVersion().getMetadata().getTitle() != null) {
if (!dataset.getLatestVersion().getMetadata().getTitle().isEmpty()) {
solrInputDocument.addField(SearchFields.TITLE, dataset.getLatestVersion().getMetadata().getTitle());
}
else {
logger.info("title was empty");
}
}
if (dataset.getLatestVersion().getMetadata().getProductionDate() != null) {
/**
* @todo: clean this up, DRY
*/
SimpleDateFormat inputDateyyyy = new SimpleDateFormat("yyyy", Locale.ENGLISH);
try {
Date citationDate = inputDateyyyy.parse(dataset.getLatestVersion().getMetadata().getProductionDate());
solrInputDocument.addField(SearchFields.CITATION_DATE, citationDate);
SimpleDateFormat yearOnly = new SimpleDateFormat("yyyy");
String citationYear = yearOnly.format(citationDate);
solrInputDocument.addField(SearchFields.CITATION_YEAR, Integer.parseInt(citationYear));
} catch (Exception ex) {
logger.info("Can't convert " + dataset.getLatestVersion().getMetadata().getProductionDate() + " to a YYYY date from dataset " + dataset.getId());
}
SimpleDateFormat inputDateyyyyMMdd = new SimpleDateFormat("yyyy-MM-dd", Locale.ENGLISH);
try {
Date citationDate = inputDateyyyyMMdd.parse(dataset.getLatestVersion().getMetadata().getProductionDate());
solrInputDocument.addField(SearchFields.CITATION_DATE, citationDate);
SimpleDateFormat yearOnly = new SimpleDateFormat("yyyy");
String citationYear = yearOnly.format(citationDate);
solrInputDocument.addField(SearchFields.CITATION_YEAR, Integer.parseInt(citationYear));
} catch (Exception ex) {
logger.info("Can't convert " + dataset.getLatestVersion().getMetadata().getProductionDate() + " to a YYYY-MM-DD date from dataset " + dataset.getId());
}
}
else {
logger.info("dataset.getLatestVersion().getMetadata().getTitle() was null");
}
} else {
logger.info("dataset.getLatestVersion().getMetadata() was null");
}
} else {
logger.info("dataset.getLatestVersion() was null");
}
/**
* @todo: don't use distributor for category. testing facets
*/
// solrInputDocument.addField(SearchFields.CATEGORY, dataset.getDistributor());
if (dataset.getDescription() != null && !dataset.getDescription().isEmpty()) {
solrInputDocument.addField(SearchFields.DESCRIPTION, dataset.getDescription());
}
// solrInputDocument.addField(SearchFields.SUBTREE, dataversePaths);
solrInputDocument.addField(SearchFields.ORIGINAL_DATAVERSE, dataset.getOwner().getName());
solrInputDocument.addField(SearchFields.PARENT_TYPE, "datasets");
solrInputDocument.addField(SearchFields.PARENT_ID, dataset.getOwner().getId());
solrInputDocument.addField(SearchFields.PARENT_NAME, dataset.getOwner().getName());
docs.add(solrInputDocument);
List<DataFile> files = dataset.getFiles();
for (DataFile dataFile : files) {
SolrInputDocument datafileSolrInputDocument = new SolrInputDocument();
datafileSolrInputDocument.addField(SearchFields.ID, "datafile_" + dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.ENTITY_ID, dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.TYPE, "files");
datafileSolrInputDocument.addField(SearchFields.NAME, dataFile.getName());
datafileSolrInputDocument.addField(SearchFields.FILE_TYPE, dataFile.getContentType());
datafileSolrInputDocument.addField(SearchFields.FILE_TYPE_GROUP, dataFile.getContentType().split("/")[0]);
// datafileSolrInputDocument.addField(SearchFields.SUBTREE, dataversePaths);
datafileSolrInputDocument.addField(SearchFields.ORIGINAL_DATAVERSE, dataFile.getOwner().getOwner().getName());
datafileSolrInputDocument.addField(SearchFields.PARENT_TYPE, "datasets");
// datafileSolrInputDocument.addField(SearchFields.PARENT_NAME, dataFile.getDataset().getTitle());
datafileSolrInputDocument.addField(SearchFields.PARENT_ID, dataFile.getOwner().getId());
if (!dataFile.getOwner().getLatestVersion().getMetadata().getTitle().isEmpty()) {
datafileSolrInputDocument.addField(SearchFields.PARENT_NAME, dataFile.getOwner().getLatestVersion().getMetadata().getTitle());
}
docs.add(datafileSolrInputDocument);
}
/**
* @todo allow for configuration of hostname and port
*/
SolrServer server = new HttpSolrServer("http://localhost:8983/solr/");
try {
server.add(docs);
} catch (SolrServerException | IOException ex) {
return ex.toString();
}
try {
server.commit();
} catch (SolrServerException | IOException ex) {
return ex.toString();
}
return "indexed dataset " + dataset.getId(); // + ":" + dataset.getTitle();
}
#location 81
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public String indexDataset(Dataset dataset) {
logger.info("indexing dataset " + dataset.getId());
Collection<SolrInputDocument> docs = new ArrayList<>();
List<String> dataversePathSegmentsAccumulator = new ArrayList<>();
List<String> dataverseSegments = null;
try {
dataverseSegments = findPathSegments(dataset.getOwner(), dataversePathSegmentsAccumulator);
} catch (Exception ex) {
logger.info("failed to find dataverseSegments for dataversePaths for " + SearchFields.SUBTREE + ": " + ex);
}
List<String> dataversePaths = getDataversePathsFromSegments(dataverseSegments);
SolrInputDocument solrInputDocument = new SolrInputDocument();
solrInputDocument.addField(SearchFields.ID, "dataset_" + dataset.getId());
solrInputDocument.addField(SearchFields.ENTITY_ID, dataset.getId());
solrInputDocument.addField(SearchFields.TYPE, "datasets");
/**
* @todo: should we assign a dataset title to name like this?
*/
if (dataset.getLatestVersion() != null) {
if (dataset.getLatestVersion().getMetadata() != null) {
if (dataset.getLatestVersion().getMetadata().getAuthorsStr() != null) {
if (!dataset.getLatestVersion().getMetadata().getAuthorsStr().isEmpty()) {
solrInputDocument.addField(SearchFields.AUTHOR_STRING, dataset.getLatestVersion().getMetadata().getAuthorsStr());
} else {
logger.info("author string was empty");
}
} else {
logger.info("dataset.getLatestVersion().getMetadata().getAuthorsStr() was null");
}
if (dataset.getLatestVersion().getMetadata().getTitle() != null) {
if (!dataset.getLatestVersion().getMetadata().getTitle().isEmpty()) {
solrInputDocument.addField(SearchFields.TITLE, dataset.getLatestVersion().getMetadata().getTitle());
}
else {
logger.info("title was empty");
}
}
if (dataset.getLatestVersion().getMetadata().getProductionDate() != null) {
/**
* @todo: clean this up, DRY
*/
SimpleDateFormat inputDateyyyy = new SimpleDateFormat("yyyy", Locale.ENGLISH);
try {
Date citationDate = inputDateyyyy.parse(dataset.getLatestVersion().getMetadata().getProductionDate());
solrInputDocument.addField(SearchFields.CITATION_DATE, citationDate);
SimpleDateFormat yearOnly = new SimpleDateFormat("yyyy");
String citationYear = yearOnly.format(citationDate);
solrInputDocument.addField(SearchFields.CITATION_YEAR, Integer.parseInt(citationYear));
} catch (Exception ex) {
logger.info("Can't convert " + dataset.getLatestVersion().getMetadata().getProductionDate() + " to a YYYY date from dataset " + dataset.getId());
}
SimpleDateFormat inputDateyyyyMMdd = new SimpleDateFormat("yyyy-MM-dd", Locale.ENGLISH);
try {
Date citationDate = inputDateyyyyMMdd.parse(dataset.getLatestVersion().getMetadata().getProductionDate());
solrInputDocument.addField(SearchFields.CITATION_DATE, citationDate);
SimpleDateFormat yearOnly = new SimpleDateFormat("yyyy");
String citationYear = yearOnly.format(citationDate);
solrInputDocument.addField(SearchFields.CITATION_YEAR, Integer.parseInt(citationYear));
} catch (Exception ex) {
logger.info("Can't convert " + dataset.getLatestVersion().getMetadata().getProductionDate() + " to a YYYY-MM-DD date from dataset " + dataset.getId());
}
}
else {
logger.info("dataset.getLatestVersion().getMetadata().getTitle() was null");
}
} else {
logger.info("dataset.getLatestVersion().getMetadata() was null");
}
} else {
logger.info("dataset.getLatestVersion() was null");
}
/**
* @todo: don't use distributor for category. testing facets
*/
// solrInputDocument.addField(SearchFields.CATEGORY, dataset.getDistributor());
if (dataset.getDescription() != null && !dataset.getDescription().isEmpty()) {
solrInputDocument.addField(SearchFields.DESCRIPTION, dataset.getDescription());
}
solrInputDocument.addField(SearchFields.SUBTREE, dataversePaths);
solrInputDocument.addField(SearchFields.ORIGINAL_DATAVERSE, dataset.getOwner().getName());
solrInputDocument.addField(SearchFields.PARENT_TYPE, "datasets");
solrInputDocument.addField(SearchFields.PARENT_ID, dataset.getOwner().getId());
solrInputDocument.addField(SearchFields.PARENT_NAME, dataset.getOwner().getName());
docs.add(solrInputDocument);
List<DataFile> files = dataset.getFiles();
for (DataFile dataFile : files) {
SolrInputDocument datafileSolrInputDocument = new SolrInputDocument();
datafileSolrInputDocument.addField(SearchFields.ID, "datafile_" + dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.ENTITY_ID, dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.TYPE, "files");
datafileSolrInputDocument.addField(SearchFields.NAME, dataFile.getName());
datafileSolrInputDocument.addField(SearchFields.FILE_TYPE, dataFile.getContentType());
datafileSolrInputDocument.addField(SearchFields.FILE_TYPE_GROUP, dataFile.getContentType().split("/")[0]);
datafileSolrInputDocument.addField(SearchFields.SUBTREE, dataversePaths);
datafileSolrInputDocument.addField(SearchFields.ORIGINAL_DATAVERSE, dataFile.getOwner().getOwner().getName());
datafileSolrInputDocument.addField(SearchFields.PARENT_TYPE, "datasets");
// datafileSolrInputDocument.addField(SearchFields.PARENT_NAME, dataFile.getDataset().getTitle());
datafileSolrInputDocument.addField(SearchFields.PARENT_ID, dataFile.getOwner().getId());
if (!dataFile.getOwner().getLatestVersion().getMetadata().getTitle().isEmpty()) {
datafileSolrInputDocument.addField(SearchFields.PARENT_NAME, dataFile.getOwner().getLatestVersion().getMetadata().getTitle());
}
docs.add(datafileSolrInputDocument);
}
/**
* @todo allow for configuration of hostname and port
*/
SolrServer server = new HttpSolrServer("http://localhost:8983/solr/");
try {
server.add(docs);
} catch (SolrServerException | IOException ex) {
return ex.toString();
}
try {
server.commit();
} catch (SolrServerException | IOException ex) {
return ex.toString();
}
return "indexed dataset " + dataset.getId(); // + ":" + dataset.getTitle();
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public String releaseDraft() {
if (releaseRadio == 1) {
dataset.getEditVersion().setVersionNumber(new Long(dataset.getReleasedVersion().getVersionNumber().intValue() + 1));
dataset.getEditVersion().setMinorVersionNumber(new Long(0));
} else {
dataset.getEditVersion().setVersionNumber(new Long(dataset.getReleasedVersion().getVersionNumber().intValue()));
dataset.getEditVersion().setMinorVersionNumber(new Long(dataset.getReleasedVersion().getMinorVersionNumber().intValue() + 1));
}
return releaseDataset(false);
}
#location 6
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public String releaseDraft() {
if (releaseRadio == 1) {
return releaseDataset(false);
} else {
return releaseDataset(true);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public void init() {
if (dataset.getId() != null) { // view mode for a dataset
dataset = datasetService.find(dataset.getId());
editVersion = dataset.getLatestVersion();
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
ownerId = dataset.getOwner().getId();
} else if (ownerId != null) { // create mode for a new child dataset
editMode = EditMode.CREATE;
dataset.setOwner(dataverseService.find(ownerId));
dataset.setVersions(new ArrayList());
editVersion.setDataset(dataset);
editVersion.setFileMetadatas(new ArrayList());
editVersion.setDatasetFieldValues(null);
editVersion.setVersionState(VersionState.DRAFT);
editVersion.setDatasetFieldValues(editVersion.initDatasetFieldValues());
editVersion.setVersionNumber(new Long(1));
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
dataset.getVersions().add(editVersion);
} else {
throw new RuntimeException("On Dataset page without id or ownerid."); // improve error handling
}
setCitationFields(dataverseService.findCitationDatasetFieldsByDataverseId(ownerId));
setOtherMetadataFields(dataverseService.findOtherMetadataDatasetFieldsByDataverseId(ownerId));
}
#location 8
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public void init() {
if (dataset.getId() != null) { // view mode for a dataset
dataset = datasetService.find(dataset.getId());
editVersion = dataset.getLatestVersion();
editVersion.setDatasetFieldValues(editVersion.initDatasetFieldValues());
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
ownerId = dataset.getOwner().getId();
} else if (ownerId != null) { // create mode for a new child dataset
editMode = EditMode.CREATE;
dataset.setOwner(dataverseService.find(ownerId));
dataset.setVersions(new ArrayList());
editVersion.setDataset(dataset);
editVersion.setFileMetadatas(new ArrayList());
editVersion.setDatasetFieldValues(null);
editVersion.setVersionState(VersionState.DRAFT);
editVersion.setDatasetFieldValues(editVersion.initDatasetFieldValues());
editVersion.setVersionNumber(new Long(1));
editValues = editVersion.getDatasetFieldValues();
citationValues = extractValues(editValues, true);
otherMetadataValues = extractValues(editValues, false);
dataset.getVersions().add(editVersion);
} else {
throw new RuntimeException("On Dataset page without id or ownerid."); // improve error handling
}
setCitationFields(dataverseService.findCitationDatasetFieldsByDataverseId(ownerId));
setOtherMetadataFields(dataverseService.findOtherMetadataDatasetFieldsByDataverseId(ownerId));
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public String releaseDraft() {
if (releaseRadio == 1) {
dataset.getEditVersion().setVersionNumber(new Long(dataset.getReleasedVersion().getVersionNumber().intValue() + 1));
dataset.getEditVersion().setMinorVersionNumber(new Long(0));
} else {
dataset.getEditVersion().setVersionNumber(new Long(dataset.getReleasedVersion().getVersionNumber().intValue()));
dataset.getEditVersion().setMinorVersionNumber(new Long(dataset.getReleasedVersion().getMinorVersionNumber().intValue() + 1));
}
return releaseDataset(false);
}
#location 3
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public String releaseDraft() {
if (releaseRadio == 1) {
return releaseDataset(false);
} else {
return releaseDataset(true);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Override
public void deleteContainer(String uri, AuthCredentials authCredentials, SwordConfiguration sc) throws SwordError, SwordServerException, SwordAuthException {
// swordConfiguration = (SwordConfigurationImpl) sc;
DataverseUser vdcUser = swordAuth.auth(authCredentials);
logger.fine("deleteContainer called with url: " + uri);
urlManager.processUrl(uri);
logger.fine("original url: " + urlManager.getOriginalUrl());
if (!"edit".equals(urlManager.getServlet())) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "edit servlet expected, not " + urlManager.getServlet());
}
String targetType = urlManager.getTargetType();
if (!targetType.isEmpty()) {
logger.fine("operating on target type: " + urlManager.getTargetType());
// StudyServiceLocal studyService;
Context ctx;
try {
ctx = new InitialContext();
// studyService = (StudyServiceLocal) ctx.lookup("java:comp/env/studyService");
} catch (NamingException ex) {
logger.info("problem looking up studyService");
throw new SwordServerException("problem looking up studyService");
}
if ("dataverse".equals(targetType)) {
/**
* @todo throw SWORD error recommending use of 4.0 "native" API
* to delete dataverses
*/
// String dvAlias = urlManager.getTargetIdentifier();
// List<VDC> userVDCs = vdcService.getUserVDCs(vdcUser.getId());
// VDC dataverseToEmpty = vdcService.findByAlias(dvAlias);
// if (dataverseToEmpty != null) {
// if ("Admin".equals(vdcUser.getNetworkRole().getName())) {
// if (swordConfiguration.allowNetworkAdminDeleteAllStudies()) {
//
// /**
// * @todo: this is the deleteContainer method...
// * should move this to some sort of "emptyContainer"
// * method
// */
// // curl --insecure -s -X DELETE https://sword:sword@localhost:8181/dvn/api/data-deposit/v1/swordv2/edit/dataverse/sword
// Collection<Study> studies = dataverseToEmpty.getOwnedStudies();
// for (Study study : studies) {
// logger.info("In dataverse " + dataverseToEmpty.getAlias() + " about to delete study id " + study.getId());
// studyService.deleteStudy(study.getId());
// }
// } else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "DELETE on a dataverse is not supported");
// }
// } else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Role was " + vdcUser.getNetworkRole().getName() + " but admin required.");
// }
// } else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Couldn't find dataverse to delete from URL: " + uri);
// }
} else if ("study".equals(targetType)) {
String globalId = urlManager.getTargetIdentifier();
logger.info("globalId: " + globalId);
if (globalId != null) {
Dataset study = null;
try {
/**
* @todo don't hard code this, obviously. In DVN 3.x we
* had a method for
* studyService.getStudyByGlobalId(globalId)
*/
// study = studyService.getStudyByGlobalId(globalId);
long databaseIdForRoastingAtHomeDataset = 10;
study = datasetService.find(databaseIdForRoastingAtHomeDataset);
} catch (EJBException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on global id (" + globalId + ") in URL: " + uri);
}
if (study != null) {
Dataverse dvThatOwnsStudy = study.getOwner();
if (swordAuth.hasAccessToModifyDataverse(vdcUser, dvThatOwnsStudy)) {
DatasetVersion.VersionState studyState = study.getLatestVersion().getVersionState();
if (studyState.equals(DatasetVersion.VersionState.DRAFT)) {
/**
* @todo use getGlobalId when it's available
*/
logger.info("destroying working copy version of study " + study.getIdentifier());
/**
* @todo in DVN 3.x we had a convenient
* destroyWorkingCopyVersion method but the
* DeleteDatasetCommand is pretty scary... what
* if a released study has a new draft version?
* What we need is a
* DeleteDatasetVersionCommand, I suppose...
*/
// studyService.destroyWorkingCopyVersion(study.getLatestVersion().getId());
try {
engineSvc.submit(new DeleteDatasetCommand(study, vdcUser));
/**
* @todo re-index after deletion
* https://redmine.hmdc.harvard.edu/issues/3544#note-21
*/
logger.info("dataset deleted");
} catch (CommandExecutionException ex) {
// internal error
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Can't delete dataset: " + ex.getMessage());
} catch (CommandException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Can't delete dataset: " + ex.getMessage());
}
/**
* @todo think about how to handle non-drafts
*/
} else if (studyState.equals(DatasetVersion.VersionState.RELEASED)) {
// logger.fine("deaccessioning latest version of study " + study.getGlobalId());
// studyService.deaccessionStudy(study.getLatestVersion());
} else if (studyState.equals(DatasetVersion.VersionState.DEACCESSIONED)) {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Lastest version of study " + study.getGlobalId() + " has already been deaccessioned.");
} else if (studyState.equals(DatasetVersion.VersionState.ARCHIVED)) {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Lastest version of study " + study.getGlobalId() + " has been archived and can not be deleted or deaccessioned.");
} else if (studyState.equals(DatasetVersion.VersionState.IN_REVIEW)) {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Lastest version of study " + study.getGlobalId() + " is in review and can not be deleted or deaccessioned.");
} else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Operation not valid for study " + study.getGlobalId() + " in state " + studyState);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "User " + vdcUser.getUserName() + " is not authorized to modify " + dvThatOwnsStudy.getAlias());
}
} else {
throw new SwordError(404);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study to delete from URL: " + uri);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unsupported delete target in URL:" + uri);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "No target for deletion specified");
}
}
#location 78
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
public void deleteContainer(String uri, AuthCredentials authCredentials, SwordConfiguration sc) throws SwordError, SwordServerException, SwordAuthException {
// swordConfiguration = (SwordConfigurationImpl) sc;
DataverseUser vdcUser = swordAuth.auth(authCredentials);
logger.fine("deleteContainer called with url: " + uri);
urlManager.processUrl(uri);
logger.fine("original url: " + urlManager.getOriginalUrl());
if (!"edit".equals(urlManager.getServlet())) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "edit servlet expected, not " + urlManager.getServlet());
}
String targetType = urlManager.getTargetType();
if (!targetType.isEmpty()) {
logger.fine("operating on target type: " + urlManager.getTargetType());
// StudyServiceLocal studyService;
Context ctx;
try {
ctx = new InitialContext();
// studyService = (StudyServiceLocal) ctx.lookup("java:comp/env/studyService");
} catch (NamingException ex) {
logger.info("problem looking up studyService");
throw new SwordServerException("problem looking up studyService");
}
if ("dataverse".equals(targetType)) {
/**
* @todo throw SWORD error recommending use of 4.0 "native" API
* to delete dataverses
*/
// String dvAlias = urlManager.getTargetIdentifier();
// List<VDC> userVDCs = vdcService.getUserVDCs(vdcUser.getId());
// VDC dataverseToEmpty = vdcService.findByAlias(dvAlias);
// if (dataverseToEmpty != null) {
// if ("Admin".equals(vdcUser.getNetworkRole().getName())) {
// if (swordConfiguration.allowNetworkAdminDeleteAllStudies()) {
//
// /**
// * @todo: this is the deleteContainer method...
// * should move this to some sort of "emptyContainer"
// * method
// */
// // curl --insecure -s -X DELETE https://sword:sword@localhost:8181/dvn/api/data-deposit/v1/swordv2/edit/dataverse/sword
// Collection<Study> studies = dataverseToEmpty.getOwnedStudies();
// for (Study study : studies) {
// logger.info("In dataverse " + dataverseToEmpty.getAlias() + " about to delete study id " + study.getId());
// studyService.deleteStudy(study.getId());
// }
// } else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "DELETE on a dataverse is not supported");
// }
// } else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Role was " + vdcUser.getNetworkRole().getName() + " but admin required.");
// }
// } else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Couldn't find dataverse to delete from URL: " + uri);
// }
} else if ("study".equals(targetType)) {
String globalId = urlManager.getTargetIdentifier();
logger.info("globalId: " + globalId);
if (globalId != null) {
Dataset study = null;
try {
study = datasetService.findByGlobalId(globalId);
} catch (EJBException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on global id (" + globalId + ") in URL: " + uri);
}
if (study != null) {
Dataverse dvThatOwnsStudy = study.getOwner();
if (swordAuth.hasAccessToModifyDataverse(vdcUser, dvThatOwnsStudy)) {
DatasetVersion.VersionState studyState = study.getLatestVersion().getVersionState();
if (studyState.equals(DatasetVersion.VersionState.DRAFT)) {
logger.info("destroying working copy version of study " + study.getGlobalId());
/**
* @todo in DVN 3.x we had a convenient
* destroyWorkingCopyVersion method but the
* DeleteDatasetCommand is pretty scary... what
* if a released study has a new draft version?
* What we need is a
* DeleteDatasetVersionCommand, I suppose...
*/
// studyService.destroyWorkingCopyVersion(study.getLatestVersion().getId());
try {
engineSvc.submit(new DeleteDatasetCommand(study, vdcUser));
/**
* @todo re-index after deletion
* https://redmine.hmdc.harvard.edu/issues/3544#note-21
*/
logger.info("dataset deleted");
} catch (CommandExecutionException ex) {
// internal error
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Can't delete dataset: " + ex.getMessage());
} catch (CommandException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Can't delete dataset: " + ex.getMessage());
}
/**
* @todo think about how to handle non-drafts
*/
} else if (studyState.equals(DatasetVersion.VersionState.RELEASED)) {
// logger.fine("deaccessioning latest version of study " + study.getGlobalId());
// studyService.deaccessionStudy(study.getLatestVersion());
} else if (studyState.equals(DatasetVersion.VersionState.DEACCESSIONED)) {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Lastest version of study " + study.getGlobalId() + " has already been deaccessioned.");
} else if (studyState.equals(DatasetVersion.VersionState.ARCHIVED)) {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Lastest version of study " + study.getGlobalId() + " has been archived and can not be deleted or deaccessioned.");
} else if (studyState.equals(DatasetVersion.VersionState.IN_REVIEW)) {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Lastest version of study " + study.getGlobalId() + " is in review and can not be deleted or deaccessioned.");
} else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Operation not valid for study " + study.getGlobalId() + " in state " + studyState);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "User " + vdcUser.getUserName() + " is not authorized to modify " + dvThatOwnsStudy.getAlias());
}
} else {
throw new SwordError(404);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study to delete from URL: " + uri);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unsupported delete target in URL:" + uri);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "No target for deletion specified");
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public String indexDataset(Dataset dataset) {
logger.info("indexing dataset " + dataset.getId());
Collection<SolrInputDocument> docs = new ArrayList<>();
List<String> dataversePathSegmentsAccumulator = new ArrayList<>();
List<String> dataverseSegments = new ArrayList<>();
try {
dataverseSegments = findPathSegments(dataset.getOwner(), dataversePathSegmentsAccumulator);
} catch (Exception ex) {
logger.info("failed to find dataverseSegments for dataversePaths for " + SearchFields.SUBTREE + ": " + ex);
}
List<String> dataversePaths = getDataversePathsFromSegments(dataverseSegments);
SolrInputDocument solrInputDocument = new SolrInputDocument();
solrInputDocument.addField(SearchFields.ID, "dataset_" + dataset.getId());
solrInputDocument.addField(SearchFields.ENTITY_ID, dataset.getId());
solrInputDocument.addField(SearchFields.TYPE, "datasets");
if (dataset.isReleased()) {
solrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataset.getPublicationDate());
solrInputDocument.addField(SearchFields.PERMS, publicGroupString);
} else if (dataset.getOwner().getCreator() != null) {
/**
* todo why is dataset.getCreateDate() null? For now I guess we'll
* use the createDate of it's parent dataverse?! https://redmine.hmdc.harvard.edu/issues/3806
*/
// solrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataset.getCreateDate());
solrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataset.getOwner().getCreateDate());
solrInputDocument.addField(SearchFields.PERMS, groupPerUserPrefix + dataset.getOwner().getCreator().getId());
/**
* @todo: replace this fake version of granting users access to
* dataverses with the real thing, when it's available in the app
*/
if (dataset.getOwner().getCreator().getUserName().equals("pete")) {
// figure out if cathy is around
DataverseUser cathy = dataverseUserServiceBean.findByUserName("cathy");
if (cathy != null) {
// let cathy see all of pete's dataverses
solrInputDocument.addField(SearchFields.PERMS, groupPerUserPrefix + cathy.getId());
}
}
} else {
/**
* todo why is dataset.getCreateDate() null? For now I guess we'll
* use the createDate of it's parent dataverse?! https://redmine.hmdc.harvard.edu/issues/3806
*/
// solrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataset.getCreateDate());
solrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataset.getOwner().getCreateDate());
/**
* @todo: remove this once everyone has dropped their database and
* won't get NPE's from dataverse.getCreator
*/
solrInputDocument.addField(SearchFields.PERMS, npeGetCreator);
}
/**
* @todo: remove this fake "has access to all data" group
*/
solrInputDocument.addField(SearchFields.PERMS, groupPrefix + tmpNsaGroupId);
addDatasetReleaseDateToSolrDoc(solrInputDocument, dataset);
if (dataset.getLatestVersion() != null) {
DatasetVersionUI datasetVersionUI = null;
try {
datasetVersionUI = new DatasetVersionUI(dataset.getLatestVersion());
} catch (NullPointerException ex) {
logger.info("Caught exception trying to instantiate DatasetVersionUI for dataset " + dataset.getId() + ". : " + ex);
}
if (datasetVersionUI != null) {
String citation = null;
try {
citation = datasetVersionUI.getCitation();
if (citation != null) {
solrInputDocument.addField(SearchFields.CITATION, citation);
}
} catch (NullPointerException ex) {
logger.info("Caught exception trying to get citation for dataset " + dataset.getId() + ". : " + ex);
}
}
for (DatasetField dsf : dataset.getLatestVersion().getFlatDatasetFields()) {
DatasetFieldType dsfType = dsf.getDatasetFieldType();
String solrFieldSearchable = dsfType.getSolrField().getNameSearchable();
String solrFieldFacetable = dsfType.getSolrField().getNameFacetable();
if (dsf.getValues() != null && !dsf.getValues().isEmpty() && dsf.getValues().get(0) != null && solrFieldSearchable != null) {
logger.info("indexing " + dsf.getDatasetFieldType().getName() + ":" + dsf.getValues() + " into " + solrFieldSearchable + " and maybe " + solrFieldFacetable);
if (dsfType.getSolrField().getSolrType().equals(SolrField.SolrType.INTEGER)) {
String dateAsString = dsf.getValues().get(0);
logger.info("date as string: " + dateAsString);
if (dateAsString != null && !dateAsString.isEmpty()) {
SimpleDateFormat inputDateyyyy = new SimpleDateFormat("yyyy", Locale.ENGLISH);
try {
/**
* @todo when bean validation is working we
* won't have to convert strings into dates
*/
logger.info("Trying to convert " + dateAsString + " to a YYYY date from dataset " + dataset.getId());
Date dateAsDate = inputDateyyyy.parse(dateAsString);
SimpleDateFormat yearOnly = new SimpleDateFormat("yyyy");
String datasetFieldFlaggedAsDate = yearOnly.format(dateAsDate);
logger.info("YYYY only: " + datasetFieldFlaggedAsDate);
solrInputDocument.addField(solrFieldSearchable, Integer.parseInt(datasetFieldFlaggedAsDate));
if (dsfType.getSolrField().isFacetable()) {
solrInputDocument.addField(solrFieldFacetable, Integer.parseInt(datasetFieldFlaggedAsDate));
}
} catch (Exception ex) {
logger.info("unable to convert " + dateAsString + " into YYYY format and couldn't index it (" + dsfType.getName() + ")");
}
}
} else {
// _s (dynamic string) and all other Solr fields
if (dsf.getDatasetFieldType().getName().equals("authorAffiliation")) {
/**
* @todo think about how to tie the fact that this
* needs to be multivalued (_ss) because a
* multivalued facet (authorAffilition_ss) is being
* collapsed into here at index time. The business
* logic to determine if a data-driven metadata
* field should be indexed into Solr as a single or
* multiple value lives in the getSolrField() method
* of DatasetField.java
*/
solrInputDocument.addField(SearchFields.AFFILIATION, dsf.getValues());
} else if (dsf.getDatasetFieldType().getName().equals("title")) {
// datasets have titles not names but index title under name as well so we can sort datasets by name along dataverses and files
solrInputDocument.addField(SearchFields.NAME_SORT, dsf.getValues());
}
if (dsfType.isControlledVocabulary()) {
for (ControlledVocabularyValue controlledVocabularyValue : dsf.getControlledVocabularyValues()) {
solrInputDocument.addField(solrFieldSearchable, controlledVocabularyValue.getStrValue());
if (dsfType.getSolrField().isFacetable()) {
solrInputDocument.addField(solrFieldFacetable, controlledVocabularyValue.getStrValue());
}
}
} else {
solrInputDocument.addField(solrFieldSearchable, dsf.getValues());
if (dsfType.getSolrField().isFacetable()) {
solrInputDocument.addField(solrFieldFacetable, dsf.getValues());
}
}
}
}
/**
* @todo: review all code below... commented out old indexing of
* hard coded fields. Also, should we respect the
* isAdvancedSearchField boolean?
*/
// if (datasetField.isAdvancedSearchField()) {
// advancedSearchFields.add(idDashName);
// logger.info(idDashName + " is an advanced search field (" + title + ")");
// if (name.equals(DatasetFieldConstant.title)) {
// String toIndexTitle = datasetFieldValue.getStrValue();
// if (toIndexTitle != null && !toIndexTitle.isEmpty()) {
// solrInputDocument.addField(SearchFields.TITLE, toIndexTitle);
// }
// } else if (name.equals(DatasetFieldConstant.authorName)) {
// String toIndexAuthor = datasetFieldValue.getStrValue();
// if (toIndexAuthor != null && !toIndexAuthor.isEmpty()) {
// logger.info("index this author: " + toIndexAuthor);
// solrInputDocument.addField(SearchFields.AUTHOR_STRING, toIndexAuthor);
// }
// } else if (name.equals(DatasetFieldConstant.productionDate)) {
// String toIndexProductionDateString = datasetFieldValue.getStrValue();
// logger.info("production date: " + toIndexProductionDateString);
// if (toIndexProductionDateString != null && !toIndexProductionDateString.isEmpty()) {
// SimpleDateFormat inputDateyyyy = new SimpleDateFormat("yyyy", Locale.ENGLISH);
// try {
// logger.info("Trying to convert " + toIndexProductionDateString + " to a YYYY date from dataset " + dataset.getId());
// Date productionDate = inputDateyyyy.parse(toIndexProductionDateString);
// SimpleDateFormat yearOnly = new SimpleDateFormat("yyyy");
// String productionYear = yearOnly.format(productionDate);
// logger.info("YYYY only: " + productionYear);
// solrInputDocument.addField(SearchFields.PRODUCTION_DATE_YEAR_ONLY, Integer.parseInt(productionYear));
// solrInputDocument.addField(SearchFields.PRODUCTION_DATE_ORIGINAL, productionDate);
// } catch (Exception ex) {
// logger.info("unable to convert " + toIndexProductionDateString + " into YYYY format");
// }
// }
// /**
// * @todo: DRY! this is the same as above!
// */
// } else if (name.equals(DatasetFieldConstant.distributionDate)) {
// String toIndexdistributionDateString = datasetFieldValue.getStrValue();
// logger.info("distribution date: " + toIndexdistributionDateString);
// if (toIndexdistributionDateString != null && !toIndexdistributionDateString.isEmpty()) {
// SimpleDateFormat inputDateyyyy = new SimpleDateFormat("yyyy", Locale.ENGLISH);
// try {
// logger.info("Trying to convert " + toIndexdistributionDateString + " to a YYYY date from dataset " + dataset.getId());
// Date distributionDate = inputDateyyyy.parse(toIndexdistributionDateString);
// SimpleDateFormat yearOnly = new SimpleDateFormat("yyyy");
// String distributionYear = yearOnly.format(distributionDate);
// logger.info("YYYY only: " + distributionYear);
// solrInputDocument.addField(SearchFields.DISTRIBUTION_DATE_YEAR_ONLY, Integer.parseInt(distributionYear));
// solrInputDocument.addField(SearchFields.DISTRIBUTION_DATE_ORIGINAL, distributionDate);
// } catch (Exception ex) {
// logger.info("unable to convert " + toIndexdistributionDateString + " into YYYY format");
// }
// }
// } else if (name.equals(DatasetFieldConstant.keywordValue)) {
// String toIndexKeyword = datasetFieldValue.getStrValue();
// if (toIndexKeyword != null && !toIndexKeyword.isEmpty()) {
// solrInputDocument.addField(SearchFields.KEYWORD, toIndexKeyword);
// }
// } else if (name.equals(DatasetFieldConstant.distributorName)) {
// String toIndexDistributor = datasetFieldValue.getStrValue();
// if (toIndexDistributor != null && !toIndexDistributor.isEmpty()) {
// solrInputDocument.addField(SearchFields.DISTRIBUTOR, toIndexDistributor);
// }
// } else if (name.equals(DatasetFieldConstant.description)) {
// String toIndexDescription = datasetFieldValue.getStrValue();
// if (toIndexDescription != null && !toIndexDescription.isEmpty()) {
// solrInputDocument.addField(SearchFields.DESCRIPTION, toIndexDescription);
// }
// }
// } else {
// notAdvancedSearchFields.add(idDashName);
// logger.info(idDashName + " is not an advanced search field (" + title + ")");
// }
}
}
solrInputDocument.addField(SearchFields.SUBTREE, dataversePaths);
// solrInputDocument.addField(SearchFields.HOST_DATAVERSE, dataset.getOwner().getName());
solrInputDocument.addField(SearchFields.PARENT_ID, dataset.getOwner().getId());
solrInputDocument.addField(SearchFields.PARENT_NAME, dataset.getOwner().getName());
docs.add(solrInputDocument);
List<DataFile> files = dataset.getFiles();
for (DataFile dataFile : files) {
SolrInputDocument datafileSolrInputDocument = new SolrInputDocument();
datafileSolrInputDocument.addField(SearchFields.ID, "datafile_" + dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.ENTITY_ID, dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.TYPE, "files");
datafileSolrInputDocument.addField(SearchFields.NAME, dataFile.getName());
datafileSolrInputDocument.addField(SearchFields.NAME_SORT, dataFile.getName());
if (dataset.isReleased()) {
/**
* @todo: are datafiles supposed to have release dates? It's
* null. For now just set something: https://redmine.hmdc.harvard.edu/issues/3806
*/
// datafileSolrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataFile.getReleaseDate());
datafileSolrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataFile.getOwner().getOwner().getCreateDate());
datafileSolrInputDocument.addField(SearchFields.PERMS, publicGroupString);
} else if (dataset.getOwner().getCreator() != null) {
/**
* todo why is dataFile.getCreateDate() null? For now I guess
* we'll use the createDate of its parent datase's dataverset?! https://redmine.hmdc.harvard.edu/issues/3806
*/
// datafileSolrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataFile.getCreateDate());
datafileSolrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataFile.getOwner().getOwner().getCreateDate());
datafileSolrInputDocument.addField(SearchFields.PERMS, groupPerUserPrefix + dataset.getOwner().getCreator().getId());
/**
* @todo: replace this fake version of granting users access to
* dataverses with the real thing, when it's available in the
* app
*/
if (dataset.getOwner().getCreator().getUserName().equals("pete")) {
// figure out if cathy is around
DataverseUser cathy = dataverseUserServiceBean.findByUserName("cathy");
if (cathy != null) {
// let cathy see all of pete's dataverses
datafileSolrInputDocument.addField(SearchFields.PERMS, groupPerUserPrefix + cathy.getId());
}
}
} else {
/**
* @todo: remove this once everyone has dropped their database
* and won't get NPE's from dataverse.getCreator
*/
/**
* todo why is dataFile.getCreateDate() null? For now I guess
* we'll use the createDate of its parent dataset's dataverse?! https://redmine.hmdc.harvard.edu/issues/3806
*/
// datafileSolrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataFile.getCreateDate());
datafileSolrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataFile.getOwner().getOwner().getCreateDate());
datafileSolrInputDocument.addField(SearchFields.PERMS, npeGetCreator);
}
/**
* @todo: remove this fake "has access to all data" group
*/
datafileSolrInputDocument.addField(SearchFields.PERMS, groupPrefix + tmpNsaGroupId);
// For the mime type, we are going to index the "friendly" version, e.g.,
// "PDF File" instead of "application/pdf", "MS Excel" instead of
// "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" (!), etc.,
// if available:
datafileSolrInputDocument.addField(SearchFields.FILE_TYPE_MIME, dataFile.getFriendlyType());
// For the file type facets, we have a property file that maps mime types
// to facet-friendly names; "application/fits" should become "FITS", etc.:
datafileSolrInputDocument.addField(SearchFields.FILE_TYPE, FileUtil.getFacetFileType(dataFile));
datafileSolrInputDocument.addField(SearchFields.DESCRIPTION, dataFile.getDescription());
datafileSolrInputDocument.addField(SearchFields.SUBTREE, dataversePaths);
// datafileSolrInputDocument.addField(SearchFields.HOST_DATAVERSE, dataFile.getOwner().getOwner().getName());
// datafileSolrInputDocument.addField(SearchFields.PARENT_NAME, dataFile.getDataset().getTitle());
datafileSolrInputDocument.addField(SearchFields.PARENT_ID, dataFile.getOwner().getId());
if (!dataFile.getOwner().getLatestVersion().getTitle().isEmpty()) {
datafileSolrInputDocument.addField(SearchFields.PARENT_NAME, dataFile.getOwner().getLatestVersion().getTitle());
}
// If this is a tabular data file -- i.e., if there are data
// variables associated with this file, we index the variable
// names and labels:
if (dataFile.isTabularData()) {
List<DataVariable> variables = dataFile.getDataTable().getDataVariables();
String variableNamesToIndex = null;
String variableLabelsToIndex = null;
for (DataVariable var : variables) {
// Hard-coded search fields, for now:
// TODO: immediately: define these as constants in SearchFields;
// TODO: eventually: review, decide how datavariables should
// be handled for indexing purposes. (should it be a fixed
// setup, defined in the code? should it be flexible? unlikely
// that this needs to be domain-specific... since these data
// variables are quite specific to tabular data, which in turn
// is something social science-specific...
// anyway -- needs to be reviewed. -- L.A. 4.0alpha1
if (var.getName() != null && !var.getName().equals("")) {
if (variableNamesToIndex == null) {
variableNamesToIndex = var.getName();
} else {
variableNamesToIndex = variableNamesToIndex + " " + var.getName();
}
}
if (var.getLabel() != null && !var.getLabel().equals("")) {
if (variableLabelsToIndex == null) {
variableLabelsToIndex = var.getLabel();
} else {
variableLabelsToIndex = variableLabelsToIndex + " " + var.getLabel();
}
}
}
if (variableNamesToIndex != null) {
logger.info("indexing " + variableNamesToIndex.length() + " bytes");
datafileSolrInputDocument.addField("varname_s", variableNamesToIndex);
}
if (variableLabelsToIndex != null) {
logger.info("indexing " + variableLabelsToIndex.length() + " bytes");
datafileSolrInputDocument.addField("varlabel_s", variableLabelsToIndex);
}
}
// And if the file has indexable file-level metadata associated
// with it, we'll index that too:
List<FileMetadataFieldValue> fileMetadataFieldValues = dataFile.getFileMetadataFieldValues();
if (fileMetadataFieldValues != null && fileMetadataFieldValues.size() > 0) {
for (int j = 0; j < fileMetadataFieldValues.size(); j++) {
String fieldValue = fileMetadataFieldValues.get(j).getStrValue();
FileMetadataField fmf = fileMetadataFieldValues.get(j).getFileMetadataField();
String fileMetadataFieldName = fmf.getName();
String fileMetadataFieldFormatName = fmf.getFileFormatName();
String fieldName = fileMetadataFieldFormatName + "-" + fileMetadataFieldName + "_s";
datafileSolrInputDocument.addField(fieldName, fieldValue);
}
}
docs.add(datafileSolrInputDocument);
}
/**
* @todo allow for configuration of hostname and port
*/
SolrServer server = new HttpSolrServer("http://localhost:8983/solr/");
try {
server.add(docs);
} catch (SolrServerException | IOException ex) {
return ex.toString();
}
try {
server.commit();
} catch (SolrServerException | IOException ex) {
return ex.toString();
}
return "indexed dataset " + dataset.getId(); // + ":" + dataset.getTitle();
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public String indexDataset(Dataset dataset) {
logger.info("indexing dataset " + dataset.getId());
String solrIdDraftStudy = "dataset_" + dataset.getId() + "_draft";
String solrIdPublishedStudy = "dataset_" + dataset.getId();
StringBuilder sb = new StringBuilder();
sb.append("rationale:\n");
List<DatasetVersion> versions = dataset.getVersions();
for (DatasetVersion datasetVersion : versions) {
Long versionDatabaseId = datasetVersion.getId();
String versionTitle = datasetVersion.getTitle();
String semanticVersion = datasetVersion.getSemanticVersion();
String versionState = datasetVersion.getVersionState().name();
boolean versionIsReleased = datasetVersion.isReleased();
boolean versionIsWorkingCopy = datasetVersion.isWorkingCopy();
sb.append("version found with database id " + versionDatabaseId + "\n");
sb.append("- title: " + versionTitle + "\n");
sb.append("- semanticVersion-STATE: " + semanticVersion + "-" + versionState + "\n");
sb.append("- isWorkingCopy: " + versionIsWorkingCopy + "\n");
sb.append("- isReleased: " + versionIsReleased + "\n");
}
DatasetVersion latestVersion = dataset.getLatestVersion();
String latestVersionState = latestVersion.getVersionState().name();
DatasetVersion releasedVersion = dataset.getReleasedVersion();
if (latestVersion.isWorkingCopy()) {
sb.append("The latest version is a working copy (latestVersionState: " + latestVersionState + ") and will be indexed as " + solrIdDraftStudy + " (only visible by creator)\n");
if (releasedVersion != null) {
String releasedVersionState = releasedVersion.getVersionState().name();
String semanticVersion = releasedVersion.getSemanticVersion();
sb.append("The released version is " + semanticVersion + " (releasedVersionState: " + releasedVersionState + ") and will be indexed as " + solrIdPublishedStudy + " (visible by anonymous)");
/**
* The latest version is a working copy (latestVersionState:
* DRAFT) and will be indexed as dataset_17_draft (only visible
* by creator)
*
* The released version is 1.0 (releasedVersionState: RELEASED)
* and will be indexed as dataset_17 (visible by anonymous)
*/
logger.info(sb.toString());
String indexDraftResult = indexDatasetAddOrUpdate(dataset);
String indexReleasedVersionResult = indexDatasetAddOrUpdate(dataset);
return "indexDraftResult:" + indexDraftResult + ", indexReleasedVersionResult:" + indexReleasedVersionResult + ", " + sb.toString();
} else {
sb.append("There is no released version yet so nothing will be indexed as " + solrIdPublishedStudy);
/**
* The latest version is a working copy (latestVersionState:
* DRAFT) and will be indexed as dataset_33_draft (only visible
* by creator)
*
* There is no released version yet so nothing will be indexed
* as dataset_33
*/
logger.info(sb.toString());
String indexDraftResult = indexDatasetAddOrUpdate(dataset);
return "indexDraftResult:" + indexDraftResult + ", " + sb.toString();
}
} else {
sb.append("The latest version is not a working copy (latestVersionState: " + latestVersionState + ") and will be indexed as " + solrIdPublishedStudy + " (visible by anonymous) and we will be deleting " + solrIdDraftStudy + "\n");
if (releasedVersion != null) {
String releasedVersionState = releasedVersion.getVersionState().name();
String semanticVersion = releasedVersion.getSemanticVersion();
sb.append("The released version is " + semanticVersion + " (releasedVersionState: " + releasedVersionState + ") and will be (again) indexed as " + solrIdPublishedStudy + " (visible by anonymous)");
/**
* The latest version is not a working copy (latestVersionState:
* RELEASED) and will be indexed as dataset_34 (visible by
* anonymous) and we will be deleting dataset_34_draft
*
* The released version is 1.0 (releasedVersionState: RELEASED)
* and will be (again) indexed as dataset_34 (visible by anonymous)
*/
logger.info(sb.toString());
String deleteDraftVersionResult = removeDatasetDraftFromIndex(solrIdDraftStudy);
String indexReleasedVersionResult = indexDatasetAddOrUpdate(dataset);
return "deleteDraftVersionResult: " + deleteDraftVersionResult + ", indexReleasedVersionResult:" + indexReleasedVersionResult + ", " + sb.toString();
} else {
sb.append("We don't ever expect to ever get here. Why is there no released version if the latest version is not a working copy? The latestVersionState is " + latestVersionState + " and we don't know what to do with it. Nothing will be added or deleted from the index.");
logger.info(sb.toString());
return sb.toString();
}
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public synchronized String CalculateMD5 (String datafile) {
MessageDigest md = null;
try {
md = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
FileInputStream fis = null;
try {
fis = new FileInputStream(datafile);
} catch (FileNotFoundException ex) {
throw new RuntimeException(ex);
}
byte[] dataBytes = new byte[1024];
int nread;
try {
while ((nread = fis.read(dataBytes)) != -1) {
md.update(dataBytes, 0, nread);
}
} catch (IOException ex) {
throw new RuntimeException(ex);
}
byte[] mdbytes = md.digest();
StringBuilder sb = new StringBuilder("");
for (int i = 0; i < mdbytes.length; i++) {
sb.append(Integer.toString((mdbytes[i] & 0xff) + 0x100, 16).substring(1));
}
return sb.toString();
}
#location 22
#vulnerability type RESOURCE_LEAK
|
#fixed code
public synchronized String CalculateMD5 (String datafile) {
FileInputStream fis = null;
try {
fis = new FileInputStream(datafile);
} catch (FileNotFoundException ex) {
throw new RuntimeException(ex);
}
return CalculateMD5(fis);
/*
byte[] dataBytes = new byte[1024];
int nread;
try {
while ((nread = fis.read(dataBytes)) != -1) {
md.update(dataBytes, 0, nread);
}
} catch (IOException ex) {
throw new RuntimeException(ex);
}
byte[] mdbytes = md.digest();
StringBuilder sb = new StringBuilder("");
for (int i = 0; i < mdbytes.length; i++) {
sb.append(Integer.toString((mdbytes[i] & 0xff) + 0x100, 16).substring(1));
}
return sb.toString();
*/
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public String save() {
dataset.setOwner(dataverseService.find(ownerId));
//TODO get real application-wide protocol/authority
dataset.setProtocol("doi");
dataset.setAuthority("10.5072/FK2");
dataset.setIdentifier("5555");
/*
* Save and/or ingest files, if there are any:
*/
if (newFiles != null && newFiles.size() > 0) {
try {
if (dataset.getFileSystemDirectory() != null && !Files.exists(dataset.getFileSystemDirectory())) {
/* Note that "createDirectories()" must be used - not
* "createDirectory()", to make sure all the parent
* directories that may not yet exist are created as well.
*/
Files.createDirectories(dataset.getFileSystemDirectory());
}
} catch (IOException dirEx) {
Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Failed to create study directory " + dataset.getFileSystemDirectory().toString());
}
if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
for (DataFile dFile : newFiles) {
String tempFileLocation = getFilesTempDirectory() + "/" + dFile.getFileSystemName();
// These are all brand new files, so they should all have
// one filemetadata total. You do NOT want to use
// getLatestFilemetadata() here - because it relies on
// comparing the object IDs of the corresponding datasetversions...
// Which may not have been persisted yet.
// -- L.A. 4.0 beta.
FileMetadata fileMetadata = dFile.getFileMetadatas().get(0);
String fileName = fileMetadata.getLabel();
//boolean ingestedAsTabular = false;
boolean metadataExtracted = false;
datasetService.generateFileSystemName(dFile);
if (ingestService.ingestableAsTabular(dFile)) {
/*
* Note that we don't try to ingest the file right away -
* instead we mark it as "scheduled for ingest", then at
* the end of the save process it will be queued for async.
* ingest in the background. In the meantime, the file
* will be ingested as a regular, non-tabular file, and
* appear as such to the user, until the ingest job is
* finished with the Ingest Service.
*/
dFile.SetIngestScheduled();
} else if (ingestService.fileMetadataExtractable(dFile)) {
try {
dFile.setContentType("application/fits");
metadataExtracted = ingestService.extractIndexableMetadata(tempFileLocation, dFile, editVersion);
} catch (IOException mex) {
Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + fileName, mex);
}
if (metadataExtracted) {
Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + fileName);
} else {
Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + fileName);
}
}
// Try to save the file in its permanent location:
//if (!ingestedAsTabular) {
try {
Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Will attempt to save the file as: " + dFile.getFileSystemLocation().toString());
Files.copy(new FileInputStream(new File(tempFileLocation)), dFile.getFileSystemLocation(), StandardCopyOption.REPLACE_EXISTING);
MD5Checksum md5Checksum = new MD5Checksum();
try {
dFile.setmd5(md5Checksum.CalculateMD5(dFile.getFileSystemLocation().toString()));
} catch (Exception md5ex) {
Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + fileName);
}
} catch (IOException ioex) {
Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to save the file " + dFile.getFileSystemLocation());
}
//}
// Any necessary post-processing:
ingestService.performPostProcessingTasks(dFile);
}
}
}
Command<Dataset> cmd;
try {
if (editMode == EditMode.CREATE) {
cmd = new CreateDatasetCommand(dataset, session.getUser());
} else {
cmd = new UpdateDatasetCommand(dataset, session.getUser());
}
dataset = commandEngine.submit(cmd);
} catch (EJBException ex) {
StringBuilder error = new StringBuilder();
error.append(ex + " ");
error.append(ex.getMessage() + " ");
Throwable cause = ex;
while (cause.getCause() != null) {
cause = cause.getCause();
error.append(cause + " ");
error.append(cause.getMessage() + " ");
}
logger.info("Couldn't save dataset: " + error.toString());
FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + error.toString()));
return null;
} catch (CommandException ex) {
FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + ex.toString()));
Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, ex);
}
newFiles.clear();
editMode = null;
// Queue the ingest jobs for asynchronous execution:
for (DataFile dataFile : dataset.getFiles()) {
if (dataFile.isIngestScheduled()) {
dataFile.SetIngestInProgress();
Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Attempting to queue the file " + dataFile.getFileMetadata().getLabel() + " for ingest.");
ingestService.asyncIngestAsTabular(dataFile);
}
}
return "/dataset.xhtml?id=" + dataset.getId() + "&faces-redirect=true";
}
#location 126
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * Persists the dataset (create or update). Steps, in order: (1) run bean
 * validation over every DatasetFieldValue and abort on any violation;
 * (2) set owner and (currently hard-coded) persistent-identifier parts;
 * (3) copy each newly uploaded file from the temp directory into the
 * dataset's permanent directory, scheduling tabular files for ingest and
 * extracting FITS metadata where applicable; (4) submit a Create/Update
 * command; (5) queue the scheduled ingest jobs asynchronously.
 *
 * @return the dataset-page redirect string on success, "" when validation
 *         fails, or null when the save command throws an EJBException.
 */
public String save() {
    // Validate
    boolean dontSave = false;
    ValidatorFactory factory = Validation.buildDefaultValidatorFactory();
    Validator validator = factory.getValidator();
    for (DatasetField dsf : editVersion.getFlatDatasetFields()) {
        for (DatasetFieldValue dsfv : dsf.getDatasetFieldValues()) {
            // dsfv.setValidationMessage(null); // clear out any existing validation message
            Set<ConstraintViolation<DatasetFieldValue>> constraintViolations = validator.validate(dsfv);
            for (ConstraintViolation<DatasetFieldValue> constraintViolation : constraintViolations) {
                FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Validation Error", constraintViolation.getMessage()));
                // dsfv.setValidationMessage(constraintViolation.getMessage());
                dontSave = true;
            }
        }
    }
    // Abort before touching the filesystem or database if any field failed.
    if (dontSave) {
        return "";
    }
    dataset.setOwner(dataverseService.find(ownerId));
    //TODO get real application-wide protocol/authority
    dataset.setProtocol("doi");
    dataset.setAuthority("10.5072/FK2");
    dataset.setIdentifier("5555");
    /*
     * Save and/or ingest files, if there are any:
     */
    if (newFiles != null && newFiles.size() > 0) {
        try {
            if (dataset.getFileSystemDirectory() != null && !Files.exists(dataset.getFileSystemDirectory())) {
                /* Note that "createDirectories()" must be used - not
                 * "createDirectory()", to make sure all the parent
                 * directories that may not yet exist are created as well.
                 */
                Files.createDirectories(dataset.getFileSystemDirectory());
            }
        } catch (IOException dirEx) {
            Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Failed to create study directory " + dataset.getFileSystemDirectory().toString());
        }
        if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
            for (DataFile dFile : newFiles) {
                String tempFileLocation = getFilesTempDirectory() + "/" + dFile.getFileSystemName();
                // These are all brand new files, so they should all have
                // one filemetadata total. You do NOT want to use
                // getLatestFilemetadata() here - because it relies on
                // comparing the object IDs of the corresponding datasetversions...
                // Which may not have been persisted yet.
                // -- L.A. 4.0 beta.
                FileMetadata fileMetadata = dFile.getFileMetadatas().get(0);
                String fileName = fileMetadata.getLabel();
                //boolean ingestedAsTabular = false;
                boolean metadataExtracted = false;
                datasetService.generateFileSystemName(dFile);
                if (ingestService.ingestableAsTabular(dFile)) {
                    /*
                     * Note that we don't try to ingest the file right away -
                     * instead we mark it as "scheduled for ingest", then at
                     * the end of the save process it will be queued for async.
                     * ingest in the background. In the meantime, the file
                     * will be ingested as a regular, non-tabular file, and
                     * appear as such to the user, until the ingest job is
                     * finished with the Ingest Service.
                     */
                    dFile.SetIngestScheduled();
                } else if (ingestService.fileMetadataExtractable(dFile)) {
                    try {
                        dFile.setContentType("application/fits");
                        metadataExtracted = ingestService.extractIndexableMetadata(tempFileLocation, dFile, editVersion);
                    } catch (IOException mex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + fileName, mex);
                    }
                    if (metadataExtracted) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + fileName);
                    } else {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + fileName);
                    }
                }
                // Try to save the file in its permanent location:
                //if (!ingestedAsTabular) {
                try {
                    Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Will attempt to save the file as: " + dFile.getFileSystemLocation().toString());
                    Files.copy(new FileInputStream(new File(tempFileLocation)), dFile.getFileSystemLocation(), StandardCopyOption.REPLACE_EXISTING);
                    MD5Checksum md5Checksum = new MD5Checksum();
                    try {
                        dFile.setmd5(md5Checksum.CalculateMD5(dFile.getFileSystemLocation().toString()));
                    } catch (Exception md5ex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + fileName);
                    }
                } catch (IOException ioex) {
                    Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to save the file " + dFile.getFileSystemLocation());
                }
                //}
                // Any necessary post-processing:
                ingestService.performPostProcessingTasks(dFile);
            }
        }
    }
    Command<Dataset> cmd;
    try {
        if (editMode == EditMode.CREATE) {
            cmd = new CreateDatasetCommand(dataset, session.getUser());
        } else {
            cmd = new UpdateDatasetCommand(dataset, session.getUser());
        }
        dataset = commandEngine.submit(cmd);
    } catch (EJBException ex) {
        // Unwrap the whole cause chain so the user-facing message carries
        // the underlying persistence error, not just the EJB wrapper.
        StringBuilder error = new StringBuilder();
        error.append(ex + " ");
        error.append(ex.getMessage() + " ");
        Throwable cause = ex;
        while (cause.getCause() != null) {
            cause = cause.getCause();
            error.append(cause + " ");
            error.append(cause.getMessage() + " ");
        }
        logger.info("Couldn't save dataset: " + error.toString());
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + error.toString()));
        return null;
    } catch (CommandException ex) {
        // NOTE(review): unlike the EJBException branch, this falls through
        // and still redirects to the dataset page - TODO confirm intended.
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + ex.toString()));
        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, ex);
    }
    newFiles.clear();
    editMode = null;
    // Queue the ingest jobs for asynchronous execution:
    for (DataFile dataFile : dataset.getFiles()) {
        if (dataFile.isIngestScheduled()) {
            dataFile.SetIngestInProgress();
            Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Attempting to queue the file " + dataFile.getFileMetadata().getLabel() + " for ingest.");
            ingestService.asyncIngestAsTabular(dataFile);
        }
    }
    return "/dataset.xhtml?id=" + dataset.getId() + "&faces-redirect=true";
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Page-init: loads an existing dataset (view mode) when an id is present,
 * otherwise bootstraps a brand-new draft version under the dataverse
 * identified by ownerId (create mode). Throws if neither id nor ownerId
 * was supplied.
 */
public void init() {
    if (dataset.getId() != null) { // view mode for a dataset
        dataset = datasetService.find(dataset.getId());
        editVersion = dataset.getLatestVersion();
        ownerId = dataset.getOwner().getId();
        editVersion.setDatasetFields(editVersion.initDatasetFields());
        datasetVersionUI = new DatasetVersionUI(editVersion);
    } else if (ownerId != null) {
        // create mode for a new child dataset
        editMode = EditMode.CREATE;
        dataset.setOwner(dataverseService.find(ownerId));
        dataset.setVersions(new ArrayList());
        editVersion.setDataset(dataset);
        editVersion.setFileMetadatas(new ArrayList());
        // NOTE(review): fields are explicitly nulled here and then rebuilt
        // two lines below via initDatasetFields(); if initDatasetFields()
        // reads the current field list this can NPE - TODO confirm.
        editVersion.setDatasetFields(null);
        editVersion.setVersionState(VersionState.DRAFT);
        editVersion.setDatasetFields(editVersion.initDatasetFields());
        editVersion.setVersionNumber(new Long(1));
        datasetVersionUI = new DatasetVersionUI(editVersion);
        //TODO add call to initDepositFields if it's decided that they are indeed metadata
        //initDepositFields();
        dataset.getVersions().add(editVersion);
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Add New Dataset", " - Enter metadata to create the dataset's citation. You can add more metadata about this dataset after it's created."));
    } else {
        throw new RuntimeException("On Dataset page without id or ownerid."); // improve error handling
    }
}
#location 7
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * Page-init: loads an existing dataset (view mode) when an id is present,
 * otherwise bootstraps a brand-new draft version (version number 1, state
 * DRAFT) under the dataverse identified by ownerId. Throws if neither an
 * id nor an ownerId was supplied to the page.
 */
public void init() {
    if (dataset.getId() != null) { // view mode for a dataset
        dataset = datasetService.find(dataset.getId());
        editVersion = dataset.getLatestVersion();
        ownerId = dataset.getOwner().getId();
        editVersion.setDatasetFields(editVersion.initDatasetFields());
        datasetVersionUI = new DatasetVersionUI(editVersion);
    } else if (ownerId != null) {
        // create mode for a new child dataset
        editMode = EditMode.CREATE;
        dataset.setOwner(dataverseService.find(ownerId));
        dataset.setVersions(new ArrayList());
        editVersion.setDataset(dataset);
        editVersion.setFileMetadatas(new ArrayList());
        editVersion.setVersionState(VersionState.DRAFT);
        editVersion.setDatasetFields(editVersion.initDatasetFields());
        editVersion.setVersionNumber(new Long(1));
        datasetVersionUI = new DatasetVersionUI(editVersion);
        //TODO add call to initDepositFields if it's decided that they are indeed metadata
        //initDepositFields();
        dataset.getVersions().add(editVersion);
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Add New Dataset", " - Enter metadata to create the dataset's citation. You can add more metadata about this dataset after it's created."));
    } else {
        throw new RuntimeException("On Dataset page without id or ownerid."); // improve error handling
    }
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Handles the DropBox Chooser callback: parses the JSON selection, then
 * for each entry downloads the file over HTTP, stores it in the temp
 * directory, builds a DataFile + FileMetadata pair attached to the
 * current edit version, and attempts MIME-type detection on the bytes.
 */
public void handleDropBoxUpload(ActionEvent e) {
    // Read JSON object from the output of the DropBox Chooser:
    JsonReader dbJsonReader = Json.createReader(new StringReader(dropBoxSelection));
    JsonArray dbArray = dbJsonReader.readArray();
    dbJsonReader.close();
    for (int i = 0; i < dbArray.size(); i++) {
        JsonObject dbObject = dbArray.getJsonObject(i);
        // Extract the payload:
        String fileLink = dbObject.getString("link");
        String fileName = dbObject.getString("name");
        int fileSize = dbObject.getInt("bytes");
        logger.info("DropBox url: " + fileLink + ", filename: " + fileName + ", size: " + fileSize);
        DataFile dFile = null;
        // Make http call, download the file:
        GetMethod dropBoxMethod = new GetMethod(fileLink);
        int status = 0;
        InputStream dropBoxStream = null;
        try {
            status = getClient().executeMethod(dropBoxMethod);
            if (status == 200) {
                dropBoxStream = dropBoxMethod.getResponseBodyAsStream();
                dFile = new DataFile("application/octet-stream");
                dFile.setOwner(dataset);
                // save the file, in the temporary location for now:
                datasetService.generateFileSystemName(dFile);
                if (ingestService.getFilesTempDirectory() != null) {
                    logger.info("Will attempt to save the DropBox file as: " + ingestService.getFilesTempDirectory() + "/" + dFile.getFileSystemName());
                    Files.copy(dropBoxStream, Paths.get(ingestService.getFilesTempDirectory(), dFile.getFileSystemName()), StandardCopyOption.REPLACE_EXISTING);
                    File tempFile = Paths.get(ingestService.getFilesTempDirectory(), dFile.getFileSystemName()).toFile();
                    if (tempFile.exists()) {
                        long writtenBytes = tempFile.length();
                        logger.info("File size, expected: " + fileSize + ", written: " + writtenBytes);
                    } else {
                        throw new IOException();
                    }
                }
            }
        } catch (IOException ex) {
            logger.warning("Failed to access DropBox url: " + fileLink + "!");
            continue;
        } finally {
            if (dropBoxMethod != null) {
                dropBoxMethod.releaseConnection();
            }
            if (dropBoxStream != null) {
                try {
                    dropBoxStream.close();
                } catch (Exception ex) {
                }
            }
        }
        // NOTE(review): a non-200 status does not throw and does not
        // 'continue', so dFile can still be null when execution reaches
        // the code below, which dereferences it unconditionally.
        // If we've made it this far, we must have downloaded the file
        // successfully, so let's finish processing it as a new DataFile
        // object:
        FileMetadata fmd = new FileMetadata();
        fmd.setDataFile(dFile);
        dFile.getFileMetadatas().add(fmd);
        fmd.setLabel(fileName);
        fmd.setCategory(dFile.getContentType());
        if (editVersion.getFileMetadatas() == null) {
            editVersion.setFileMetadatas(new ArrayList());
        }
        editVersion.getFileMetadatas().add(fmd);
        fmd.setDatasetVersion(editVersion);
        dataset.getFiles().add(dFile);
        // When uploading files from dropBox, we don't get the benefit of
        // having the browser recognize the mime type of the file. So we'll
        // have to rely on our own utilities (Jhove, etc.) to try and determine
        // what it is.
        String fileType = null;
        try {
            fileType = FileUtil.determineFileType(Paths.get(ingestService.getFilesTempDirectory(), dFile.getFileSystemName()).toFile(), fileName);
            logger.fine("File utility recognized the file as " + fileType);
            if (fileType != null && !fileType.equals("")) {
                dFile.setContentType(fileType);
            }
        } catch (IOException ex) {
            logger.warning("Failed to run the file utility mime type check on file " + fileName);
        }
        newFiles.add(dFile);
    }
}
#location 64
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * Handles the DropBox Chooser callback: parses the JSON selection and,
 * for each entry that downloads successfully (HTTP 200), delegates
 * DataFile creation to the ingest service and records it in newFiles.
 * Entries that fail to download are logged and skipped; the HTTP
 * connection and stream are always released in the finally block.
 */
public void handleDropBoxUpload(ActionEvent e) {
    // Read JSON object from the output of the DropBox Chooser:
    JsonReader dbJsonReader = Json.createReader(new StringReader(dropBoxSelection));
    JsonArray dbArray = dbJsonReader.readArray();
    dbJsonReader.close();
    for (int i = 0; i < dbArray.size(); i++) {
        JsonObject dbObject = dbArray.getJsonObject(i);
        // Extract the payload:
        String fileLink = dbObject.getString("link");
        String fileName = dbObject.getString("name");
        int fileSize = dbObject.getInt("bytes");
        logger.info("DropBox url: " + fileLink + ", filename: " + fileName + ", size: " + fileSize);
        DataFile dFile = null;
        // Make http call, download the file:
        GetMethod dropBoxMethod = new GetMethod(fileLink);
        int status = 0;
        InputStream dropBoxStream = null;
        try {
            status = getClient().executeMethod(dropBoxMethod);
            if (status == 200) {
                dropBoxStream = dropBoxMethod.getResponseBodyAsStream();
                // If we've made it this far, we must have been able to
                // make a successful HTTP call to the DropBox server and
                // obtain an InputStream - so we can now create a new
                // DataFile object:
                dFile = ingestService.createDataFile(editVersion, dropBoxStream, fileName, null);
                newFiles.add(dFile);
            }
        } catch (IOException ex) {
            logger.warning("Failed to access DropBox url: " + fileLink + "!");
            continue;
        } finally {
            if (dropBoxMethod != null) {
                dropBoxMethod.releaseConnection();
            }
            if (dropBoxStream != null) {
                try {
                    dropBoxStream.close();
                } catch (Exception ex) {
                    //logger.whocares("...");
                }
            }
        }
    }
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Builds the citation string for the given dataset version: authors,
 * publication year (current year if unreleased), quoted title, the
 * persistent identifier (as an HTML link when isOnlineVersion is true),
 * the root dataverse name, and the version number.
 *
 * @param isOnlineVersion when true, render the identifier as an anchor tag
 * @param version the dataset version whose citation is assembled
 * @return the comma-separated citation string (possibly empty)
 */
public String getCitation(boolean isOnlineVersion, DatasetVersion version) {
    String str = "";
    boolean includeAffiliation = false;
    String authors = version.getAuthorsStr(includeAffiliation);
    if (!StringUtil.isEmpty(authors)) {
        str += authors;
    }
    if (this.getPublicationDate() == null || StringUtil.isEmpty(this.getPublicationDate().toString())) {
        //if not released use current year
        if (!StringUtil.isEmpty(str)) {
            str += ", ";
        }
        str += new SimpleDateFormat("yyyy").format(new Timestamp(new Date().getTime())) ;
    } else {
        if (!StringUtil.isEmpty(str)) {
            str += ", ";
        }
        str += new SimpleDateFormat("yyyy").format(new Timestamp(this.getPublicationDate().getTime()));
    }
    if ( version.getTitle() != null ) {
        if (!StringUtil.isEmpty(version.getTitle())) {
            if (!StringUtil.isEmpty(str)) {
                str += ", ";
            }
            str += "\"" + version.getTitle() + "\"";
        }
    }
    if (!StringUtil.isEmpty(this.getIdentifier())) {
        if (!StringUtil.isEmpty(str)) {
            str += ", ";
        }
        if (isOnlineVersion) {
            str += "<a href=\"" + this.getPersistentURL() + "\">" + this.getIdentifier() + "</a>";
        } else {
            str += this.getPersistentURL();
        }
    }
    //Get root dataverse name for Citation
    // NOTE(review): root is dereferenced without a null check; if
    // getOwner() can return null this loop NPEs - TODO confirm.
    Dataverse root = this.getOwner();
    while (root.getOwner() != null) {
        root = root.getOwner();
    }
    String rootDataverseName = root.getName();
    if (!StringUtil.isEmpty(rootDataverseName)) {
        if (!StringUtil.isEmpty(str)) {
            str += ", ";
        }
        str += " " + rootDataverseName + " ";
    }
    if (version.getVersionNumber() != null) {
        if (!StringUtil.isEmpty(str)) {
            str += ", ";
        }
        str += " V" + version.getVersionNumber();
        str += " [Version]";
    }
    /*UNF is not calculated yet
    if (!StringUtil.isEmpty(getUNF())) {
        if (!StringUtil.isEmpty(str)) {
            str += " ";
        }
        str += getUNF();
    }
    String distributorNames = getDistributorNames();
    if (distributorNames.trim().length() > 0) {
        str += " " + distributorNames;
        str += " [Distributor]";
    }*/
    return str;
}
#location 46
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * Returns the citation string for the supplied dataset version.
 * Formatting is fully delegated to the version object itself.
 *
 * @param isOnlineVersion forwarded to the version's citation builder
 * @param version the dataset version to cite
 * @return the citation produced by {@code version.getCitation(...)}
 */
public String getCitation(boolean isOnlineVersion, DatasetVersion version) {
    String citation = version.getCitation(isOnlineVersion);
    return citation;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * SWORD getEntry: authenticates the caller, parses the target out of the
 * URI, and for a "study" target returns a deposit receipt - provided the
 * user may modify the owning dataverse. Every failure path raises a
 * SwordError with ERROR_BAD_REQUEST.
 */
@Override
public DepositReceipt getEntry(String uri, Map<String, String> map, AuthCredentials authCredentials, SwordConfiguration swordConfiguration) throws SwordServerException, SwordError, SwordAuthException {
    DataverseUser dataverseUser = swordAuth.auth(authCredentials);
    logger.fine("getEntry called with url: " + uri);
    urlManager.processUrl(uri);
    String targetType = urlManager.getTargetType();
    if (!targetType.isEmpty()) {
        logger.fine("operating on target type: " + urlManager.getTargetType());
        if ("study".equals(targetType)) {
            String globalId = urlManager.getTargetIdentifier();
            Dataset dataset = null;
            try {
                /**
                 * @todo don't hard code this, obviously. In DVN 3.x we had
                 * a method for studyService.getStudyByGlobalId(globalId)
                 */
                // study = studyService.getStudyByGlobalId(globalId);
                long databaseIdForRoastingAtHomeDataset = 10;
                dataset = datasetService.find(databaseIdForRoastingAtHomeDataset);
            } catch (EJBException ex) {
                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on global id (" + globalId + ") in URL: " + uri);
            }
            if (dataset != null) {
                Dataverse dvThatOwnsStudy = dataset.getOwner();
                if (swordAuth.hasAccessToModifyDataverse(dataverseUser, dvThatOwnsStudy)) {
                    ReceiptGenerator receiptGenerator = new ReceiptGenerator();
                    String baseUrl = urlManager.getHostnamePlusBaseUrlPath(uri);
                    DepositReceipt depositReceipt = receiptGenerator.createReceipt(baseUrl, dataset);
                    if (depositReceipt != null) {
                        return depositReceipt;
                    } else {
                        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not generate deposit receipt.");
                    }
                } else {
                    /**
                     * @todo need study.getGlobalId() from DVN 3.x
                     */
                    throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "User " + dataverseUser.getUserName() + " is not authorized to retrieve entry for " + dataset.getIdentifier());
                }
            } else {
                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on URL: " + uri);
            }
        } else {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unsupported target type (" + targetType + ") in URL: " + uri);
        }
    } else {
        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unable to determine target type from URL: " + uri);
    }
}
#location 25
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * SWORD getEntry: authenticates the caller, resolves the "study" target's
 * global id to a Dataset via findByGlobalId, and returns a deposit receipt
 * when the user may modify the owning dataverse. Every failure path raises
 * a SwordError with ERROR_BAD_REQUEST.
 */
@Override
public DepositReceipt getEntry(String uri, Map<String, String> map, AuthCredentials authCredentials, SwordConfiguration swordConfiguration) throws SwordServerException, SwordError, SwordAuthException {
    DataverseUser dataverseUser = swordAuth.auth(authCredentials);
    logger.fine("getEntry called with url: " + uri);
    urlManager.processUrl(uri);
    String targetType = urlManager.getTargetType();
    if (!targetType.isEmpty()) {
        logger.fine("operating on target type: " + urlManager.getTargetType());
        if ("study".equals(targetType)) {
            String globalId = urlManager.getTargetIdentifier();
            Dataset dataset = null;
            try {
                dataset = datasetService.findByGlobalId(globalId);
            } catch (EJBException ex) {
                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on global id (" + globalId + ") in URL: " + uri);
            }
            if (dataset != null) {
                Dataverse dvThatOwnsStudy = dataset.getOwner();
                if (swordAuth.hasAccessToModifyDataverse(dataverseUser, dvThatOwnsStudy)) {
                    ReceiptGenerator receiptGenerator = new ReceiptGenerator();
                    String baseUrl = urlManager.getHostnamePlusBaseUrlPath(uri);
                    DepositReceipt depositReceipt = receiptGenerator.createReceipt(baseUrl, dataset);
                    if (depositReceipt != null) {
                        return depositReceipt;
                    } else {
                        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not generate deposit receipt.");
                    }
                } else {
                    throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "User " + dataverseUser.getUserName() + " is not authorized to retrieve entry for " + dataset.getGlobalId());
                }
            } else {
                throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on URL: " + uri);
            }
        } else {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unsupported target type (" + targetType + ") in URL: " + uri);
        }
    } else {
        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unable to determine target type from URL: " + uri);
    }
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Persists the dataset: sets owner and (currently hard-coded) identifier
 * parts, stamps creator/create-date on a brand-new dataset, re-links
 * DataFiles onto the first version's FileMetadatas, copies uploaded files
 * into the dataset directory (ingesting tabular files synchronously and
 * extracting FITS metadata where applicable), then saves via the service.
 *
 * @return the dataset-page redirect string, or null on EJBException.
 */
public String save() {
    dataset.setOwner(dataverseService.find(ownerId));
    //TODO get real application-wide protocol/authority
    dataset.setProtocol("doi");
    dataset.setAuthority("10.5072/FK2");
    dataset.setIdentifier("5555");
    //TODO add replication for logic if necessary
    if (replicationFor){
        //dataset.getVersions().get(0).getDatasetFields().
    }
    //Todo pre populate deposit date
    //If new ds get create date user
    if (dataset.getId() == null){
        dataset.setCreator(session.getUser());
        dataset.setCreateDate(new Timestamp(new Date().getTime()));
    }
    // Re-attach the DataFile objects from the edit version onto the
    // matching FileMetadatas of the first (persisted) version.
    if (!(dataset.getVersions().get(0).getFileMetadatas() == null) && !dataset.getVersions().get(0).getFileMetadatas().isEmpty()) {
        int fmdIndex = 0;
        for (FileMetadata fmd : dataset.getVersions().get(0).getFileMetadatas()) {
            for (FileMetadata fmdTest : editVersion.getFileMetadatas()) {
                if (fmd.equals(fmdTest)) {
                    dataset.getVersions().get(0).getFileMetadatas().get(fmdIndex).setDataFile(fmdTest.getDataFile());
                }
            }
            fmdIndex++;
        }
    }
    /*
     * Save and/or ingest files, if there are any:
     */
    if (newFiles != null && newFiles.size() > 0) {
        try {
            if (dataset.getFileSystemDirectory() != null && !Files.exists(dataset.getFileSystemDirectory())) {
                /* Note that "createDirectories()" must be used - not
                 * "createDirectory()", to make sure all the parent
                 * directories that may not yet exist are created as well.
                 */
                Files.createDirectories(dataset.getFileSystemDirectory());
            }
        } catch (IOException dirEx) {
            Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Failed to create study directory " + dataset.getFileSystemDirectory().toString());
        }
        if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
            for (UploadedFile uFile : newFiles.keySet()) {
                DataFile dFile = newFiles.get(uFile);
                String tempFileLocation = getFilesTempDirectory() + "/" + dFile.getFileSystemName();
                boolean ingestedAsTabular = false;
                boolean metadataExtracted = false;
                datasetService.generateFileSystemName(dFile);
                if (ingestService.ingestableAsTabular(dFile)) {
                    // NOTE(review): tabular ingest runs synchronously here,
                    // inside the save request.
                    try {
                        ingestedAsTabular = ingestService.ingestAsTabular(tempFileLocation, dFile);
                        dFile.setContentType("text/tab-separated-values");
                    } catch (IOException iex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, iex);
                        ingestedAsTabular = false;
                    }
                } else if (ingestService.fileMetadataExtractable(dFile)) {
                    try {
                        dFile.setContentType("application/fits");
                        metadataExtracted = ingestService.extractIndexableMetadata(tempFileLocation, dFile, editVersion);
                    } catch (IOException mex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + dFile.getName(), mex);
                    }
                    if (metadataExtracted) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + dFile.getName());
                    } else {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + dFile.getName());
                    }
                }
                /* Try to save the file in its permanent location:
                 * (unless it was already ingested and saved as tabular data)
                 */
                if (!ingestedAsTabular) {
                    try {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Will attempt to save the file as: " + dFile.getFileSystemLocation().toString());
                        Files.copy(uFile.getInputstream(), dFile.getFileSystemLocation(), StandardCopyOption.REPLACE_EXISTING);
                        MD5Checksum md5Checksum = new MD5Checksum();
                        try {
                            dFile.setmd5(md5Checksum.CalculateMD5(dFile.getFileSystemLocation().toString()));
                        } catch (Exception md5ex) {
                            Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + dFile.getName());
                        }
                    } catch (IOException ioex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to save the file " + dFile.getFileSystemLocation());
                    }
                }
                // Any necessary post-processing:
                ingestService.performPostProcessingTasks(dFile);
            }
        }
    }
    try {
        dataset = datasetService.save(dataset);
    } catch (EJBException ex) {
        // Unwrap the cause chain so the user-facing message carries the
        // underlying persistence error, not just the EJB wrapper.
        StringBuilder error = new StringBuilder();
        error.append(ex + " ");
        error.append(ex.getMessage() + " ");
        Throwable cause = ex;
        while (cause.getCause() != null) {
            cause = cause.getCause();
            error.append(cause + " ");
            error.append(cause.getMessage() + " ");
        }
        logger.info("Couldn't save dataset: " + error.toString());
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + error.toString()));
        return null;
    }
    newFiles.clear();
    editMode = null;
    return "/dataset.xhtml?id=" + dataset.getId() + "&faces-redirect=true";
}
#location 87
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * Persists the dataset: sets owner and (currently hard-coded) identifier
 * parts, stamps creator/create-date on a brand-new dataset, re-links
 * DataFiles onto the first version's FileMetadatas, copies uploaded files
 * into the dataset directory (marking tabular files as scheduled for
 * ingest and extracting FITS metadata where applicable), saves via the
 * service, and finally queues scheduled ingest jobs asynchronously.
 *
 * @return the dataset-page redirect string, or null on EJBException.
 */
public String save() {
    dataset.setOwner(dataverseService.find(ownerId));
    //TODO get real application-wide protocol/authority
    dataset.setProtocol("doi");
    dataset.setAuthority("10.5072/FK2");
    dataset.setIdentifier("5555");
    //TODO add replication for logic if necessary
    if (replicationFor){
        //dataset.getVersions().get(0).getDatasetFields().
    }
    //Todo pre populate deposit date
    //If new ds get create date user
    if (dataset.getId() == null){
        dataset.setCreator(session.getUser());
        dataset.setCreateDate(new Timestamp(new Date().getTime()));
    }
    // Re-attach the DataFile objects from the edit version onto the
    // matching FileMetadatas of the first (persisted) version.
    if (!(dataset.getVersions().get(0).getFileMetadatas() == null) && !dataset.getVersions().get(0).getFileMetadatas().isEmpty()) {
        int fmdIndex = 0;
        for (FileMetadata fmd : dataset.getVersions().get(0).getFileMetadatas()) {
            for (FileMetadata fmdTest : editVersion.getFileMetadatas()) {
                if (fmd.equals(fmdTest)) {
                    dataset.getVersions().get(0).getFileMetadatas().get(fmdIndex).setDataFile(fmdTest.getDataFile());
                }
            }
            fmdIndex++;
        }
    }
    /*
     * Save and/or ingest files, if there are any:
     */
    if (newFiles != null && newFiles.size() > 0) {
        try {
            if (dataset.getFileSystemDirectory() != null && !Files.exists(dataset.getFileSystemDirectory())) {
                /* Note that "createDirectories()" must be used - not
                 * "createDirectory()", to make sure all the parent
                 * directories that may not yet exist are created as well.
                 */
                Files.createDirectories(dataset.getFileSystemDirectory());
            }
        } catch (IOException dirEx) {
            Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Failed to create study directory " + dataset.getFileSystemDirectory().toString());
        }
        if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
            for (UploadedFile uFile : newFiles.keySet()) {
                DataFile dFile = newFiles.get(uFile);
                String tempFileLocation = getFilesTempDirectory() + "/" + dFile.getFileSystemName();
                boolean ingestedAsTabular = false;
                boolean metadataExtracted = false;
                datasetService.generateFileSystemName(dFile);
                if (ingestService.ingestableAsTabular(dFile)) {
                    /*
                    try {
                        ingestedAsTabular = ingestService.ingestAsTabular(tempFileLocation, dFile);
                        dFile.setContentType("text/tab-separated-values");
                    } catch (IOException iex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, iex);
                        ingestedAsTabular = false;
                    }
                    */
                    // Mark for asynchronous ingest; the job is queued after
                    // the dataset has been saved, at the end of this method.
                    dFile.SetIngestScheduled();
                } else if (ingestService.fileMetadataExtractable(dFile)) {
                    try {
                        dFile.setContentType("application/fits");
                        metadataExtracted = ingestService.extractIndexableMetadata(tempFileLocation, dFile, editVersion);
                    } catch (IOException mex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + dFile.getName(), mex);
                    }
                    if (metadataExtracted) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + dFile.getName());
                    } else {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + dFile.getName());
                    }
                }
                /* Try to save the file in its permanent location:
                 * (unless it was already ingested and saved as tabular data)
                 */
                if (!ingestedAsTabular) {
                    try {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Will attempt to save the file as: " + dFile.getFileSystemLocation().toString());
                        Files.copy(uFile.getInputstream(), dFile.getFileSystemLocation(), StandardCopyOption.REPLACE_EXISTING);
                        MD5Checksum md5Checksum = new MD5Checksum();
                        try {
                            dFile.setmd5(md5Checksum.CalculateMD5(dFile.getFileSystemLocation().toString()));
                        } catch (Exception md5ex) {
                            Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + dFile.getName());
                        }
                    } catch (IOException ioex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to save the file " + dFile.getFileSystemLocation());
                    }
                }
                // Any necessary post-processing:
                ingestService.performPostProcessingTasks(dFile);
            }
        }
    }
    try {
        dataset = datasetService.save(dataset);
    } catch (EJBException ex) {
        // Unwrap the cause chain so the user-facing message carries the
        // underlying persistence error, not just the EJB wrapper.
        StringBuilder error = new StringBuilder();
        error.append(ex + " ");
        error.append(ex.getMessage() + " ");
        Throwable cause = ex;
        while (cause.getCause() != null) {
            cause = cause.getCause();
            error.append(cause + " ");
            error.append(cause.getMessage() + " ");
        }
        logger.info("Couldn't save dataset: " + error.toString());
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + error.toString()));
        return null;
    }
    newFiles.clear();
    editMode = null;
    // Queue the ingest jobs for asynchronous execution:
    for (DataFile dataFile : dataset.getFiles()) {
        if (dataFile.isIngestScheduled()) {
            dataFile.SetIngestInProgress();
            Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Attempting to queue the file " + dataFile.getName() + " for ingest.");
            ingestService.asyncIngestAsTabular(dataFile);
        }
    }
    return "/dataset.xhtml?id=" + dataset.getId() + "&faces-redirect=true";
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Path("dsPreview/{datasetId}")
@GET
@Produces({ "image/png" })
public InputStream dsPreview(@PathParam("datasetId") Long datasetId, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
Dataset dataset = datasetService.find(datasetId);
String imageThumbFileName = null;
List<DataFile> dataFiles = dataset.getFiles();
for (DataFile dataFile : dataFiles) {
if (dataFile.isImage()) {
imageThumbFileName = ImageThumbConverter.generateImageThumb(dataFile.getFileSystemLocation().toString(), 48);
break;
}
}
if (imageThumbFileName == null) {
imageThumbFileName = getWebappImageResource (DEFAULT_DATASET_ICON);
}
if (imageThumbFileName != null) {
InputStream in;
try {
in = new FileInputStream(imageThumbFileName);
} catch (Exception ex) {
// We don't particularly care what the reason why we have
// failed to access the file was.
// From the point of view of the download subsystem, it's a
// binary operation -- it's either successfull or not.
// If we can't access it for whatever reason, we are saying
// it's 404 NOT FOUND in our HTTP response.
return null;
}
return in;
}
return null;
}
#location 14
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Path("dsPreview/{datasetId}")
@GET
@Produces({ "image/png" })
public InputStream dsPreview(@PathParam("datasetId") Long datasetId, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) /*throws NotFoundException, ServiceUnavailableException, PermissionDeniedException, AuthorizationRequiredException*/ {
Dataset dataset = datasetService.find(datasetId);
if (dataset != null) {
logger.warning("Preview: dataset service could not locate a Dataset object for id "+datasetId+"!");
return null;
}
String imageThumbFileName = null;
List<DataFile> dataFiles = dataset.getFiles();
for (DataFile dataFile : dataFiles) {
if (dataFile.isImage()) {
imageThumbFileName = ImageThumbConverter.generateImageThumb(dataFile.getFileSystemLocation().toString(), 48);
break;
}
}
if (imageThumbFileName == null) {
imageThumbFileName = getWebappImageResource (DEFAULT_DATASET_ICON);
}
if (imageThumbFileName != null) {
InputStream in;
try {
in = new FileInputStream(imageThumbFileName);
} catch (Exception ex) {
// We don't particularly care what the reason why we have
// failed to access the file was.
// From the point of view of the download subsystem, it's a
// binary operation -- it's either successfull or not.
// If we can't access it for whatever reason, we are saying
// it's 404 NOT FOUND in our HTTP response.
return null;
}
return in;
}
return null;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Builds the SWORD statement for the dataset addressed by {@code editUri}:
 * an Atom feed carrying the latest version's state plus one resource entry
 * per data file.
 *
 * NOTE(review): instead of resolving the global id parsed from the URL, this
 * version looks up a hard-coded database id (10), so every request is served
 * from that one dataset; the try/catch around study.getId() is what turns a
 * failed lookup (null study) into a SwordError.
 */
@Override
public Statement getStatement(String editUri, Map<String, String> map, AuthCredentials authCredentials, SwordConfiguration swordConfiguration) throws SwordServerException, SwordError, SwordAuthException {
    this.swordConfiguration = (SwordConfigurationImpl) swordConfiguration;
    // NOTE(review): reassigning the parameter below has no lasting effect.
    swordConfiguration = (SwordConfigurationImpl) swordConfiguration;
    if (authCredentials == null) {
        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "auth credentials are null");
    }
    if (swordAuth == null) {
        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "swordAuth is null");
    }
    DataverseUser vdcUser = swordAuth.auth(authCredentials);
    urlManager.processUrl(editUri);
    String globalId = urlManager.getTargetIdentifier();
    if (urlManager.getTargetType().equals("study") && globalId != null) {
        logger.fine("request for sword statement by user " + vdcUser.getUserName());
        // Study study = null;
        /**
         * @todo don't hard code this, obviously. In DVN 3.x we had a method
         * for editStudyService.getStudyByGlobalId(globalId)
         */
        // Study study = editStudyService.getStudyByGlobalId(globalId);
        long databaseIdForRoastingAtHomeDataset = 10;
        Dataset study = datasetService.find(databaseIdForRoastingAtHomeDataset);
        // try {
        //     study = studyService.getStudyByGlobalId(globalId);
        // } catch (EJBException ex) {
        //     throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on global id (" + globalId + ") in URL: " + editUri);
        // }
        Long studyId;
        try {
            studyId = study.getId();
        } catch (NullPointerException ex) {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "couldn't find study with global ID of " + globalId);
        }
        Dataverse dvThatOwnsStudy = study.getOwner();
        if (swordAuth.hasAccessToModifyDataverse(vdcUser, dvThatOwnsStudy)) {
            /**
             * @todo getIdentifier is equivalent to getGlobalId, right?
             */
            // String feedUri = urlManager.getHostnamePlusBaseUrlPath(editUri) + "/edit/study/" + study.getGlobalId();
            String feedUri = urlManager.getHostnamePlusBaseUrlPath(editUri) + "/edit/study/" + study.getIdentifier();
            /**
             * @todo is it safe to use this?
             */
            String author = study.getLatestVersion().getAuthorsStr();
            String title = study.getLatestVersion().getTitle();
            Date lastUpdated = study.getLatestVersion().getLastUpdateTime();
            if (lastUpdated == null) {
                /**
                 * @todo why is this date null?
                 */
                logger.info("why is lastUpdated null?");
                lastUpdated = new Date();
            }
            AtomDate atomDate = new AtomDate(lastUpdated);
            // AtomDate atomDate = new AtomDate(study.getLatestVersion().getLastUpdateTime());
            String datedUpdated = atomDate.toString();
            Statement statement = new AtomStatement(feedUri, author, title, datedUpdated);
            Map<String, String> states = new HashMap<String, String>();
            states.put("latestVersionState", study.getLatestVersion().getVersionState().toString());
            /**
             * @todo DVN 3.x had a studyLock. What's the equivalent in 4.0?
             */
            // StudyLock lock = study.getStudyLock();
            // if (lock != null) {
            //     states.put("locked", "true");
            //     states.put("lockedDetail", lock.getDetail());
            //     states.put("lockedStartTime", lock.getStartTime().toString());
            // } else {
            //     states.put("locked", "false");
            // }
            statement.setStates(states);
            List<FileMetadata> fileMetadatas = study.getLatestVersion().getFileMetadatas();
            for (FileMetadata fileMetadata : fileMetadatas) {
                DataFile studyFile = fileMetadata.getDataFile();
                // We are exposing the filename for informational purposes. The file id is what you
                // actually operate on to delete a file, etc.
                //
                // Replace spaces to avoid IRISyntaxException
                String fileNameFinal = fileMetadata.getLabel().replace(' ', '_');
                String studyFileUrlString = urlManager.getHostnamePlusBaseUrlPath(editUri) + "/edit-media/file/" + studyFile.getId() + "/" + fileNameFinal;
                IRI studyFileUrl;
                try {
                    studyFileUrl = new IRI(studyFileUrlString);
                } catch (IRISyntaxException ex) {
                    throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Invalid URL for file ( " + studyFileUrlString + " ) resulted in " + ex.getMessage());
                }
                ResourcePart resourcePart = new ResourcePart(studyFileUrl.toString());
                /**
                 * @todo get this working. show the actual file type
                 */
                // resourcePart.setMediaType(studyFile.getOriginalFileFormat());
                resourcePart.setMediaType("application/octet-stream");
                /**
                 * @todo: Why are properties set on a ResourcePart not
                 * exposed when you GET a Statement?
                 */
                // Map<String, String> properties = new HashMap<String, String>();
                // properties.put("filename", studyFile.getFileName());
                // properties.put("category", studyFile.getLatestCategory());
                // properties.put("originalFileType", studyFile.getOriginalFileType());
                // properties.put("id", studyFile.getId().toString());
                // properties.put("UNF", studyFile.getUnf());
                // resourcePart.setProperties(properties);
                statement.addResource(resourcePart);
            }
            return statement;
        } else {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "user " + vdcUser.getUserName() + " is not authorized to view study with global ID " + globalId);
        }
    } else {
        throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not determine target type or identifier from URL: " + editUri);
    }
}
#location 40
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Override
public Statement getStatement(String editUri, Map<String, String> map, AuthCredentials authCredentials, SwordConfiguration swordConfiguration) throws SwordServerException, SwordError, SwordAuthException {
this.swordConfiguration = (SwordConfigurationImpl) swordConfiguration;
swordConfiguration = (SwordConfigurationImpl) swordConfiguration;
if (authCredentials == null) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "auth credentials are null");
}
if (swordAuth == null) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "swordAuth is null");
}
DataverseUser vdcUser = swordAuth.auth(authCredentials);
urlManager.processUrl(editUri);
String globalId = urlManager.getTargetIdentifier();
if (urlManager.getTargetType().equals("study") && globalId != null) {
logger.fine("request for sword statement by user " + vdcUser.getUserName());
Dataset study = datasetService.findByGlobalId(globalId);
// try {
// study = studyService.getStudyByGlobalId(globalId);
// } catch (EJBException ex) {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study based on global id (" + globalId + ") in URL: " + editUri);
// }
Long studyId;
try {
studyId = study.getId();
} catch (NullPointerException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "couldn't find study with global ID of " + globalId);
}
Dataverse dvThatOwnsStudy = study.getOwner();
if (swordAuth.hasAccessToModifyDataverse(vdcUser, dvThatOwnsStudy)) {
String feedUri = urlManager.getHostnamePlusBaseUrlPath(editUri) + "/edit/study/" + study.getGlobalId();
/**
* @todo is it safe to use this?
*/
String author = study.getLatestVersion().getAuthorsStr();
String title = study.getLatestVersion().getTitle();
Date lastUpdated = study.getLatestVersion().getLastUpdateTime();
if (lastUpdated == null) {
/**
* @todo why is this date null?
*/
logger.info("why is lastUpdated null?");
lastUpdated = new Date();
}
AtomDate atomDate = new AtomDate(lastUpdated);
// AtomDate atomDate = new AtomDate(study.getLatestVersion().getLastUpdateTime());
String datedUpdated = atomDate.toString();
Statement statement = new AtomStatement(feedUri, author, title, datedUpdated);
Map<String, String> states = new HashMap<String, String>();
states.put("latestVersionState", study.getLatestVersion().getVersionState().toString());
/**
* @todo DVN 3.x had a studyLock. What's the equivalent in 4.0?
*/
// StudyLock lock = study.getStudyLock();
// if (lock != null) {
// states.put("locked", "true");
// states.put("lockedDetail", lock.getDetail());
// states.put("lockedStartTime", lock.getStartTime().toString());
// } else {
// states.put("locked", "false");
// }
statement.setStates(states);
List<FileMetadata> fileMetadatas = study.getLatestVersion().getFileMetadatas();
for (FileMetadata fileMetadata : fileMetadatas) {
DataFile studyFile = fileMetadata.getDataFile();
// We are exposing the filename for informational purposes. The file id is what you
// actually operate on to delete a file, etc.
//
// Replace spaces to avoid IRISyntaxException
String fileNameFinal = fileMetadata.getLabel().replace(' ', '_');
String studyFileUrlString = urlManager.getHostnamePlusBaseUrlPath(editUri) + "/edit-media/file/" + studyFile.getId() + "/" + fileNameFinal;
IRI studyFileUrl;
try {
studyFileUrl = new IRI(studyFileUrlString);
} catch (IRISyntaxException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Invalid URL for file ( " + studyFileUrlString + " ) resulted in " + ex.getMessage());
}
ResourcePart resourcePart = new ResourcePart(studyFileUrl.toString());
/**
* @todo get this working. show the actual file type
*/
// resourcePart.setMediaType(studyFile.getOriginalFileFormat());
resourcePart.setMediaType("application/octet-stream");
/**
* @todo: Why are properties set on a ResourcePart not
* exposed when you GET a Statement?
*/
// Map<String, String> properties = new HashMap<String, String>();
// properties.put("filename", studyFile.getFileName());
// properties.put("category", studyFile.getLatestCategory());
// properties.put("originalFileType", studyFile.getOriginalFileType());
// properties.put("id", studyFile.getId().toString());
// properties.put("UNF", studyFile.getUnf());
// resourcePart.setProperties(properties);
statement.addResource(resourcePart);
}
return statement;
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "user " + vdcUser.getUserName() + " is not authorized to view study with global ID " + globalId);
}
} else {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not determine target type or identifier from URL: " + editUri);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Persists the dataset (create or update), first staging any newly uploaded
 * files: copying them from the temp directory into the dataset's filesystem
 * directory, computing MD5 checksums, scheduling tabular files for async
 * ingest and extracting FITS metadata where applicable.
 *
 * NOTE(review): newFiles is null-checked before the upload loop but
 * dereferenced unconditionally at newFiles.clear() near the end -- a
 * potential NullPointerException when it is null.
 */
public String save() {
    dataset.setOwner(dataverseService.find(ownerId));
    //TODO get real application-wide protocol/authority
    dataset.setProtocol("doi");
    dataset.setAuthority("10.5072/FK2");
    dataset.setIdentifier("5555");
    //TODO update title in page itself
    if (replicationFor) {
        updateTitle();
    }
    /*
     * Save and/or ingest files, if there are any:
     */
    if (newFiles != null && newFiles.size() > 0) {
        try {
            if (dataset.getFileSystemDirectory() != null && !Files.exists(dataset.getFileSystemDirectory())) {
                /* Note that "createDirectories()" must be used - not
                 * "createDirectory()", to make sure all the parent
                 * directories that may not yet exist are created as well.
                 */
                Files.createDirectories(dataset.getFileSystemDirectory());
            }
        } catch (IOException dirEx) {
            Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Failed to create study directory " + dataset.getFileSystemDirectory().toString());
        }
        if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
            for (DataFile dFile : newFiles) {
                String tempFileLocation = getFilesTempDirectory() + "/" + dFile.getFileSystemName();
                // These are all brand new files, so they should all have
                // one filemetadata total. You do NOT want to use
                // getLatestFilemetadata() here - because it relies on
                // comparing the object IDs of the corresponding datasetversions...
                // Which may not have been persisted yet.
                // -- L.A. 4.0 beta.
                FileMetadata fileMetadata = dFile.getFileMetadatas().get(0);
                String fileName = fileMetadata.getLabel();
                //boolean ingestedAsTabular = false;
                boolean metadataExtracted = false;
                datasetService.generateFileSystemName(dFile);
                if (ingestService.ingestableAsTabular(dFile)) {
                    /*
                     * Note that we don't try to ingest the file right away -
                     * instead we mark it as "scheduled for ingest", then at
                     * the end of the save process it will be queued for async.
                     * ingest in the background. In the meantime, the file
                     * will be ingested as a regular, non-tabular file, and
                     * appear as such to the user, until the ingest job is
                     * finished with the Ingest Service.
                     */
                    dFile.SetIngestScheduled();
                } else if (ingestService.fileMetadataExtractable(dFile)) {
                    try {
                        dFile.setContentType("application/fits");
                        metadataExtracted = ingestService.extractIndexableMetadata(tempFileLocation, dFile, editVersion);
                    } catch (IOException mex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + fileName, mex);
                    }
                    if (metadataExtracted) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + fileName);
                    } else {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + fileName);
                    }
                }
                // Try to save the file in its permanent location:
                //if (!ingestedAsTabular) {
                try {
                    Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Will attempt to save the file as: " + dFile.getFileSystemLocation().toString());
                    Files.copy(new FileInputStream(new File(tempFileLocation)), dFile.getFileSystemLocation(), StandardCopyOption.REPLACE_EXISTING);
                    MD5Checksum md5Checksum = new MD5Checksum();
                    try {
                        dFile.setmd5(md5Checksum.CalculateMD5(dFile.getFileSystemLocation().toString()));
                    } catch (Exception md5ex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + fileName);
                    }
                } catch (IOException ioex) {
                    Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to save the file " + dFile.getFileSystemLocation());
                }
                //}
                // Any necessary post-processing:
                ingestService.performPostProcessingTasks(dFile);
            }
        }
    }
    Command<Dataset> cmd;
    try {
        if (editMode == EditMode.CREATE) {
            cmd = new CreateDatasetCommand(dataset, session.getUser());
        } else {
            cmd = new UpdateDatasetCommand(dataset, session.getUser());
        }
        dataset = commandEngine.submit(cmd);
    } catch (EJBException ex) {
        StringBuilder error = new StringBuilder();
        error.append(ex + " ");
        error.append(ex.getMessage() + " ");
        Throwable cause = ex;
        while (cause.getCause() != null) {
            cause = cause.getCause();
            error.append(cause + " ");
            error.append(cause.getMessage() + " ");
        }
        logger.info("Couldn't save dataset: " + error.toString());
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + error.toString()));
        return null;
    } catch (CommandException ex) {
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + ex.toString()));
        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, ex);
    }
    // NOTE(review): unconditional dereference; NPE if newFiles is null here.
    newFiles.clear();
    editMode = null;
    // Queue the ingest jobs for asynchronous execution:
    for (DataFile dataFile : dataset.getFiles()) {
        if (dataFile.isIngestScheduled()) {
            dataFile.SetIngestInProgress();
            Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Attempting to queue the file " + dataFile.getFileMetadata().getLabel() + " for ingest.");
            ingestService.asyncIngestAsTabular(dataFile);
        }
    }
    return "/dataset.xhtml?id=" + dataset.getId() + "&faces-redirect=true";
}
#location 130
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * Persists the dataset (create or update), first staging any newly uploaded
 * files: copying them from the temp directory into the dataset's filesystem
 * directory, computing MD5 checksums, scheduling tabular files for async
 * ingest and extracting FITS metadata where applicable.
 *
 * NOTE(review): newFiles is null-checked before the upload loop but
 * dereferenced unconditionally at newFiles.clear() near the end -- a
 * potential NullPointerException when it is null.
 */
public String save() {
    dataset.setOwner(dataverseService.find(ownerId));
    //TODO get real application-wide protocol/authority
    dataset.setProtocol("doi");
    dataset.setAuthority("10.5072/FK2");
    dataset.setIdentifier("5555");
    /*
     * Save and/or ingest files, if there are any:
     */
    if (newFiles != null && newFiles.size() > 0) {
        try {
            if (dataset.getFileSystemDirectory() != null && !Files.exists(dataset.getFileSystemDirectory())) {
                /* Note that "createDirectories()" must be used - not
                 * "createDirectory()", to make sure all the parent
                 * directories that may not yet exist are created as well.
                 */
                Files.createDirectories(dataset.getFileSystemDirectory());
            }
        } catch (IOException dirEx) {
            Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Failed to create study directory " + dataset.getFileSystemDirectory().toString());
        }
        if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
            for (DataFile dFile : newFiles) {
                String tempFileLocation = getFilesTempDirectory() + "/" + dFile.getFileSystemName();
                // These are all brand new files, so they should all have
                // one filemetadata total. You do NOT want to use
                // getLatestFilemetadata() here - because it relies on
                // comparing the object IDs of the corresponding datasetversions...
                // Which may not have been persisted yet.
                // -- L.A. 4.0 beta.
                FileMetadata fileMetadata = dFile.getFileMetadatas().get(0);
                String fileName = fileMetadata.getLabel();
                //boolean ingestedAsTabular = false;
                boolean metadataExtracted = false;
                datasetService.generateFileSystemName(dFile);
                if (ingestService.ingestableAsTabular(dFile)) {
                    /*
                     * Note that we don't try to ingest the file right away -
                     * instead we mark it as "scheduled for ingest", then at
                     * the end of the save process it will be queued for async.
                     * ingest in the background. In the meantime, the file
                     * will be ingested as a regular, non-tabular file, and
                     * appear as such to the user, until the ingest job is
                     * finished with the Ingest Service.
                     */
                    dFile.SetIngestScheduled();
                } else if (ingestService.fileMetadataExtractable(dFile)) {
                    try {
                        dFile.setContentType("application/fits");
                        metadataExtracted = ingestService.extractIndexableMetadata(tempFileLocation, dFile, editVersion);
                    } catch (IOException mex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + fileName, mex);
                    }
                    if (metadataExtracted) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + fileName);
                    } else {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + fileName);
                    }
                }
                // Try to save the file in its permanent location:
                //if (!ingestedAsTabular) {
                try {
                    Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Will attempt to save the file as: " + dFile.getFileSystemLocation().toString());
                    Files.copy(new FileInputStream(new File(tempFileLocation)), dFile.getFileSystemLocation(), StandardCopyOption.REPLACE_EXISTING);
                    MD5Checksum md5Checksum = new MD5Checksum();
                    try {
                        dFile.setmd5(md5Checksum.CalculateMD5(dFile.getFileSystemLocation().toString()));
                    } catch (Exception md5ex) {
                        Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + fileName);
                    }
                } catch (IOException ioex) {
                    Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to save the file " + dFile.getFileSystemLocation());
                }
                //}
                // Any necessary post-processing:
                ingestService.performPostProcessingTasks(dFile);
            }
        }
    }
    Command<Dataset> cmd;
    try {
        if (editMode == EditMode.CREATE) {
            cmd = new CreateDatasetCommand(dataset, session.getUser());
        } else {
            cmd = new UpdateDatasetCommand(dataset, session.getUser());
        }
        dataset = commandEngine.submit(cmd);
    } catch (EJBException ex) {
        StringBuilder error = new StringBuilder();
        error.append(ex + " ");
        error.append(ex.getMessage() + " ");
        Throwable cause = ex;
        while (cause.getCause() != null) {
            cause = cause.getCause();
            error.append(cause + " ");
            error.append(cause.getMessage() + " ");
        }
        logger.info("Couldn't save dataset: " + error.toString());
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + error.toString()));
        return null;
    } catch (CommandException ex) {
        FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Dataset Save Failed", " - " + ex.toString()));
        Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, null, ex);
    }
    // NOTE(review): unconditional dereference; NPE if newFiles is null here.
    newFiles.clear();
    editMode = null;
    // Queue the ingest jobs for asynchronous execution:
    for (DataFile dataFile : dataset.getFiles()) {
        if (dataFile.isIngestScheduled()) {
            dataFile.SetIngestInProgress();
            Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Attempting to queue the file " + dataFile.getFileMetadata().getLabel() + " for ingest.");
            ingestService.asyncIngestAsTabular(dataFile);
        }
    }
    return "/dataset.xhtml?id=" + dataset.getId() + "&faces-redirect=true";
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Creates a new DRAFT version seeded from the latest version: dataset
 * fields are copied (when present) and each file metadata record is cloned
 * and re-pointed at the new version. The draft is inserted at the head of
 * the version list so it is persisted together with this dataset.
 */
private DatasetVersion createNewDatasetVersion() {
    DatasetVersion dsv = new DatasetVersion();
    dsv.setVersionState(DatasetVersion.VersionState.DRAFT);
    DatasetVersion latestVersion = getLatestVersion();
    //if the latest version has values get them copied over
    if (latestVersion.getDatasetFields() != null && !latestVersion.getDatasetFields().isEmpty()) {
        dsv.setDatasetFields(dsv.copyDatasetFields(latestVersion.getDatasetFields()));
    }
    dsv.setFileMetadatas(new ArrayList());
    for (FileMetadata fm : latestVersion.getFileMetadatas()) {
        FileMetadata newFm = new FileMetadata();
        newFm.setCategory(fm.getCategory());
        newFm.setDescription(fm.getDescription());
        newFm.setLabel(fm.getLabel());
        newFm.setDataFile(fm.getDataFile());
        newFm.setDatasetVersion(dsv);
        dsv.getFileMetadatas().add(newFm);
    }
    // NOTE(review): flagged as a null dereference -- presumably
    // getVersionNumber() returns a boxed value that can be null, so the
    // "+ 1" would auto-unbox and NPE; the later revision of this method
    // drops this line entirely. TODO confirm against DatasetVersion.
    dsv.setVersionNumber(latestVersion.getVersionNumber() + 1);
    // I'm adding the version to the list so it will be persisted when
    // the study object is persisted.
    getVersions().add(0, dsv);
    dsv.setDataset(this);
    return dsv;
}
#location 12
#vulnerability type NULL_DEREFERENCE
|
#fixed code
/**
 * Builds a fresh DRAFT version seeded from the latest existing version and
 * registers it with this dataset.
 *
 * Dataset fields are copied over when the source version has any; every
 * file-metadata record is cloned and re-pointed at the draft so the draft
 * owns its own copies. The draft is inserted at the head of the version
 * list so it gets persisted together with the dataset.
 */
private DatasetVersion createNewDatasetVersion() {
    DatasetVersion draft = new DatasetVersion();
    draft.setVersionState(DatasetVersion.VersionState.DRAFT);

    DatasetVersion source = getLatestVersion();

    // Carry the metadata fields over, if the source version has any.
    if (!(source.getDatasetFields() == null || source.getDatasetFields().isEmpty())) {
        draft.setDatasetFields(draft.copyDatasetFields(source.getDatasetFields()));
    }

    // Clone each file-metadata record into the draft.
    draft.setFileMetadatas(new ArrayList());
    for (FileMetadata sourceFm : source.getFileMetadatas()) {
        FileMetadata copy = new FileMetadata();
        copy.setCategory(sourceFm.getCategory());
        copy.setDescription(sourceFm.getDescription());
        copy.setLabel(sourceFm.getLabel());
        copy.setDataFile(sourceFm.getDataFile());
        copy.setDatasetVersion(draft);
        draft.getFileMetadatas().add(copy);
    }

    // Register the draft at the head of the version list so it is persisted
    // when the dataset is persisted, and wire up the back-reference.
    getVersions().add(0, draft);
    draft.setDataset(this);
    return draft;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
/**
 * Handles a SWORD media-resource deposit against the dataset addressed by
 * {@code uri}: validates the packaging (simple zip), stages the payload in
 * a per-dataset import directory, attaches a DataFile to the dataset's edit
 * version via UpdateDatasetCommand, then unzips the deposit entry-by-entry
 * into the import directory (the DVN 3.x file-registration code is still
 * commented out).
 *
 * NOTE(review): this version looks up a hard-coded database id (10) instead
 * of the global id parsed from the URL.
 */
DepositReceipt replaceOrAddFiles(String uri, Deposit deposit, AuthCredentials authCredentials, SwordConfiguration swordConfiguration, boolean shouldReplace) throws SwordError, SwordAuthException, SwordServerException {
    DataverseUser vdcUser = swordAuth.auth(authCredentials);
    urlManager.processUrl(uri);
    String globalId = urlManager.getTargetIdentifier();
    if (urlManager.getTargetType().equals("study") && globalId != null) {
        // EditStudyService editStudyService;
        Context ctx;
        try {
            ctx = new InitialContext();
            // editStudyService = (EditStudyService) ctx.lookup("java:comp/env/editStudy");
        } catch (NamingException ex) {
            logger.info("problem looking up editStudyService");
            throw new SwordServerException("problem looking up editStudyService");
        }
        logger.fine("looking up study with globalId " + globalId);
        /**
         * @todo don't hard code this, obviously. In DVN 3.x we had a method
         * for editStudyService.getStudyByGlobalId(globalId)
         */
        // Study study = editStudyService.getStudyByGlobalId(globalId);
        long databaseIdForRoastingAtHomeDataset = 10;
        Dataset study = datasetService.find(databaseIdForRoastingAtHomeDataset);
        if (study == null) {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study with global ID of " + globalId);
        }
        // StudyLock studyLock = study.getStudyLock();
        // if (studyLock != null) {
        //     String message = Util.getStudyLockMessage(studyLock, study.getGlobalId());
        //     throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, message);
        // }
        Long studyId;
        try {
            studyId = study.getId();
        } catch (NullPointerException ex) {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "couldn't find study with global ID of " + globalId);
        }
        Dataverse dvThatOwnsStudy = study.getOwner();
        if (swordAuth.hasAccessToModifyDataverse(vdcUser, dvThatOwnsStudy)) {
            // editStudyService.setStudyVersion(studyId);
            // editStudyService.save(dvThatOwnsStudy.getId(), vdcUser.getId());
            //
            // EditStudyFilesService editStudyFilesService;
            // try {
            //     editStudyFilesService = (EditStudyFilesService) ctx.lookup("java:comp/env/editStudyFiles");
            // } catch (NamingException ex) {
            //     logger.info("problem looking up editStudyFilesService");
            //     throw new SwordServerException("problem looking up editStudyFilesService");
            // }
            // editStudyFilesService.setStudyVersionByGlobalId(globalId);
            // List studyFileEditBeans = editStudyFilesService.getCurrentFiles();
            List<String> exisitingFilenames = new ArrayList<String>();
            // for (Iterator it = studyFileEditBeans.iterator(); it.hasNext();) {
            // StudyFileEditBean studyFileEditBean = (StudyFileEditBean) it.next();
            if (shouldReplace) {
                // studyFileEditBean.setDeleteFlag(true);
                // logger.fine("marked for deletion: " + studyFileEditBean.getStudyFile().getFileName());
            } else {
                // String filename = studyFileEditBean.getStudyFile().getFileName();
                // exisitingFilenames.add(filename);
            }
        }
        // NOTE(review): the brace above closes the hasAccessToModifyDataverse
        // check (the for-loop it once belonged to is commented out), so all
        // of the deposit processing below runs even when that access check
        // fails.
        // editStudyFilesService.save(dvThatOwnsStudy.getId(), vdcUser.getId());
        if (!deposit.getPackaging().equals(UriRegistry.PACKAGE_SIMPLE_ZIP)) {
            throw new SwordError(UriRegistry.ERROR_CONTENT, 415, "Package format " + UriRegistry.PACKAGE_SIMPLE_ZIP + " is required but format specified in 'Packaging' HTTP header was " + deposit.getPackaging());
        }
        // Right now we are only supporting UriRegistry.PACKAGE_SIMPLE_ZIP but
        // in the future maybe we'll support other formats? Rdata files? Stata files?
        // That's what the uploadDir was going to be for, but for now it's commented out
        //
        String importDirString;
        File importDir;
        String swordTempDirString = swordConfiguration.getTempDirectory();
        if (swordTempDirString == null) {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not determine temp directory");
        } else {
            importDirString = swordTempDirString + File.separator + "import" + File.separator + study.getId().toString();
            importDir = new File(importDirString);
            if (!importDir.exists()) {
                if (!importDir.mkdirs()) {
                    logger.info("couldn't create directory: " + importDir.getAbsolutePath());
                    throw new SwordServerException("couldn't create import directory");
                }
            }
        }
        if (true) {
            DataFile dFile = new DataFile("application/octet-stream");
            dFile.setOwner(study);
            datasetService.generateFileSystemName(dFile);
            // if (true) {
            //     throw returnEarly("dataFile.getFileSystemName(): " + dFile.getFileSystemName());
            // }
            InputStream depositInputStream = deposit.getInputStream();
            try {
                Files.copy(depositInputStream, Paths.get(importDirString, dFile.getFileSystemName()), StandardCopyOption.REPLACE_EXISTING);
            } catch (IOException ex) {
                throw new SwordError("problem running Files.copy");
            }
            study.getFiles().add(dFile);
            DatasetVersion editVersion = study.getEditVersion();
            // boolean metadataExtracted = false;
            // try {
            //     metadataExtracted = ingestService.extractIndexableMetadata(importDir.getAbsolutePath() + File.separator + dFile.getFileSystemName(), dFile, editVersion);
            // } catch (IOException ex) {
            //     throw returnEarly("couldn't extract metadata" + ex);
            // }
            FileMetadata fmd = new FileMetadata();
            fmd.setDataFile(dFile);
            fmd.setLabel("myLabel");
            fmd.setDatasetVersion(editVersion);
            dFile.getFileMetadatas().add(fmd);
            Command<Dataset> cmd;
            cmd = new UpdateDatasetCommand(study, vdcUser);
            try {
                /**
                 * @todo at update time indexing is run but the file is not
                 * indexed. Why? Manually re-indexing later finds it. Fix
                 * this. Related to
                 * https://redmine.hmdc.harvard.edu/issues/3809 ?
                 */
                study = commandEngine.submit(cmd);
            } catch (CommandException ex) {
                throw returnEarly("couldn't update dataset");
            } catch (EJBException ex) {
                Throwable cause = ex;
                StringBuilder sb = new StringBuilder();
                sb.append(ex.getLocalizedMessage());
                while (cause.getCause() != null) {
                    cause = cause.getCause();
                    sb.append(cause + " ");
                    if (cause instanceof ConstraintViolationException) {
                        ConstraintViolationException constraintViolationException = (ConstraintViolationException) cause;
                        for (ConstraintViolation<?> violation : constraintViolationException.getConstraintViolations()) {
                            sb.append(" Invalid value: <<<").append(violation.getInvalidValue()).append(">>> for ")
                                    .append(violation.getPropertyPath()).append(" at ")
                                    .append(violation.getLeafBean()).append(" - ")
                                    .append(violation.getMessage());
                        }
                    }
                }
                throw returnEarly("EJBException: " + sb.toString());
            }
        }
        /**
         * @todo remove this comment after confirming that the upstream jar
         * now has our bugfix
         */
        // the first character of the filename is truncated with the official jar
        // so we use include the bug fix at https://github.com/IQSS/swordv2-java-server-library/commit/aeaef83
        // and use this jar: https://build.hmdc.harvard.edu:8443/job/swordv2-java-server-library-iqss/2/
        String uploadedZipFilename = deposit.getFilename();
        ZipInputStream ziStream = new ZipInputStream(deposit.getInputStream());
        ZipEntry zEntry;
        FileOutputStream tempOutStream = null;
        // List<StudyFileEditBean> fbList = new ArrayList<StudyFileEditBean>();
        try {
            // copied from createStudyFilesFromZip in AddFilesPage
            while ((zEntry = ziStream.getNextEntry()) != null) {
                // Note that some zip entries may be directories - we
                // simply skip them:
                if (!zEntry.isDirectory()) {
                    String fileEntryName = zEntry.getName();
                    logger.fine("file found: " + fileEntryName);
                    String dirName = null;
                    String finalFileName = null;
                    int ind = fileEntryName.lastIndexOf('/');
                    if (ind > -1) {
                        finalFileName = fileEntryName.substring(ind + 1);
                        if (ind > 0) {
                            dirName = fileEntryName.substring(0, ind);
                            dirName = dirName.replace('/', '-');
                        }
                    } else {
                        finalFileName = fileEntryName;
                    }
                    if (".DS_Store".equals(finalFileName)) {
                        continue;
                    }
                    // http://superuser.com/questions/212896/is-there-any-way-to-prevent-a-mac-from-creating-dot-underscore-files
                    if (finalFileName.startsWith("._")) {
                        continue;
                    }
                    File tempUploadedFile = new File(importDir, finalFileName);
                    tempOutStream = new FileOutputStream(tempUploadedFile);
                    byte[] dataBuffer = new byte[8192];
                    int i = 0;
                    while ((i = ziStream.read(dataBuffer)) > 0) {
                        tempOutStream.write(dataBuffer, 0, i);
                        tempOutStream.flush();
                    }
                    tempOutStream.close();
                    // We now have the unzipped file saved in the upload directory;
                    // zero-length dta files (for example) are skipped during zip
                    // upload in the GUI, so we'll skip them here as well
                    if (tempUploadedFile.length() != 0) {
                        if (true) {
                            // tempUploadedFile;
                            // UploadedFile uFile = tempUploadedFile;
                            // DataFile dataFile = new DataFile();
                            // throw new SwordError("let's create a file");
                        }
                        // StudyFileEditBean tempFileBean = new StudyFileEditBean(tempUploadedFile, studyService.generateFileSystemNameSequence(), study);
                        // tempFileBean.setSizeFormatted(tempUploadedFile.length());
                        String finalFileNameAfterReplace = finalFileName;
                        // if (tempFileBean.getStudyFile() instanceof TabularDataFile) {
                        // predict what the tabular file name will be
                        // finalFileNameAfterReplace = FileUtil.replaceExtension(finalFileName);
                        // }
                        // validateFileName(exisitingFilenames, finalFileNameAfterReplace, study);
                        // And, if this file was in a legit (non-null) directory,
                        // we'll use its name as the file category:
                        if (dirName != null) {
                            // tempFileBean.getFileMetadata().setCategory(dirName);
                        }
                        // fbList.add(tempFileBean);
                    }
                } else {
                    logger.fine("directory found: " + zEntry.getName());
                }
            }
        } catch (IOException ex) {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Problem with file: " + uploadedZipFilename);
        } finally {
            /**
             * @todo shouldn't we delete this uploadDir? Commented out in
             * DVN 3.x
             */
            // if (!uploadDir.delete()) {
            //     logger.fine("Unable to delete " + uploadDir.getAbsolutePath());
            // }
        }
        // if (fbList.size() > 0) {
        //     StudyFileServiceLocal studyFileService;
        //     try {
        //         studyFileService = (StudyFileServiceLocal) ctx.lookup("java:comp/env/studyFileService");
        //     } catch (NamingException ex) {
        //         logger.info("problem looking up studyFileService");
        //         throw new SwordServerException("problem looking up studyFileService");
        //     }
        try {
            // studyFileService.addFiles(study.getLatestVersion(), fbList, vdcUser);
        } catch (EJBException ex) {
            throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unable to add file(s) to study: " + ex.getMessage());
        }
        ReceiptGenerator receiptGenerator = new ReceiptGenerator();
        String baseUrl = urlManager.getHostnamePlusBaseUrlPath(uri);
        DepositReceipt depositReceipt = receiptGenerator.createReceipt(baseUrl, study);
        return depositReceipt;
    } else {
        // throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Problem with zip file '" + uploadedZipFilename + "'. Number of files unzipped: " + fbList.size());
    }
    // } else {
    //     throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "user " + vdcUser.getUserName() + " is not authorized to modify study with global ID " + study.getGlobalId());
    return new DepositReceipt(); // added just to get this to compile 2014-05-14
}
#location 39
#vulnerability type NULL_DEREFERENCE
|
#fixed code
DepositReceipt replaceOrAddFiles(String uri, Deposit deposit, AuthCredentials authCredentials, SwordConfiguration swordConfiguration, boolean shouldReplace) throws SwordError, SwordAuthException, SwordServerException {
DataverseUser vdcUser = swordAuth.auth(authCredentials);
urlManager.processUrl(uri);
String globalId = urlManager.getTargetIdentifier();
if (urlManager.getTargetType().equals("study") && globalId != null) {
// EditStudyService editStudyService;
Context ctx;
try {
ctx = new InitialContext();
// editStudyService = (EditStudyService) ctx.lookup("java:comp/env/editStudy");
} catch (NamingException ex) {
logger.info("problem looking up editStudyService");
throw new SwordServerException("problem looking up editStudyService");
}
logger.fine("looking up study with globalId " + globalId);
Dataset study = datasetService.findByGlobalId(globalId);
if (study == null) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not find study with global ID of " + globalId);
}
// StudyLock studyLock = study.getStudyLock();
// if (studyLock != null) {
// String message = Util.getStudyLockMessage(studyLock, study.getGlobalId());
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, message);
// }
Long studyId;
try {
studyId = study.getId();
} catch (NullPointerException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "couldn't find study with global ID of " + globalId);
}
Dataverse dvThatOwnsStudy = study.getOwner();
if (swordAuth.hasAccessToModifyDataverse(vdcUser, dvThatOwnsStudy)) {
// editStudyService.setStudyVersion(studyId);
// editStudyService.save(dvThatOwnsStudy.getId(), vdcUser.getId());
//
// EditStudyFilesService editStudyFilesService;
// try {
// editStudyFilesService = (EditStudyFilesService) ctx.lookup("java:comp/env/editStudyFiles");
// } catch (NamingException ex) {
// logger.info("problem looking up editStudyFilesService");
// throw new SwordServerException("problem looking up editStudyFilesService");
// }
// editStudyFilesService.setStudyVersionByGlobalId(globalId);
// List studyFileEditBeans = editStudyFilesService.getCurrentFiles();
List<String> exisitingFilenames = new ArrayList<String>();
// for (Iterator it = studyFileEditBeans.iterator(); it.hasNext();) {
// StudyFileEditBean studyFileEditBean = (StudyFileEditBean) it.next();
if (shouldReplace) {
// studyFileEditBean.setDeleteFlag(true);
// logger.fine("marked for deletion: " + studyFileEditBean.getStudyFile().getFileName());
} else {
// String filename = studyFileEditBean.getStudyFile().getFileName();
// exisitingFilenames.add(filename);
}
}
// editStudyFilesService.save(dvThatOwnsStudy.getId(), vdcUser.getId());
if (!deposit.getPackaging().equals(UriRegistry.PACKAGE_SIMPLE_ZIP)) {
throw new SwordError(UriRegistry.ERROR_CONTENT, 415, "Package format " + UriRegistry.PACKAGE_SIMPLE_ZIP + " is required but format specified in 'Packaging' HTTP header was " + deposit.getPackaging());
}
// Right now we are only supporting UriRegistry.PACKAGE_SIMPLE_ZIP but
// in the future maybe we'll support other formats? Rdata files? Stata files?
// That's what the uploadDir was going to be for, but for now it's commented out
//
String importDirString;
File importDir;
String swordTempDirString = swordConfiguration.getTempDirectory();
if (swordTempDirString == null) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Could not determine temp directory");
} else {
importDirString = swordTempDirString + File.separator + "import" + File.separator + study.getId().toString();
importDir = new File(importDirString);
if (!importDir.exists()) {
if (!importDir.mkdirs()) {
logger.info("couldn't create directory: " + importDir.getAbsolutePath());
throw new SwordServerException("couldn't create import directory");
}
}
}
if (true) {
DataFile dFile = new DataFile("application/octet-stream");
dFile.setOwner(study);
datasetService.generateFileSystemName(dFile);
// if (true) {
// throw returnEarly("dataFile.getFileSystemName(): " + dFile.getFileSystemName());
// }
InputStream depositInputStream = deposit.getInputStream();
try {
Files.copy(depositInputStream, Paths.get(importDirString, dFile.getFileSystemName()), StandardCopyOption.REPLACE_EXISTING);
} catch (IOException ex) {
throw new SwordError("problem running Files.copy");
}
study.getFiles().add(dFile);
DatasetVersion editVersion = study.getEditVersion();
// boolean metadataExtracted = false;
// try {
// metadataExtracted = ingestService.extractIndexableMetadata(importDir.getAbsolutePath() + File.separator + dFile.getFileSystemName(), dFile, editVersion);
// } catch (IOException ex) {
// throw returnEarly("couldn't extract metadata" + ex);
// }
FileMetadata fmd = new FileMetadata();
fmd.setDataFile(dFile);
fmd.setLabel("myLabel");
fmd.setDatasetVersion(editVersion);
dFile.getFileMetadatas().add(fmd);
Command<Dataset> cmd;
cmd = new UpdateDatasetCommand(study, vdcUser);
try {
/**
* @todo at update time indexing is run but the file is not
* indexed. Why? Manually re-indexing later finds it. Fix
* this. Related to
* https://redmine.hmdc.harvard.edu/issues/3809 ?
*/
study = commandEngine.submit(cmd);
} catch (CommandException ex) {
throw returnEarly("couldn't update dataset");
} catch (EJBException ex) {
Throwable cause = ex;
StringBuilder sb = new StringBuilder();
sb.append(ex.getLocalizedMessage());
while (cause.getCause() != null) {
cause = cause.getCause();
sb.append(cause + " ");
if (cause instanceof ConstraintViolationException) {
ConstraintViolationException constraintViolationException = (ConstraintViolationException) cause;
for (ConstraintViolation<?> violation : constraintViolationException.getConstraintViolations()) {
sb.append(" Invalid value: <<<").append(violation.getInvalidValue()).append(">>> for ")
.append(violation.getPropertyPath()).append(" at ")
.append(violation.getLeafBean()).append(" - ")
.append(violation.getMessage());
}
}
}
throw returnEarly("EJBException: " + sb.toString());
}
}
/**
* @todo remove this comment after confirming that the upstream jar
* now has our bugfix
*/
// the first character of the filename is truncated with the official jar
// so we use include the bug fix at https://github.com/IQSS/swordv2-java-server-library/commit/aeaef83
// and use this jar: https://build.hmdc.harvard.edu:8443/job/swordv2-java-server-library-iqss/2/
String uploadedZipFilename = deposit.getFilename();
ZipInputStream ziStream = new ZipInputStream(deposit.getInputStream());
ZipEntry zEntry;
FileOutputStream tempOutStream = null;
// List<StudyFileEditBean> fbList = new ArrayList<StudyFileEditBean>();
try {
// copied from createStudyFilesFromZip in AddFilesPage
while ((zEntry = ziStream.getNextEntry()) != null) {
// Note that some zip entries may be directories - we
// simply skip them:
if (!zEntry.isDirectory()) {
String fileEntryName = zEntry.getName();
logger.fine("file found: " + fileEntryName);
String dirName = null;
String finalFileName = null;
int ind = fileEntryName.lastIndexOf('/');
if (ind > -1) {
finalFileName = fileEntryName.substring(ind + 1);
if (ind > 0) {
dirName = fileEntryName.substring(0, ind);
dirName = dirName.replace('/', '-');
}
} else {
finalFileName = fileEntryName;
}
if (".DS_Store".equals(finalFileName)) {
continue;
}
// http://superuser.com/questions/212896/is-there-any-way-to-prevent-a-mac-from-creating-dot-underscore-files
if (finalFileName.startsWith("._")) {
continue;
}
File tempUploadedFile = new File(importDir, finalFileName);
tempOutStream = new FileOutputStream(tempUploadedFile);
byte[] dataBuffer = new byte[8192];
int i = 0;
while ((i = ziStream.read(dataBuffer)) > 0) {
tempOutStream.write(dataBuffer, 0, i);
tempOutStream.flush();
}
tempOutStream.close();
// We now have the unzipped file saved in the upload directory;
// zero-length dta files (for example) are skipped during zip
// upload in the GUI, so we'll skip them here as well
if (tempUploadedFile.length() != 0) {
if (true) {
// tempUploadedFile;
// UploadedFile uFile = tempUploadedFile;
// DataFile dataFile = new DataFile();
// throw new SwordError("let's create a file");
}
// StudyFileEditBean tempFileBean = new StudyFileEditBean(tempUploadedFile, studyService.generateFileSystemNameSequence(), study);
// tempFileBean.setSizeFormatted(tempUploadedFile.length());
String finalFileNameAfterReplace = finalFileName;
// if (tempFileBean.getStudyFile() instanceof TabularDataFile) {
// predict what the tabular file name will be
// finalFileNameAfterReplace = FileUtil.replaceExtension(finalFileName);
// }
// validateFileName(exisitingFilenames, finalFileNameAfterReplace, study);
// And, if this file was in a legit (non-null) directory,
// we'll use its name as the file category:
if (dirName != null) {
// tempFileBean.getFileMetadata().setCategory(dirName);
}
// fbList.add(tempFileBean);
}
} else {
logger.fine("directory found: " + zEntry.getName());
}
}
} catch (IOException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Problem with file: " + uploadedZipFilename);
} finally {
/**
* @todo shouldn't we delete this uploadDir? Commented out in
* DVN 3.x
*/
// if (!uploadDir.delete()) {
// logger.fine("Unable to delete " + uploadDir.getAbsolutePath());
// }
}
// if (fbList.size() > 0) {
// StudyFileServiceLocal studyFileService;
// try {
// studyFileService = (StudyFileServiceLocal) ctx.lookup("java:comp/env/studyFileService");
// } catch (NamingException ex) {
// logger.info("problem looking up studyFileService");
// throw new SwordServerException("problem looking up studyFileService");
// }
try {
// studyFileService.addFiles(study.getLatestVersion(), fbList, vdcUser);
} catch (EJBException ex) {
throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Unable to add file(s) to study: " + ex.getMessage());
}
ReceiptGenerator receiptGenerator = new ReceiptGenerator();
String baseUrl = urlManager.getHostnamePlusBaseUrlPath(uri);
DepositReceipt depositReceipt = receiptGenerator.createReceipt(baseUrl, study);
return depositReceipt;
} else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "Problem with zip file '" + uploadedZipFilename + "'. Number of files unzipped: " + fbList.size());
}
// } else {
// throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "user " + vdcUser.getUserName() + " is not authorized to modify study with global ID " + study.getGlobalId());
return new DepositReceipt(); // added just to get this to compile 2014-05-14
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public synchronized String CalculateMD5 (String datafile) {
MessageDigest md = null;
try {
md = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
FileInputStream fis = null;
try {
fis = new FileInputStream(datafile);
} catch (FileNotFoundException ex) {
throw new RuntimeException(ex);
}
byte[] dataBytes = new byte[1024];
int nread;
try {
while ((nread = fis.read(dataBytes)) != -1) {
md.update(dataBytes, 0, nread);
}
} catch (IOException ex) {
throw new RuntimeException(ex);
}
byte[] mdbytes = md.digest();
StringBuilder sb = new StringBuilder("");
for (int i = 0; i < mdbytes.length; i++) {
sb.append(Integer.toString((mdbytes[i] & 0xff) + 0x100, 16).substring(1));
}
return sb.toString();
}
#location 26
#vulnerability type RESOURCE_LEAK
|
#fixed code
public synchronized String CalculateMD5 (String datafile) {
FileInputStream fis = null;
try {
fis = new FileInputStream(datafile);
} catch (FileNotFoundException ex) {
throw new RuntimeException(ex);
}
return CalculateMD5(fis);
/*
byte[] dataBytes = new byte[1024];
int nread;
try {
while ((nread = fis.read(dataBytes)) != -1) {
md.update(dataBytes, 0, nread);
}
} catch (IOException ex) {
throw new RuntimeException(ex);
}
byte[] mdbytes = md.digest();
StringBuilder sb = new StringBuilder("");
for (int i = 0; i < mdbytes.length; i++) {
sb.append(Integer.toString((mdbytes[i] & 0xff) + 0x100, 16).substring(1));
}
return sb.toString();
*/
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public DataModel getDatasetFieldsDataModel() {
List values = new ArrayList();
int i = 0;
for (DatasetFieldValue dsfv : dataset.getEditVersion().getDatasetFieldValues()){
DatasetField datasetField = dsfv.getDatasetField();
Object[] row = new Object[4];
row[0] = datasetField;
row[1] = getValuesDataModel(datasetField);
row[2] = new Integer(i);
row[3] = datasetField;
values.add(row);
i++;
}
return new ListDataModel(values);
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public DataModel getDatasetFieldsDataModel() {
List values = new ArrayList();
int i = 0;
for (DatasetFieldValue dsfv : editVersion.getDatasetFieldValues()){
DatasetField datasetField = dsfv.getDatasetField();
Object[] row = new Object[4];
row[0] = datasetField;
row[1] = getValuesDataModel(dsfv);
row[2] = new Integer(i);
row[3] = datasetField;
values.add(row);
i++;
}
return new ListDataModel(values);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testFlowWithLoops() throws Exception {
URI resource = getClass().getResource("/yaml/loops/simple_loop.sl").toURI();
URI operation1 = getClass().getResource("/yaml/loops/print.sl").toURI();
Set<SlangSource> path = Sets.newHashSet(SlangSource.fromFile(operation1));
CompilationArtifact compilationArtifact = slang.compile(SlangSource.fromFile(resource), path);
Map<String, Serializable> userInputs = new HashMap<>();
Map<String, StepData> stepsData = triggerWithData(compilationArtifact, userInputs, EMPTY_SET).getTasks();
StepData firstTask = stepsData.get(FIRST_STEP_PATH);
StepData secondTask = stepsData.get(SECOND_STEP_KEY);
StepData thirdTask = stepsData.get(THIRD_STEP_KEY);
Assert.assertTrue(firstTask.getInputs().containsValue(1));
Assert.assertTrue(secondTask.getInputs().containsValue(2));
Assert.assertTrue(thirdTask.getInputs().containsValue(3));
}
#location 14
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testFlowWithLoops() throws Exception {
URI resource = getClass().getResource("/yaml/loops/simple_loop.sl").toURI();
URI operation1 = getClass().getResource("/yaml/loops/print.sl").toURI();
Set<SlangSource> path = Sets.newHashSet(SlangSource.fromFile(operation1));
CompilationArtifact compilationArtifact = slang.compile(SlangSource.fromFile(resource), path);
Map<String, Serializable> userInputs = new HashMap<>();
Set<SystemProperty> systemProperties = new HashSet<>();
systemProperties.add(
SystemProperty.createSystemProperty("loop", "for.prop1", "for_value")
);
Map<String, StepData> stepsData = triggerWithData(compilationArtifact, userInputs, systemProperties).getTasks();
StepData firstTask = stepsData.get(FIRST_STEP_PATH);
StepData secondTask = stepsData.get(SECOND_STEP_KEY);
StepData thirdTask = stepsData.get(THIRD_STEP_KEY);
Map<String, Serializable> expectedInputs = new HashMap<>();
expectedInputs.put("text", 1);
expectedInputs.put("sp_arg", "for_value");
Assert.assertEquals(expectedInputs, firstTask.getInputs());
expectedInputs.put("text", 2);
Assert.assertEquals(expectedInputs, secondTask.getInputs());
expectedInputs.put("text", 3);
Assert.assertEquals(expectedInputs, thirdTask.getInputs());
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public ReturnValues getExecutionReturnValues(){
if(StringUtils.isEmpty(result)){
throw new RuntimeException("Result of executing the test " + testCaseName + " cannot be empty");
}
if (outputs == null){
outputs = new HashMap<>();
}
return new ReturnValues(outputs, result);
}
#location 2
#vulnerability type THREAD_SAFETY_VIOLATION
|
#fixed code
public ReturnValues getExecutionReturnValues(){
// if(StringUtils.isEmpty(result)){
// throw new RuntimeException("Result of executing the test " + testCaseName + " cannot be empty");
// }
return new ReturnValues(outputs, result);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testValidStatementAndTrim() throws Exception {
ForLoopStatement statement = transformer.transform(" min in collection ");
Assert.assertEquals(ForLoopStatement.Type.LIST, statement.getType());
Assert.assertEquals("min", statement.getVarName());
Assert.assertEquals("collection", statement.getCollectionExpression());
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testValidStatementAndTrim() throws Exception {
ForLoopStatement statement = transformer.transform(" min in collection ");
ListForLoopStatement listForLoopStatement = validateListForLoopStatement(statement);
Assert.assertEquals("min", listForLoopStatement.getVarName());
Assert.assertEquals("collection", listForLoopStatement.getCollectionExpression());
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testValidMapStatement() throws Exception {
ForLoopStatement statement = transformer.transform("k, v in collection");
Assert.assertEquals(ForLoopStatement.Type.MAP, statement.getType());
Assert.assertEquals("k v", statement.getVarName());
Assert.assertEquals("collection", statement.getCollectionExpression());
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testValidMapStatement() throws Exception {
ForLoopStatement statement = transformer.transform("k, v in collection");
MapForLoopStatement mapForLoopStatement = validateMapForLoopStatement(statement);
Assert.assertEquals("k", mapForLoopStatement.getKeyName());
Assert.assertEquals("v", mapForLoopStatement.getValueName());
Assert.assertEquals("collection", statement.getCollectionExpression());
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testValidStatement() throws Exception {
ForLoopStatement statement = transformer.transform("x in collection");
Assert.assertEquals(ForLoopStatement.Type.LIST, statement.getType());
Assert.assertEquals("x", statement.getVarName());
Assert.assertEquals("collection", statement.getCollectionExpression());
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testValidStatement() throws Exception {
ForLoopStatement statement = transformer.transform("x in collection");
ListForLoopStatement listForLoopStatement = validateListForLoopStatement(statement);
Assert.assertEquals("x", listForLoopStatement.getVarName());
Assert.assertEquals("collection", listForLoopStatement.getCollectionExpression());
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public static void main(String[] args) {
String repositoryPath = System.getProperty("path");
String testsPath = System.getProperty("testPath");
String testSuitsArg = System.getProperty("testSuits");
Validate.notNull(repositoryPath, "You must pass a path to your repository");
repositoryPath = FilenameUtils.separatorsToSystem(repositoryPath);
Validate.isTrue(new File(repositoryPath).isDirectory(),
"Directory path argument \'" + repositoryPath + "\' does not lead to a directory");
String[] testSuits = null;
if(testSuitsArg != null){
testSuits = testSuitsArg.split(",");
}
ApplicationContext context = new AnnotationConfigApplicationContext(SlangBuildSpringConfiguration.class);
SlangBuild slangBuild = context.getBean(SlangBuild.class);
try {
int numberOfValidSlangFiles = slangBuild.buildSlangContent(repositoryPath, testsPath, testSuits);
System.out.println("SUCCESS: Found " + numberOfValidSlangFiles + " slang files under directory: \"" + repositoryPath + "\" and all are valid.");
System.exit(0);
} catch (Exception e) {
System.out.println(e.getMessage() + "\n\nFAILURE: Validation of slang files under directory: \"" + repositoryPath + "\" failed.");
// TODO - do we want to throw exception or exit with 1?
System.exit(1);
}
}
#location 15
#vulnerability type RESOURCE_LEAK
|
#fixed code
public static void main(String[] args) {
String repositoryPath = System.getProperty("path");
String testsPath = System.getProperty("testPath");
String testSuitsArg = System.getProperty("testSuits");
Validate.notNull(repositoryPath, "You must pass a path to your repository");
repositoryPath = FilenameUtils.separatorsToSystem(repositoryPath);
Validate.isTrue(new File(repositoryPath).isDirectory(),
"Directory path argument \'" + repositoryPath + "\' does not lead to a directory");
String[] testSuits = null;
if(testSuitsArg != null){
testSuits = testSuitsArg.split(",");
}
ApplicationContext context = new ClassPathXmlApplicationContext("/META-INF/spring/testRunnerContext.xml");
SlangBuild slangBuild = context.getBean(SlangBuild.class);
try {
int numberOfValidSlangFiles = slangBuild.buildSlangContent(repositoryPath, testsPath, testSuits);
System.out.println("SUCCESS: Found " + numberOfValidSlangFiles + " slang files under directory: \"" + repositoryPath + "\" and all are valid.");
System.exit(0);
} catch (Exception e) {
System.out.println(e.getMessage() + "\n\nFAILURE: Validation of slang files under directory: \"" + repositoryPath + "\" failed.");
// TODO - do we want to throw exception or exit with 1?
System.exit(1);
}
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
private List<RuntimeException> validateModelWithDependencies(
Executable executable,
Map<String, Executable> dependencies,
Set<Executable> verifiedExecutables,
List<RuntimeException> errors) {
//validate that all required & non private parameters with no default value of a reference are provided
if(!SlangTextualKeys.FLOW_TYPE.equals(executable.getType()) || verifiedExecutables.contains(executable)){
return errors;
}
verifiedExecutables.add(executable);
Flow flow = (Flow) executable;
Collection<Step> steps = flow.getWorkflow().getSteps();
Set<Executable> flowReferences = new HashSet<>();
for (Step step : steps) {
String refId = step.getRefId();
Executable reference = dependencies.get(refId);
List<String> mandatoryInputNames = getMandatoryInputNames(reference);
List<String> stepInputNames = getStepInputNames(step);
List<String> inputsNotWired = getInputsNotWired(mandatoryInputNames, stepInputNames);
try {
validateInputNamesEmpty(inputsNotWired, flow, step, reference);
validateStepInputNamesDifferentFromDependencyOutputNames(flow, step, reference);
validateDependenciesResultsHaveMatchingNavigations(executable, refId, step, reference);
} catch (RuntimeException e) {
errors.add(e);
}
flowReferences.add(reference);
}
for (Executable reference : flowReferences) {
validateModelWithDependencies(reference, dependencies, verifiedExecutables, errors);
}
return errors;
}
#location 19
#vulnerability type NULL_DEREFERENCE
|
#fixed code
private List<RuntimeException> validateModelWithDependencies(
Executable executable,
Map<String, Executable> dependencies,
Set<Executable> verifiedExecutables,
List<RuntimeException> errors) {
//validate that all required & non private parameters with no default value of a reference are provided
if(!SlangTextualKeys.FLOW_TYPE.equals(executable.getType()) || verifiedExecutables.contains(executable)){
return errors;
}
verifiedExecutables.add(executable);
Flow flow = (Flow) executable;
Collection<Step> steps = flow.getWorkflow().getSteps();
Set<Executable> flowReferences = new HashSet<>();
for (Step step : steps) {
String refId = step.getRefId();
Executable reference = dependencies.get(refId);
try {
validateMandatoryInputsAreWired(flow, step, reference);
validateStepInputNamesDifferentFromDependencyOutputNames(flow, step, reference);
validateDependenciesResultsHaveMatchingNavigations(executable, refId, step, reference);
} catch (RuntimeException e) {
errors.add(e);
}
flowReferences.add(reference);
}
for (Executable reference : flowReferences) {
validateModelWithDependencies(reference, dependencies, verifiedExecutables, errors);
}
return errors;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
public List<Value> bindAsyncLoopList(
AsyncLoopStatement asyncLoopStatement,
Context flowContext,
Set<SystemProperty> systemProperties,
String nodeName) {
Validate.notNull(asyncLoopStatement, "async loop statement cannot be null");
Validate.notNull(flowContext, "flow context cannot be null");
Validate.notNull(systemProperties, "system properties cannot be null");
Validate.notNull(nodeName, "node name cannot be null");
List<Value> result = new ArrayList<>();
try {
Value evalResult = scriptEvaluator.evalExpr(asyncLoopStatement.getExpression(), flowContext.getImmutableViewOfVariables(), systemProperties);
if (evalResult.get() != null) {
//noinspection unchecked
for (Serializable serializable : ((List<Serializable>)evalResult.get())) {
Value value = serializable instanceof Value ? (Value)serializable : ValueFactory.create(serializable, evalResult.isSensitive());
result.add(value);
}
}
} catch (Throwable t) {
throw new RuntimeException(generateAsyncLoopExpressionMessage(nodeName, t.getMessage()), t);
}
if (CollectionUtils.isEmpty(result)) {
throw new RuntimeException(generateAsyncLoopExpressionMessage(nodeName, "expression is empty"));
}
return result;
}
#location 14
#vulnerability type NULL_DEREFERENCE
|
#fixed code
public List<Value> bindAsyncLoopList(
AsyncLoopStatement asyncLoopStatement,
Context flowContext,
Set<SystemProperty> systemProperties,
String nodeName) {
Validate.notNull(asyncLoopStatement, "async loop statement cannot be null");
Validate.notNull(flowContext, "flow context cannot be null");
Validate.notNull(systemProperties, "system properties cannot be null");
Validate.notNull(nodeName, "node name cannot be null");
List<Value> result = new ArrayList<>();
try {
Value evalResult = scriptEvaluator.evalExpr(asyncLoopStatement.getExpression(), flowContext.getImmutableViewOfVariables(), systemProperties);
if (evalResult != null && evalResult.get() != null) {
//noinspection unchecked
for (Serializable serializable : ((List<Serializable>)evalResult.get())) {
Value value = ValueFactory.create(serializable, evalResult.isSensitive());
result.add(value);
}
}
} catch (Throwable t) {
throw new RuntimeException(generateAsyncLoopExpressionMessage(nodeName, t.getMessage()), t);
}
if (CollectionUtils.isEmpty(result)) {
throw new RuntimeException(generateAsyncLoopExpressionMessage(nodeName, "expression is empty"));
}
return result;
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testValues() throws Exception {
// compile
URI resource = getClass().getResource("/yaml/formats/values_flow.sl").toURI();
URI operation1 = getClass().getResource("/yaml/formats/values_op.sl").toURI();
URI operation2 = getClass().getResource("/yaml/noop.sl").toURI();
SlangSource dep1 = SlangSource.fromFile(operation1);
SlangSource dep2 = SlangSource.fromFile(operation2);
Set<SlangSource> path = Sets.newHashSet(dep1, dep2);
CompilationArtifact compilationArtifact = slang.compile(SlangSource.fromFile(resource), path);
// trigger
Map<String, StepData> steps = prepareAndRun(compilationArtifact);
// verify
StepData flowData = steps.get(EXEC_START_PATH);
StepData stepData = steps.get(FIRST_STEP_PATH);
verifyExecutableInputs(flowData);
verifyExecutableOutputs(flowData);
verifyStepInputs(stepData);
verifyStepPublishValues(stepData);
verifySuccessResult(flowData);
}
#location 23
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testValues() throws Exception {
// compile
URI resource = getClass().getResource("/yaml/formats/values_flow.sl").toURI();
URI operation1 = getClass().getResource("/yaml/formats/values_op.sl").toURI();
URI operation2 = getClass().getResource("/yaml/noop.sl").toURI();
SlangSource dep1 = SlangSource.fromFile(operation1);
SlangSource dep2 = SlangSource.fromFile(operation2);
Set<SlangSource> path = Sets.newHashSet(dep1, dep2);
CompilationArtifact compilationArtifact = slang.compile(SlangSource.fromFile(resource), path);
// trigger
Map<String, StepData> steps = prepareAndRun(compilationArtifact);
// verify
StepData flowData = steps.get(EXEC_START_PATH);
StepData stepData = steps.get(FIRST_STEP_PATH);
verifyExecutableInputs(flowData);
verifyExecutableOutputs(flowData);
verifyStepInputs(stepData);
verifySuccessResult(flowData);
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
#vulnerable code
@Test
public void testMultipleInsAreTrimmed() throws Exception {
ForLoopStatement statement = transformer.transform(" in in in ");
Assert.assertEquals("in", statement.getVarName());
Assert.assertEquals("in", statement.getCollectionExpression());
}
#location 4
#vulnerability type NULL_DEREFERENCE
|
#fixed code
@Test
public void testMultipleInsAreTrimmed() throws Exception {
ForLoopStatement statement = transformer.transform(" in in in ");
ListForLoopStatement listForLoopStatement = validateListForLoopStatement(statement);
Assert.assertEquals("in", listForLoopStatement.getCollectionExpression());
}
|
Below is the vulnerable code, please generate the patch based on the following information.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.