method stringlengths 13–441k | clean_method stringlengths 7–313k | doc stringlengths 17–17.3k | comment stringlengths 3–1.42k | method_name stringlengths 1–273 | extra dict | imports list | imports_info stringlengths 19–34.8k | cluster_imports_info stringlengths 15–3.66k | libraries list | libraries_info stringlengths 6–661 | id int64 0–2.92M |
---|---|---|---|---|---|---|---|---|---|---|---|
public boolean swap_connection_to_pin(boolean p_at_start)
{
Polyline trace_polyline;
Collection<Item> contact_list;
if (p_at_start)
{
trace_polyline = this.polyline();
contact_list = this.get_start_contacts();
}
else
{
trace_polyline = this.polyline().reverse();
contact_list = this.get_end_contacts();
}
if (contact_list.size() != 1)
{
return false;
}
Item curr_contact = contact_list.iterator().next();
if (!(curr_contact.get_fixed_state() == FixedState.SHOVE_FIXED && (curr_contact instanceof PolylineTrace)))
{
return false;
}
PolylineTrace contact_trace = (PolylineTrace) curr_contact;
Polyline contact_polyline = contact_trace.polyline();
Line contact_last_line = contact_polyline.arr[contact_polyline.arr.length - 2];
  // check whether this trace forms a sharp angle with the contact trace.
Line first_line = trace_polyline.arr[1];
// check for sharp angle
boolean check_swap = contact_last_line.direction().projection(first_line.direction()) == Signum.NEGATIVE;
if (!check_swap)
{
double half_width = this.get_half_width();
if (trace_polyline.arr.length > 3 &&
trace_polyline.corner_approx(0).distance_square(trace_polyline.corner_approx(1)) <= half_width * half_width)
{
// check also for sharp angle with the second line
check_swap =
(contact_last_line.direction().projection(trace_polyline.arr[2].direction()) == Signum.NEGATIVE);
}
}
if (!check_swap)
{
return false;
}
Pin contact_pin = null;
Collection<Item> curr_contacts = contact_trace.get_start_contacts();
for (Item tmp_contact : curr_contacts)
{
if (tmp_contact instanceof Pin)
{
contact_pin = (Pin) tmp_contact;
break;
}
}
if (contact_pin == null)
{
return false;
}
Polyline combined_polyline = contact_polyline.combine(trace_polyline);
Direction nearest_pin_exit_direction =
contact_pin.calc_nearest_exit_restriction_direction(combined_polyline, this.get_half_width(), this.get_layer());
if (nearest_pin_exit_direction == null || nearest_pin_exit_direction.equals(contact_polyline.arr[1].direction()))
{
return false; // direction would not be changed
}
contact_trace.set_fixed_state(this.get_fixed_state());
this.combine();
return true;
}
// primary data
private Polyline lines; | boolean function(boolean p_at_start) { Polyline trace_polyline; Collection<Item> contact_list; if (p_at_start) { trace_polyline = this.polyline(); contact_list = this.get_start_contacts(); } else { trace_polyline = this.polyline().reverse(); contact_list = this.get_end_contacts(); } if (contact_list.size() != 1) { return false; } Item curr_contact = contact_list.iterator().next(); if (!(curr_contact.get_fixed_state() == FixedState.SHOVE_FIXED && (curr_contact instanceof PolylineTrace))) { return false; } PolylineTrace contact_trace = (PolylineTrace) curr_contact; Polyline contact_polyline = contact_trace.polyline(); Line contact_last_line = contact_polyline.arr[contact_polyline.arr.length - 2]; Line first_line = trace_polyline.arr[1]; boolean check_swap = contact_last_line.direction().projection(first_line.direction()) == Signum.NEGATIVE; if (!check_swap) { double half_width = this.get_half_width(); if (trace_polyline.arr.length > 3 && trace_polyline.corner_approx(0).distance_square(trace_polyline.corner_approx(1)) <= half_width * half_width) { check_swap = (contact_last_line.direction().projection(trace_polyline.arr[2].direction()) == Signum.NEGATIVE); } } if (!check_swap) { return false; } Pin contact_pin = null; Collection<Item> curr_contacts = contact_trace.get_start_contacts(); for (Item tmp_contact : curr_contacts) { if (tmp_contact instanceof Pin) { contact_pin = (Pin) tmp_contact; break; } } if (contact_pin == null) { return false; } Polyline combined_polyline = contact_polyline.combine(trace_polyline); Direction nearest_pin_exit_direction = contact_pin.calc_nearest_exit_restriction_direction(combined_polyline, this.get_half_width(), this.get_layer()); if (nearest_pin_exit_direction == null || nearest_pin_exit_direction.equals(contact_polyline.arr[1].direction())) { return false; } contact_trace.set_fixed_state(this.get_fixed_state()); this.combine(); return true; } private Polyline lines; | /**
 * Looks if another pin connection restriction fits better than the current connection restriction
 * and changes this trace in that case.
 * If p_at_start, the start of the trace polygon is changed, else the end.
 * Returns true if this trace was changed.
*
* @param p_at_start a boolean.
* @return a boolean.
 */ | Looks if another pin connection restriction fits better than the current connection restriction and changes this trace in that case. If p_at_start, the start of the trace polygon is changed, else the end. Returns true if this trace was changed | swap_connection_to_pin | {
"repo_name": "nick-less/freerouting",
"path": "src/main/java/board/PolylineTrace.java",
"license": "gpl-3.0",
"size": 54452
} | [
"java.util.Collection"
] | import java.util.Collection; | import java.util.*; | [
"java.util"
] | java.util; | 349,155 |
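
The sharp-angle test in the record above boils down to the sign of a dot product: two segment directions meet at a sharp angle exactly when their projection onto each other is negative. A minimal standalone sketch of that check, with the record's `Direction`/`Signum` types replaced by plain 2D doubles for illustration:

```java
// Minimal sketch of the sharp-angle test: a negative dot product of the two
// direction vectors means the angle between them exceeds 90 degrees.
public final class SharpAngleCheck {

    /** Returns true if the two segment directions point against each other. */
    static boolean isSharpAngle(double ax, double ay, double bx, double by) {
        return ax * bx + ay * by < 0;
    }

    public static void main(String[] args) {
        System.out.println(isSharpAngle(1, 0, -1, 0.2)); // true: near U-turn
        System.out.println(isSharpAngle(1, 0, 0.5, 0.5)); // false: gentle bend
    }
}
```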
private CheckSum copyFileWithCheckSum(FileSystem fs,
HdfsFile source,
File dest,
HdfsCopyStats stats,
CheckSumType checkSumType,
byte[] buffer) throws Throwable {
CheckSum fileCheckSumGenerator = null;
logger.debug("Starting copy of " + source + " to " + dest);
// Check if its Gzip compressed
boolean isCompressed = source.isCompressed();
FilterInputStream input = null;
OutputStream output = null;
long startTimeMS = System.currentTimeMillis();
for(int attempt = 0; attempt < maxAttempts; attempt++) {
boolean success = true;
long totalBytesRead = 0;
boolean fsOpened = false;
try {
// Create a per file checksum generator
if(checkSumType != null) {
fileCheckSumGenerator = CheckSum.getInstance(checkSumType);
}
logger.info("Attempt " + attempt + " at copy of " + source + " to " + dest);
if(!isCompressed) {
input = fs.open(source.getPath());
} else {
// We are already bounded by the "hdfs.fetcher.buffer.size"
// specified in the Voldemort config, the default value of
// which is 64K. Using the same as the buffer size for
// GZIPInputStream as well.
input = new GZIPInputStream(fs.open(source.getPath()), this.bufferSize);
}
fsOpened = true;
output = new BufferedOutputStream(new FileOutputStream(dest));
while(true) {
int read = input.read(buffer);
if(read < 0) {
break;
} else {
output.write(buffer, 0, read);
}
// Update the per file checksum
if(fileCheckSumGenerator != null) {
fileCheckSumGenerator.update(buffer, 0, read);
}
// Check if we need to throttle the fetch
if(throttler != null) {
throttler.maybeThrottle(read);
}
stats.recordBytes(read);
totalBytesRead += read;
if(stats.getBytesSinceLastReport() > reportingIntervalBytes) {
NumberFormat format = NumberFormat.getNumberInstance();
format.setMaximumFractionDigits(2);
logger.info(stats.getTotalBytesCopied() / (1024 * 1024) + " MB copied at "
+ format.format(stats.getBytesPerSecond() / (1024 * 1024))
+ " MB/sec - " + format.format(stats.getPercentCopied())
+ " % complete, destination:" + dest);
if(this.status != null) {
this.status.setStatus(stats.getTotalBytesCopied()
/ (1024 * 1024)
+ " MB copied at "
+ format.format(stats.getBytesPerSecond()
/ (1024 * 1024)) + " MB/sec - "
+ format.format(stats.getPercentCopied())
+ " % complete, destination:" + dest);
}
stats.reset();
}
}
stats.reportFileDownloaded(dest,
startTimeMS,
source.getSize(),
System.currentTimeMillis() - startTimeMS,
attempt,
totalBytesRead);
logger.info("Completed copy of " + source + " to " + dest);
} catch(Throwable te) {
success = false;
if(!fsOpened) {
logger.error("Error while opening the file stream to " + source, te);
} else {
logger.error("Error while copying file " + source + " after " + totalBytesRead
+ " bytes.", te);
}
if(te.getCause() != null) {
logger.error("Cause of error ", te.getCause());
}
if(attempt < maxAttempts - 1) {
logger.info("Will retry copying after " + retryDelayMs + " ms");
sleepForRetryDelayMs();
} else {
stats.reportFileError(dest, maxAttempts, startTimeMS, te);
logger.info("Fetcher giving up copy after " + maxAttempts + " attempts");
throw te;
}
} finally {
IOUtils.closeQuietly(output);
IOUtils.closeQuietly(input);
if(success) {
break;
}
}
logger.debug("Completed copy of " + source + " to " + dest);
}
return fileCheckSumGenerator;
} | CheckSum function(FileSystem fs, HdfsFile source, File dest, HdfsCopyStats stats, CheckSumType checkSumType, byte[] buffer) throws Throwable { CheckSum fileCheckSumGenerator = null; logger.debug(STR + source + STR + dest); boolean isCompressed = source.isCompressed(); FilterInputStream input = null; OutputStream output = null; long startTimeMS = System.currentTimeMillis(); for(int attempt = 0; attempt < maxAttempts; attempt++) { boolean success = true; long totalBytesRead = 0; boolean fsOpened = false; try { if(checkSumType != null) { fileCheckSumGenerator = CheckSum.getInstance(checkSumType); } logger.info(STR + attempt + STR + source + STR + dest); if(!isCompressed) { input = fs.open(source.getPath()); } else { input = new GZIPInputStream(fs.open(source.getPath()), this.bufferSize); } fsOpened = true; output = new BufferedOutputStream(new FileOutputStream(dest)); while(true) { int read = input.read(buffer); if(read < 0) { break; } else { output.write(buffer, 0, read); } if(fileCheckSumGenerator != null) { fileCheckSumGenerator.update(buffer, 0, read); } if(throttler != null) { throttler.maybeThrottle(read); } stats.recordBytes(read); totalBytesRead += read; if(stats.getBytesSinceLastReport() > reportingIntervalBytes) { NumberFormat format = NumberFormat.getNumberInstance(); format.setMaximumFractionDigits(2); logger.info(stats.getTotalBytesCopied() / (1024 * 1024) + STR + format.format(stats.getBytesPerSecond() / (1024 * 1024)) + STR + format.format(stats.getPercentCopied()) + STR + dest); if(this.status != null) { this.status.setStatus(stats.getTotalBytesCopied() / (1024 * 1024) + STR + format.format(stats.getBytesPerSecond() / (1024 * 1024)) + STR + format.format(stats.getPercentCopied()) + STR + dest); } stats.reset(); } } stats.reportFileDownloaded(dest, startTimeMS, source.getSize(), System.currentTimeMillis() - startTimeMS, attempt, totalBytesRead); logger.info(STR + source + STR + dest); } catch(Throwable te) { success = false; if(!fsOpened) { logger.error(STR + source, te); } else { logger.error(STR + source + STR + totalBytesRead + STR, te); } if(te.getCause() != null) { logger.error(STR, te.getCause()); } if(attempt < maxAttempts - 1) { logger.info(STR + retryDelayMs + STR); sleepForRetryDelayMs(); } else { stats.reportFileError(dest, maxAttempts, startTimeMS, te); logger.info(STR + maxAttempts + STR); throw te; } } finally { IOUtils.closeQuietly(output); IOUtils.closeQuietly(input); if(success) { break; } } logger.debug(STR + source + STR + dest); } return fileCheckSumGenerator; } | /**
* Function to copy a file from the given filesystem with a checksum of type
* 'checkSumType' computed and returned. In case an error occurs during such
* a copy, we do a retry for a maximum of NUM_RETRIES
*
* @param fs Filesystem used to copy the file
* @param source Source path of the file to copy
* @param dest Destination path of the file on the local machine
* @param stats Stats for measuring the transfer progress
* @param checkSumType Type of the Checksum to be computed for this file
* @return A Checksum (generator) of type checkSumType which contains the
* computed checksum of the copied file
* @throws IOException
*/ | Function to copy a file from the given filesystem with a checksum of type 'checkSumType' computed and returned. In case an error occurs during such a copy, we do a retry for a maximum of NUM_RETRIES | copyFileWithCheckSum | {
"repo_name": "birendraa/voldemort",
"path": "contrib/hadoop-store-builder/src/java/voldemort/store/readonly/fetcher/HdfsFetcher.java",
"license": "apache-2.0",
"size": 32733
} | [
"java.io.BufferedOutputStream",
"java.io.File",
"java.io.FileOutputStream",
"java.io.FilterInputStream",
"java.io.OutputStream",
"java.text.NumberFormat",
"java.util.zip.GZIPInputStream",
"org.apache.commons.io.IOUtils",
"org.apache.hadoop.fs.FileSystem"
] | import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.FilterInputStream; import java.io.OutputStream; import java.text.NumberFormat; import java.util.zip.GZIPInputStream; import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.FileSystem; | import java.io.*; import java.text.*; import java.util.zip.*; import org.apache.commons.io.*; import org.apache.hadoop.fs.*; | [
"java.io",
"java.text",
"java.util",
"org.apache.commons",
"org.apache.hadoop"
] | java.io; java.text; java.util; org.apache.commons; org.apache.hadoop; | 2,113,272 |
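
The core pattern in the record above is computing a checksum in the same pass that copies the data. A simplified sketch of that read/write/update loop, assuming `java.security.MessageDigest` in place of Voldemort's `CheckSum` type; retries, throttling, and progress reporting from the original are omitted:

```java
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;

// Every chunk written to the destination is also fed to the digest, so the
// checksum is computed in a single pass over the stream.
public final class ChecksummedCopy {
    public static byte[] copy(InputStream in, OutputStream out) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("MD5");
        byte[] buffer = new byte[64 * 1024];
        int read;
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);      // copy the chunk
            digest.update(buffer, 0, read);  // update the running checksum
        }
        return digest.digest();
    }
}
```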
default Collection<ScheduledBlockUpdate> getScheduledUpdates(Vector3i position) {
return getScheduledUpdates(position.getX(), position.getY(), position.getZ());
} | default Collection<ScheduledBlockUpdate> getScheduledUpdates(Vector3i position) { return getScheduledUpdates(position.getX(), position.getY(), position.getZ()); } | /**
* Gets a list of {@link ScheduledBlockUpdate}s on this block.
*
* @param position The position of the block
* @return A list of ScheduledBlockUpdates on this block
*/ | Gets a list of <code>ScheduledBlockUpdate</code>s on this block | getScheduledUpdates | {
"repo_name": "JBYoshi/SpongeAPI",
"path": "src/main/java/org/spongepowered/api/world/extent/Extent.java",
"license": "mit",
"size": 22806
} | [
"com.flowpowered.math.vector.Vector3i",
"java.util.Collection",
"org.spongepowered.api.block.ScheduledBlockUpdate"
] | import com.flowpowered.math.vector.Vector3i; import java.util.Collection; import org.spongepowered.api.block.ScheduledBlockUpdate; | import com.flowpowered.math.vector.*; import java.util.*; import org.spongepowered.api.block.*; | [
"com.flowpowered.math",
"java.util",
"org.spongepowered.api"
] | com.flowpowered.math; java.util; org.spongepowered.api; | 1,985,221 |
private ComboBoxModel<String> createBipartitionModel(CyNetwork network,String ColName)
{
DefaultComboBoxModel<String> compoundColumnSelectionModel = new DefaultComboBoxModel<String>();
List<CyRow> rows = network.getDefaultNodeTable().getAllRows();
HashSet<Object> diffvals = new HashSet<Object>();
for(CyRow row : rows)
{
try{
diffvals.add(row.getRaw(ColName));
}
catch(NullPointerException e)
{
}
}
//add all different items
for(Object o : diffvals)
{
if(o != null)
{
compoundColumnSelectionModel.addElement(o.toString());
}
}
//add a null element, we have to filter this at some point.
compoundColumnSelectionModel.addElement(null);
return compoundColumnSelectionModel;
} | ComboBoxModel<String> function(CyNetwork network,String ColName) { DefaultComboBoxModel<String> compoundColumnSelectionModel = new DefaultComboBoxModel<String>(); List<CyRow> rows = network.getDefaultNodeTable().getAllRows(); HashSet<Object> diffvals = new HashSet<Object>(); for(CyRow row : rows) { try{ diffvals.add(row.getRaw(ColName)); } catch(NullPointerException e) { } } for(Object o : diffvals) { if(o != null) { compoundColumnSelectionModel.addElement(o.toString()); } } compoundColumnSelectionModel.addElement(null); return compoundColumnSelectionModel; } | /**
* Create a Comboboxmodel with one element per different string present in the Column in the network.
* @param network The Network for which to create the bipartitionModel
* @param ColName The Network Column Name to use for bipartition selection
* @return a ComboboxModel that allows the bipartition
*/ | Create a Comboboxmodel with one element per different string present in the Column in the network | createBipartitionModel | {
"repo_name": "sysbiolux/IDARE",
"path": "METANODE-CREATOR/src/main/java/idare/imagenode/internal/GUI/NetworkSetup/ColumnTypeChooser.java",
"license": "lgpl-3.0",
"size": 14238
} | [
"java.util.HashSet",
"java.util.List",
"javax.swing.ComboBoxModel",
"javax.swing.DefaultComboBoxModel",
"org.cytoscape.model.CyNetwork",
"org.cytoscape.model.CyRow"
] | import java.util.HashSet; import java.util.List; import javax.swing.ComboBoxModel; import javax.swing.DefaultComboBoxModel; import org.cytoscape.model.CyNetwork; import org.cytoscape.model.CyRow; | import java.util.*; import javax.swing.*; import org.cytoscape.model.*; | [
"java.util",
"javax.swing",
"org.cytoscape.model"
] | java.util; javax.swing; org.cytoscape.model; | 1,963,480 |
private void downloadScript(ScriptActivityParam param)
{
FileChooser chooser = new FileChooser(view, FileChooser.SAVE,
"Download", "Select where to download the file.", null,
true);
IconManager icons = IconManager.getInstance();
chooser.setTitleIcon(icons.getIcon(IconManager.DOWNLOAD_48));
chooser.setSelectedFileFull(param.getScript().getName());
chooser.setApproveButtonText("Download");
final long id = param.getScript().getScriptID();
chooser.addPropertyChangeListener(new PropertyChangeListener() {
| void function(ScriptActivityParam param) { FileChooser chooser = new FileChooser(view, FileChooser.SAVE, STR, STR, null, true); IconManager icons = IconManager.getInstance(); chooser.setTitleIcon(icons.getIcon(IconManager.DOWNLOAD_48)); chooser.setSelectedFileFull(param.getScript().getName()); chooser.setApproveButtonText(STR); final long id = param.getScript().getScriptID(); chooser.addPropertyChangeListener(new PropertyChangeListener() { | /**
* Downloads the possible script.
*
* @param param The parameter holding the script.
*/ | Downloads the possible script | downloadScript | {
"repo_name": "rleigh-dundee/openmicroscopy",
"path": "components/insight/SRC/org/openmicroscopy/shoola/agents/imviewer/view/ImViewerControl.java",
"license": "gpl-2.0",
"size": 42528
} | [
"java.beans.PropertyChangeListener",
"org.openmicroscopy.shoola.agents.imviewer.IconManager",
"org.openmicroscopy.shoola.env.data.model.ScriptActivityParam",
"org.openmicroscopy.shoola.util.ui.filechooser.FileChooser"
] | import java.beans.PropertyChangeListener; import org.openmicroscopy.shoola.agents.imviewer.IconManager; import org.openmicroscopy.shoola.env.data.model.ScriptActivityParam; import org.openmicroscopy.shoola.util.ui.filechooser.FileChooser; | import java.beans.*; import org.openmicroscopy.shoola.agents.imviewer.*; import org.openmicroscopy.shoola.env.data.model.*; import org.openmicroscopy.shoola.util.ui.filechooser.*; | [
"java.beans",
"org.openmicroscopy.shoola"
] | java.beans; org.openmicroscopy.shoola; | 1,591,700 |
public SqlDatabaseCreateUpdateParameters withOptions(Map<String, String> options) {
this.options = options;
return this;
} | SqlDatabaseCreateUpdateParameters function(Map<String, String> options) { this.options = options; return this; } | /**
* Set the options property: A key-value pair of options to be applied for the request. This corresponds to the
* headers sent with the request.
*
* @param options the options value to set.
* @return the SqlDatabaseCreateUpdateParameters object itself.
*/ | Set the options property: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request | withOptions | {
"repo_name": "selvasingh/azure-sdk-for-java",
"path": "sdk/resourcemanager/azure-resourcemanager-cosmos/src/main/java/com/azure/resourcemanager/cosmos/models/SqlDatabaseCreateUpdateParameters.java",
"license": "mit",
"size": 3276
} | [
"java.util.Map"
] | import java.util.Map; | import java.util.*; | [
"java.util"
] | java.util; | 1,640,011 |
@Override
public int readUnsignedShort() throws IOException {
byte b1 = readAndCheckByte();
byte b2 = readAndCheckByte();
return Ints.fromBytes((byte) 0, (byte) 0, b2, b1);
} | int function() throws IOException { byte b1 = readAndCheckByte(); byte b2 = readAndCheckByte(); return Ints.fromBytes((byte) 0, (byte) 0, b2, b1); } | /**
* Reads an unsigned {@code short} as specified by
* {@link DataInputStream#readUnsignedShort()}, except using little-endian
* byte order.
*
* @return the next two bytes of the input stream, interpreted as an
* unsigned 16-bit integer in little-endian byte order
* @throws IOException if an I/O error occurs
*/ | Reads an unsigned short as specified by <code>DataInputStream#readUnsignedShort()</code>, except using little-endian byte order | readUnsignedShort | {
"repo_name": "kumarrus/voltdb",
"path": "third_party/java/src/com/google_voltpatches/common/io/LittleEndianDataInputStream.java",
"license": "agpl-3.0",
"size": 6813
} | [
"com.google_voltpatches.common.primitives.Ints",
"java.io.IOException"
] | import com.google_voltpatches.common.primitives.Ints; import java.io.IOException; | import com.google_voltpatches.common.primitives.*; import java.io.*; | [
"com.google_voltpatches.common",
"java.io"
] | com.google_voltpatches.common; java.io; | 261,107 |
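
A worked example of the byte order above: the first byte read is the low byte, so the bytes {0x01, 0x02} decode to 0x0201 = 513, whereas big-endian `DataInputStream` would give 0x0102 = 258. The sketch below assumes the upstream Guava class of the same name (the record uses VoltDB's repackaged copy):

```java
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import com.google.common.io.LittleEndianDataInputStream;

// Demonstrates little-endian unsigned-short decoding: low byte first.
public final class LittleEndianDemo {
    public static void main(String[] args) throws Exception {
        byte[] bytes = {0x01, 0x02};
        DataInput in = new LittleEndianDataInputStream(new ByteArrayInputStream(bytes));
        System.out.println(in.readUnsignedShort()); // prints 513 (0x0201)
    }
}
```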
List<EntityBean> getEntities();
EntityBean addEntity(); | List<EntityBean> getEntities(); EntityBean addEntity(); | /**
* Adds new child to the list of entity children.
* @return created child
*/ | Adds new child to the list of entity children | addEntity | {
"repo_name": "consulo/consulo-javaee",
"path": "javaee-api/src/main/java/com/intellij/javaee/model/xml/ejb/EnterpriseBeans.java",
"license": "apache-2.0",
"size": 2071
} | [
"java.util.List"
] | import java.util.List; | import java.util.*; | [
"java.util"
] | java.util; | 2,629,544 |
private void botonAbrirMouseEntered(java.awt.event.MouseEvent evt) {
// TODO add your handling code here:
botonAbrir.setCursor(new Cursor(Cursor.HAND_CURSOR));
botonAbrir.setIcon(new ImageIcon(getClass().getResource("/hermes/imagenes/inicio/botAbrirProyecto_over.png")));
}
| void function(java.awt.event.MouseEvent evt) { botonAbrir.setCursor(new Cursor(Cursor.HAND_CURSOR)); botonAbrir.setIcon(new ImageIcon(getClass().getResource(STR))); } | /**
 * changes the icon to a hand cursor
 * @param evt
 */ | changes the icon to a hand cursor | botonAbrirMouseEntered | {
"repo_name": "Esleelkartea/hermes",
"path": "hermes_v1.0.0_src/bgc/gui/inicio/VInicio.java",
"license": "gpl-2.0",
"size": 23674
} | [
"java.awt.Cursor",
"javax.swing.ImageIcon"
] | import java.awt.Cursor; import javax.swing.ImageIcon; | import java.awt.*; import javax.swing.*; | [
"java.awt",
"javax.swing"
] | java.awt; javax.swing; | 706,110 |
public VirtualHostBuilder annotatedService(String pathPrefix, Object service) {
return annotatedService(pathPrefix, service, Function.identity(), ImmutableList.of());
} | VirtualHostBuilder function(String pathPrefix, Object service) { return annotatedService(pathPrefix, service, Function.identity(), ImmutableList.of()); } | /**
* Binds the specified annotated service object under the specified path prefix.
*/ | Binds the specified annotated service object under the specified path prefix | annotatedService | {
"repo_name": "anuraaga/armeria",
"path": "core/src/main/java/com/linecorp/armeria/server/VirtualHostBuilder.java",
"license": "apache-2.0",
"size": 46914
} | [
"com.google.common.collect.ImmutableList",
"java.util.function.Function"
] | import com.google.common.collect.ImmutableList; import java.util.function.Function; | import com.google.common.collect.*; import java.util.function.*; | [
"com.google.common",
"java.util"
] | com.google.common; java.util; | 1,402,874 |
public Builder addTransitiveArtifacts(NestedSet<Artifact> artifacts) {
artifactsBuilder.addTransitive(artifacts);
return this;
} | Builder function(NestedSet<Artifact> artifacts) { artifactsBuilder.addTransitive(artifacts); return this; } | /**
* Adds a nested set to the internal collection.
*/ | Adds a nested set to the internal collection | addTransitiveArtifacts | {
"repo_name": "charlieaustin/bazel",
"path": "src/main/java/com/google/devtools/build/lib/analysis/Runfiles.java",
"license": "apache-2.0",
"size": 31326
} | [
"com.google.devtools.build.lib.actions.Artifact",
"com.google.devtools.build.lib.collect.nestedset.NestedSet"
] | import com.google.devtools.build.lib.actions.Artifact; import com.google.devtools.build.lib.collect.nestedset.NestedSet; | import com.google.devtools.build.lib.actions.*; import com.google.devtools.build.lib.collect.nestedset.*; | [
"com.google.devtools"
] | com.google.devtools; | 1,317,023 |
@LogMessage(level = WARN)
@Message(id = 15, value = "Unknown timezone id: %s found in schedule expression. Ignoring it and using server's timezone: %s")
void unknownTimezoneId(String timezoneId, String id); | @LogMessage(level = WARN) @Message(id = 15, value = STR) void unknownTimezoneId(String timezoneId, String id); | /**
* Logs a warning message indicating Unknown timezone id found in schedule expression. Ignoring it and using server's timezone
*/ | Logs a warning message indicating Unknown timezone id found in schedule expression. Ignoring it and using server's timezone | unknownTimezoneId | {
"repo_name": "golovnin/wildfly",
"path": "ejb3/src/main/java/org/jboss/as/ejb3/logging/EjbLogger.java",
"license": "lgpl-2.1",
"size": 147179
} | [
"org.jboss.logging.annotations.LogMessage",
"org.jboss.logging.annotations.Message"
] | import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; | import org.jboss.logging.annotations.*; | [
"org.jboss.logging"
] | org.jboss.logging; | 1,150,557 |
private static List<AppResult> getRestJobResultsFromJobExecutionId(String jobExecId) {
List<AppResult> results =
AppResult.find.select(AppResult.getSearchFields()).where().eq(AppResult.TABLE.JOB_EXEC_ID, jobExecId).order()
.desc(AppResult.TABLE.FINISH_TIME)
.fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields()).findList();
return results;
} | static List<AppResult> function(String jobExecId) { List<AppResult> results = AppResult.find.select(AppResult.getSearchFields()).where().eq(AppResult.TABLE.JOB_EXEC_ID, jobExecId).order() .desc(AppResult.TABLE.FINISH_TIME) .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields()).findList(); return results; } | /**
* Returns a list of AppResult with the given jobExecId
* @param jobExecId The job execution id of the job
* @return The list of AppResult filtered by job execution id
*/ | Returns a list of AppResult with the given jobExecId | getRestJobResultsFromJobExecutionId | {
"repo_name": "qubole/dr-elephant",
"path": "app/controllers/api/v1/Web.java",
"license": "apache-2.0",
"size": 53600
} | [
"java.util.List"
] | import java.util.List; | import java.util.*; | [
"java.util"
] | java.util; | 2,186,175 |
public void setMaxColumns(int maxColumns) {
if(maxColumns <= 0) {
throw new InvalidValueException("maxColumns", maxColumns);
}
this.maxColumns = maxColumns;
} | void function(int maxColumns) { if(maxColumns <= 0) { throw new InvalidValueException(STR, maxColumns); } this.maxColumns = maxColumns; } | /**
* set maximum number of columns in the legend table
* @param maxColumns
*/ | set maximum number of columns in the legend table | setMaxColumns | {
"repo_name": "geosolutions-it/mapfish-print",
"path": "src/main/java/org/mapfish/print/config/layout/LegendsBlock.java",
"license": "gpl-3.0",
"size": 55578
} | [
"org.mapfish.print.InvalidValueException"
] | import org.mapfish.print.InvalidValueException; | import org.mapfish.print.*; | [
"org.mapfish.print"
] | org.mapfish.print; | 2,883,271 |
public SnackbarWrapper buildWrapper() {
Snackbar snackbar = Snackbar.make(parentView, message, duration);
SnackbarWrapper wrapper = new SnackbarWrapper(snackbar)
.setAction(actionText, sanitisedActionClickListener())
.setActionTextAllCaps(actionAllCaps)
.addCallbacks(callbacks)
.setIconMargin(iconMargin);
if (actionTextColor != 0) {
wrapper.setActionTextColor(actionTextColor);
}
if (messageTextColor != 0) {
wrapper.setTextColor(messageTextColor);
}
if (appendMessages != null) {
wrapper.appendMessage(appendMessages);
}
if (backgroundColor != 0) {
wrapper.setBackgroundColor(backgroundColor);
}
if (icon != null) {
wrapper.setIcon(icon);
}
return wrapper;
} | SnackbarWrapper function() { Snackbar snackbar = Snackbar.make(parentView, message, duration); SnackbarWrapper wrapper = new SnackbarWrapper(snackbar) .setAction(actionText, sanitisedActionClickListener()) .setActionTextAllCaps(actionAllCaps) .addCallbacks(callbacks) .setIconMargin(iconMargin); if (actionTextColor != 0) { wrapper.setActionTextColor(actionTextColor); } if (messageTextColor != 0) { wrapper.setTextColor(messageTextColor); } if (appendMessages != null) { wrapper.appendMessage(appendMessages); } if (backgroundColor != 0) { wrapper.setBackgroundColor(backgroundColor); } if (icon != null) { wrapper.setIcon(icon); } return wrapper; } | /**
* Build a Snackbar using the options specified in the builder. Wrap this Snackbar into a SnackbarWrapper, which
* allows further customisation.
*
* @return A SnackbarWrapper, a class which wraps a Snackbar for further customisation.
*/ | Build a Snackbar using the options specified in the builder. Wrap this Snackbar into a SnackbarWrapper, which allows further customisation | buildWrapper | {
"repo_name": "andrewlord1990/SnackbarBuilder",
"path": "snackbarbuilder/src/main/java/com/github/andrewlord1990/snackbarbuilder/SnackbarBuilder.java",
"license": "apache-2.0",
"size": 20366
} | [
"android.support.design.widget.Snackbar"
] | import android.support.design.widget.Snackbar; | import android.support.design.widget.*; | [
"android.support"
] | android.support; | 1,144,093 |
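
For reference, these are the plain support-library `Snackbar` calls that `buildWrapper()` assembles and then wraps; the wrapper adds the extra customisation (icon, colors, appended messages) on top. A sketch using only the stock API:

```java
import android.support.design.widget.Snackbar;
import android.view.View;

// The unwrapped equivalent: make, set an action, and show.
public final class PlainSnackbarDemo {
    static void show(View parentView) {
        Snackbar.make(parentView, "File deleted", Snackbar.LENGTH_LONG)
                .setAction("Undo", v -> { /* restore the file */ })
                .show();
    }
}
```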
public Sync4jDevice getDeviceModify() {
return tableResultSearchDevicePanel.getDeviceModify();
} | Sync4jDevice function() { return tableResultSearchDevicePanel.getDeviceModify(); } | /**
* Returns the modified device
* @return modified device
*/ | Returns the modified device | getDeviceModify | {
"repo_name": "accesstest3/cfunambol",
"path": "admin-suite/admin/src/com/funambol/admin/device/panels/SearchDevicePanel.java",
"license": "agpl-3.0",
"size": 13573
} | [
"com.funambol.framework.server.Sync4jDevice"
] | import com.funambol.framework.server.Sync4jDevice; | import com.funambol.framework.server.*; | [
"com.funambol.framework"
] | com.funambol.framework; | 650,468 |
private static int byteCompaction(int mode,
int[] codewords,
Charset encoding,
int codeIndex,
StringBuilder result) {
ByteArrayOutputStream decodedBytes = new ByteArrayOutputStream();
int count = 0;
long value = 0;
boolean end = false;
switch (mode) {
case BYTE_COMPACTION_MODE_LATCH:
// Total number of Byte Compaction characters to be encoded
// is not a multiple of 6
int[] byteCompactedCodewords = new int[6];
int nextCode = codewords[codeIndex++];
while ((codeIndex < codewords[0]) && !end) {
byteCompactedCodewords[count++] = nextCode;
// Base 900
value = 900 * value + nextCode;
nextCode = codewords[codeIndex++];
// perhaps it should be ok to check only nextCode >= TEXT_COMPACTION_MODE_LATCH
switch (nextCode) {
case TEXT_COMPACTION_MODE_LATCH:
case BYTE_COMPACTION_MODE_LATCH:
case NUMERIC_COMPACTION_MODE_LATCH:
case BYTE_COMPACTION_MODE_LATCH_6:
case BEGIN_MACRO_PDF417_CONTROL_BLOCK:
case BEGIN_MACRO_PDF417_OPTIONAL_FIELD:
case MACRO_PDF417_TERMINATOR:
codeIndex--;
end = true;
break;
default:
if ((count % 5 == 0) && (count > 0)) {
// Decode every 5 codewords
// Convert to Base 256
for (int j = 0; j < 6; ++j) {
decodedBytes.write((byte) (value >> (8 * (5 - j))));
}
value = 0;
count = 0;
}
break;
}
}
// if the end of all codewords is reached the last codeword needs to be added
if (codeIndex == codewords[0] && nextCode < TEXT_COMPACTION_MODE_LATCH) {
byteCompactedCodewords[count++] = nextCode;
}
// If Byte Compaction mode is invoked with codeword 901,
// the last group of codewords is interpreted directly
// as one byte per codeword, without compaction.
for (int i = 0; i < count; i++) {
decodedBytes.write((byte) byteCompactedCodewords[i]);
}
break;
case BYTE_COMPACTION_MODE_LATCH_6:
// Total number of Byte Compaction characters to be encoded
// is an integer multiple of 6
while (codeIndex < codewords[0] && !end) {
int code = codewords[codeIndex++];
if (code < TEXT_COMPACTION_MODE_LATCH) {
count++;
// Base 900
value = 900 * value + code;
} else {
switch (code) {
case TEXT_COMPACTION_MODE_LATCH:
case BYTE_COMPACTION_MODE_LATCH:
case NUMERIC_COMPACTION_MODE_LATCH:
case BYTE_COMPACTION_MODE_LATCH_6:
case BEGIN_MACRO_PDF417_CONTROL_BLOCK:
case BEGIN_MACRO_PDF417_OPTIONAL_FIELD:
case MACRO_PDF417_TERMINATOR:
codeIndex--;
end = true;
break;
}
}
if ((count % 5 == 0) && (count > 0)) {
// Decode every 5 codewords
// Convert to Base 256
for (int j = 0; j < 6; ++j) {
decodedBytes.write((byte) (value >> (8 * (5 - j))));
}
value = 0;
count = 0;
}
}
break;
}
try {
result.append(decodedBytes.toString(encoding.name()));
} catch (UnsupportedEncodingException uee) {
// can't happen
throw new IllegalStateException(uee);
}
return codeIndex;
} | static int function(int mode, int[] codewords, Charset encoding, int codeIndex, StringBuilder result) { ByteArrayOutputStream decodedBytes = new ByteArrayOutputStream(); int count = 0; long value = 0; boolean end = false; switch (mode) { case BYTE_COMPACTION_MODE_LATCH: int[] byteCompactedCodewords = new int[6]; int nextCode = codewords[codeIndex++]; while ((codeIndex < codewords[0]) && !end) { byteCompactedCodewords[count++] = nextCode; value = 900 * value + nextCode; nextCode = codewords[codeIndex++]; switch (nextCode) { case TEXT_COMPACTION_MODE_LATCH: case BYTE_COMPACTION_MODE_LATCH: case NUMERIC_COMPACTION_MODE_LATCH: case BYTE_COMPACTION_MODE_LATCH_6: case BEGIN_MACRO_PDF417_CONTROL_BLOCK: case BEGIN_MACRO_PDF417_OPTIONAL_FIELD: case MACRO_PDF417_TERMINATOR: codeIndex--; end = true; break; default: if ((count % 5 == 0) && (count > 0)) { for (int j = 0; j < 6; ++j) { decodedBytes.write((byte) (value >> (8 * (5 - j)))); } value = 0; count = 0; } break; } } if (codeIndex == codewords[0] && nextCode < TEXT_COMPACTION_MODE_LATCH) { byteCompactedCodewords[count++] = nextCode; } for (int i = 0; i < count; i++) { decodedBytes.write((byte) byteCompactedCodewords[i]); } break; case BYTE_COMPACTION_MODE_LATCH_6: while (codeIndex < codewords[0] && !end) { int code = codewords[codeIndex++]; if (code < TEXT_COMPACTION_MODE_LATCH) { count++; value = 900 * value + code; } else { switch (code) { case TEXT_COMPACTION_MODE_LATCH: case BYTE_COMPACTION_MODE_LATCH: case NUMERIC_COMPACTION_MODE_LATCH: case BYTE_COMPACTION_MODE_LATCH_6: case BEGIN_MACRO_PDF417_CONTROL_BLOCK: case BEGIN_MACRO_PDF417_OPTIONAL_FIELD: case MACRO_PDF417_TERMINATOR: codeIndex--; end = true; break; } } if ((count % 5 == 0) && (count > 0)) { for (int j = 0; j < 6; ++j) { decodedBytes.write((byte) (value >> (8 * (5 - j)))); } value = 0; count = 0; } } break; } try { result.append(decodedBytes.toString(encoding.name())); } catch (UnsupportedEncodingException uee) { throw new IllegalStateException(uee); } return codeIndex; } | /**
* Byte Compaction mode (see 5.4.3) permits all 256 possible 8-bit byte values to be encoded.
* This includes all ASCII characters value 0 to 127 inclusive and provides for international
* character set support.
*
* @param mode The byte compaction mode i.e. 901 or 924
* @param codewords The array of codewords (data + error)
* @param encoding Currently active character encoding
* @param codeIndex The current index into the codeword array.
* @param result The decoded data is appended to the result.
* @return The next index into the codeword array.
*/ | Byte Compaction mode (see 5.4.3) permits all 256 possible 8-bit byte values to be encoded. This includes all ASCII characters value 0 to 127 inclusive and provides for international character set support | byteCompaction | {
"repo_name": "tanelihuuskonen/zxing",
"path": "core/src/main/java/com/google/zxing/pdf417/decoder/DecodedBitStreamParser.java",
"license": "apache-2.0",
"size": 27032
} | [
"java.io.ByteArrayOutputStream",
"java.io.UnsupportedEncodingException",
"java.nio.charset.Charset"
] | import java.io.ByteArrayOutputStream; import java.io.UnsupportedEncodingException; import java.nio.charset.Charset; | import java.io.*; import java.nio.charset.*; | [
"java.io",
"java.nio"
] | java.io; java.nio; | 1,398,118 |
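
The arithmetic at the heart of the decoder above: six bytes (256^6 = 2^48 values) always fit in five base-900 codewords because 256^6 < 900^5, so the decoder re-accumulates each group of five codewords into one long and writes it back out as six bytes, high byte first. A worked sketch of that conversion in isolation:

```java
// Base-900 to base-256 conversion as used in the byte compaction loop.
public final class Base900Demo {
    public static void main(String[] args) {
        int[] codewords = {1, 2, 3, 4, 5}; // five codewords, each in 0..899
        long value = 0;
        for (int cw : codewords) {
            value = 900 * value + cw; // Horner accumulation in base 900
        }
        byte[] bytes = new byte[6];
        for (int j = 0; j < 6; j++) {
            bytes[j] = (byte) (value >> (8 * (5 - j))); // emit big-endian
        }
        for (byte b : bytes) {
            System.out.printf("%02x ", b); // the six bytes encoding 'value'
        }
    }
}
```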
public String getDocumentTitle() {
String documentTypeLabel = KewApiServiceLocator.getDocumentTypeService().getDocumentTypeByName(this.getDocumentHeader().getWorkflowDocument().getDocumentTypeName()).getLabel();
if (null == documentTypeLabel) {
documentTypeLabel = "";
}
String description = this.getDocumentHeader().getDocumentDescription();
if (null == description) {
description = "";
}
return documentTypeLabel + " - " + description;
} | String function() { String documentTypeLabel = KewApiServiceLocator.getDocumentTypeService().getDocumentTypeByName(this.getDocumentHeader().getWorkflowDocument().getDocumentTypeName()).getLabel(); if (null == documentTypeLabel) { documentTypeLabel = STR; } String description = this.getDocumentHeader().getDocumentDescription(); if (null == description) { description = STR; } return documentTypeLabel + STR + description; } | /**
* This is the default document title implementation. It concatenates the document's data dictionary file label attribute and
* the document's document header description together. This title is used to populate workflow and will show up in document
* search results and user action lists.
*
* @see Document#getDocumentTitle()
*/ | This is the default document title implementation. It concatenates the document's data dictionary file label attribute and the document's document header description together. This title is used to populate workflow and will show up in document search results and user action lists | getDocumentTitle | {
"repo_name": "quikkian-ua-devops/will-financials",
"path": "kfs-kns/src/main/java/org/kuali/kfs/krad/document/DocumentBase.java",
"license": "agpl-3.0",
"size": 24542
} | [
"org.kuali.rice.kew.api.KewApiServiceLocator"
] | import org.kuali.rice.kew.api.KewApiServiceLocator; | import org.kuali.rice.kew.api.*; | [
"org.kuali.rice"
] | org.kuali.rice; | 264,174 |
public Item getCurrentItem()
{
return this.currentItem;
} | Item function() { return this.currentItem; } | /**
* Get current item
*
* @return item Item object
* @see Item
*/ | Get current item | getCurrentItem | {
"repo_name": "guildenstern70/jurpe",
"path": "jurpe/src/main/java/net/littlelite/jurpe/characters/PCharacter.java",
"license": "gpl-2.0",
"size": 33722
} | [
"net.littlelite.jurpe.items.Item"
] | import net.littlelite.jurpe.items.Item; | import net.littlelite.jurpe.items.*; | [
"net.littlelite.jurpe"
] | net.littlelite.jurpe; | 1,829,374 |
@RequiredScope({view})
@ResponseStatus(HttpStatus.OK)
@RequestMapping(value = UrlHelpers.FILE_HANDLE_HANDLE_ID, method = RequestMethod.GET)
public @ResponseBody FileHandle getFileHandle(
@PathVariable String handleId,
@RequestParam(value = AuthorizationConstants.USER_ID_PARAM) Long userId) throws FileUploadException,
IOException, DatastoreException, NotFoundException,
ServiceUnavailableException, JSONObjectAdapterException {
// Get the user ID
return fileService.getFileHandle(handleId, userId);
} | @RequiredScope({view}) @ResponseStatus(HttpStatus.OK) @RequestMapping(value = UrlHelpers.FILE_HANDLE_HANDLE_ID, method = RequestMethod.GET) @ResponseBody FileHandle function( @PathVariable String handleId, @RequestParam(value = AuthorizationConstants.USER_ID_PARAM) Long userId) throws FileUploadException, IOException, DatastoreException, NotFoundException, ServiceUnavailableException, JSONObjectAdapterException { return fileService.getFileHandle(handleId, userId); } | /**
* Get a FileHandle using its ID.
* <p>
* <b>Note:</b> Only the user that created the FileHandle can access it
* directly.
* </p>
*
* @param handleId
* The ID of the FileHandle to fetch.
* @param userId
* @return
* @throws FileUploadException
* @throws IOException
* @throws DatastoreException
* @throws NotFoundException
* @throws ServiceUnavailableException
* @throws JSONObjectAdapterException
*/ | Get a FileHandle using its ID. Note: Only the user that created the FileHandle can access it directly. | getFileHandle | {
"repo_name": "Sage-Bionetworks/Synapse-Repository-Services",
"path": "services/repository/src/main/java/org/sagebionetworks/file/controller/UploadController.java",
"license": "apache-2.0",
"size": 47138
} | [
"java.io.IOException",
"org.apache.commons.fileupload.FileUploadException",
"org.sagebionetworks.repo.model.AuthorizationConstants",
"org.sagebionetworks.repo.model.DatastoreException",
"org.sagebionetworks.repo.model.file.FileHandle",
"org.sagebionetworks.repo.web.NotFoundException",
"org.sagebionetworks.repo.web.RequiredScope",
"org.sagebionetworks.repo.web.ServiceUnavailableException",
"org.sagebionetworks.repo.web.UrlHelpers",
"org.sagebionetworks.schema.adapter.JSONObjectAdapterException",
"org.springframework.http.HttpStatus",
"org.springframework.web.bind.annotation.PathVariable",
"org.springframework.web.bind.annotation.RequestMapping",
"org.springframework.web.bind.annotation.RequestMethod",
"org.springframework.web.bind.annotation.RequestParam",
"org.springframework.web.bind.annotation.ResponseBody",
"org.springframework.web.bind.annotation.ResponseStatus"
] | import java.io.IOException; import org.apache.commons.fileupload.FileUploadException; import org.sagebionetworks.repo.model.AuthorizationConstants; import org.sagebionetworks.repo.model.DatastoreException; import org.sagebionetworks.repo.model.file.FileHandle; import org.sagebionetworks.repo.web.NotFoundException; import org.sagebionetworks.repo.web.RequiredScope; import org.sagebionetworks.repo.web.ServiceUnavailableException; import org.sagebionetworks.repo.web.UrlHelpers; import org.sagebionetworks.schema.adapter.JSONObjectAdapterException; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.ResponseStatus; | import java.io.*; import org.apache.commons.fileupload.*; import org.sagebionetworks.repo.model.*; import org.sagebionetworks.repo.model.file.*; import org.sagebionetworks.repo.web.*; import org.sagebionetworks.schema.adapter.*; import org.springframework.http.*; import org.springframework.web.bind.annotation.*; | [
"java.io",
"org.apache.commons",
"org.sagebionetworks.repo",
"org.sagebionetworks.schema",
"org.springframework.http",
"org.springframework.web"
] | java.io; org.apache.commons; org.sagebionetworks.repo; org.sagebionetworks.schema; org.springframework.http; org.springframework.web; | 2,383,749 |
public boolean isApplicable(DamageModifier type) throws IllegalArgumentException {
Validate.notNull(type, "Cannot have null DamageModifier");
return modifiers.containsKey(type);
} | boolean function(DamageModifier type) throws IllegalArgumentException { Validate.notNull(type, STR); return modifiers.containsKey(type); } | /**
* This checks to see if a particular modifier is valid for this event's
* caller, such that, {@link #setDamage(DamageModifier, double)} will not
* throw an {@link UnsupportedOperationException}.
* <p>
* {@link DamageModifier#BASE} is always applicable.
*
* @param type the modifier
* @return true if the modifier is supported by the caller, false otherwise
* @throws IllegalArgumentException if type is null
*/ | This checks to see if a particular modifier is valid for this event's caller, such that, <code>#setDamage(DamageModifier, double)</code> will not throw an <code>UnsupportedOperationException</code>. <code>DamageModifier#BASE</code> is always applicable | isApplicable | {
"repo_name": "GlowstonePlusPlus/Glowkit",
"path": "src/main/java/org/bukkit/event/entity/EntityDamageEvent.java",
"license": "gpl-3.0",
"size": 13397
} | [
"org.apache.commons.lang3.Validate"
] | import org.apache.commons.lang3.Validate; | import org.apache.commons.lang3.*; | [
"org.apache.commons"
] | org.apache.commons; | 1,654,231 |
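
A usage sketch for the guard above: call `isApplicable()` before `setDamage(DamageModifier, double)` so the unsupported-modifier case is avoided rather than caught. The listener below uses standard Bukkit event wiring:

```java
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.entity.EntityDamageEvent;
import org.bukkit.event.entity.EntityDamageEvent.DamageModifier;

// Only adjusts the ARMOR modifier when the event's caller supports it.
public final class DamageListener implements Listener {
    @EventHandler
    public void onDamage(EntityDamageEvent event) {
        if (event.isApplicable(DamageModifier.ARMOR)) {
            event.setDamage(DamageModifier.ARMOR, 0.0); // cancel armor reduction
        }
    }
}
```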
// START SNIPPET: e2
public String slip(String body, @Headers Map<String, Object> headers, @Header(Exchange.SLIP_ENDPOINT) String previous) {
bodies.add(body);
if (previous != null) {
previouses.add(previous);
}
// get the state from the message headers and keep track how many times
// we have been invoked
int invoked = 0;
Object current = headers.get("invoked");
if (current != null) {
invoked = Integer.valueOf(current.toString());
}
invoked++;
// and store the state back on the headers
headers.put("invoked", invoked);
if (invoked == 1) {
return "mock:a";
} else if (invoked == 2) {
return "mock:b,mock:c";
} else if (invoked == 3) {
return "direct:foo";
} else if (invoked == 4) {
return "mock:result";
}
// no more so return null
return null;
}
// END SNIPPET: e2 | String function(String body, @Headers Map<String, Object> headers, @Header(Exchange.SLIP_ENDPOINT) String previous) { bodies.add(body); if (previous != null) { previouses.add(previous); } int invoked = 0; Object current = headers.get(STR); if (current != null) { invoked = Integer.valueOf(current.toString()); } invoked++; headers.put(STR, invoked); if (invoked == 1) { return STR; } else if (invoked == 2) { return STR; } else if (invoked == 3) { return STR; } else if (invoked == 4) { return STR; } return null; } | /**
* Use this method to compute dynamic where we should route next.
*
* @param body the message body
* @param headers the message headers where we can store state between invocations
* @param previous the previous slip
* @return endpoints to go, or <tt>null</tt> to indicate the end
*/ | Use this method to compute dynamic where we should route next | slip | {
"repo_name": "punkhorn/camel-upstream",
"path": "core/camel-core/src/test/java/org/apache/camel/processor/DynamicRouterExchangeHeaders2Test.java",
"license": "apache-2.0",
"size": 4641
} | [
"java.util.Map",
"org.apache.camel.Exchange",
"org.apache.camel.Header",
"org.apache.camel.Headers"
] | import java.util.Map; import org.apache.camel.Exchange; import org.apache.camel.Header; import org.apache.camel.Headers; | import java.util.*; import org.apache.camel.*; | [
"java.util",
"org.apache.camel"
] | java.util; org.apache.camel; | 307,992 |
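
For context, a slip method like the one above is wired into a Camel route with the `dynamicRouter` DSL: Camel re-invokes the method after every step and stops routing when it returns null. A sketch, where `MySlipBean` is a hypothetical class holding the slip method:

```java
import org.apache.camel.builder.RouteBuilder;

// Routes each exchange through whatever endpoints the slip method returns,
// one round-trip at a time, until the method returns null.
public class DynamicRouterRoute extends RouteBuilder {
    @Override
    public void configure() {
        from("direct:start")
            .dynamicRouter(method(MySlipBean.class, "slip"));
    }
}
```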
@Test
public void testWebExceptionWithCauseAndStatus() throws Exception {
HttpPost postMethod = new HttpPost(nullconditionsUri + "/webappexceptionwithcauseandstatus");
HttpResponse resp = client.execute(postMethod);
// assertEquals(499, resp.getStatusLine().getStatusCode());
assertEquals("RuntimeExceptionMapper was used", asString(resp));
} | void function() throws Exception { HttpPost postMethod = new HttpPost(nullconditionsUri + STR); HttpResponse resp = client.execute(postMethod); assertEquals(STR, asString(resp)); } | /**
* Tests that a <code>WebApplicationException</code> constructed with a
* cause and status will return status and no response body by default.
*
* @throws Exception
*/ | Tests that a <code>WebApplicationException</code> constructed with a cause and status will return status and no response body by default | testWebExceptionWithCauseAndStatus | {
"repo_name": "kgibm/open-liberty",
"path": "dev/com.ibm.ws.jaxrs.2.0_fat/fat/src/com/ibm/ws/jaxrs20/fat/exceptionmappers/ExceptionMappersTest.java",
"license": "epl-1.0",
"size": 28476
} | [
"com.ibm.ws.jaxrs20.fat.TestUtils",
"org.apache.http.HttpResponse",
"org.apache.http.client.methods.HttpPost",
"org.junit.Assert"
] | import com.ibm.ws.jaxrs20.fat.TestUtils; import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpPost; import org.junit.Assert; | import com.ibm.ws.jaxrs20.fat.*; import org.apache.http.*; import org.apache.http.client.methods.*; import org.junit.*; | [
"com.ibm.ws",
"org.apache.http",
"org.junit"
] | com.ibm.ws; org.apache.http; org.junit; | 1,633,372 |
public void cancelAlarm(Alarm alarm) {
Intent intent = new Intent(mContext, AlarmIntentService.class);
intent.putExtra(AlarmIntentService.ALARM_KEY, alarm);
PendingIntent pendingIntent = PendingIntent
.getService(mContext, alarm.id, intent, PendingIntent.FLAG_UPDATE_CURRENT);
mAlarmManager.cancel(pendingIntent);
} | void function(Alarm alarm) { Intent intent = new Intent(mContext, AlarmIntentService.class); intent.putExtra(AlarmIntentService.ALARM_KEY, alarm); PendingIntent pendingIntent = PendingIntent .getService(mContext, alarm.id, intent, PendingIntent.FLAG_UPDATE_CURRENT); mAlarmManager.cancel(pendingIntent); } | /**
* Cancels the scheduled alarm.
*
* @param alarm the alarm to be canceled.
*/ | Cancels the scheduled alarm | cancelAlarm | {
"repo_name": "wiki2014/Learning-Summary",
"path": "alps/developers/samples/android/security/DirectBoot/Application/src/main/java/com/example/android/directboot/alarms/AlarmUtil.java",
"license": "gpl-3.0",
"size": 3561
} | [
"android.app.PendingIntent",
"android.content.Intent"
] | import android.app.PendingIntent; import android.content.Intent; | import android.app.*; import android.content.*; | [
"android.app",
"android.content"
] | android.app; android.content; | 993,003 |
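
Note that `AlarmManager.cancel()` only removes an alarm whose `PendingIntent` matches by intent (`filterEquals`) and request code, so the scheduling side must build the `PendingIntent` exactly the same way as `cancelAlarm()` does. A counterpart sketch; the method shape and the `setExact` flags are assumptions mirroring the record:

```java
import android.app.AlarmManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;

// Builds the PendingIntent with the same Intent and request code (alarm.id)
// as cancelAlarm(), so cancellation later matches this scheduled alarm.
final class AlarmScheduleSketch {
    static void scheduleAlarm(Context context, AlarmManager alarmManager,
                              int alarmId, long triggerAtMillis) {
        Intent intent = new Intent(context, AlarmIntentService.class);
        PendingIntent pendingIntent = PendingIntent.getService(
                context, alarmId, intent, PendingIntent.FLAG_UPDATE_CURRENT);
        alarmManager.setExact(AlarmManager.RTC_WAKEUP, triggerAtMillis, pendingIntent);
    }
}
```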
static void remove(Component component) {
remove(component, true);
} | static void remove(Component component) { remove(component, true); } | /**
* Removes the drag-and-drop hooks from the component and optionally
* from the all children. You should call this if you add and remove
* components after you've set up the drag-and-drop.
* This will recursively unregister all components contained within
* <var>c</var> if <var>c</var> is a {@link Container}.
*
* @param component The component to unregister as a drop target
* @since 1.0
*/ | Removes the drag-and-drop hooks from the component and optionally from the all children. You should call this if you add and remove components after you've set up the drag-and-drop. This will recursively unregister all components contained within c if c is a <code>Container</code> | remove | {
"repo_name": "StetsiukRoman/checkstyle",
"path": "src/main/java/com/puppycrawl/tools/checkstyle/gui/FileDrop.java",
"license": "lgpl-2.1",
"size": 11441
} | [
"java.awt.Component"
] | import java.awt.Component; | import java.awt.*; | [
"java.awt"
] | java.awt; | 1,126,293 |
public Date resolveRequestTime(I input); | Date function(I input); | /**
* Resolves the request time in the server's time that this request was made by the client. This method MUST NOT return null.
*/ | Resolves the request time in the server's time that this request was made by the client. This method MUST NOT return null | resolveRequestTime | {
"repo_name": "betfair/cougar",
"path": "cougar-framework/cougar-transport-api/src/main/java/com/betfair/cougar/transport/api/RequestTimeResolver.java",
"license": "apache-2.0",
"size": 960
} | [
"java.util.Date"
] | import java.util.Date; | import java.util.*; | [
"java.util"
] | java.util; | 221,874 |
public void run() throws Exception {
new Runner(optionsBuilder().build()).run();
} | void function() throws Exception { new Runner(optionsBuilder().build()).run(); } | /**
* Run benchmarks.
*
* @throws Exception If failed.
*/ | Run benchmarks | run | {
"repo_name": "ilantukh/ignite",
"path": "modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/runner/JmhIdeBenchmarkRunner.java",
"license": "apache-2.0",
"size": 5895
} | [
"org.openjdk.jmh.runner.Runner"
] | import org.openjdk.jmh.runner.Runner; | import org.openjdk.jmh.runner.*; | [
"org.openjdk.jmh"
] | org.openjdk.jmh; | 2,461,133 |
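
The `optionsBuilder()` call in the record is a project helper; the typical standalone equivalent builds JMH `Options` directly and hands them to the `Runner`. A sketch, where `MyBenchmark` is a hypothetical benchmark class:

```java
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

// Standard JMH entry point: select benchmarks, configure iterations, run.
public final class BenchmarkMain {
    public static void main(String[] args) throws Exception {
        Options options = new OptionsBuilder()
                .include(MyBenchmark.class.getSimpleName()) // which benchmarks to run
                .forks(1)
                .warmupIterations(5)
                .measurementIterations(5)
                .build();
        new Runner(options).run();
    }
}
```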
protected IndexShard newStartedShard(
final boolean primary, final Settings settings, final EngineFactory engineFactory) throws IOException {
return newStartedShard(p -> newShard(p, settings, engineFactory), primary);
} | IndexShard function( final boolean primary, final Settings settings, final EngineFactory engineFactory) throws IOException { return newStartedShard(p -> newShard(p, settings, engineFactory), primary); } | /**
* Creates a new empty shard with the specified settings and engine factory and starts it.
*
* @param primary controls whether the shard will be a primary or a replica.
* @param settings the settings to use for this shard
* @param engineFactory the engine factory to use for this shard
*/ | Creates a new empty shard with the specified settings and engine factory and starts it | newStartedShard | {
"repo_name": "HonzaKral/elasticsearch",
"path": "test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java",
"license": "apache-2.0",
"size": 44064
} | [
"java.io.IOException",
"org.elasticsearch.common.settings.Settings",
"org.elasticsearch.index.engine.EngineFactory"
] | import java.io.IOException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.EngineFactory; | import java.io.*; import org.elasticsearch.common.settings.*; import org.elasticsearch.index.engine.*; | [
"java.io",
"org.elasticsearch.common",
"org.elasticsearch.index"
] | java.io; org.elasticsearch.common; org.elasticsearch.index; | 1,845,832 |
public void close() throws SQLException {
JdbcUtil.closeQuietly(sqlConnection);
sqlConnection = null;
} | void function() throws SQLException { JdbcUtil.closeQuietly(sqlConnection); sqlConnection = null; } | /**
* Close the connection.
* <p>
* If the connection is closed, then any attempt to get the underlying
* connection will throw a SQLException
*
* @throws SQLException on error
*/ | Close the connection. If the connection is closed, then any attempt to get the underlying connection will throw a SQLException | close | {
"repo_name": "adaptris/interlok",
"path": "interlok-core/src/main/java/com/adaptris/jdbc/connection/FailoverConnection.java",
"license": "apache-2.0",
"size": 4298
} | [
"com.adaptris.core.util.JdbcUtil",
"java.sql.SQLException"
] | import com.adaptris.core.util.JdbcUtil; import java.sql.SQLException; | import com.adaptris.core.util.*; import java.sql.*; | [
"com.adaptris.core",
"java.sql"
] | com.adaptris.core; java.sql; | 2,553,216 |
protected final ByteBuffer getBufferInternal() {
return this.buffer;
} | final ByteBuffer function() { return this.buffer; } | /**
* Returns the actual ByteBuffer instance referenced by this object.
*
* @return
*/ | Returns the actual ByteBuffer instance referenced by this object | getBufferInternal | {
"repo_name": "chenxiuheng/js4ms",
"path": "js4ms-jsdk/common/src/main/java/org/js4ms/common/util/buffer/BufferBackedObject.java",
"license": "apache-2.0",
"size": 3969
} | [
"java.nio.ByteBuffer"
] | import java.nio.ByteBuffer; | import java.nio.*; | [
"java.nio"
] | java.nio; | 2,324,146 |
public void serialize(Collection<Instance> instances, JsonGenerator generator) {
serialize(instances, allProperties(), generator);
} | void function(Collection<Instance> instances, JsonGenerator generator) { serialize(instances, allProperties(), generator); } | /**
* Converts the provided collection of {@link Instance}s to a {@link JsonObject} with its properties and relations.
* The method is optimized for batch loading of relations and should be used for serializing more than one instance.
*
* @param instances the provided instances to serialize
* @param generator the JSON generator where the instance is serialized
*/ | Converts the provided collection of <code>Instance</code>s to a <code>JsonObject</code> with its properties and relations. The method is optimized for batch loading of relations and should be used for serializing more than one instance | serialize | {
"repo_name": "SirmaITT/conservation-space-1.7.0",
"path": "docker/sirma-platform/platform/seip-parent/platform/rest-api/src/main/java/com/sirma/itt/seip/rest/handlers/writers/InstanceToJsonSerializer.java",
"license": "lgpl-3.0",
"size": 20086
} | [
"com.sirma.itt.seip.domain.instance.Instance",
"java.util.Collection",
"javax.json.stream.JsonGenerator"
] | import com.sirma.itt.seip.domain.instance.Instance; import java.util.Collection; import javax.json.stream.JsonGenerator; | import com.sirma.itt.seip.domain.instance.*; import java.util.*; import javax.json.stream.*; | [
"com.sirma.itt",
"java.util",
"javax.json"
] | com.sirma.itt; java.util; javax.json; | 845,441 |
public IEntity saveToMemento(IEntity memento); | IEntity function(IEntity memento); | /**
* Save the state into the memento.
* @param memento to be created if null, to be updated if not
* @return the memto to persist
*/ | Save the state into the memento | saveToMemento | {
"repo_name": "apruden/onyx",
"path": "onyx-core/src/main/java/org/obiba/onyx/core/domain/IMemento.java",
"license": "gpl-3.0",
"size": 1058
} | [
"org.obiba.core.domain.IEntity"
] | import org.obiba.core.domain.IEntity; | import org.obiba.core.domain.*; | [
"org.obiba.core"
] | org.obiba.core; | 2,237,201 |
private void notifyExecute()
{
String name = currentTask.getName();
Iterator<InitializationListener> i = initListeners.iterator();
while (i.hasNext())
i.next().onExecute(name);
} | void function() { String name = currentTask.getName(); Iterator<InitializationListener> i = initListeners.iterator(); while (i.hasNext()) i.next().onExecute(name); } | /**
* Calls the <code>onExecute</code> method of each subscriber in the
* notification set.
*/ | Calls the <code>onExecute</code> method of each subscriber in the notification set | notifyExecute | {
"repo_name": "knabar/openmicroscopy",
"path": "components/insight/SRC/org/openmicroscopy/shoola/env/init/Initializer.java",
"license": "gpl-2.0",
"size": 11164
} | [
"java.util.Iterator"
] | import java.util.Iterator; | import java.util.*; | [
"java.util"
] | java.util; | 338,067 |
public void seekToBeginning(TopicPartition... partitions) {
acquire();
try {
Collection<TopicPartition> parts = partitions.length == 0 ? this.subscriptions.assignedPartitions()
: Arrays.asList(partitions);
for (TopicPartition tp : parts) {
log.debug("Seeking to beginning of partition {}", tp);
subscriptions.needOffsetReset(tp, OffsetResetStrategy.EARLIEST);
}
} finally {
release();
}
} | void function(TopicPartition... partitions) { acquire(); try { Collection<TopicPartition> parts = partitions.length == 0 ? this.subscriptions.assignedPartitions() : Arrays.asList(partitions); for (TopicPartition tp : parts) { log.debug(STR, tp); subscriptions.needOffsetReset(tp, OffsetResetStrategy.EARLIEST); } } finally { release(); } } | /**
* Seek to the first offset for each of the given partitions
*/ | Seek to the first offset for each of the given partitions | seekToBeginning | {
"repo_name": "binn-yang/kafka",
"path": "clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java",
"license": "apache-2.0",
"size": 59647
} | [
"java.util.Arrays",
"java.util.Collection",
"org.apache.kafka.common.TopicPartition"
] | import java.util.Arrays; import java.util.Collection; import org.apache.kafka.common.TopicPartition; | import java.util.*; import org.apache.kafka.common.*; | [
"java.util",
"org.apache.kafka"
] | java.util; org.apache.kafka; | 50,306 |
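A usage sketch for the method above (topic name and properties are placeholders). With an empty varargs list every currently assigned partition is rewound, and the reset takes effect lazily, since the method only flags needOffsetReset:

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList("events"));
consumer.poll(0);           // join the group and obtain an assignment first
consumer.seekToBeginning(); // no args: rewind all assigned partitions
consumer.seekToBeginning(new TopicPartition("events", 0)); // or one specific partition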
@Override
public String getSelectByExampleWithBLOBsMethodName(
IntrospectedTable introspectedTable) {
StringBuilder sb = new StringBuilder();
sb.append("select");
sb.append(introspectedTable.getFullyQualifiedTable()
.getDomainObjectName());
sb.append("ByExample");
Rules rules = introspectedTable.getRules();
if (rules.generateSelectByExampleWithoutBLOBs()) {
sb.append("WithBLOBs");
}
return sb.toString();
} | String function( IntrospectedTable introspectedTable) { StringBuilder sb = new StringBuilder(); sb.append(STR); sb.append(introspectedTable.getFullyQualifiedTable() .getDomainObjectName()); sb.append(STR); Rules rules = introspectedTable.getRules(); if (rules.generateSelectByExampleWithoutBLOBs()) { sb.append(STR); } return sb.toString(); } | /**
* 1. if this will be the only selectByExample, then the result should be
* selectByExample. 2. Else the method name should be
* selectByExampleWithBLOBs
*/ | 1. if this will be the only selectByExample, then the result should be selectByExample. 2. Else the method name should be selectByExampleWithBLOBs | getSelectByExampleWithBLOBsMethodName | {
"repo_name": "solmix/datax",
"path": "generator/core/src/main/java/org/solmix/generator/internal/ExtendedDAOMethodNameCalculator.java",
"license": "lgpl-2.1",
"size": 8769
} | [
"org.solmix.generator.api.IntrospectedTable",
"org.solmix.generator.internal.rules.Rules"
] | import org.solmix.generator.api.IntrospectedTable; import org.solmix.generator.internal.rules.Rules; | import org.solmix.generator.api.*; import org.solmix.generator.internal.rules.*; | [
"org.solmix.generator"
] | org.solmix.generator; | 885,898 |
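An illustrative trace of the naming rule (the Customer domain object is hypothetical):

String name = calculator.getSelectByExampleWithBLOBsMethodName(introspectedTable);
// base name: "select" + "Customer" + "ByExample"  -> "selectCustomerByExample"
// when a WithoutBLOBs variant is also generated   -> "selectCustomerByExampleWithBLOBs"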
private void processFlowUpdate(Flow flow) {
final String flowId = flow.getFlowId();
ImmutablePair<Flow, Flow> cachedFlow = flowCache.getFlow(flowId);
//check whether flow path was changed
if (!flowPathWasChanges(cachedFlow.getLeft(), flow)) {
Set<PathNode> affectedNodes = getAffectedNodes(cachedFlow.getLeft().getFlowPath().getPath(),
flow.getFlowPath().getPath());
logger.debug("Saving flow {} new path for possibility to rollback it", flow.getFlowId());
affectedNodes.stream()
.map(PathNode::getSwitchId)
//we need to store only deactivated switches, so when they come up again we will be able
//to try to reroute flows through these switches
.filter(switchId -> !networkCache.switchIsOperable(switchId))
.forEach(switchId -> {
Set<String> flows = reroutedFlows.get(switchId);
if (CollectionUtils.isEmpty(flows)) {
reroutedFlows.put(switchId, Sets.newHashSet(flowId));
} else {
flows.add(flowId);
}
});
}
} | void function(Flow flow) { final String flowId = flow.getFlowId(); ImmutablePair<Flow, Flow> cachedFlow = flowCache.getFlow(flowId); if (!flowPathWasChanges(cachedFlow.getLeft(), flow)) { Set<PathNode> affectedNodes = getAffectedNodes(cachedFlow.getLeft().getFlowPath().getPath(), flow.getFlowPath().getPath()); logger.debug(STR, flow.getFlowId()); affectedNodes.stream() .map(PathNode::getSwitchId) .filter(switchId -> !networkCache.switchIsOperable(switchId)) .forEach(switchId -> { Set<String> flows = reroutedFlows.get(switchId); if (CollectionUtils.isEmpty(flows)) { reroutedFlows.put(switchId, Sets.newHashSet(flowId)); } else { flows.add(flowId); } }); } } | /**
* Checks whether the flow path was changed. If so, we store the switches through which the flow was built.
* @param flow flow to be processed
*/ | Checks whether the flow path was changed. If so, we store the switches through which the flow was built | processFlowUpdate | {
"repo_name": "nikitamarchenko/open-kilda",
"path": "services/wfm/src/main/java/org/openkilda/wfm/topology/cache/CacheBolt.java",
"license": "apache-2.0",
"size": 27329
} | [
"com.google.common.collect.Sets",
"java.util.Set",
"org.apache.commons.collections.CollectionUtils",
"org.openkilda.messaging.info.event.PathNode",
"org.openkilda.messaging.model.Flow",
"org.openkilda.messaging.model.ImmutablePair"
] | import com.google.common.collect.Sets; import java.util.Set; import org.apache.commons.collections.CollectionUtils; import org.openkilda.messaging.info.event.PathNode; import org.openkilda.messaging.model.Flow; import org.openkilda.messaging.model.ImmutablePair; | import com.google.common.collect.*; import java.util.*; import org.apache.commons.collections.*; import org.openkilda.messaging.info.event.*; import org.openkilda.messaging.model.*; | [
"com.google.common",
"java.util",
"org.apache.commons",
"org.openkilda.messaging"
] | com.google.common; java.util; org.apache.commons; org.openkilda.messaging; | 1,536,430 |
public HostService host() {
return hostService;
} | HostService function() { return hostService; } | /**
* Returns a reference to the host service.
*
* @return host service reference
*/ | Returns a reference to the host service | host | {
"repo_name": "opennetworkinglab/onos",
"path": "web/gui/src/main/java/org/onosproject/ui/impl/topo/util/ServicesBundle.java",
"license": "apache-2.0",
"size": 5813
} | [
"org.onosproject.net.host.HostService"
] | import org.onosproject.net.host.HostService; | import org.onosproject.net.host.*; | [
"org.onosproject.net"
] | org.onosproject.net; | 723,470 |
@Test
public void testGetDataNodesForGenomes() throws Exception {
assertEquals(tree.getDataNodesForGenomes(genomeIDs, 0).size(), 1);
assertEquals(tree.getDataNodesForGenomes(genomeIDs, 1).size(), 3);
for (int i = 1; i < 5; i++) {
Set<DataNode> testArray = tree.getDataNodesForGenomes(genomeIDs, i);
assertEquals(testArray.size(), 3);
for (DataNode node : testArray) {
assertEquals(node.getStrands().size(), 1);
}
}
} | void function() throws Exception { assertEquals(tree.getDataNodesForGenomes(genomeIDs, 0).size(), 1); assertEquals(tree.getDataNodesForGenomes(genomeIDs, 1).size(), 3); for (int i = 1; i < 5; i++) { Set<DataNode> testArray = tree.getDataNodesForGenomes(genomeIDs, i); assertEquals(testArray.size(), 3); for (DataNode node : testArray) { assertEquals(node.getStrands().size(), 1); } } } | /**
* Test if nodes are returned for multiple genomes, and test if level works.
*
* @throws Exception if fail.
*/ | Test if nodes are returned for multiple genomes, and test if level works | testGetDataNodesForGenomes | {
"repo_name": "ProgrammingLife2016/PL5-2016",
"path": "backEnd/src/test/java/datatree/DataTreeTest.java",
"license": "apache-2.0",
"size": 6289
} | [
"java.util.Set",
"org.junit.Assert"
] | import java.util.Set; import org.junit.Assert; | import java.util.*; import org.junit.*; | [
"java.util",
"org.junit"
] | java.util; org.junit; | 528,381 |
@SuppressWarnings("unchecked")
public Collection<ToolResource> getAllTools(LicenseType licenseType) throws RepositoryBackendException {
return (Collection<ToolResource>) getAllResources(licenseType, ResourceType.TOOL);
} | @SuppressWarnings(STR) Collection<ToolResource> function(LicenseType licenseType) throws RepositoryBackendException { return (Collection<ToolResource>) getAllResources(licenseType, ResourceType.TOOL); } | /**
* Get all tools with a given license type
*
* @param licenseType the license type to filter by
* @throws RepositoryBackendException
*/ | Get all tools with a given license type | getAllTools | {
"repo_name": "ashleyrobertson/tool.lars",
"path": "client-lib/src/main/java/com/ibm/ws/repository/connections/RepositoryConnectionList.java",
"license": "apache-2.0",
"size": 29839
} | [
"com.ibm.ws.repository.common.enums.LicenseType",
"com.ibm.ws.repository.common.enums.ResourceType",
"com.ibm.ws.repository.exceptions.RepositoryBackendException",
"com.ibm.ws.repository.resources.ToolResource",
"java.util.Collection"
] | import com.ibm.ws.repository.common.enums.LicenseType; import com.ibm.ws.repository.common.enums.ResourceType; import com.ibm.ws.repository.exceptions.RepositoryBackendException; import com.ibm.ws.repository.resources.ToolResource; import java.util.Collection; | import com.ibm.ws.repository.common.enums.*; import com.ibm.ws.repository.exceptions.*; import com.ibm.ws.repository.resources.*; import java.util.*; | [
"com.ibm.ws",
"java.util"
] | com.ibm.ws; java.util; | 1,454,733 |
public Object execute(Handler handler) throws Exception {
int parallelism = Runtime.getRuntime().availableProcessors();
System.out.println("parallelism = " + parallelism);
JAI.getDefaultInstance().getTileScheduler().setParallelism(parallelism);
Dimension frameSize = getFrameSize();
int numXFrames = 1 + (product.getSceneRasterWidth() - 1) / frameSize.width;
int numYFrames = 1 + (product.getSceneRasterHeight() - 1) / frameSize.height;
Rectangle sceneRegion = new Rectangle(product.getSceneRasterWidth(), product.getSceneRasterHeight());
for (int frameY = 0; frameY < numYFrames; frameY++) {
for (int frameX = 0; frameX < numXFrames; frameX++) {
Rectangle frameRegion = new Rectangle(frameX * frameSize.width,
frameY * frameSize.height,
frameSize.width,
frameSize.height).intersection(sceneRegion);
int numBands = product.getNumBands();
Band[] bandArray = new Band[numBands];
ProductData[] dataArray = new ProductData[numBands];
for (int b = 0; b < numBands; b++) {
Band band = product.getBandAt(b);
PlanarImage planarImage = band.getSourceImage();
Point[] indices = planarImage.getTileIndices(null);
System.out.println("indices = " + indices.length);
TileRequest tileRequest = planarImage.queueTiles(indices);
Raster raster = planarImage.getData();
System.out.println("raster = " + raster);
ProductData data = band.createCompatibleRasterData(frameRegion.width, frameRegion.height);
band.readRasterData(frameRegion.x, frameRegion.y, frameRegion.width, frameRegion.height, data);
bandArray[b] = band;
dataArray[b] = data;
}
MyFrame frame = new MyFrame(frameRegion, bandArray, dataArray);
handler.frameComputed(frame);
}
}
return new Object();
} | Object function(Handler handler) throws Exception { int parallelism = Runtime.getRuntime().availableProcessors(); System.out.println(STR + parallelism); JAI.getDefaultInstance().getTileScheduler().setParallelism(parallelism); Dimension frameSize = getFrameSize(); int numXFrames = 1 + (product.getSceneRasterWidth() - 1) / frameSize.width; int numYFrames = 1 + (product.getSceneRasterHeight() - 1) / frameSize.height; Rectangle sceneRegion = new Rectangle(product.getSceneRasterWidth(), product.getSceneRasterHeight()); for (int frameY = 0; frameY < numYFrames; frameY++) { for (int frameX = 0; frameX < numXFrames; frameX++) { Rectangle frameRegion = new Rectangle(frameX * frameSize.width, frameY * frameSize.height, frameSize.width, frameSize.height).intersection(sceneRegion); int numBands = product.getNumBands(); Band[] bandArray = new Band[numBands]; ProductData[] dataArray = new ProductData[numBands]; for (int b = 0; b < numBands; b++) { Band band = product.getBandAt(b); PlanarImage planarImage = band.getSourceImage(); Point[] indices = planarImage.getTileIndices(null); System.out.println(STR + indices.length); TileRequest tileRequest = planarImage.queueTiles(indices); Raster raster = planarImage.getData(); System.out.println(STR + raster); ProductData data = band.createCompatibleRasterData(frameRegion.width, frameRegion.height); band.readRasterData(frameRegion.x, frameRegion.y, frameRegion.width, frameRegion.height, data); bandArray[b] = band; dataArray[b] = data; } MyFrame frame = new MyFrame(frameRegion, bandArray, dataArray); handler.frameComputed(frame); } } return new Object(); } | /**
* Calls the given handler for all frames of the operator executor's product.
*/ | Calls the given handler for all frames of the operator executor's product | execute | {
"repo_name": "arraydev/snap-engine",
"path": "snap-gpf/src/main/java/org/esa/snap/framework/gpf/experimental/OperatorExecutor2.java",
"license": "gpl-3.0",
"size": 5900
} | [
"java.awt.Dimension",
"java.awt.Point",
"java.awt.Rectangle",
"java.awt.image.Raster",
"javax.media.jai.JAI",
"javax.media.jai.PlanarImage",
"javax.media.jai.TileRequest",
"org.esa.snap.framework.datamodel.Band",
"org.esa.snap.framework.datamodel.ProductData"
] | import java.awt.Dimension; import java.awt.Point; import java.awt.Rectangle; import java.awt.image.Raster; import javax.media.jai.JAI; import javax.media.jai.PlanarImage; import javax.media.jai.TileRequest; import org.esa.snap.framework.datamodel.Band; import org.esa.snap.framework.datamodel.ProductData; | import java.awt.*; import java.awt.image.*; import javax.media.jai.*; import org.esa.snap.framework.datamodel.*; | [
"java.awt",
"javax.media",
"org.esa.snap"
] | java.awt; javax.media; org.esa.snap; | 2,148,656 |
public JSONObject connectAndQueryWithGet(ApiQuery query) {
try {
HttpResponse<JsonNode> queryForResponse = Unirest.get(url)
.header("Accept", "application/json")
.header("Authorization", "Bearer " + apiKey)
.queryString(query.getParams())
.asJson();
return queryForResponse.getBody().getObject();
} catch (UnirestException e) {
e.printStackTrace();
}
return null;
} | JSONObject function(ApiQuery query) { try { HttpResponse<JsonNode> queryForResponse = Unirest.get(url) .header(STR, STR) .header(STR, STR + apiKey) .queryString(query.getParams()) .asJson(); return queryForResponse.getBody().getObject(); } catch (UnirestException e) { e.printStackTrace(); } return null; } | /**
* Used to send a GET request to the Paystack API
*
* @param query - APIQuery containing parameters to send
* @return - JSONObject containing API response
*/ | Used to send a GET request to the Paystack API | connectAndQueryWithGet | {
"repo_name": "SeunAdelekan/PaystackJava",
"path": "src/me/iyanuadelekan/paystackjava/core/ApiConnection.java",
"license": "mit",
"size": 7392
} | [
"com.mashape.unirest.http.HttpResponse",
"com.mashape.unirest.http.JsonNode",
"com.mashape.unirest.http.Unirest",
"com.mashape.unirest.http.exceptions.UnirestException",
"org.json.JSONObject"
] | import com.mashape.unirest.http.HttpResponse; import com.mashape.unirest.http.JsonNode; import com.mashape.unirest.http.Unirest; import com.mashape.unirest.http.exceptions.UnirestException; import org.json.JSONObject; | import com.mashape.unirest.http.*; import com.mashape.unirest.http.exceptions.*; import org.json.*; | [
"com.mashape.unirest",
"org.json"
] | com.mashape.unirest; org.json; | 1,921,797 |
protected static Bitmap createBitmapAndGcIfNecessary(int width, int height) {
try {
return Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
} catch (OutOfMemoryError e) {
System.gc();
return Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
}
} | static Bitmap function(int width, int height) { try { return Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888); } catch (OutOfMemoryError e) { System.gc(); return Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888); } } | /**
* Creates a bitmap with the given width and height.
* <p/>
* If it fails with an OutOfMemory error, it will force a GC and then try to create the bitmap
* one more time.
*
* @param width width of the bitmap
* @param height height of the bitmap
*/ | Creates a bitmap with the given width and height. If it fails with an OutOfMemory error, it will force a GC and then try to create the bitmap one more time | createBitmapAndGcIfNecessary | {
"repo_name": "anton46/shimmer-android",
"path": "shimmer-android/src/main/java/com/facebook/shimmer/ShimmerFrameLayout.java",
"license": "bsd-2-clause",
"size": 29314
} | [
"android.graphics.Bitmap"
] | import android.graphics.Bitmap; | import android.graphics.*; | [
"android.graphics"
] | android.graphics; | 782,269 |
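A call-site sketch (view dimensions are assumptions); note the retry is best-effort, so the second allocation can still throw OutOfMemoryError:

// allocate a full-size mask bitmap, retrying once after a forced GC
Bitmap mask = createBitmapAndGcIfNecessary(getWidth(), getHeight());
Canvas canvas = new Canvas(mask); // e.g. draw the shimmer mask into it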
private synchronized void initConnection () throws Exception {
for (int i = 0; i < this.topology.size(); i ++) {
for (int j = 0; j < this.connectRetries; j ++) {
try {
this.cluster =
Cluster.builder()
.addContactPoint(this.topology.get(i).getIp() )
.withQueryOptions(
new QueryOptions().setFetchSize(this.fetchSize) )
.build();
this.cluster.init();
this.session = this.cluster.connect();
reconnectFlag.set(false);
return;
} catch (Exception e) {
if (i == (this.topology.size() - 1) &&
j == (this.connectRetries - 1) ) {
reconnectFlag.set(false);
throw e;
}
}
}
}
} | synchronized void function () throws Exception { for (int i = 0; i < this.topology.size(); i ++) { for (int j = 0; j < this.connectRetries; j ++) { try { this.cluster = Cluster.builder() .addContactPoint(this.topology.get(i).getIp() ) .withQueryOptions( new QueryOptions().setFetchSize(this.fetchSize) ) .build(); this.cluster.init(); this.session = this.cluster.connect(); reconnectFlag.set(false); return; } catch (Exception e) { if (i == (this.topology.size() - 1) && j == (this.connectRetries - 1) ) { reconnectFlag.set(false); throw e; } } } } } | /**
* initConnection
* tries to connect to one of the Cassandra instances defined in CassandraProperties
* @throws Exception
*/ | initConnection tries to connect to one of the Cassandra instances defined in CassandraProperties | initConnection | {
"repo_name": "vangav/vos_backend",
"path": "src/com/vangav/backend/cassandra/Cassandra.java",
"license": "mit",
"size": 20196
} | [
"com.datastax.driver.core.Cluster",
"com.datastax.driver.core.QueryOptions"
] | import com.datastax.driver.core.Cluster; import com.datastax.driver.core.QueryOptions; | import com.datastax.driver.core.*; | [
"com.datastax.driver"
] | com.datastax.driver; | 159,211 |
public String getIdentifier()
{
return ID3v24Frames.FRAME_ID_COMPOSER;
} | String function() { return ID3v24Frames.FRAME_ID_COMPOSER; } | /**
* The ID3v2 frame identifier
*
* @return the ID3v2 frame identifier for this frame type
*/ | The ID3v2 frame identifier | getIdentifier | {
"repo_name": "nhminus/jaudiotagger-androidpatch",
"path": "src/org/jaudiotagger/tag/id3/framebody/FrameBodyTCOM.java",
"license": "lgpl-2.1",
"size": 2538
} | [
"org.jaudiotagger.tag.id3.ID3v24Frames"
] | import org.jaudiotagger.tag.id3.ID3v24Frames; | import org.jaudiotagger.tag.id3.*; | [
"org.jaudiotagger.tag"
] | org.jaudiotagger.tag; | 1,493,083 |
public void send(byte[] data, int length, InetAddress host) throws IOException {
send(data, length, host, DEFAULT_PORT);
} | void function(byte[] data, int length, InetAddress host) throws IOException { send(data, length, host, DEFAULT_PORT); } | /***
* Same as
* <code>send(data, length, host, DiscardUDPClient.DEFAULT_PORT)</code>.
***/ | Same as <code>send(data, length, host, DiscardUDPClient.DEFAULT_PORT)</code> | send | {
"repo_name": "jreadstone/zsyproject",
"path": "src/org/g4studio/core/net/DiscardUDPClient.java",
"license": "gpl-2.0",
"size": 2162
} | [
"java.io.IOException",
"java.net.InetAddress"
] | import java.io.IOException; import java.net.InetAddress; | import java.io.*; import java.net.*; | [
"java.io",
"java.net"
] | java.io; java.net; | 2,362,427 |
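A hypothetical call site for the two-argument overload (host name is a placeholder; open() and close() come from the commons-net style DatagramSocketClient base class):

DiscardUDPClient client = new DiscardUDPClient();
client.open();
byte[] data = "anything".getBytes();
client.send(data, data.length, InetAddress.getByName("discard.example.com"));
client.close();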
public StructuredName getStructuredName(String bareAddress) {
return names.get(bareAddress);
} | StructuredName function(String bareAddress) { return names.get(bareAddress); } | /**
* Get user's name information.
*
* @param bareAddress the user's bare address
* @return <code>null</code> if there is no info.
*/ | Get user's name information | getStructuredName | {
"repo_name": "bigbugbb/iTracker",
"path": "app/src/main/java/com/itracker/android/data/extension/vcard/VCardManager.java",
"license": "apache-2.0",
"size": 15261
} | [
"com.itracker.android.data.roster.StructuredName"
] | import com.itracker.android.data.roster.StructuredName; | import com.itracker.android.data.roster.*; | [
"com.itracker.android"
] | com.itracker.android; | 2,628,170 |
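A null-safe lookup sketch (the manager instance and JID are assumptions):

StructuredName name = vCardManager.getStructuredName("user@example.com");
if (name != null) {
    // vCard info is cached for this bare address; use it
}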
@Override
public void shutdown() {
if (gdb != null) {
final GraphDatabaseService db = gdb;
engine = null;
gdb = null;
db.shutdown();
}
} | void function() { if (gdb != null) { final GraphDatabaseService db = gdb; engine = null; gdb = null; db.shutdown(); } } | /**
* Shutdown the embedded database server.
*
* @author manbaum
* @since Oct 12, 2014
* @see com.dnw.neo.NeoAccessor#shutdown()
*/ | Shutdown the embedded database server | shutdown | {
"repo_name": "manbaum/dnw-depmap",
"path": "src/main/java/com/dnw/neo/EmbeddedNeoAccessor.java",
"license": "epl-1.0",
"size": 3756
} | [
"org.neo4j.graphdb.GraphDatabaseService"
] | import org.neo4j.graphdb.GraphDatabaseService; | import org.neo4j.graphdb.*; | [
"org.neo4j.graphdb"
] | org.neo4j.graphdb; | 318,313 |
private boolean checkPlayServices() {
int status = GooglePlayServicesUtil.isGooglePlayServicesAvailable(this);
if (status != ConnectionResult.SUCCESS) {
if (GooglePlayServicesUtil.isUserRecoverableError(status)) {
showErrorDialog(status);
} else {
Toast.makeText(this, "This device is not supported.",
Toast.LENGTH_LONG).show();
finish();
}
return false;
}
return true;
} | boolean function() { int status = GooglePlayServicesUtil.isGooglePlayServicesAvailable(this); if (status != ConnectionResult.SUCCESS) { if (GooglePlayServicesUtil.isUserRecoverableError(status)) { showErrorDialog(status); } else { Toast.makeText(this, STR, Toast.LENGTH_LONG).show(); finish(); } return false; } return true; } | /**
* Checks if Google Play Services is installed
* @return true if Play Services is installed, false if not
*/ | Checks if Google Play Services is installed | checkPlayServices | {
"repo_name": "TutenStain/Plingnote",
"path": "src/com/plingnote/main/ActivityMain.java",
"license": "gpl-3.0",
"size": 8315
} | [
"android.widget.Toast",
"com.google.android.gms.common.ConnectionResult",
"com.google.android.gms.common.GooglePlayServicesUtil"
] | import android.widget.Toast; import com.google.android.gms.common.ConnectionResult; import com.google.android.gms.common.GooglePlayServicesUtil; | import android.widget.*; import com.google.android.gms.common.*; | [
"android.widget",
"com.google.android"
] | android.widget; com.google.android; | 1,588,119 |
private List<CatalogEntry> getOwnedEntries(UserRequest ureq) {
if (ureq.getUserSession().getRoles().isOLATAdmin()) {
return (List<CatalogEntry>) CatalogManager.getInstance().getRootCatalogEntries();
} else {
return (List<CatalogEntry>) CatalogManager.getInstance().getCatalogEntriesOwnedBy(ureq.getIdentity());
}
} | List<CatalogEntry> function(UserRequest ureq) { if (ureq.getUserSession().getRoles().isOLATAdmin()) { return (List<CatalogEntry>) CatalogManager.getInstance().getRootCatalogEntries(); } else { return (List<CatalogEntry>) CatalogManager.getInstance().getCatalogEntriesOwnedBy(ureq.getIdentity()); } } | /**
* Internal helper method to get list of catalog entries where current user is
* in the owner group
*
* @param ureq the user request
* @return List of catalog entries
*/ | Internal helper method to get list of catalog entries where current user is in the owner group | getOwnedEntries | {
"repo_name": "stevenhva/InfoLearn_OpenOLAT",
"path": "src/main/java/org/olat/catalog/ui/CatalogEntryMoveController.java",
"license": "apache-2.0",
"size": 4854
} | [
"java.util.List",
"org.olat.catalog.CatalogEntry",
"org.olat.catalog.CatalogManager",
"org.olat.core.gui.UserRequest"
] | import java.util.List; import org.olat.catalog.CatalogEntry; import org.olat.catalog.CatalogManager; import org.olat.core.gui.UserRequest; | import java.util.*; import org.olat.catalog.*; import org.olat.core.gui.*; | [
"java.util",
"org.olat.catalog",
"org.olat.core"
] | java.util; org.olat.catalog; org.olat.core; | 1,516,560 |
public Map<String, String> getDescriptions(); | Map<String, String> function(); | /**
* Returns the descriptions of the plugin.
*/ | Returns the descriptions of the plugin | getDescriptions | {
"repo_name": "rejeep/apes",
"path": "src/apes/interfaces/Plugin.java",
"license": "gpl-3.0",
"size": 365
} | [
"java.util.Map"
] | import java.util.Map; | import java.util.*; | [
"java.util"
] | java.util; | 1,404,587 |
public synchronized boolean cancel(Task p) {
LOGGER.log(Level.FINE, "Cancelling {0}", p);
for (Iterator<WaitingItem> itr = waitingList.iterator(); itr.hasNext();) {
Item item = itr.next();
if (item.task.equals(p)) {
itr.remove();
item.onCancelled();
return true;
}
}
// use bitwise-OR to make sure that both branches get evaluated all the time
return blockedProjects.cancel(p)!=null | buildables.cancel(p)!=null;
} | synchronized boolean function(Task p) { LOGGER.log(Level.FINE, STR, p); for (Iterator<WaitingItem> itr = waitingList.iterator(); itr.hasNext();) { Item item = itr.next(); if (item.task.equals(p)) { itr.remove(); item.onCancelled(); return true; } } return blockedProjects.cancel(p)!=null | buildables.cancel(p)!=null; } | /**
* Cancels the item in the queue. If the item is scheduled more than once, cancels the first occurrence.
*
* @return true if the project was indeed in the queue and was removed.
* false if this was a no-op.
*/ | Cancels the item in the queue. If the item is scheduled more than once, cancels the first occurrence | cancel | {
"repo_name": "mpeltonen/jenkins",
"path": "core/src/main/java/hudson/model/Queue.java",
"license": "mit",
"size": 65714
} | [
"java.util.Iterator",
"java.util.logging.Level"
] | import java.util.Iterator; import java.util.logging.Level; | import java.util.*; import java.util.logging.*; | [
"java.util"
] | java.util; | 1,542,375 |
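A small contrast sketch of the bitwise-OR design choice at the end of the method: with the short-circuiting operator, the second queue would never be asked once the first cancel succeeded, so an item present in both queues could survive:

// "||" skips buildables.cancel(p) as soon as the left side is true
boolean shortCircuit = blockedProjects.cancel(p) != null || buildables.cancel(p) != null;
// "|" evaluates both sides, so the item is removed from both queues
boolean evaluateBoth = blockedProjects.cancel(p) != null | buildables.cancel(p) != null;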
public VpnConnectionInner withRemoteVpnSite(SubResource remoteVpnSite) {
this.remoteVpnSite = remoteVpnSite;
return this;
} | VpnConnectionInner function(SubResource remoteVpnSite) { this.remoteVpnSite = remoteVpnSite; return this; } | /**
* Set id of the connected vpn site.
*
* @param remoteVpnSite the remoteVpnSite value to set
* @return the VpnConnectionInner object itself.
*/ | Set id of the connected vpn site | withRemoteVpnSite | {
"repo_name": "selvasingh/azure-sdk-for-java",
"path": "sdk/network/mgmt-v2018_12_01/src/main/java/com/microsoft/azure/management/network/v2018_12_01/implementation/VpnConnectionInner.java",
"license": "mit",
"size": 11212
} | [
"com.microsoft.azure.SubResource"
] | import com.microsoft.azure.SubResource; | import com.microsoft.azure.*; | [
"com.microsoft.azure"
] | com.microsoft.azure; | 891,787 |
public Options useSpmdForXlaPartitioning(Boolean useSpmdForXlaPartitioning) {
this.useSpmdForXlaPartitioning = useSpmdForXlaPartitioning;
return this;
}
}
@OpInputsMetadata(
outputsClass = ReplicateMetadata.class
)
public static class Inputs extends RawOpInputs<ReplicateMetadata> {
public final long numReplicas;
public final long numCoresPerReplica;
public final String topology;
public final boolean useTpu;
public final long[] deviceAssignment;
public final long[] computationShape;
public final String[] hostComputeCore;
public final String[] paddingMap;
public final String stepMarkerLocation;
public final boolean allowSoftPlacement;
public final boolean useSpmdForXlaPartitioning;
public Inputs(GraphOperation op) {
super(new ReplicateMetadata(op), op, Arrays.asList("num_replicas", "num_cores_per_replica", "topology", "use_tpu", "device_assignment", "computation_shape", "host_compute_core", "padding_map", "step_marker_location", "allow_soft_placement", "use_spmd_for_xla_partitioning"));
int inputIndex = 0;
numReplicas = op.attributes().getAttrInt("num_replicas");
numCoresPerReplica = op.attributes().getAttrInt("num_cores_per_replica");
topology = op.attributes().getAttrString("topology");
useTpu = op.attributes().getAttrBool("use_tpu");
deviceAssignment = op.attributes().getAttrIntList("device_assignment");
computationShape = op.attributes().getAttrIntList("computation_shape");
hostComputeCore = op.attributes().getAttrStringList("host_compute_core");
paddingMap = op.attributes().getAttrStringList("padding_map");
stepMarkerLocation = op.attributes().getAttrString("step_marker_location");
allowSoftPlacement = op.attributes().getAttrBool("allow_soft_placement");
useSpmdForXlaPartitioning = op.attributes().getAttrBool("use_spmd_for_xla_partitioning");
}
} | Options function(Boolean useSpmdForXlaPartitioning) { this.useSpmdForXlaPartitioning = useSpmdForXlaPartitioning; return this; } } @OpInputsMetadata( outputsClass = ReplicateMetadata.class ) static class Inputs extends RawOpInputs<ReplicateMetadata> { public final long numReplicas; public final long numCoresPerReplica; public final String topology; public final boolean useTpu; public final long[] deviceAssignment; public final long[] computationShape; public final String[] hostComputeCore; public final String[] paddingMap; public final String stepMarkerLocation; public final boolean allowSoftPlacement; public final boolean function; Inputs(GraphOperation op) { super(new ReplicateMetadata(op), op, Arrays.asList(STR, STR, STR, STR, STR, STR, STR, STR, STR, STR, STR)); int inputIndex = 0; numReplicas = op.attributes().getAttrInt(STR); numCoresPerReplica = op.attributes().getAttrInt(STR); topology = op.attributes().getAttrString(STR); useTpu = op.attributes().getAttrBool(STR); deviceAssignment = op.attributes().getAttrIntList(STR); computationShape = op.attributes().getAttrIntList(STR); hostComputeCore = op.attributes().getAttrStringList(STR); paddingMap = op.attributes().getAttrStringList(STR); stepMarkerLocation = op.attributes().getAttrString(STR); allowSoftPlacement = op.attributes().getAttrBool(STR); function = op.attributes().getAttrBool(STR); } } | /**
* Sets the useSpmdForXlaPartitioning option.
*
* @param useSpmdForXlaPartitioning the useSpmdForXlaPartitioning option
* @return this Options instance.
*/ | Sets the useSpmdForXlaPartitioning option | useSpmdForXlaPartitioning | {
"repo_name": "tensorflow/java",
"path": "tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicateMetadata.java",
"license": "apache-2.0",
"size": 15971
} | [
"java.util.Arrays",
"org.tensorflow.GraphOperation",
"org.tensorflow.op.RawOpInputs",
"org.tensorflow.op.annotation.OpInputsMetadata"
] | import java.util.Arrays; import org.tensorflow.GraphOperation; import org.tensorflow.op.RawOpInputs; import org.tensorflow.op.annotation.OpInputsMetadata; | import java.util.*; import org.tensorflow.*; import org.tensorflow.op.*; import org.tensorflow.op.annotation.*; | [
"java.util",
"org.tensorflow",
"org.tensorflow.op"
] | java.util; org.tensorflow; org.tensorflow.op; | 888,528 |
private Project doLoad(String filename)
throws OpenException, InterruptedException {
URL url = TestZargoFilePersister.class.getResource(filename);
assertTrue("Unintended failure: resource to be tested is not found: "
+ filename + ", converted to URL: " + url, url != null);
ZargoFilePersister persister = new ZargoFilePersister();
String name = url.getFile();
Project p = persister.doLoad(new File(name));
return p;
}
| Project function(String filename) throws OpenException, InterruptedException { URL url = TestZargoFilePersister.class.getResource(filename); assertTrue(STR + filename + STR + url, url != null); ZargoFilePersister persister = new ZargoFilePersister(); String name = url.getFile(); Project p = persister.doLoad(new File(name)); return p; } | /**
* Tests that a project is loadable.
*
* @param filename of the project file to load
* @throws OpenException if something goes wrong.
*/ | Tests that a project is loadable | doLoad | {
"repo_name": "ckaestne/LEADT",
"path": "workspace/argouml_critics/argouml-app/tests/org/argouml/persistence/TestZargoFilePersister.java",
"license": "gpl-3.0",
"size": 8822
} | [
"java.io.File",
"org.argouml.kernel.Project"
] | import java.io.File; import org.argouml.kernel.Project; | import java.io.*; import org.argouml.kernel.*; | [
"java.io",
"org.argouml.kernel"
] | java.io; org.argouml.kernel; | 2,052,321 |
public static void centerWindow(final Window window) {
window.setLocationRelativeTo(null);
}
public static enum Standard {
QQVGA (160, 120, 4, 3),
HQVGA (240, 160, 3, 2),
QVGA (320, 240, 4, 3),
WQVGA (480, 270, 16, 9),
MCGA (320, 200, 16, 10),
VGA (640, 480, 4, 3),
SVGA (800, 600, 4, 3),
IBM_8514 (1024, 768, 4, 3),
XGA (1024, 768, 4, 3),
XGAP (1152, 864, 4, 3),
HD (1360, 768, 16, 9),
WXGA (1280, 800, 16, 10),
SXGA (1280, 1024, 5, 4),
SXGAP (1280, 1024, 5, 4),
WXGAP (1440, 900, 16, 10),
HDP (1600, 900, 16, 9),
UXGA (1600, 1200, 4, 3),
WSXGAP (1680, 1050, 16, 10),
FHD (1920, 1080, 16, 9),
WUXGA (1920, 1200, 16, 10),
FHDP (1920, 1280, 3, 2),
TWO_K (2048, 1080, 256, 135),
QWXGA (2048, 1152, 16, 9),
QXGA (2048, 1536, 4, 3),
QHD (2160, 1440, 3, 2),
WQHD (2560, 1440, 16, 9),
WQXGA (2560, 1600, 16, 10),
QSXGA (2560, 2048, 5, 4),
QWXGAP (2880, 1800, 16, 10),
WQSXGA (3200, 2048, 25, 16),
QUXGA (3200, 2400, 4, 3),
UHD (3840, 2160, 16, 9),
QFHD (3840, 2160, 16, 9),
WQUXGA (3840, 2400, 16, 10),
FOUR_K (4096, 2160, 2, 1),
HXGA (4096, 3072, 4, 3),
WSHD (5120, 2880, 16, 9),
FIVE_K (5120, 2880, 16, 9),
WHXGA (5120, 3200, 16, 10),
HSXGA (5120, 4096, 5, 4),
WHSXGA (6400, 4096, 25, 16),
HUXGA (6400, 4800, 4, 3),
EIGHT_K_UHD (7680, 4320, 16, 9),
WHUXGA (7680, 4800, 16, 10);
private final int xPixels;
private final int yPixels;
private final int xDAR;
private final int yDAR;
private Standard(final int width, final int height, final int xDAR, final int yDAR) {
this.xPixels = width;
this.yPixels = height;
this.xDAR = xDAR;
this.yDAR = yDAR;
}
| static void function(final Window window) { window.setLocationRelativeTo(null); } public static enum Standard { QQVGA (160, 120, 4, 3), HQVGA (240, 160, 3, 2), QVGA (320, 240, 4, 3), WQVGA (480, 270, 16, 9), MCGA (320, 200, 16, 10), VGA (640, 480, 4, 3), SVGA (800, 600, 4, 3), IBM_8514 (1024, 768, 4, 3), XGA (1024, 768, 4, 3), XGAP (1152, 864, 4, 3), HD (1360, 768, 16, 9), WXGA (1280, 800, 16, 10), SXGA (1280, 1024, 5, 4), SXGAP (1280, 1024, 5, 4), WXGAP (1440, 900, 16, 10), HDP (1600, 900, 16, 9), UXGA (1600, 1200, 4, 3), WSXGAP (1680, 1050, 16, 10), FHD (1920, 1080, 16, 9), WUXGA (1920, 1200, 16, 10), FHDP (1920, 1280, 3, 2), TWO_K (2048, 1080, 256, 135), QWXGA (2048, 1152, 16, 9), QXGA (2048, 1536, 4, 3), QHD (2160, 1440, 3, 2), WQHD (2560, 1440, 16, 9), WQXGA (2560, 1600, 16, 10), QSXGA (2560, 2048, 5, 4), QWXGAP (2880, 1800, 16, 10), WQSXGA (3200, 2048, 25, 16), QUXGA (3200, 2400, 4, 3), UHD (3840, 2160, 16, 9), QFHD (3840, 2160, 16, 9), WQUXGA (3840, 2400, 16, 10), FOUR_K (4096, 2160, 2, 1), HXGA (4096, 3072, 4, 3), WSHD (5120, 2880, 16, 9), FIVE_K (5120, 2880, 16, 9), WHXGA (5120, 3200, 16, 10), HSXGA (5120, 4096, 5, 4), WHSXGA (6400, 4096, 25, 16), HUXGA (6400, 4800, 4, 3), EIGHT_K_UHD (7680, 4320, 16, 9), WHUXGA (7680, 4800, 16, 10); private final int xPixels; private final int yPixels; private final int xDAR; private final int yDAR; private Standard(final int width, final int height, final int xDAR, final int yDAR) { this.xPixels = width; this.yPixels = height; this.xDAR = xDAR; this.yDAR = yDAR; } | /**
* Center the specified <tt>Window</tt> on the screen.
*
* @param window the window to be centered.
*/ | Center the specified Window on the screen | centerWindow | {
"repo_name": "Kurt-E-Clothier/java-card-game-engine",
"path": "src/games/engine/gui/Resolution.java",
"license": "mit",
"size": 12944
} | [
"java.awt.Window"
] | import java.awt.Window; | import java.awt.*; | [
"java.awt"
] | java.awt; | 1,235,592 |
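A minimal Swing sketch using the helper above (the enclosing class name Resolution is taken from the record's path):

JFrame frame = new JFrame("Demo");
frame.pack();
Resolution.centerWindow(frame); // setLocationRelativeTo(null) centers on screen
frame.setVisible(true);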
@IgniteSpiConfiguration(optional = true)
public TcpCommunicationSpi setAckSendThreshold(int ackSndThreshold) {
cfg.ackSendThreshold(ackSndThreshold);
return (TcpCommunicationSpi) this;
} | @IgniteSpiConfiguration(optional = true) TcpCommunicationSpi function(int ackSndThreshold) { cfg.ackSendThreshold(ackSndThreshold); return (TcpCommunicationSpi) this; } | /**
* Sets number of received messages per connection to node after which acknowledgment message is sent.
* <p>
* Default to {@link TcpCommunicationSpi#DFLT_ACK_SND_THRESHOLD}.
*
* @param ackSndThreshold Number of received messages after which acknowledgment is sent.
* @return {@code this} for chaining.
*/ | Sets number of received messages per connection to node after which acknowledgment message is sent. Default to <code>TcpCommunicationSpi#DFLT_ACK_SND_THRESHOLD</code> | setAckSendThreshold | {
"repo_name": "ascherbakoff/ignite",
"path": "modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/internal/TcpCommunicationConfigInitializer.java",
"license": "apache-2.0",
"size": 34275
} | [
"org.apache.ignite.spi.IgniteSpiConfiguration",
"org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi"
] | import org.apache.ignite.spi.IgniteSpiConfiguration; import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; | import org.apache.ignite.spi.*; import org.apache.ignite.spi.communication.tcp.*; | [
"org.apache.ignite"
] | org.apache.ignite; | 1,094,809 |
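A configuration sketch (the threshold value is illustrative); the setter returns the SPI, so it chains when wiring an IgniteConfiguration:

TcpCommunicationSpi commSpi = new TcpCommunicationSpi()
        .setAckSendThreshold(32); // ack after every 32 received messages per connection
IgniteConfiguration cfg = new IgniteConfiguration();
cfg.setCommunicationSpi(commSpi);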
public List<String> propagatingConnections() {
return this.propagatingConnections;
} | List<String> function() { return this.propagatingConnections; } | /**
* Get the propagatingConnections property: List of all connections that advertise to this route table.
*
* @return the propagatingConnections value.
*/ | Get the propagatingConnections property: List of all connections that advertise to this route table | propagatingConnections | {
"repo_name": "Azure/azure-sdk-for-java",
"path": "sdk/resourcemanager/azure-resourcemanager-network/src/main/java/com/azure/resourcemanager/network/fluent/models/HubRouteTableProperties.java",
"license": "mit",
"size": 3766
} | [
"java.util.List"
] | import java.util.List; | import java.util.*; | [
"java.util"
] | java.util; | 2,835,875 |
private QueryContext completedChildQc(final QueryContext parentQueryContext)
{
final QueryContext childQueryContext = newTweakedEntity();
childQueryContext.setStaleDateTime(staleDateTimeFactory.getStaleDateTime());
childQueryContext.setParent(parentQueryContext);
childQueryContext.queue();
childQueryContext.start();
childQueryContext.finish();
return childQueryContext;
} | QueryContext function(final QueryContext parentQueryContext) { final QueryContext childQueryContext = newTweakedEntity(); childQueryContext.setStaleDateTime(staleDateTimeFactory.getStaleDateTime()); childQueryContext.setParent(parentQueryContext); childQueryContext.queue(); childQueryContext.start(); childQueryContext.finish(); return childQueryContext; } | /**
* Returns a child query context in completed state.
*
* @param parentQueryContext the parent query context to attach the child to
*/ | Returns a child query context in completed state | completedChildQc | {
"repo_name": "openfurther/further-open-core",
"path": "fqe/fqe-impl/src/test/java/edu/utah/further/fqe/impl/schedule/jobs/UTestQuerySealer.java",
"license": "apache-2.0",
"size": 10089
} | [
"edu.utah.further.fqe.ds.api.domain.QueryContext"
] | import edu.utah.further.fqe.ds.api.domain.QueryContext; | import edu.utah.further.fqe.ds.api.domain.*; | [
"edu.utah.further"
] | edu.utah.further; | 1,858,112 |
public Iterable<DProject> queryByDisplayName(java.lang.String displayName) {
return queryByField(null, DProjectMapper.Field.DISPLAYNAME.getFieldName(), displayName);
} | Iterable<DProject> function(java.lang.String displayName) { return queryByField(null, DProjectMapper.Field.DISPLAYNAME.getFieldName(), displayName); } | /**
* query-by method for field displayName
* @param displayName the specified attribute
* @return an Iterable of DProjects for the specified displayName
*/ | query-by method for field displayName | queryByDisplayName | {
"repo_name": "Wadpam/spara",
"path": "spara-war/src/main/java/com/wadpam/spara/dao/GeneratedDProjectDaoImpl.java",
"license": "gpl-3.0",
"size": 3260
} | [
"com.wadpam.spara.domain.DProject"
] | import com.wadpam.spara.domain.DProject; | import com.wadpam.spara.domain.*; | [
"com.wadpam.spara"
] | com.wadpam.spara; | 2,762,038 |
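A hypothetical iteration over the generated query-by method (the DAO instance and display name are placeholders):

for (DProject project : projectDao.queryByDisplayName("My Project")) {
    // process each DProject whose displayName matches
}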
@Test
public void testCreateObjectApiError()
throws IOException {
// Prepare the mock return values before invoking the method being tested.
when(mockStorage.objects()).thenReturn(mockStorageObjects);
setupNonConflictedSuccessfulWrite();
when(mockStorageObjects.insert(
eq(BUCKET_NAME), any(StorageObject.class), any(AbstractInputStreamContent.class)))
.thenReturn(mockStorageObjectsInsert);
when(mockClientRequestHelper.getRequestHeaders(eq(mockStorageObjectsInsert)))
.thenReturn(mockHeaders);
WritableByteChannel writeChannel = gcs.create(new StorageResourceId(BUCKET_NAME, OBJECT_NAME));
assertTrue(writeChannel.isOpen());
verify(mockStorage, times(3)).objects();
verify(mockStorageObjects, times(2)).insert(
eq(BUCKET_NAME), any(StorageObject.class), any(AbstractInputStreamContent.class));
verify(mockStorageObjects).get(BUCKET_NAME, OBJECT_NAME);
verify(mockStorageObjectsGet).execute();
verify(mockStorageObjectsInsert, times(2)).setDisableGZipContent(eq(true));
verify(mockStorageObjectsInsert, times(1)).setIfGenerationMatch(eq(0L));
verify(mockStorageObjectsInsert, times(1)).setIfGenerationMatch(eq(1L));
verify(mockErrorExtractor, times(1)).itemNotFound(any(IOException.class));
verify(mockBackOff, atLeastOnce()).nextBackOffMillis();
verify(mockBackOffFactory, atLeastOnce()).newBackOff();
verify(mockHeaders, times(2)).set(startsWith("X-Goog-Upload-"), anyInt());
verify(mockClientRequestHelper).getRequestHeaders(any(Storage.Objects.Insert.class));
verify(mockClientRequestHelper).setChunkSize(any(Storage.Objects.Insert.class), anyInt());
verify(mockClientRequestHelper).setDirectUploadEnabled(eq(mockStorageObjectsInsert), eq(true));
ArgumentCaptor<Runnable> runCaptor = ArgumentCaptor.forClass(Runnable.class);
verify(mockExecutorService).execute(runCaptor.capture());
// Set up the mock Insert to throw an exception when execute() is called.
Error fakeError = new Error("Fake error");
when(mockStorageObjectsInsert.execute())
.thenThrow(fakeError);
runCaptor.getValue().run();
try {
writeChannel.close();
fail("Expected Error");
} catch (Error error) {
assertEquals(fakeError, error);
}
verify(mockStorageObjectsInsert, times(2)).execute();
} | void function() throws IOException { when(mockStorage.objects()).thenReturn(mockStorageObjects); setupNonConflictedSuccessfulWrite(); when(mockStorageObjects.insert( eq(BUCKET_NAME), any(StorageObject.class), any(AbstractInputStreamContent.class))) .thenReturn(mockStorageObjectsInsert); when(mockClientRequestHelper.getRequestHeaders(eq(mockStorageObjectsInsert))) .thenReturn(mockHeaders); WritableByteChannel writeChannel = gcs.create(new StorageResourceId(BUCKET_NAME, OBJECT_NAME)); assertTrue(writeChannel.isOpen()); verify(mockStorage, times(3)).objects(); verify(mockStorageObjects, times(2)).insert( eq(BUCKET_NAME), any(StorageObject.class), any(AbstractInputStreamContent.class)); verify(mockStorageObjects).get(BUCKET_NAME, OBJECT_NAME); verify(mockStorageObjectsGet).execute(); verify(mockStorageObjectsInsert, times(2)).setDisableGZipContent(eq(true)); verify(mockStorageObjectsInsert, times(1)).setIfGenerationMatch(eq(0L)); verify(mockStorageObjectsInsert, times(1)).setIfGenerationMatch(eq(1L)); verify(mockErrorExtractor, times(1)).itemNotFound(any(IOException.class)); verify(mockBackOff, atLeastOnce()).nextBackOffMillis(); verify(mockBackOffFactory, atLeastOnce()).newBackOff(); verify(mockHeaders, times(2)).set(startsWith(STR), anyInt()); verify(mockClientRequestHelper).getRequestHeaders(any(Storage.Objects.Insert.class)); verify(mockClientRequestHelper).setChunkSize(any(Storage.Objects.Insert.class), anyInt()); verify(mockClientRequestHelper).setDirectUploadEnabled(eq(mockStorageObjectsInsert), eq(true)); ArgumentCaptor<Runnable> runCaptor = ArgumentCaptor.forClass(Runnable.class); verify(mockExecutorService).execute(runCaptor.capture()); Error fakeError = new Error(STR); when(mockStorageObjectsInsert.execute()) .thenThrow(fakeError); runCaptor.getValue().run(); try { writeChannel.close(); fail(STR); } catch (Error error) { assertEquals(fakeError, error); } verify(mockStorageObjectsInsert, times(2)).execute(); } | /**
* Test handling of various types of Errors thrown during JSON API call for
* GoogleCloudStorage.create(2).
*/ | Test handling of various types of Errors thrown during JSON API call for GoogleCloudStorage.create(2) | testCreateObjectApiError | {
"repo_name": "ravwojdyla/bigdata-interop",
"path": "gcs/src/test/java/com/google/cloud/hadoop/gcsio/GoogleCloudStorageTest.java",
"license": "apache-2.0",
"size": 127749
} | [
"com.google.api.client.http.AbstractInputStreamContent",
"com.google.api.services.storage.Storage",
"com.google.api.services.storage.model.Objects",
"com.google.api.services.storage.model.StorageObject",
"java.io.IOException",
"java.nio.channels.WritableByteChannel",
"org.junit.Assert",
"org.mockito.ArgumentCaptor",
"org.mockito.Matchers",
"org.mockito.Mockito"
] | import com.google.api.client.http.AbstractInputStreamContent; import com.google.api.services.storage.Storage; import com.google.api.services.storage.model.Objects; import com.google.api.services.storage.model.StorageObject; import java.io.IOException; import java.nio.channels.WritableByteChannel; import org.junit.Assert; import org.mockito.ArgumentCaptor; import org.mockito.Matchers; import org.mockito.Mockito; | import com.google.api.client.http.*; import com.google.api.services.storage.*; import com.google.api.services.storage.model.*; import java.io.*; import java.nio.channels.*; import org.junit.*; import org.mockito.*; | [
"com.google.api",
"java.io",
"java.nio",
"org.junit",
"org.mockito"
] | com.google.api; java.io; java.nio; org.junit; org.mockito; | 441,775 |
public synchronized void sendIgnorePacket() throws IOException
{
SecureRandom rnd = getOrCreateSecureRND();
byte[] data = new byte[rnd.nextInt(16)];
rnd.nextBytes(data);
sendIgnorePacket(data);
} | synchronized void function() throws IOException { SecureRandom rnd = getOrCreateSecureRND(); byte[] data = new byte[rnd.nextInt(16)]; rnd.nextBytes(data); sendIgnorePacket(data); } | /**
* Send an SSH_MSG_IGNORE packet. This method will generate a random data
* attribute (length between 0 (inclusive) and 16 (exclusive) bytes,
* contents are random bytes).
* <p>
* This method must only be called once the connection is established.
*
* @throws IOException
*/ | Send an SSH_MSG_IGNORE packet. This method will generate a random data attribute (length between 0 (inclusive) and 16 (exclusive) bytes, contents are random bytes). This method must only be called once the connection is established | sendIgnorePacket | {
"repo_name": "getconsole/serialbot",
"path": "src/com/trilead/ssh2/Connection.java",
"license": "apache-2.0",
"size": 55105
} | [
"java.io.IOException",
"java.security.SecureRandom"
] | import java.io.IOException; import java.security.SecureRandom; | import java.io.*; import java.security.*; | [
"java.io",
"java.security"
] | java.io; java.security; | 2,195,675 |
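A keep-alive sketch (host is a placeholder): SSH_MSG_IGNORE packets are discarded by the peer, so they are safe to send periodically once the connection is established:

Connection conn = new Connection("ssh.example.com");
conn.connect();
conn.sendIgnorePacket(); // random payload of 0-15 bytes, per the javadoc above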
private void object2View() {
Product product = new Product();
try {
product.setIdProduct(this.getDataFile().getResultSet().getString("idProduct"));
product.setProductName(this.getDataFile().getResultSet().getString("productName"));
product.setQuantity(this.getDataFile().getResultSet().getInt("quantity"));
this.object2View(product);
} catch (SQLException | NullPointerException ex) {
Logger.getLogger(FraProduct.class.getName()).log(Level.SEVERE, null, ex);
}
} | void function() { Product product = new Product(); try { product.setIdProduct(this.getDataFile().getResultSet().getString(STR)); product.setProductName(this.getDataFile().getResultSet().getString(STR)); product.setQuantity(this.getDataFile().getResultSet().getInt(STR)); this.object2View(product); } catch (SQLException | NullPointerException ex) { Logger.getLogger(FraProduct.class.getName()).log(Level.SEVERE, null, ex); } } | /**
* Result set -> object
*/ | Result set -> object | object2View | {
"repo_name": "jfmendozam/BillApp",
"path": "BillApp/src/billapp/view/FraProduct.java",
"license": "apache-2.0",
"size": 25170
} | [
"java.sql.SQLException",
"java.util.logging.Level",
"java.util.logging.Logger"
] | import java.sql.SQLException; import java.util.logging.Level; import java.util.logging.Logger; | import java.sql.*; import java.util.logging.*; | [
"java.sql",
"java.util"
] | java.sql; java.util; | 2,145,871 |
public interface Special {
Result apply(PrecedenceClimbingParser parser, SpecialOp op);
}
public static class Result {
final Token first;
final Token last;
final Token replacement;
public Result(Token first, Token last, Token replacement) {
this.first = first;
this.last = last;
this.replacement = replacement;
}
}
public static class Builder {
final List<Token> tokens = new ArrayList<>();
private final PrecedenceClimbingParser dummy =
new PrecedenceClimbingParser(ImmutableList.<Token>of()); | interface Special { Result function(PrecedenceClimbingParser parser, SpecialOp op); } public static class Result { final Token first; final Token last; final Token replacement; public Result(Token first, Token last, Token replacement) { this.first = first; this.last = last; this.replacement = replacement; } } public static class Builder { final List<Token> tokens = new ArrayList<>(); private final PrecedenceClimbingParser dummy = new PrecedenceClimbingParser(ImmutableList.<Token>of()); | /** Given an occurrence of this operator, identifies the range of tokens to
* be collapsed into a call of this operator, and the arguments to that
* call. */ | Given an occurrence of this operator, identifies the range of tokens to be collapsed into a call of this operator, and the arguments to that call | apply | {
"repo_name": "sreev/incubator-calcite",
"path": "core/src/main/java/org/apache/calcite/util/PrecedenceClimbingParser.java",
"license": "apache-2.0",
"size": 11567
} | [
"com.google.common.collect.ImmutableList",
"java.util.ArrayList",
"java.util.List"
] | import com.google.common.collect.ImmutableList; import java.util.ArrayList; import java.util.List; | import com.google.common.collect.*; import java.util.*; | [
"com.google.common",
"java.util"
] | com.google.common; java.util; | 490,982 |
public void create(Page page, Map<String, Object> formData) {
PageDataHandler formPageDataHandler = getPageDataHandler(page.getName());
formPageDataHandler.handleParam(HandlerType.DATA_CREATE, page, formData);
int effectCount = sqlSessionTemplate.insert(
TemporaryStatementRegistry.getLastestName(page.getModuleName(), page.getName(), ACCESS_TYPE_CREATE), formData);
if (effectCount > 0) {
for (FormPage field : page.getFormFields()) {
if (field.isMultiValue()) {
((MultiValueComponent) field.getObjType()).save(page.getName(),
formData.get(page.getKeyName()).toString(), formData.get(field.getName()));
}
}
}
formPageDataHandler.handleResult(HandlerType.DATA_CREATE, page, formData);
}
| void function(Page page, Map<String, Object> formData) { PageDataHandler formPageDataHandler = getPageDataHandler(page.getName()); formPageDataHandler.handleParam(HandlerType.DATA_CREATE, page, formData); int effectCount = sqlSessionTemplate.insert( TemporaryStatementRegistry.getLastestName(page.getModuleName(), page.getName(), ACCESS_TYPE_CREATE), formData); if (effectCount > 0) { for (FormPage field : page.getFormFields()) { if (field.isMultiValue()) { ((MultiValueComponent) field.getObjType()).save(page.getName(), formData.get(page.getKeyName()).toString(), formData.get(field.getName())); } } } formPageDataHandler.handleResult(HandlerType.DATA_CREATE, page, formData); } | /**
* FormPage.PAGE_TYPE_CREATE && group.isEditable()
*
* @param page
* @param formData
*/ | FormPage.PAGE_TYPE_CREATE && group.isEditable() | create | {
"repo_name": "zdtjss/wform",
"path": "src/main/java/com/nway/platform/wform/access/FormDataAccess.java",
"license": "apache-2.0",
"size": 6265
} | [
"com.nway.platform.wform.access.handler.HandlerType",
"com.nway.platform.wform.access.handler.PageDataHandler",
"com.nway.platform.wform.access.mybatis.TemporaryStatementRegistry",
"com.nway.platform.wform.component.MultiValueComponent",
"com.nway.platform.wform.design.entity.FormPage",
"com.nway.platform.wform.design.entity.Page",
"java.util.Map"
] | import com.nway.platform.wform.access.handler.HandlerType; import com.nway.platform.wform.access.handler.PageDataHandler; import com.nway.platform.wform.access.mybatis.TemporaryStatementRegistry; import com.nway.platform.wform.component.MultiValueComponent; import com.nway.platform.wform.design.entity.FormPage; import com.nway.platform.wform.design.entity.Page; import java.util.Map; | import com.nway.platform.wform.access.handler.*; import com.nway.platform.wform.access.mybatis.*; import com.nway.platform.wform.component.*; import com.nway.platform.wform.design.entity.*; import java.util.*; | [
"com.nway.platform",
"java.util"
] | com.nway.platform; java.util; | 248,607 |
public void removeAll() {
for (Dataset dataset : datasets.values())
dataset.removeAll();
}
| void function() { for (Dataset dataset : datasets.values()) dataset.removeAll(); } | /**
* Remove all data from this data set.
*/ | Remove all data from this data set | removeAll | {
"repo_name": "JoeyLeeuwinga/Firemox",
"path": "src/main/java/net/sf/firemox/chart/ChartSets.java",
"license": "gpl-2.0",
"size": 2586
} | [
"net.sf.firemox.chart.datasets.Dataset"
] | import net.sf.firemox.chart.datasets.Dataset; | import net.sf.firemox.chart.datasets.*; | [
"net.sf.firemox"
] | net.sf.firemox; | 356,462 |
public void setReplyToAddresses(List<String> replyToAddresses) {
this.replyToAddresses = replyToAddresses;
} | void function(List<String> replyToAddresses) { this.replyToAddresses = replyToAddresses; } | /**
* List of reply-to email address(es) for the message; override it with the 'CamelAwsSesReplyToAddresses' header.
*/ | List of reply-to email address(es) for the message; override it with the 'CamelAwsSesReplyToAddresses' header | setReplyToAddresses | {
"repo_name": "adessaigne/camel",
"path": "components/camel-aws-ses/src/main/java/org/apache/camel/component/aws/ses/SesConfiguration.java",
"license": "apache-2.0",
"size": 6367
} | [
"java.util.List"
] | import java.util.List; | import java.util.*; | [
"java.util"
] | java.util; | 1,083,422 |
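A configuration sketch (addresses are placeholders); a per-message CamelAwsSesReplyToAddresses header still takes precedence over this list:

SesConfiguration configuration = new SesConfiguration();
configuration.setReplyToAddresses(Arrays.asList("replies@example.com"));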
public Object read(Object template) throws DataSourceException {
throw new UnsupportedOperationException();
} | Object function(Object template) throws DataSourceException { throw new UnsupportedOperationException(); } | /**
* Read one object that matches the given template. <br>
* Used by the space for reading templates with a UID.<br>
*/ | Read one object that matches the given template. Used by the space for reading templates with a UID | read | {
"repo_name": "baboune/compass",
"path": "src/main/src/org/compass/needle/gigaspaces/datasource/CompassDataSource.java",
"license": "apache-2.0",
"size": 8715
} | [
"com.gigaspaces.datasource.DataSourceException"
] | import com.gigaspaces.datasource.DataSourceException; | import com.gigaspaces.datasource.*; | [
"com.gigaspaces.datasource"
] | com.gigaspaces.datasource; | 468,017 |
public void mutate() {
for (int count = 0; count < Neat.p_num_trait_params; count++) {
if (NeatRoutine.randfloat() > Neat.p_trait_param_mut_prob) {
params[count] += (NeatRoutine.randposneg() * NeatRoutine.randfloat()) * Neat.p_trait_mutation_power;
if (params[count] < 0)
params[count] = 0;
}
}
}
public Trait(char a) {
}
public Trait(int id, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double p8, double p9) {
params = new double[Neat.p_num_trait_params];
trait_id = id;
params[0] = p1;
params[1] = p2;
params[2] = p3;
params[3] = p4;
params[4] = p5;
params[5] = p6;
params[6] = p7;
params[7] = 0;
} | void function() { for (int count = 0; count < Neat.p_num_trait_params; count++) { if (NeatRoutine.randfloat() > Neat.p_trait_param_mut_prob) { params[count] += (NeatRoutine.randposneg() * NeatRoutine.randfloat()) * Neat.p_trait_mutation_power; if (params[count] < 0) params[count] = 0; } } } public Trait(char a) { } public Trait(int id, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double p8, double p9) { params = new double[Neat.p_num_trait_params]; trait_id = id; params[0] = p1; params[1] = p2; params[2] = p3; params[3] = p4; params[4] = p5; params[5] = p6; params[6] = p7; params[7] = 0; } | /**
* Insert the method's description here.
* Creation date: (18/01/2002 13.02.06)
*/ | Insert the method's description here. Creation date: (18/01/2002 13.02.06) | mutate | {
"repo_name": "AdaptiveComputationLab/neatfa",
"path": "runner/src/main/java/edu/unm/neat/jneat/Trait.java",
"license": "mit",
"size": 5001
} | [
"edu.unm.neat.jNeatCommon.NeatRoutine"
] | import edu.unm.neat.jNeatCommon.NeatRoutine; | import edu.unm.neat.*; | [
"edu.unm.neat"
] | edu.unm.neat; | 249,672 |
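The mutate() record above is compact enough to exercise on its own; the following is a minimal sketch that reproduces the same clamped, probabilistic perturbation with plain java.util.Random, using assumed stand-in values for the Neat.* constants and NeatRoutine helpers.

import java.util.Arrays;
import java.util.Random;

public class MutateSketch {
    public static void main(String[] args) {
        Random rnd = new Random();
        double mutProb = 0.5;   // stand-in for Neat.p_trait_param_mut_prob
        double mutPower = 1.0;  // stand-in for Neat.p_trait_mutation_power
        double[] params = {0.2, 0.7, 0.1};
        for (int count = 0; count < params.length; count++) {
            // same ">" comparison as the original, so mutProb acts as a skip probability
            if (rnd.nextDouble() > mutProb) {
                params[count] += (rnd.nextBoolean() ? 1 : -1) * rnd.nextDouble() * mutPower;
                if (params[count] < 0) params[count] = 0; // clamp at zero, as in mutate()
            }
        }
        System.out.println(Arrays.toString(params));
    }
}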
private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN =
Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
private static String convertMetatadataFileName(String oldFileName) {
Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
if (matcher.matches()) {
//return the current metadata file name
return FSDataset.getMetaFileName(matcher.group(1),
Block.GRANDFATHER_GENERATION_STAMP);
}
return oldFileName;
} | static final Pattern PRE_GENSTAMP_META_FILE_PATTERN = Pattern.compile(STR); private static String function(String oldFileName) { Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName); if (matcher.matches()) { return FSDataset.getMetaFileName(matcher.group(1), Block.GRANDFATHER_GENERATION_STAMP); } return oldFileName; } | /**
* This is invoked on target file names when upgrading from pre generation
* stamp version (version -13) to correct the metadata file name.
* @param oldFileName
* @return the new metadata file name with the default generation stamp.
*/ | This is invoked on target file names when upgrading from pre generation stamp version (version -13) to correct the metadata file name | convertMetatadataFileName | {
"repo_name": "jchen123/hadoop-20-warehouse-fix",
"path": "src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java",
"license": "apache-2.0",
"size": 21389
} | [
"java.util.regex.Matcher",
"java.util.regex.Pattern",
"org.apache.hadoop.hdfs.protocol.Block"
] | import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.hdfs.protocol.Block; | import java.util.regex.*; import org.apache.hadoop.hdfs.protocol.*; | [
"java.util",
"org.apache.hadoop"
] | java.util; org.apache.hadoop; | 1,888,214 |
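A self-contained sketch of the same regex-driven rename; getMetaFileName here is a hypothetical stand-in for FSDataset.getMetaFileName, whose real output format is not asserted.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MetaRenameSketch {
    private static final Pattern PRE_GENSTAMP =
        Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");

    // hypothetical stand-in for FSDataset.getMetaFileName(block, genStamp)
    static String getMetaFileName(String blockName, long genStamp) {
        return blockName + "_" + genStamp + ".meta";
    }

    public static void main(String[] args) {
        Matcher m = PRE_GENSTAMP.matcher("blk_-123.meta");
        // old-style names gain a generation stamp; anything else passes through
        System.out.println(m.matches() ? getMetaFileName(m.group(1), 0) : "blk_-123.meta");
    }
}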
private static final DataParameter<Integer> SKELETON_VARIANT = EntityDataManager
.<Integer>createKey(TameableSkeleton.class, DataSerializers.VARINT);
private static final DataParameter<Boolean> SWINGING_ARMS = EntityDataManager
.<Boolean>createKey(TameableSkeleton.class, DataSerializers.BOOLEAN);
protected EntityAISit aiSit;
private final EntityAIAttackRangedBow aiArrowAttack = new EntityAIAttackRangedBow(this, 1.0D, 20, 15.0F);
private final EntityAIAttackMelee aiAttackOnCollide = new EntityAIAttackMelee(this, 1.2D, false) {
public void resetTask() {
super.resetTask();
TameableSkeleton.this.setSwingingArms(false);
} | static final DataParameter<Integer> SKELETON_VARIANT = EntityDataManager .<Integer>createKey(TameableSkeleton.class, DataSerializers.VARINT); private static final DataParameter<Boolean> SWINGING_ARMS = EntityDataManager .<Boolean>createKey(TameableSkeleton.class, DataSerializers.BOOLEAN); protected EntityAISit aiSit; private final EntityAIAttackRangedBow aiArrowAttack = new EntityAIAttackRangedBow(this, 1.0D, 20, 15.0F); private final EntityAIAttackMelee aiAttackOnCollide = new EntityAIAttackMelee(this, 1.2D, false) { public void function() { super.resetTask(); TameableSkeleton.this.setSwingingArms(false); } | /**
* Resets the task
*/ | Resets the task | resetTask | {
"repo_name": "EPIICTHUNDERCAT/TameableMobs",
"path": "src/main/java/com/github/epiicthundercat/tameablemobs/mobs/TameableSkeleton.java",
"license": "mit",
"size": 40155
} | [
"net.minecraft.entity.ai.EntityAIAttackMelee",
"net.minecraft.network.datasync.DataParameter",
"net.minecraft.network.datasync.DataSerializers",
"net.minecraft.network.datasync.EntityDataManager"
] | import net.minecraft.entity.ai.EntityAIAttackMelee; import net.minecraft.network.datasync.DataParameter; import net.minecraft.network.datasync.DataSerializers; import net.minecraft.network.datasync.EntityDataManager; | import net.minecraft.entity.ai.*; import net.minecraft.network.datasync.*; | [
"net.minecraft.entity",
"net.minecraft.network"
] | net.minecraft.entity; net.minecraft.network; | 919,102 |
@Test
public void testMergeTwoRegions() throws Exception {
final TableName tableName = TableName.valueOf(this.name.getMethodName());
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
List<RegionInfo> tableRegions = createTable(tableName);
RegionInfo[] regionsToMerge = new RegionInfo[2];
regionsToMerge[0] = tableRegions.get(0);
regionsToMerge[1] = tableRegions.get(1);
// collect AM metrics before test
collectAssignmentManagerMetrics();
MergeTableRegionsProcedure proc =
new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true);
long procId = procExec.submitProcedure(proc);
ProcedureTestingUtility.waitProcedure(procExec, procId);
ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
assertRegionCount(tableName, initialRegionCount - 1);
assertEquals(mergeSubmittedCount + 1, mergeProcMetrics.getSubmittedCounter().getCount());
assertEquals(mergeFailedCount, mergeProcMetrics.getFailedCounter().getCount());
assertEquals(assignSubmittedCount + 1, assignProcMetrics.getSubmittedCounter().getCount());
assertEquals(assignFailedCount, assignProcMetrics.getFailedCounter().getCount());
assertEquals(unassignSubmittedCount + 2, unassignProcMetrics.getSubmittedCounter().getCount());
assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount());
Pair<RegionInfo, RegionInfo> pair =
MetaTableAccessor.getRegionsFromMergeQualifier(UTIL.getConnection(),
proc.getMergedRegion().getRegionName());
assertTrue(pair.getFirst() != null && pair.getSecond() != null);
// Can I purge the merged regions from hbase:meta? Check that all went
// well by looking at the merged row up in hbase:meta. It should have no
// more mention of the merged regions; they are purged as last step in
// the merged regions cleanup.
UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(true);
UTIL.getHBaseCluster().getMaster().getCatalogJanitor().triggerNow();
while (pair != null && pair.getFirst() != null && pair.getSecond() != null) {
pair = MetaTableAccessor.getRegionsFromMergeQualifier(UTIL.getConnection(),
proc.getMergedRegion().getRegionName());
}
} | void function() throws Exception { final TableName tableName = TableName.valueOf(this.name.getMethodName()); final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor(); List<RegionInfo> tableRegions = createTable(tableName); RegionInfo[] regionsToMerge = new RegionInfo[2]; regionsToMerge[0] = tableRegions.get(0); regionsToMerge[1] = tableRegions.get(1); collectAssignmentManagerMetrics(); MergeTableRegionsProcedure proc = new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true); long procId = procExec.submitProcedure(proc); ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); assertRegionCount(tableName, initialRegionCount - 1); assertEquals(mergeSubmittedCount + 1, mergeProcMetrics.getSubmittedCounter().getCount()); assertEquals(mergeFailedCount, mergeProcMetrics.getFailedCounter().getCount()); assertEquals(assignSubmittedCount + 1, assignProcMetrics.getSubmittedCounter().getCount()); assertEquals(assignFailedCount, assignProcMetrics.getFailedCounter().getCount()); assertEquals(unassignSubmittedCount + 2, unassignProcMetrics.getSubmittedCounter().getCount()); assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount()); Pair<RegionInfo, RegionInfo> pair = MetaTableAccessor.getRegionsFromMergeQualifier(UTIL.getConnection(), proc.getMergedRegion().getRegionName()); assertTrue(pair.getFirst() != null && pair.getSecond() != null); UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(true); UTIL.getHBaseCluster().getMaster().getCatalogJanitor().triggerNow(); while (pair != null && pair.getFirst() != null && pair.getSecond() != null) { pair = MetaTableAccessor.getRegionsFromMergeQualifier(UTIL.getConnection(), proc.getMergedRegion().getRegionName()); } } | /**
* This tests two region merges
*/ | This tests two region merges | testMergeTwoRegions | {
"repo_name": "ultratendency/hbase",
"path": "hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java",
"license": "apache-2.0",
"size": 15070
} | [
"java.util.List",
"org.apache.hadoop.hbase.MetaTableAccessor",
"org.apache.hadoop.hbase.TableName",
"org.apache.hadoop.hbase.client.RegionInfo",
"org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv",
"org.apache.hadoop.hbase.procedure2.ProcedureExecutor",
"org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility",
"org.apache.hadoop.hbase.util.Pair",
"org.junit.Assert"
] | import java.util.List; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.util.Pair; import org.junit.Assert; | import java.util.*; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.master.procedure.*; import org.apache.hadoop.hbase.procedure2.*; import org.apache.hadoop.hbase.util.*; import org.junit.*; | [
"java.util",
"org.apache.hadoop",
"org.junit"
] | java.util; org.apache.hadoop; org.junit; | 2,387,550 |
@ServiceMethod(returns = ReturnType.COLLECTION)
PagedFlux<PublicIpPrefixInner> listAsync(); | @ServiceMethod(returns = ReturnType.COLLECTION) PagedFlux<PublicIpPrefixInner> listAsync(); | /**
* Gets all the public IP prefixes in a subscription.
*
* @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return all the public IP prefixes in a subscription as paginated response with {@link PagedFlux}.
*/ | Gets all the public IP prefixes in a subscription | listAsync | {
"repo_name": "Azure/azure-sdk-for-java",
"path": "sdk/resourcemanager/azure-resourcemanager-network/src/main/java/com/azure/resourcemanager/network/fluent/PublicIpPrefixesClient.java",
"license": "mit",
"size": 24606
} | [
"com.azure.core.annotation.ReturnType",
"com.azure.core.annotation.ServiceMethod",
"com.azure.core.http.rest.PagedFlux",
"com.azure.resourcemanager.network.fluent.models.PublicIpPrefixInner"
] | import com.azure.core.annotation.ReturnType; import com.azure.core.annotation.ServiceMethod; import com.azure.core.http.rest.PagedFlux; import com.azure.resourcemanager.network.fluent.models.PublicIpPrefixInner; | import com.azure.core.annotation.*; import com.azure.core.http.rest.*; import com.azure.resourcemanager.network.fluent.models.*; | [
"com.azure.core",
"com.azure.resourcemanager"
] | com.azure.core; com.azure.resourcemanager; | 2,452,640 |
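A minimal consumption sketch, assuming an already-built PublicIpPrefixesClient; since PagedFlux is a Reactor Flux, it can be subscribed to directly and pages are fetched lazily as elements are requested.

import com.azure.core.http.rest.PagedFlux;
import com.azure.resourcemanager.network.fluent.PublicIpPrefixesClient;
import com.azure.resourcemanager.network.fluent.models.PublicIpPrefixInner;

class ListPrefixesSketch {
    // client construction (credentials, pipeline) is out of scope here
    static void printPrefixIds(PublicIpPrefixesClient client) {
        PagedFlux<PublicIpPrefixInner> prefixes = client.listAsync();
        prefixes.subscribe(
            prefix -> System.out.println(prefix.id()),
            error -> System.err.println("list failed: " + error));
    }
}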
public Observable<ServiceResponse<Page<USqlType>>> listTypesSinglePageAsync(final String accountName, final String databaseName, final String schemaName, final String filter, final Integer top, final Integer skip, final String select, final String orderby, final Boolean count) {
if (accountName == null) {
throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
}
if (this.client.adlaCatalogDnsSuffix() == null) {
throw new IllegalArgumentException("Parameter this.client.adlaCatalogDnsSuffix() is required and cannot be null.");
}
if (databaseName == null) {
throw new IllegalArgumentException("Parameter databaseName is required and cannot be null.");
}
if (schemaName == null) {
throw new IllegalArgumentException("Parameter schemaName is required and cannot be null.");
}
if (this.client.apiVersion() == null) {
throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
} | Observable<ServiceResponse<Page<USqlType>>> function(final String accountName, final String databaseName, final String schemaName, final String filter, final Integer top, final Integer skip, final String select, final String orderby, final Boolean count) { if (accountName == null) { throw new IllegalArgumentException(STR); } if (this.client.adlaCatalogDnsSuffix() == null) { throw new IllegalArgumentException(STR); } if (databaseName == null) { throw new IllegalArgumentException(STR); } if (schemaName == null) { throw new IllegalArgumentException(STR); } if (this.client.apiVersion() == null) { throw new IllegalArgumentException(STR); } | /**
* Retrieves the list of types within the specified database and schema from the Data Lake Analytics catalog.
*
* @param accountName The Azure Data Lake Analytics account upon which to execute catalog operations.
* @param databaseName The name of the database containing the types.
* @param schemaName The name of the schema containing the types.
* @param filter OData filter. Optional.
* @param top The number of items to return. Optional.
* @param skip The number of items to skip over before returning elements. Optional.
* @param select OData Select statement. Limits the properties on each entry to just those requested, e.g. Categories?$select=CategoryName,Description. Optional.
* @param orderby OrderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. Optional.
* @param count The Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the PagedList<USqlType> object wrapped in {@link ServiceResponse} if successful.
*/ | Retrieves the list of types within the specified database and schema from the Data Lake Analytics catalog | listTypesSinglePageAsync | {
"repo_name": "martinsawicki/azure-sdk-for-java",
"path": "azure-mgmt-datalake-analytics/src/main/java/com/microsoft/azure/management/datalake/analytics/implementation/CatalogsImpl.java",
"license": "mit",
"size": 687714
} | [
"com.microsoft.azure.Page",
"com.microsoft.azure.management.datalake.analytics.models.USqlType",
"com.microsoft.rest.ServiceResponse"
] | import com.microsoft.azure.Page; import com.microsoft.azure.management.datalake.analytics.models.USqlType; import com.microsoft.rest.ServiceResponse; | import com.microsoft.azure.*; import com.microsoft.azure.management.datalake.analytics.models.*; import com.microsoft.rest.*; | [
"com.microsoft.azure",
"com.microsoft.rest"
] | com.microsoft.azure; com.microsoft.rest; | 396,724 |
private static String serverUrl(HttpServletRequest req)
{
StringBuilder url = new StringBuilder();
url.append(req.getScheme());
url.append("://");
url.append(req.getServerName());
if (((req.getServerPort() != 80) && (!req.isSecure())) || ((req.getServerPort() != 443) && (req.isSecure())))
{
url.append(":");
url.append(req.getServerPort());
}
return url.toString();
} | static String function(HttpServletRequest req) { StringBuilder url = new StringBuilder(); url.append(req.getScheme()); url.append(STR); url.append(req.getServerName()); if (((req.getServerPort() != 80) && (!req.isSecure())) || ((req.getServerPort() != 443) && (req.isSecure()))) { url.append(":"); url.append(req.getServerPort()); } return url.toString(); } | /**
* This method is a duplicate of org.sakaiproject.util.web.Web.serverUrl()
* Duplicated here from org.sakaiproject.util.web.Web.java so that
* the JSF tag library doesn't have a direct jar dependency on more of Sakai.
*/ | This method is a duplicate of org.sakaiproject.util.web.Web.serverUrl() Duplicated here from org.sakaiproject.util.web.Web.java so that the JSF tag library doesn't have a direct jar dependency on more of Sakai | serverUrl | {
"repo_name": "eemirtekin/Sakai-10.6-TR",
"path": "jsf/jsf-widgets/src/java/org/sakaiproject/jsf/renderer/CourierRenderer.java",
"license": "apache-2.0",
"size": 4076
} | [
"javax.servlet.http.HttpServletRequest"
] | import javax.servlet.http.HttpServletRequest; | import javax.servlet.http.*; | [
"javax.servlet"
] | javax.servlet; | 722,855 |
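The port-suffix rule is the only subtle part of serverUrl(); the same decision isolated as a pure function that can be table-tested without a servlet container:

public class ServerUrlSketch {
    // mirror of serverUrl(): omit the port only for 80/plain and 443/secure
    static String baseUrl(String scheme, String host, int port, boolean secure) {
        StringBuilder url = new StringBuilder(scheme).append("://").append(host);
        if ((port != 80 && !secure) || (port != 443 && secure)) {
            url.append(':').append(port);
        }
        return url.toString();
    }

    public static void main(String[] args) {
        System.out.println(baseUrl("http", "example.org", 80, false));   // http://example.org
        System.out.println(baseUrl("https", "example.org", 443, true));  // https://example.org
        System.out.println(baseUrl("https", "example.org", 8443, true)); // https://example.org:8443
    }
}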
// ___ ___ ___ _ _ _ _ ___ _ ___ ___ _____ ___ _ _ ___ ___
// | _ \ __/ __| | | | | /_\ | _ \ | | |_ _/ __|_ _| __| \| | __| _ \
// | / _| (_ | |_| | |__ / _ \| / | |__ | |\__ \ | | | _|| .` | _|| /
// |_|_\___\___|\___/|____/_/ \_\_|_\ |____|___|___/ |_| |___|_|\_|___|_|_\
//
@EventHandler(ignoreCancelled = true, priority = EventPriority.HIGH) //RoboMWM - High priority to allow plugins to clear the blocklist if they so choose
public void regularExplosions(EntityExplodeEvent event)
{
if (event instanceof FakeEntityExplodeEvent || !(event.getEntity() instanceof Ghast || event.getEntity() instanceof TNTPrimed))
return;
final Entity sourceEntity = event.getEntity();
final World world = event.getLocation().getWorld();
final Location location = sourceEntity.getLocation();
final boolean flyOtherPlugins = CFG.getBoolean(RootNode.EXPLOSIONS_FYLING_BLOCKS_ENABLE_OTHER, world.getName());
final boolean customGhastExplosion = CFG.getBoolean(RootNode.EXPLOSIONS_GHASTS_ENABLE, world.getName());
final boolean customTntExplosion = CFG.getBoolean(RootNode.EXPLOSIONS_TNT_ENABLE, world.getName());
final boolean multipleExplosions = CFG.getBoolean(RootNode.BETTER_TNT, world.getName());
//cancel explosion if no worldDamage should be done
final boolean tntWorldDamage = event.getLocation().getBlockY() > CFG.getInt(RootNode.EXPLOSIONS_Y, world.getName())
? CFG.getBoolean(RootNode.EXPLOSIONS_TNT_ABOVE_WORLD_GRIEF, world.getName())
: CFG.getBoolean(RootNode.EXPLOSIONS_TNT_BELOW_WORLD_GRIEF, world.getName());
// TNT
if (sourceEntity instanceof TNTPrimed)
{
if (customTntExplosion && event.blockList().size() > 0 && (flyOtherPlugins || event.getYield() == 0.25)) //getYield value of 0.25 somewhat ensures this is a vanilla TNT explosion.
{
if (!multipleExplosions)
{
CreateExplosionTask explosionTask = new CreateExplosionTask(plugin, location, ExplosionType.TNT, sourceEntity);
plugin.getServer().getScheduler().scheduleSyncDelayedTask(plugin, explosionTask, 1L);
} else //multiple explosions will also handle the custom size
{
multipleExplosions(location, sourceEntity, ExplosionType.TNT);
}
if (!tntWorldDamage && CFG.isEnabledIn(world.getName()))
event.setCancelled(true);
}
}
// GHASTS
else if (sourceEntity instanceof Fireball)
{
if (customGhastExplosion)
{
Fireball fireball = (Fireball) sourceEntity;
if (fireball.getShooter() instanceof Ghast)
{
event.setCancelled(true);
// same as vanilla TNT, plus fire
new CreateExplosionTask(plugin, sourceEntity.getLocation(), ExplosionType.GHAST_FIREBALL, sourceEntity).run();
}
}
}
} | void function(EntityExplodeEvent event) { if (event instanceof FakeEntityExplodeEvent !(event.getEntity() instanceof Ghast event.getEntity() instanceof TNTPrimed)) return; final Entity sourceEntity = event.getEntity(); final World world = event.getLocation().getWorld(); final Location location = sourceEntity.getLocation(); final boolean flyOtherPlugins = CFG.getBoolean(RootNode.EXPLOSIONS_FYLING_BLOCKS_ENABLE_OTHER, world.getName()); final boolean customGhastExplosion = CFG.getBoolean(RootNode.EXPLOSIONS_GHASTS_ENABLE, world.getName()); final boolean customTntExplosion = CFG.getBoolean(RootNode.EXPLOSIONS_TNT_ENABLE, world.getName()); final boolean multipleExplosions = CFG.getBoolean(RootNode.BETTER_TNT, world.getName()); final boolean tntWorldDamage = event.getLocation().getBlockY() > CFG.getInt(RootNode.EXPLOSIONS_Y, world.getName()) ? CFG.getBoolean(RootNode.EXPLOSIONS_TNT_ABOVE_WORLD_GRIEF, world.getName()) : CFG.getBoolean(RootNode.EXPLOSIONS_TNT_BELOW_WORLD_GRIEF, world.getName()); if (sourceEntity instanceof TNTPrimed) { if (customTntExplosion && event.blockList().size() > 0 && (flyOtherPlugins event.getYield() == 0.25)) { if (!multipleExplosions) { CreateExplosionTask explosionTask = new CreateExplosionTask(plugin, location, ExplosionType.TNT, sourceEntity); plugin.getServer().getScheduler().scheduleSyncDelayedTask(plugin, explosionTask, 1L); } else { multipleExplosions(location, sourceEntity, ExplosionType.TNT); } if (!tntWorldDamage && CFG.isEnabledIn(world.getName())) event.setCancelled(true); } } else if (sourceEntity instanceof Fireball) { if (customGhastExplosion) { Fireball fireball = (Fireball) sourceEntity; if (fireball.getShooter() instanceof Ghast) { event.setCancelled(true); new CreateExplosionTask(plugin, sourceEntity.getLocation(), ExplosionType.GHAST_FIREBALL, sourceEntity).run(); } } } } | /**
* Regular listener:
* Bigger (custom) explosions
*/ | Regular listener: Bigger (custom) explosions | regularExplosions | {
"repo_name": "MLG-Fortress/ExtraHardMode",
"path": "src/main/java/com/extrahardmode/features/Explosions.java",
"license": "agpl-3.0",
"size": 20252
} | [
"com.extrahardmode.config.ExplosionType",
"com.extrahardmode.config.RootNode",
"com.extrahardmode.events.fakeevents.FakeEntityExplodeEvent",
"com.extrahardmode.task.CreateExplosionTask",
"org.bukkit.Location",
"org.bukkit.World",
"org.bukkit.entity.Entity",
"org.bukkit.entity.Fireball",
"org.bukkit.entity.Ghast",
"org.bukkit.entity.TNTPrimed",
"org.bukkit.event.entity.EntityExplodeEvent"
] | import com.extrahardmode.config.ExplosionType; import com.extrahardmode.config.RootNode; import com.extrahardmode.events.fakeevents.FakeEntityExplodeEvent; import com.extrahardmode.task.CreateExplosionTask; import org.bukkit.Location; import org.bukkit.World; import org.bukkit.entity.Entity; import org.bukkit.entity.Fireball; import org.bukkit.entity.Ghast; import org.bukkit.entity.TNTPrimed; import org.bukkit.event.entity.EntityExplodeEvent; | import com.extrahardmode.config.*; import com.extrahardmode.events.fakeevents.*; import com.extrahardmode.task.*; import org.bukkit.*; import org.bukkit.entity.*; import org.bukkit.event.entity.*; | [
"com.extrahardmode.config",
"com.extrahardmode.events",
"com.extrahardmode.task",
"org.bukkit",
"org.bukkit.entity",
"org.bukkit.event"
] | com.extrahardmode.config; com.extrahardmode.events; com.extrahardmode.task; org.bukkit; org.bukkit.entity; org.bukkit.event; | 94,698 |
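None of the @EventHandler methods fire unless the listener is registered; a minimal registration sketch, assuming Explosions implements org.bukkit.event.Listener and that its constructor takes the plugin instance (the real wiring in ExtraHardMode may differ):

import org.bukkit.plugin.java.JavaPlugin;

public final class RegistrationSketch extends JavaPlugin {
    @Override
    public void onEnable() {
        // assumed constructor; the point is the registerEvents call
        getServer().getPluginManager().registerEvents(new Explosions(this), this);
    }
}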
private void attachToWindow() {
if (mAttached) {
// Can happen if the view is still running
// exit animation.
mHolder.rootView.clearAnimation();
detachFromWindow();
} else {
getConfig().getTriggers().incrementLaunchCount(mHolder.context, this);
}
mAttached = true;
boolean overlapStatusBar = mShownAtTop && getConfig().isStatusBarOverlapEnabled();
mHolder.rootView.setShownOnTop(mShownAtTop);
// Define the padding
Resources res = mHolder.context.getResources();
final int paddingTop = overlapStatusBar
? 0
: res.getDimensionPixelSize(R.dimen.headsup_root_padding_top);
View v = mHolder.containerView;
v.setPadding(v.getPaddingLeft(), paddingTop, v.getPaddingRight(), v.getPaddingBottom());
v.setTranslationY(0);
// And the rotation
if (!mShownAtTop) v.setBackground(res.getDrawable(R.drawable.bg_shade_flipped));
else v.setBackground(res.getDrawable(R.drawable.bg_shade));
// Add the view to the window.
int layoutInScreenFlag = overlapStatusBar ? WindowManager.LayoutParams.FLAG_LAYOUT_IN_SCREEN : 0;
WindowManager.LayoutParams lp = new WindowManager.LayoutParams(
WindowManager.LayoutParams.MATCH_PARENT,
WindowManager.LayoutParams.WRAP_CONTENT,
WindowManager.LayoutParams.TYPE_SYSTEM_ERROR,
WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE
| WindowManager.LayoutParams.FLAG_NOT_TOUCH_MODAL
| WindowManager.LayoutParams.FLAG_WATCH_OUTSIDE_TOUCH
| layoutInScreenFlag,
PixelFormat.TRANSLUCENT);
lp.gravity = (mShownAtTop ? Gravity.TOP : Gravity.BOTTOM) | Gravity.CENTER_HORIZONTAL;
mHolder.wm.addView(mHolder.rootView, lp);
} | void function() { if (mAttached) { mHolder.rootView.clearAnimation(); detachFromWindow(); } else { getConfig().getTriggers().incrementLaunchCount(mHolder.context, this); } mAttached = true; boolean overlapStatusBar = mShownAtTop && getConfig().isStatusBarOverlapEnabled(); mHolder.rootView.setShownOnTop(mShownAtTop); Resources res = mHolder.context.getResources(); final int paddingTop = overlapStatusBar ? 0 : res.getDimensionPixelSize(R.dimen.headsup_root_padding_top); View v = mHolder.containerView; v.setPadding(v.getPaddingLeft(), paddingTop, v.getPaddingRight(), v.getPaddingBottom()); v.setTranslationY(0); if (!mShownAtTop) v.setBackground(res.getDrawable(R.drawable.bg_shade_flipped)); else v.setBackground(res.getDrawable(R.drawable.bg_shade)); int layoutInScreenFlag = overlapStatusBar ? WindowManager.LayoutParams.FLAG_LAYOUT_IN_SCREEN : 0; WindowManager.LayoutParams lp = new WindowManager.LayoutParams( WindowManager.LayoutParams.MATCH_PARENT, WindowManager.LayoutParams.WRAP_CONTENT, WindowManager.LayoutParams.TYPE_SYSTEM_ERROR, WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE WindowManager.LayoutParams.FLAG_NOT_TOUCH_MODAL WindowManager.LayoutParams.FLAG_WATCH_OUTSIDE_TOUCH layoutInScreenFlag, PixelFormat.TRANSLUCENT); lp.gravity = (mShownAtTop ? Gravity.TOP : Gravity.BOTTOM) Gravity.CENTER_HORIZONTAL; mHolder.wm.addView(mHolder.rootView, lp); } | /**
* Adds {@link #mHolder#rootView view} to window.
*
* @see #detachFromWindow()
*/ | Adds <code>#mHolder#rootView view</code> to window | attachToWindow | {
"repo_name": "AChep/HeadsUp",
"path": "project/app/src/main/java/com/achep/headsup/HeadsUpBase.java",
"license": "gpl-2.0",
"size": 24088
} | [
"android.content.res.Resources",
"android.graphics.PixelFormat",
"android.view.Gravity",
"android.view.View",
"android.view.WindowManager"
] | import android.content.res.Resources; import android.graphics.PixelFormat; import android.view.Gravity; import android.view.View; import android.view.WindowManager; | import android.content.res.*; import android.graphics.*; import android.view.*; | [
"android.content",
"android.graphics",
"android.view"
] | android.content; android.graphics; android.view; | 2,899,306 |
public void showButtonPress(HardwareWalletEvent event) {
// Do nothing
} | void function(HardwareWalletEvent event) { } | /**
* Handles state transition to a "button press" panel
*
* Usually this will be a "Confirm" panel following a "Request" and the
* panel will show text mirroring the Trezor
*
* Clicking a button on the device will trigger further state transitions
*
* @param event The hardware wallet event containing payload and context
*/ | Handles state transition to a "button press" panel Usually this will be a "Confirm" panel following a "Request" and the panel will show text mirroring the Trezor Clicking a button on the device will trigger further state transitions | showButtonPress | {
"repo_name": "akonring/multibit-hd-modified",
"path": "mbhd-swing/src/main/java/org/multibit/hd/ui/views/wizards/AbstractHardwareWalletWizardModel.java",
"license": "mit",
"size": 10347
} | [
"org.multibit.hd.hardware.core.events.HardwareWalletEvent"
] | import org.multibit.hd.hardware.core.events.HardwareWalletEvent; | import org.multibit.hd.hardware.core.events.*; | [
"org.multibit.hd"
] | org.multibit.hd; | 2,791,996 |
public final String getNameByValue(String value)
throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("value", value);
return (String) _valuesToNames.get(value);
} | final String function(String value) throws IllegalArgumentException { MandatoryArgumentChecker.check("value", value); return (String) _valuesToNames.get(value); } | /**
* Gets the name matching the specified value.
*
* @param value
* the value to match a corresponding name by, cannot be
* <code>null</code>.
*
* @return
* the corresponding name, or <code>null</code> if there is none.
*
* @throws IllegalArgumentException
* if <code>value == null</code>.
*/ | Gets the name matching the specified value | getNameByValue | {
"repo_name": "muloem/xins",
"path": "src/java-common/org/xins/common/types/EnumType.java",
"license": "bsd-3-clause",
"size": 6710
} | [
"org.xins.common.MandatoryArgumentChecker"
] | import org.xins.common.MandatoryArgumentChecker; | import org.xins.common.*; | [
"org.xins.common"
] | org.xins.common; | 1,725,282 |
public IAsset getCloseNodeImage() {
return m_objCloseNodeImage;
} | IAsset function() { return m_objCloseNodeImage; } | /**
* Returns the closeNodeImage.
* @return IAsset
*/ | Returns the closeNodeImage | getCloseNodeImage | {
"repo_name": "apache/tapestry3",
"path": "tapestry-contrib/src/org/apache/tapestry/contrib/tree/components/TreeNodeView.java",
"license": "apache-2.0",
"size": 13907
} | [
"org.apache.tapestry.IAsset"
] | import org.apache.tapestry.IAsset; | import org.apache.tapestry.*; | [
"org.apache.tapestry"
] | org.apache.tapestry; | 758,018 |
@Override
protected File getParent(final File from) {
if (from.getParentFile() != null) {
if (from.isFile()) {
return getParent(from.getParentFile());
} else {
return from.getParentFile();
}
} else {
return from;
}
} | File function(final File from) { if (from.getParentFile() != null) { if (from.isFile()) { return getParent(from.getParentFile()); } else { return from.getParentFile(); } } else { return from; } } | /**
* Return the path to the parent directory. Should return the root if
* from is root.
*
* @param from
*/ | Return the path to the parent directory. Should return the root if from is root | getParent | {
"repo_name": "0359xiaodong/NoNonsense-FilePicker",
"path": "library/src/main/java/com/nononsenseapps/filepicker/FilePickerFragment.java",
"license": "gpl-2.0",
"size": 6060
} | [
"java.io.File"
] | import java.io.File; | import java.io.*; | [
"java.io"
] | java.io; | 1,551,437 |
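Worth noting: for a regular file the recursion does not return the containing directory but that directory's parent, because getParent is applied to from.getParentFile(). A standalone mirror that can be traced by hand:

import java.io.File;

public class ParentSketch {
    // mirror of FilePickerFragment.getParent()
    static File parent(File from) {
        if (from.getParentFile() != null) {
            return from.isFile() ? parent(from.getParentFile()) : from.getParentFile();
        }
        return from; // the filesystem root returns itself
    }

    public static void main(String[] args) {
        System.out.println(parent(new File("/tmp"))); // "/" on a Unix layout
        System.out.println(parent(new File("/")));    // "/" since root is its own parent
        // for an existing file /tmp/demo.txt the result is "/", not "/tmp",
        // because the file case recurses on its containing directory
    }
}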
private Resource toResource(String clusterName, String alias, CredentialStoreType credentialStoreType, Set<String> requestedIds) {
Resource resource = new ResourceImpl(Type.Credential);
setResourceProperty(resource, CREDENTIAL_CLUSTER_NAME_PROPERTY_ID, clusterName, requestedIds);
setResourceProperty(resource, CREDENTIAL_ALIAS_PROPERTY_ID, alias, requestedIds);
setResourceProperty(resource, CREDENTIAL_TYPE_PROPERTY_ID, credentialStoreType.name().toLowerCase(), requestedIds);
return resource;
}
private class CreateResourcesCommand implements Command<String> {
private final Map<String, Object> properties;
public CreateResourcesCommand(Map<String, Object> properties) {
this.properties = properties;
} | Resource function(String clusterName, String alias, CredentialStoreType credentialStoreType, Set<String> requestedIds) { Resource resource = new ResourceImpl(Type.Credential); setResourceProperty(resource, CREDENTIAL_CLUSTER_NAME_PROPERTY_ID, clusterName, requestedIds); setResourceProperty(resource, CREDENTIAL_ALIAS_PROPERTY_ID, alias, requestedIds); setResourceProperty(resource, CREDENTIAL_TYPE_PROPERTY_ID, credentialStoreType.name().toLowerCase(), requestedIds); return resource; } private class CreateResourcesCommand implements Command<String> { private final Map<String, Object> properties; public CreateResourcesCommand(Map<String, Object> properties) { this.properties = properties; } | /**
* Creates a new resource from the given cluster name, alias, and persist values.
*
* @param clusterName a cluster name
* @param alias an alias
* @param credentialStoreType the relevant credential store type
* @param requestedIds the properties to include in the resulting resource instance
* @return a resource
*/ | Creates a new resource from the given cluster name, alias, and persist values | toResource | {
"repo_name": "zouzhberk/ambaridemo",
"path": "demo-server/src/main/java/org/apache/ambari/server/controller/internal/CredentialResourceProvider.java",
"license": "apache-2.0",
"size": 18475
} | [
"java.util.Map",
"java.util.Set",
"org.apache.ambari.server.controller.spi.Resource",
"org.apache.ambari.server.security.credential.Credential",
"org.apache.ambari.server.security.encryption.CredentialStoreType"
] | import java.util.Map; import java.util.Set; import org.apache.ambari.server.controller.spi.Resource; import org.apache.ambari.server.security.credential.Credential; import org.apache.ambari.server.security.encryption.CredentialStoreType; | import java.util.*; import org.apache.ambari.server.controller.spi.*; import org.apache.ambari.server.security.credential.*; import org.apache.ambari.server.security.encryption.*; | [
"java.util",
"org.apache.ambari"
] | java.util; org.apache.ambari; | 1,358,646 |
public FaxJobStatus[] pollForFaxJobStatues(FaxJob[] faxJobs); | FaxJobStatus[] function(FaxJob[] faxJobs); | /**
* This function polls the new statuses for the provided fax jobs.
*
* @param faxJobs
* The fax jobs to poll
* @return The fax job statuses
*/ | This function polls the new statuses for the provided fax jobs | pollForFaxJobStatues | {
"repo_name": "ZhernakovMikhail/fax4j",
"path": "src/main/java/org/fax4j/spi/FaxClientSpi.java",
"license": "apache-2.0",
"size": 3057
} | [
"org.fax4j.FaxJob",
"org.fax4j.FaxJobStatus"
] | import org.fax4j.FaxJob; import org.fax4j.FaxJobStatus; | import org.fax4j.*; | [
"org.fax4j"
] | org.fax4j; | 537,432 |
interface WithUnknown {
WithCreate withUnknown(UnknownTarget unknown);
}
interface WithCreate extends Creatable<StorageTarget>, DefinitionStages.WithClfs, DefinitionStages.WithJunctions, DefinitionStages.WithNfs3, DefinitionStages.WithProvisioningState, DefinitionStages.WithTargetType, DefinitionStages.WithUnknown {
}
}
interface Update extends Appliable<StorageTarget>, UpdateStages.WithClfs, UpdateStages.WithJunctions, UpdateStages.WithNfs3, UpdateStages.WithProvisioningState, UpdateStages.WithTargetType, UpdateStages.WithUnknown {
} | interface WithUnknown { WithCreate withUnknown(UnknownTarget unknown); } interface WithCreate extends Creatable<StorageTarget>, DefinitionStages.WithClfs, DefinitionStages.WithJunctions, DefinitionStages.WithNfs3, DefinitionStages.WithProvisioningState, DefinitionStages.WithTargetType, DefinitionStages.WithUnknown { } } interface Update extends Appliable<StorageTarget>, UpdateStages.WithClfs, UpdateStages.WithJunctions, UpdateStages.WithNfs3, UpdateStages.WithProvisioningState, UpdateStages.WithTargetType, UpdateStages.WithUnknown { } | /**
* Specifies unknown.
* @param unknown Properties when unknown target
* @return the next definition stage
*/ | Specifies unknown | withUnknown | {
"repo_name": "selvasingh/azure-sdk-for-java",
"path": "sdk/storagecache/mgmt-v2019_08_01/src/main/java/com/microsoft/azure/management/storagecache/v2019_08_01/StorageTarget.java",
"license": "mit",
"size": 8886
} | [
"com.microsoft.azure.arm.model.Appliable",
"com.microsoft.azure.arm.model.Creatable"
] | import com.microsoft.azure.arm.model.Appliable; import com.microsoft.azure.arm.model.Creatable; | import com.microsoft.azure.arm.model.*; | [
"com.microsoft.azure"
] | com.microsoft.azure; | 332,704 |
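The staged interfaces compose into the usual ARM fluent chain; a hypothetical sketch in which only withUnknown(...) and create() come from the stages above, while the entry point (here called define) and the storageTargets collection are assumptions:

// hypothetical: "storageTargets" and "define" are assumed entry points
StorageTarget target = storageTargets
    .define("target1")
    .withUnknown(new UnknownTarget()) // DefinitionStages.WithUnknown
    .create();                        // from Creatable<StorageTarget>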
public byte[] readFile(String fileName) throws Exception {
Log.i(TAG, "Reading " + fileName);
SecureFileInputStream fis = openFile(fileName);
UnicodeReader unicodeReader = new UnicodeReader(fis);
try {
return unicodeReader.readAll().getBytes("UTF-8");
} catch (IOException e) {
Log.e(TAG, MainApplication.getInstance().getString(R.string.error_message_error_reading, fileName), e);
}
finally {
fis.close();
unicodeReader.close();
}
return null;
}
| byte[] function(String fileName) throws Exception { Log.i(TAG, STR + fileName); SecureFileInputStream fis = openFile(fileName); UnicodeReader unicodeReader = new UnicodeReader(fis); try { return unicodeReader.readAll().getBytes("UTF-8"); } catch (IOException e) { Log.e(TAG, MainApplication.getInstance().getString(R.string.error_message_error_reading, fileName), e); } finally { fis.close(); unicodeReader.close(); } return null; } | /**
* Returns a BufferedInputStream corresponding to the data
* stored in the virtual secure filesystem as fileName.
*
* @throws FileNotFoundException if fileName does not exist in the virtual
* secure filesystem.
*/ | Returns a BufferedInputStream corresponding to the data stored in the virtual secure filesystem as fileName | readFile | {
"repo_name": "benetech/Martus-Secure-App-Generator",
"path": "SecureAppGenerator/src/main/resources/static/SecureAppBuildMaster/secure-app/src/org/benetech/secureapp/collect/io/SecureFileStorageManager.java",
"license": "gpl-2.0",
"size": 5776
} | [
"android.util.Log",
"java.io.IOException",
"org.benetech.secureapp.application.MainApplication",
"org.martus.android.library.io.SecureFileInputStream",
"org.martus.util.UnicodeReader"
] | import android.util.Log; import java.io.IOException; import org.benetech.secureapp.application.MainApplication; import org.martus.android.library.io.SecureFileInputStream; import org.martus.util.UnicodeReader; | import android.util.*; import java.io.*; import org.benetech.secureapp.application.*; import org.martus.android.library.io.*; import org.martus.util.*; | [
"android.util",
"java.io",
"org.benetech.secureapp",
"org.martus.android",
"org.martus.util"
] | android.util; java.io; org.benetech.secureapp; org.martus.android; org.martus.util; | 1,359,387 |
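A try-with-resources variant of the same read, assuming SecureFileInputStream and UnicodeReader both implement java.io.Closeable (the record closes them by hand); unlike the original it propagates the IOException instead of logging and returning null:

public byte[] readFileTwr(String fileName) throws Exception {
    try (SecureFileInputStream fis = openFile(fileName);
         UnicodeReader reader = new UnicodeReader(fis)) {
        return reader.readAll().getBytes("UTF-8");
    }
}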
public void setAsciiStream(int parameterIndex, InputStream parameterValue,
int length) throws SQLException {
if (JDTrace.isTraceOn()) { // @H1A
JDTrace.logInformation(this, "setAsciiStream()"); // @H1A
if (parameterValue == null) // @H1A
JDTrace.logInformation(this, "parameter index: " + parameterIndex
+ " value: NULL"); // @H1A
else
JDTrace.logInformation(this, "parameter index: " + parameterIndex
+ " length: " + length); // @H1A
} // @H1A
// Validate the length parameter
if (length < 0)
JDError.throwSQLException(this, JDError.EXC_BUFFER_LENGTH_INVALID);
// @J0A added the code from setValue in this method because streams and
// readers are handled specially
synchronized (internalLock_) {
checkOpen();
// Validate the parameter index.
if ((parameterIndex < 1) || (parameterIndex > parameterCount_))
JDError.throwSQLException(this, JDError.EXC_DESCRIPTOR_INDEX_INVALID);
// Check if the parameter index refers to the return value parameter.
// This is an OUT parameter, so sets are not allowed. If its not
// parameter index 1, then decrement the parameter index, since we
// are "faking" the return value parameter.
if (useReturnValueParameter_) {
if (parameterIndex == 1)
JDError.throwSQLException(this, JDError.EXC_PARAMETER_TYPE_INVALID);
else
--parameterIndex;
}
// Check that the parameter is an input parameter.
if (!parameterRow_.isInput(parameterIndex))
JDError.throwSQLException(this, JDError.EXC_PARAMETER_TYPE_INVALID);
// Set the parameter data. If there is a type mismatch,
// set() with throw an exception.
SQLData sqlData = parameterRow_.getSQLType(parameterIndex);
if (parameterValue != null) {
try {
// If the data is a locator, then set its handle.
int sqlType = sqlData.getSQLType(); // @xml3
if (sqlType == SQLData.CLOB_LOCATOR
|| sqlType == SQLData.BLOB_LOCATOR
|| sqlType == SQLData.DBCLOB_LOCATOR || // @pdc jdbc40
sqlType == SQLData.XML_LOCATOR) // @xml3
{
SQLLocator sqlDataAsLocator = (SQLLocator) sqlData;
sqlDataAsLocator.setHandle(parameterRow_
.getFieldLOBLocatorHandle(parameterIndex));
if (JDTrace.isTraceOn())
JDTrace.logInformation(
this,
"locator handle: "
+ parameterRow_.getFieldLOBLocatorHandle(parameterIndex));
sqlData.set(new ConvTableReader(parameterValue, 819, 0,
LOB_BLOCK_SIZE), null, length); // @J0M hacked this to use the
// scale parm for the length
} else {
sqlData.set(JDUtilities.readerToString(new ConvTableReader(
parameterValue, 819, 0, LOB_BLOCK_SIZE), length), null, -1);
}
} catch (UnsupportedEncodingException uee) {
}
testDataTruncation(parameterIndex, sqlData);
}
// Parameters can be null; you can call one of the set methods to null out
// a
// field of the database.
parameterNulls_[parameterIndex - 1] = (parameterValue == null);
parameterDefaults_[parameterIndex - 1] = false; // @EIA
parameterUnassigned_[parameterIndex - 1] = false; // @EIA
parameterSet_[parameterIndex - 1] = true;
}
// @J0M setValue (parameterIndex,
// @J0M (parameterValue == null) ? null : JDUtilities.streamToString
// (parameterValue, length, "ISO8859_1"), // @B2C
// @J0M null, -1); //@P0C
} | void function(int parameterIndex, InputStream parameterValue, int length) throws SQLException { if (JDTrace.isTraceOn()) { JDTrace.logInformation(this, STR); if (parameterValue == null) JDTrace.logInformation(this, STR + parameterIndex + STR); else JDTrace.logInformation(this, STR + parameterIndex + STR + length); } if (length < 0) JDError.throwSQLException(this, JDError.EXC_BUFFER_LENGTH_INVALID); synchronized (internalLock_) { checkOpen(); if ((parameterIndex < 1) (parameterIndex > parameterCount_)) JDError.throwSQLException(this, JDError.EXC_DESCRIPTOR_INDEX_INVALID); if (useReturnValueParameter_) { if (parameterIndex == 1) JDError.throwSQLException(this, JDError.EXC_PARAMETER_TYPE_INVALID); else --parameterIndex; } if (!parameterRow_.isInput(parameterIndex)) JDError.throwSQLException(this, JDError.EXC_PARAMETER_TYPE_INVALID); SQLData sqlData = parameterRow_.getSQLType(parameterIndex); if (parameterValue != null) { try { int sqlType = sqlData.getSQLType(); if (sqlType == SQLData.CLOB_LOCATOR sqlType == SQLData.BLOB_LOCATOR sqlType == SQLData.DBCLOB_LOCATOR sqlType == SQLData.XML_LOCATOR) { SQLLocator sqlDataAsLocator = (SQLLocator) sqlData; sqlDataAsLocator.setHandle(parameterRow_ .getFieldLOBLocatorHandle(parameterIndex)); if (JDTrace.isTraceOn()) JDTrace.logInformation( this, STR + parameterRow_.getFieldLOBLocatorHandle(parameterIndex)); sqlData.set(new ConvTableReader(parameterValue, 819, 0, LOB_BLOCK_SIZE), null, length); } else { sqlData.set(JDUtilities.readerToString(new ConvTableReader( parameterValue, 819, 0, LOB_BLOCK_SIZE), length), null, -1); } } catch (UnsupportedEncodingException uee) { } testDataTruncation(parameterIndex, sqlData); } parameterNulls_[parameterIndex - 1] = (parameterValue == null); parameterDefaults_[parameterIndex - 1] = false; } } | /**
* Sets an input parameter to an ASCII stream value. The driver reads the data
* from the stream as needed until no more bytes are available. The driver
* converts this to an SQL VARCHAR value.
*
* @param parameterIndex
* The parameter index (1-based).
* @param parameterValue
* The parameter value or null to set the value to SQL NULL.
* @param length
* The number of bytes in the stream.
* @exception SQLException
* If the statement is not open, the index is not valid, the
* parameter is not an input parameter, the length is not valid,
* the input stream does not contain all ASCII characters, or an
* error occurs while reading the input stream.
**/ | Sets an input parameter to an ASCII stream value. The driver reads the data from the stream as needed until no more bytes are available. The driver converts this to an SQL VARCHAR value | setAsciiStream | {
"repo_name": "devjunix/libjt400-java",
"path": "src/com/ibm/as400/access/AS400JDBCPreparedStatement.java",
"license": "epl-1.0",
"size": 198430
} | [
"java.io.InputStream",
"java.io.UnsupportedEncodingException",
"java.sql.SQLException"
] | import java.io.InputStream; import java.io.UnsupportedEncodingException; import java.sql.SQLException; | import java.io.*; import java.sql.*; | [
"java.io",
"java.sql"
] | java.io; java.sql; | 861,447 |
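Caller-side usage is plain JDBC; a minimal sketch with an assumed notes table, binding a byte-counted ASCII stream to parameter 1:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;

public class AsciiParamSketch {
    static void insertNote(Connection con, String note) throws Exception {
        byte[] ascii = note.getBytes(StandardCharsets.US_ASCII);
        try (PreparedStatement ps =
                 con.prepareStatement("INSERT INTO notes (body) VALUES (?)")) {
            ps.setAsciiStream(1, new ByteArrayInputStream(ascii), ascii.length);
            ps.executeUpdate();
        }
    }
}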
public void addCard(final CardModel cardModel, final int amount) {
Collection<IChartKey> keys = provider.getKeys(cardModel, filter);
for (IChartKey key : keys) {
if (key.getIntegerKey() + 1 > values.length) {
int[] tmp = new int[key.getIntegerKey() + 1];
System.arraycopy(values, 0, tmp, 0, values.length);
values = tmp;
}
values[key.getIntegerKey()] += amount;
}
list.clear();
int max = 0;
for (int value : values) {
if (value > max)
max = value;
}
double[] amounts = new double[max + 1];
for (int index = max + 1; index-- > 0;) {
amounts[values[index]] = index;
}
try {
addSeries("data1", amounts, amounts.length, 0, amounts.length);
} catch (Exception e) {
e.printStackTrace();
}
fireDatasetChanged();
}
| void function(final CardModel cardModel, final int amount) { Collection<IChartKey> keys = provider.getKeys(cardModel, filter); for (IChartKey key : keys) { if (key.getIntegerKey() + 1 > values.length) { int[] tmp = new int[key.getIntegerKey() + 1]; System.arraycopy(values, 0, tmp, 0, values.length); values = tmp; } values[key.getIntegerKey()] += amount; } list.clear(); int max = 0; for (int value : values) { if (value > max) max = value; } double[] amounts = new double[max + 1]; for (int index = max + 1; index-- > 0;) { amounts[values[index]] = index; } try { addSeries("data1", amounts, amounts.length, 0, amounts.length); } catch (Exception e) { e.printStackTrace(); } fireDatasetChanged(); } | /**
* Add cards to all data sets.
*
* @param cardModel
* the card to add.
* @param amount
* the amount of card to add.
*/ | Add cards to all data sets | addCard | {
"repo_name": "JoeyLeeuwinga/Firemox",
"path": "src/main/java/net/sf/firemox/chart/datasets/HistogramDataset.java",
"license": "gpl-2.0",
"size": 3540
} | [
"java.util.Collection",
"net.sf.firemox.chart.IChartKey",
"net.sf.firemox.clickable.target.card.CardModel"
] | import java.util.Collection; import net.sf.firemox.chart.IChartKey; import net.sf.firemox.clickable.target.card.CardModel; | import java.util.*; import net.sf.firemox.chart.*; import net.sf.firemox.clickable.target.card.*; | [
"java.util",
"net.sf.firemox"
] | java.util; net.sf.firemox; | 834,515 |
public Name add(int posn, Rdn comp) {
if (comp == null) {
throw new NullPointerException("Cannot set comp to null");
}
rdns.add(posn, comp);
unparsed = null; // no longer valid
return this;
} | Name function(int posn, Rdn comp) { if (comp == null) { throw new NullPointerException(STR); } rdns.add(posn, comp); unparsed = null; return this; } | /**
* Adds a single RDN at a specified position within this
* LDAP name.
* RDNs of this LDAP name at or after the index (if any) of the new
* RDN are shifted up by one (away from index 0) to accommodate
* the new RDN.
*
* @param comp The non-null RDN to add.
* @param posn The index at which to add the new RDN.
* Must be in the range [0,size()].
* @return The updated LdapName, not a new instance.
* Cannot be null.
* @exception IndexOutOfBoundsException
* If posn is outside the specified range.
*/ | Adds a single RDN at a specified position within this LDAP name. RDNs of this LDAP name at or after the index (if any) of the new RDN are shifted up by one (away from index 0) to accommodate the new RDN | add | {
"repo_name": "andreagenso/java2scala",
"path": "test/J2s/java/openjdk-6-src-b27/jdk/src/share/classes/javax/naming/ldap/LdapName.java",
"license": "apache-2.0",
"size": 29197
} | [
"javax.naming.Name"
] | import javax.naming.Name; | import javax.naming.*; | [
"javax.naming"
] | javax.naming; | 460,169 |
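A runnable sketch of the indexing rule: index 0 is the rightmost (least specific) RDN, so inserting at 1 pushes the existing RDNs away from the suffix.

import javax.naming.ldap.LdapName;
import javax.naming.ldap.Rdn;

public class LdapNameSketch {
    public static void main(String[] args) throws Exception {
        LdapName name = new LdapName("cn=John,ou=People,dc=example");
        name.add(1, new Rdn("ou=Eng")); // index 0 is "dc=example"
        System.out.println(name); // cn=John,ou=People,ou=Eng,dc=example
    }
}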
public static RenderingControlProxy reloadRenderingControl(Registry context,
long pixelsID, List<RenderingEnginePrx> reList)
throws RenderingServiceException, DSOutOfServiceException
{
if (!(registry.equals(context)))
throw new IllegalArgumentException("Not allow to access method.");
if (reList == null || reList.size() == 0)
throw new IllegalArgumentException("No RE specified.");
RenderingControlProxy proxy = (RenderingControlProxy)
singleton.rndSvcProxies.get(pixelsID);
if (proxy != null) {
proxy.shutDown();
proxy.setRenderingEngine(reList.get(0));
reList.remove(0);
List<RenderingControl> slaves = proxy.getSlaves();
if (slaves.size() == reList.size()) {
Iterator<RenderingControl> i = slaves.iterator();
int index = 0;
while (i.hasNext()) {
proxy = (RenderingControlProxy) i.next();
proxy.shutDown();
proxy.setRenderingEngine(reList.get(index));
}
index++;
}
}
return proxy;
} | static RenderingControlProxy function(Registry context, long pixelsID, List<RenderingEnginePrx> reList) throws RenderingServiceException, DSOutOfServiceException { if (!(registry.equals(context))) throw new IllegalArgumentException(STR); if (reList == null reList.size() == 0) throw new IllegalArgumentException(STR); RenderingControlProxy proxy = (RenderingControlProxy) singleton.rndSvcProxies.get(pixelsID); if (proxy != null) { proxy.shutDown(); proxy.setRenderingEngine(reList.get(0)); reList.remove(0); List<RenderingControl> slaves = proxy.getSlaves(); if (slaves.size() == reList.size()) { Iterator<RenderingControl> i = slaves.iterator(); int index = 0; while (i.hasNext()) { proxy = (RenderingControlProxy) i.next(); proxy.shutDown(); proxy.setRenderingEngine(reList.get(index)); } index++; } } return proxy; } | /**
* Reloads the rendering engine.
*
* @param context Reference to the registry. To ensure that agents cannot
* call the method. It must be a reference to the
* container's registry.
* @param pixelsID The ID of the pixels set.
* @param reList The {@link RenderingEngine}s.
* @return See above.
* @throws RenderingServiceException If an error occurred while setting
* the value.
* @throws DSOutOfServiceException If the connection is broken.
*/ | Reloads the rendering engine | reloadRenderingControl | {
"repo_name": "emilroz/openmicroscopy",
"path": "components/insight/SRC/org/openmicroscopy/shoola/env/rnd/PixelsServicesFactory.java",
"license": "gpl-2.0",
"size": 27446
} | [
"java.util.Iterator",
"java.util.List",
"org.openmicroscopy.shoola.env.config.Registry",
"org.openmicroscopy.shoola.env.data.DSOutOfServiceException"
] | import java.util.Iterator; import java.util.List; import org.openmicroscopy.shoola.env.config.Registry; import org.openmicroscopy.shoola.env.data.DSOutOfServiceException; | import java.util.*; import org.openmicroscopy.shoola.env.config.*; import org.openmicroscopy.shoola.env.data.*; | [
"java.util",
"org.openmicroscopy.shoola"
] | java.util; org.openmicroscopy.shoola; | 402,941 |
public static <T> T find(List<T> list, T object) {
for (T element : list) {
if (element.equals(object)) {
return element;
}
}
return null;
} | static <T> T function(List<T> list, T object) { for (T element : list) { if (element.equals(object)) { return element; } } return null; } | /**
* Looks for an object in a {@link List}. Two objects can be equal but have different fields.
* For example a {@link FileInfo} object is equal to another if the paths are equal, but two
* equal {@link FileInfo} objects can have different {@link FileInfo#getLastModified()} times.
*
* @param list the list to be searched
* @param object the object to be found
* @param <T> a class that implements equals correctly
* @return the object
*/ | Looks for an object in a <code>List</code>. Two objects can be equal but have different fields. For example a <code>FileInfo</code> object is equal to another if the paths are equal, but two equal <code>FileInfo</code> objects can have different <code>FileInfo#getLastModified()</code> times | find | {
"repo_name": "alexghitulescu/SyncSSH",
"path": "src/com/adg/sync/ssh/util/Util.java",
"license": "bsd-2-clause",
"size": 1687
} | [
"java.util.List"
] | import java.util.List; | import java.util.*; | [
"java.util"
] | java.util; | 970,483 |
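The point of find() over List.contains() is getting back the stored instance, whose non-identity fields may differ from the probe's; a self-contained sketch with a path-keyed entry type standing in for FileInfo:

import java.util.Arrays;
import java.util.List;

public class FindSketch {
    static final class Entry {
        final String path; final long lastModified;
        Entry(String path, long lastModified) { this.path = path; this.lastModified = lastModified; }
        @Override public boolean equals(Object o) {
            return o instanceof Entry && ((Entry) o).path.equals(path); // path-only equality
        }
        @Override public int hashCode() { return path.hashCode(); }
    }

    static <T> T find(List<T> list, T object) { // same shape as Util.find
        for (T element : list) if (element.equals(object)) return element;
        return null;
    }

    public static void main(String[] args) {
        List<Entry> stored = Arrays.asList(new Entry("/a.txt", 111L));
        Entry hit = find(stored, new Entry("/a.txt", 999L)); // equal probe, different timestamp
        System.out.println(hit.lastModified); // 111, the stored instance wins
    }
}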
private static BlobKey receiveAndCheckPutResponse(
InputStream is, MessageDigest md, BlobKey.BlobType blobType)
throws IOException {
int response = is.read();
if (response < 0) {
throw new EOFException("Premature end of response");
}
else if (response == RETURN_OKAY) {
BlobKey remoteKey = BlobKey.readFromInputStream(is);
byte[] localHash = md.digest();
if (blobType != remoteKey.getType()) {
throw new IOException("Detected data corruption during transfer");
}
if (!Arrays.equals(localHash, remoteKey.getHash())) {
throw new IOException("Detected data corruption during transfer");
}
return remoteKey;
}
else if (response == RETURN_ERROR) {
Throwable cause = BlobUtils.readExceptionFromStream(is);
throw new IOException("Server side error: " + cause.getMessage(), cause);
}
else {
throw new IOException("Unrecognized response: " + response + '.');
}
} | static BlobKey function( InputStream is, MessageDigest md, BlobKey.BlobType blobType) throws IOException { int response = is.read(); if (response < 0) { throw new EOFException(STR); } else if (response == RETURN_OKAY) { BlobKey remoteKey = BlobKey.readFromInputStream(is); byte[] localHash = md.digest(); if (blobType != remoteKey.getType()) { throw new IOException(STR); } if (!Arrays.equals(localHash, remoteKey.getHash())) { throw new IOException(STR); } return remoteKey; } else if (response == RETURN_ERROR) { Throwable cause = BlobUtils.readExceptionFromStream(is); throw new IOException(STR + cause.getMessage(), cause); } else { throw new IOException(STR + response + '.'); } } | /**
* Reads the response from the input stream and throws in case of errors.
*
* @param is
* stream to read from
* @param md
* message digest to check the response against
* @param blobType
* whether the BLOB should be permanent or transient
*
* @throws IOException
* if the response is an error, the message digest does not match or reading the response
* failed
*/ | Reads the response from the input stream and throws in case of errors | receiveAndCheckPutResponse | {
"repo_name": "hequn8128/flink",
"path": "flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobOutputStream.java",
"license": "apache-2.0",
"size": 5868
} | [
"java.io.EOFException",
"java.io.IOException",
"java.io.InputStream",
"java.security.MessageDigest",
"java.util.Arrays"
] | import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.security.MessageDigest; import java.util.Arrays; | import java.io.*; import java.security.*; import java.util.*; | [
"java.io",
"java.security",
"java.util"
] | java.io; java.security; java.util; | 132,386 |
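The integrity check reduces to digesting the bytes that were sent and comparing against the hash carried by the returned key; a standalone sketch (MD5 is used for illustration only, without asserting which algorithm BlobKey actually uses):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Arrays;

public class DigestCheckSketch {
    public static void main(String[] args) throws Exception {
        byte[] payload = "blob-bytes".getBytes(StandardCharsets.UTF_8);
        byte[] localHash = MessageDigest.getInstance("MD5").digest(payload);
        byte[] remoteHash = MessageDigest.getInstance("MD5").digest(payload); // stands in for remoteKey.getHash()
        if (!Arrays.equals(localHash, remoteHash)) {
            throw new IOException("Detected data corruption during transfer");
        }
        System.out.println("hashes match");
    }
}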
public void readPacketData(PacketBuffer buf) throws IOException
{
this.channel = buf.readStringFromBuffer(20);
int i = buf.readableBytes();
if (i >= 0 && i <= 32767)
{
this.data = new PacketBuffer(buf.readBytes(i));
}
else
{
throw new IOException("Payload may not be larger than 32767 bytes");
}
} | void function(PacketBuffer buf) throws IOException { this.channel = buf.readStringFromBuffer(20); int i = buf.readableBytes(); if (i >= 0 && i <= 32767) { this.data = new PacketBuffer(buf.readBytes(i)); } else { throw new IOException(STR); } } | /**
* Reads the raw packet data from the data stream.
*/ | Reads the raw packet data from the data stream | readPacketData | {
"repo_name": "trixmot/mod1",
"path": "build/tmp/recompileMc/sources/net/minecraft/network/play/client/C17PacketCustomPayload.java",
"license": "lgpl-2.1",
"size": 2404
} | [
"java.io.IOException",
"net.minecraft.network.PacketBuffer"
] | import java.io.IOException; import net.minecraft.network.PacketBuffer; | import java.io.*; import net.minecraft.network.*; | [
"java.io",
"net.minecraft.network"
] | java.io; net.minecraft.network; | 1,105,597 |
public void flushBuffer()
throws IOException
{
//assert out!=null
if( out==null ) {
throw new IOException( "Buffer overflow, no sink " + limit + " " +
buff.length );
}
out.realWriteBytes( buff, start, end-start );
end=start;
} | void function() throws IOException { if( out==null ) { throw new IOException( STR + limit + " " + buff.length ); } out.realWriteBytes( buff, start, end-start ); end=start; } | /** Send the buffer to the sink. Called by append() when the limit is reached.
* You can also call it explicitly to force the data to be written.
*
* @throws IOException
*/ | Send the buffer to the sink. Called by append() when the limit is reached. You can also call it explicitly to force the data to be written | flushBuffer | {
"repo_name": "plumer/codana",
"path": "tomcat_files/6.0.43/ByteChunk.java",
"license": "mit",
"size": 23932
} | [
"java.io.IOException"
] | import java.io.IOException; | import java.io.*; | [
"java.io"
] | java.io; | 563,451 |
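The flush-to-sink pattern in isolation, with a plain OutputStream standing in for the chunk's realWriteBytes sink:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class ChunkFlushSketch {
    private final byte[] buff = new byte[4];
    private int end;
    private final OutputStream out;

    ChunkFlushSketch(OutputStream out) { this.out = out; }

    void append(byte b) throws IOException {
        if (end == buff.length) flushBuffer(); // limit reached: push to the sink
        buff[end++] = b;
    }

    void flushBuffer() throws IOException {
        if (out == null) throw new IOException("Buffer overflow, no sink");
        out.write(buff, 0, end);
        end = 0;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        ChunkFlushSketch chunk = new ChunkFlushSketch(sink);
        for (byte b : "hello".getBytes()) chunk.append(b);
        chunk.flushBuffer(); // explicit final flush, as the javadoc suggests
        System.out.println(sink); // hello
    }
}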
void removeAttributes(PerunSession sess, Resource resource, Group group, List<? extends AttributeDefinition> attribute) throws InternalErrorException, WrongAttributeAssignmentException, WrongAttributeValueException, WrongReferenceAttributeValueException; | void removeAttributes(PerunSession sess, Resource resource, Group group, List<? extends AttributeDefinition> attribute) throws InternalErrorException, WrongAttributeAssignmentException, WrongAttributeValueException, WrongReferenceAttributeValueException; | /**
* Batch version of removeAttribute.
* @see cz.metacentrum.perun.core.api.AttributesManager#removeAttribute(PerunSession sess, Resource resource, Group group, AttributeDefinition attribute)
*/ | Batch version of removeAttribute | removeAttributes | {
"repo_name": "jirmauritz/perun",
"path": "perun-core/src/main/java/cz/metacentrum/perun/core/bl/AttributesManagerBl.java",
"license": "bsd-2-clause",
"size": 193360
} | [
"cz.metacentrum.perun.core.api.AttributeDefinition",
"cz.metacentrum.perun.core.api.Group",
"cz.metacentrum.perun.core.api.PerunSession",
"cz.metacentrum.perun.core.api.Resource",
"cz.metacentrum.perun.core.api.exceptions.InternalErrorException",
"cz.metacentrum.perun.core.api.exceptions.WrongAttributeAssignmentException",
"cz.metacentrum.perun.core.api.exceptions.WrongAttributeValueException",
"cz.metacentrum.perun.core.api.exceptions.WrongReferenceAttributeValueException",
"java.util.List"
] | import cz.metacentrum.perun.core.api.AttributeDefinition; import cz.metacentrum.perun.core.api.Group; import cz.metacentrum.perun.core.api.PerunSession; import cz.metacentrum.perun.core.api.Resource; import cz.metacentrum.perun.core.api.exceptions.InternalErrorException; import cz.metacentrum.perun.core.api.exceptions.WrongAttributeAssignmentException; import cz.metacentrum.perun.core.api.exceptions.WrongAttributeValueException; import cz.metacentrum.perun.core.api.exceptions.WrongReferenceAttributeValueException; import java.util.List; | import cz.metacentrum.perun.core.api.*; import cz.metacentrum.perun.core.api.exceptions.*; import java.util.*; | [
"cz.metacentrum.perun",
"java.util"
] | cz.metacentrum.perun; java.util; | 1,296,665 |
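The interface method above is documented only as the batch form of removeAttribute. A hedged sketch of the usual shape of such a batch method — delegating to the single-item call in list order — with simplified hypothetical types, not Perun's actual implementation:

import java.util.List;

// Hypothetical single-item API that a batch form typically delegates to.
interface AttributeRemover {
    void removeAttribute(String resource, String group, String attribute) throws Exception;

    // Batch version: one call per definition, in list order.
    default void removeAttributes(String resource, String group,
                                  List<String> attributes) throws Exception {
        for (String attribute : attributes) {
            removeAttribute(resource, group, attribute);
        }
    }
}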
boolean isIndividual(ItemStack stack); | boolean isIndividual(ItemStack stack); | /**
* Tests the itemstack for genetic information.
*
* @param stack
* @return true if the itemstack is an individual.
*/ | Tests the itemstack for genetic information | isIndividual | {
"repo_name": "Vexatos/PeripheralsPlusPlus",
"path": "src/api/resources/reference/forestry/api/genetics/IAlleleRegistry.java",
"license": "gpl-2.0",
"size": 6100
} | [
"net.minecraft.item.ItemStack"
] | import net.minecraft.item.ItemStack; | import net.minecraft.item.*; | [
"net.minecraft.item"
] | net.minecraft.item; | 1,753,824 |
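isIndividual is a capability probe on an item stack. A hedged usage sketch with invented stand-in types — GameStack and getIndividual are assumptions, not the real Forestry API:

// Hypothetical stand-ins for the Minecraft/Forestry types in the record.
interface GameStack { }
interface AlleleRegistry {
    boolean isIndividual(GameStack stack);  // capability probe, as documented above
    Object getIndividual(GameStack stack);  // hypothetical accessor, valid only after the probe
}

class GeneticsProbe {
    // Guard-before-use: only stacks carrying genetic data may be decoded.
    static Object tryReadGenetics(AlleleRegistry registry, GameStack stack) {
        return registry.isIndividual(stack) ? registry.getIndividual(stack) : null;
    }
}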
public KafkaFuture<Void> unregisterTopology(final KafkaFutureImpl<Void> removeTopologyFuture,
final String topologyName) {
try {
lock();
log.info("Beginning removal of NamedTopology {}, old topology version is {}", topologyName, version.topologyVersion.get());
version.topologyVersion.incrementAndGet();
version.activeTopologyWaiters.add(new TopologyVersionWaiters(topologyVersion(), removeTopologyFuture));
final InternalTopologyBuilder removedBuilder = builders.remove(topologyName);
removedBuilder.fullSourceTopicNames().forEach(allInputTopics::remove);
removedBuilder.allSourcePatternStrings().forEach(allInputTopics::remove);
log.info("Finished removing NamedTopology {}, topology version was updated to {}", topologyName, version.topologyVersion.get());
} catch (final Throwable throwable) {
log.error("Failed to remove NamedTopology {}, please retry.", topologyName);
removeTopologyFuture.completeExceptionally(throwable);
} finally {
unlock();
}
return removeTopologyFuture;
} | KafkaFuture<Void> function(final KafkaFutureImpl<Void> removeTopologyFuture, final String topologyName) { try { lock(); log.info(STR, topologyName, version.topologyVersion.get()); version.topologyVersion.incrementAndGet(); version.activeTopologyWaiters.add(new TopologyVersionWaiters(topologyVersion(), removeTopologyFuture)); final InternalTopologyBuilder removedBuilder = builders.remove(topologyName); removedBuilder.fullSourceTopicNames().forEach(allInputTopics::remove); removedBuilder.allSourcePatternStrings().forEach(allInputTopics::remove); log.info(STR, topologyName, version.topologyVersion.get()); } catch (final Throwable throwable) { log.error(STR, topologyName); removeTopologyFuture.completeExceptionally(throwable); } finally { unlock(); } return removeTopologyFuture; } | /**
* Removes the topology and registers a future that listens for all threads on the older version to see the update
*/ | Removes the topology and registers a future that listens for all threads on the older version to see the update | unregisterTopology | {
"repo_name": "TiVo/kafka",
"path": "streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java",
"license": "apache-2.0",
"size": 26336
} | [
"org.apache.kafka.common.KafkaFuture",
"org.apache.kafka.common.internals.KafkaFutureImpl"
] | import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.internals.KafkaFutureImpl; | import org.apache.kafka.common.*; import org.apache.kafka.common.internals.*; | [
"org.apache.kafka"
] | org.apache.kafka; | 1,358,685 |
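unregisterTopology above follows a lock / bump-version / complete-future-on-error shape. A hedged sketch of that control flow using only JDK types (ReentrantLock, CompletableFuture) in place of Kafka's internal KafkaFutureImpl; the waiter mechanism is simplified to an immediate completion:

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical registry mirroring the lock/version/future pattern above.
class TopologyRegistry {
    private final ReentrantLock lock = new ReentrantLock();
    private final AtomicLong topologyVersion = new AtomicLong();
    private final Map<String, Object> builders = new ConcurrentHashMap<>();

    CompletableFuture<Void> unregister(String name) {
        CompletableFuture<Void> removed = new CompletableFuture<>();
        lock.lock();
        try {
            topologyVersion.incrementAndGet(); // bump version so waiters can observe it
            builders.remove(name);             // drop the topology's builder state
            removed.complete(null);            // simplified: the record completes via waiters
        } catch (Throwable t) {
            removed.completeExceptionally(t);  // surface the failure to the caller
        } finally {
            lock.unlock();
        }
        return removed;
    }
}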
public Node previousNode() {
Node result = previousSibling(currentNode, root);
if (result == null) {
result = parentNode(currentNode);
if (result != null) {
currentNode = result;
}
return result;
}
Node n = lastChild(result);
Node last = n;
while (n != null) {
last = n;
n = lastChild(last);
}
return currentNode = (last != null) ? last : result;
} | Node function() { Node result = previousSibling(currentNode, root); if (result == null) { result = parentNode(currentNode); if (result != null) { currentNode = result; } return result; } Node n = lastChild(result); Node last = n; while (n != null) { last = n; n = lastChild(last); } return currentNode = (last != null) ? last : result; } | /**
* <b>DOM</b>: Implements {@link TreeWalker#previousNode()}.
*/ | DOM: Implements <code>TreeWalker#previousNode()</code> | previousNode | {
"repo_name": "Squeegee/batik",
"path": "sources/org/apache/batik/dom/traversal/DOMTreeWalker.java",
"license": "apache-2.0",
"size": 10667
} | [
"org.w3c.dom.Node"
] | import org.w3c.dom.Node; | import org.w3c.dom.*; | [
"org.w3c.dom"
] | org.w3c.dom; | 122,872 |
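previousNode implements document-order backtracking: take the previous sibling's deepest last descendant, else fall back to the parent. A self-contained sketch of the same algorithm on a toy tree (SimpleNode is hypothetical, not the DOM API):

import java.util.ArrayList;
import java.util.List;

// Toy tree node used to illustrate the traversal in previousNode above.
class SimpleNode {
    SimpleNode parent;
    final List<SimpleNode> children = new ArrayList<>();

    SimpleNode lastChild() {
        return children.isEmpty() ? null : children.get(children.size() - 1);
    }

    SimpleNode previousSibling() {
        if (parent == null) return null;
        int i = parent.children.indexOf(this);
        return i > 0 ? parent.children.get(i - 1) : null;
    }

    // Previous node in document order, mirroring the record above:
    // the previous sibling's deepest last descendant, else the parent.
    SimpleNode previousNode() {
        SimpleNode sib = previousSibling();
        if (sib == null) return parent;
        SimpleNode deepest = sib;
        for (SimpleNode n = sib.lastChild(); n != null; n = n.lastChild()) {
            deepest = n;
        }
        return deepest;
    }
}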
private void lockLocally(
final Collection<KeyCacheObject> keys,
AffinityTopologyVersion topVer
) {
if (log.isDebugEnabled())
log.debug("Before locally locking keys : " + keys);
IgniteInternalFuture<Exception> fut = cctx.colocated().lockAllAsync(cctx,
tx,
threadId,
lockVer,
topVer,
keys,
read,
retval,
timeout,
createTtl,
accessTtl,
filter,
skipStore,
keepBinary); | void function( final Collection<KeyCacheObject> keys, AffinityTopologyVersion topVer ) { if (log.isDebugEnabled()) log.debug(STR + keys); IgniteInternalFuture<Exception> fut = cctx.colocated().lockAllAsync(cctx, tx, threadId, lockVer, topVer, keys, read, retval, timeout, createTtl, accessTtl, filter, skipStore, keepBinary); | /**
* Locks given keys directly through dht cache.
* @param keys Collection of keys.
* @param topVer Topology version to lock on.
*/ | Locks given keys directly through dht cache | lockLocally | {
"repo_name": "andrey-kuznetsov/ignite",
"path": "modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java",
"license": "apache-2.0",
"size": 59925
} | [
"java.util.Collection",
"org.apache.ignite.internal.IgniteInternalFuture",
"org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion",
"org.apache.ignite.internal.processors.cache.KeyCacheObject"
] | import java.util.Collection; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.KeyCacheObject; | import java.util.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.processors.affinity.*; import org.apache.ignite.internal.processors.cache.*; | [
"java.util",
"org.apache.ignite"
] | java.util; org.apache.ignite; | 1,849,390 |
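lockLocally hands the whole key set to an asynchronous lockAllAsync call whose future carries the failure as its value (IgniteInternalFuture<Exception>, with null meaning success). A hedged, JDK-only sketch of that "future of exception" convention; KeyLocker and its methods are invented:

import java.util.Collection;
import java.util.concurrent.CompletableFuture;

// Hypothetical async locker using the record's convention: the future
// completes with null on success, or with the failure as its *value*.
interface KeyLocker {
    boolean tryLock(String key);

    default CompletableFuture<Exception> lockAllAsync(Collection<String> keys) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                for (String key : keys) {
                    if (!tryLock(key)) {
                        return new Exception("Failed to lock key: " + key);
                    }
                }
                return null; // all keys locked
            } catch (Exception e) {
                return e;    // report the failure as the result, not as a throw
            }
        });
    }
}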
@Override
public void setModifiedDate(java.util.Date modifiedDate) {
_workingUnit.setModifiedDate(modifiedDate);
} | void function(java.util.Date modifiedDate) { _workingUnit.setModifiedDate(modifiedDate); } | /**
* Sets the modified date of this working unit.
*
* @param modifiedDate the modified date of this working unit
*/ | Sets the modified date of this working unit | setModifiedDate | {
"repo_name": "hltn/opencps",
"path": "portlets/opencps-portlet/docroot/WEB-INF/service/org/opencps/usermgt/model/WorkingUnitWrapper.java",
"license": "agpl-3.0",
"size": 19474
} | [
"java.util.Date"
] | import java.util.Date; | import java.util.*; | [
"java.util"
] | java.util; | 2,797,932 |
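The record above is a generated wrapper that forwards each call to a wrapped model object. A minimal sketch of that delegation pattern (HasModifiedDate and ModelWrapper are hypothetical names):

import java.util.Date;

interface HasModifiedDate {
    void setModifiedDate(Date modifiedDate);
}

// Hypothetical wrapper delegating to the wrapped model, as in the record above.
class ModelWrapper implements HasModifiedDate {
    private final HasModifiedDate wrapped;

    ModelWrapper(HasModifiedDate wrapped) { this.wrapped = wrapped; }

    @Override
    public void setModifiedDate(Date modifiedDate) {
        wrapped.setModifiedDate(modifiedDate); // pure pass-through
    }
}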
public void testPooledReuseOnClose() throws SQLException
{
// PooledConnection from a ConnectionPoolDataSource
ConnectionPoolDataSource cpds =
J2EEDataSource.getConnectionPoolDataSource();
subtestPooledReuseOnClose(cpds.getPooledConnection());
subtestPooledCloseOnClose(cpds.getPooledConnection());
// DERBY-3401 - removing a callback during a close causes problems.
subtestPooledRemoveListenerOnClose(cpds.getPooledConnection());
subtestPooledAddListenerOnClose(cpds.getPooledConnection());
// PooledConnection from an XDataSource
XADataSource xads = J2EEDataSource.getXADataSource();
subtestPooledReuseOnClose(xads.getXAConnection());
subtestPooledCloseOnClose(xads.getXAConnection());
// DERBY-3401 - removing a callback during a close causes problems.
subtestPooledRemoveListenerOnClose(xads.getXAConnection());
subtestPooledAddListenerOnClose(xads.getXAConnection());
} | void function() throws SQLException { ConnectionPoolDataSource cpds = J2EEDataSource.getConnectionPoolDataSource(); subtestPooledReuseOnClose(cpds.getPooledConnection()); subtestPooledCloseOnClose(cpds.getPooledConnection()); subtestPooledRemoveListenerOnClose(cpds.getPooledConnection()); subtestPooledAddListenerOnClose(cpds.getPooledConnection()); XADataSource xads = J2EEDataSource.getXADataSource(); subtestPooledReuseOnClose(xads.getXAConnection()); subtestPooledCloseOnClose(xads.getXAConnection()); subtestPooledRemoveListenerOnClose(xads.getXAConnection()); subtestPooledAddListenerOnClose(xads.getXAConnection()); } | /**
* Test that a PooledConnection can be reused and closed
* (separately) during the close event raised by the
* closing of its logical connection.
* DERBY-2142.
* @throws SQLException
*
*/ | Test that a PooledConnection can be reused and closed (separately) during the close event raised by the closing of its logical connection. DERBY-2142 | testPooledReuseOnClose | {
"repo_name": "apache/derby",
"path": "java/org.apache.derby.tests/org/apache/derbyTesting/functionTests/tests/jdbcapi/J2EEDataSourceTest.java",
"license": "apache-2.0",
"size": 186130
} | [
"java.sql.SQLException",
"javax.sql.ConnectionPoolDataSource",
"javax.sql.XADataSource",
"org.apache.derbyTesting.junit.J2EEDataSource"
] | import java.sql.SQLException; import javax.sql.ConnectionPoolDataSource; import javax.sql.XADataSource; import org.apache.derbyTesting.junit.J2EEDataSource; | import java.sql.*; import javax.sql.*; import org.apache.*; | [
"java.sql",
"javax.sql",
"org.apache"
] | java.sql; javax.sql; org.apache; | 1,054,659 |
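The test above exercises reuse of a PooledConnection during the close event of its logical connection. A hedged sketch of how such a close callback is wired with the standard javax.sql listener API; the reuse-inside-callback detail from DERBY-2142 is deliberately elided:

import java.sql.Connection;
import java.sql.SQLException;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;

class CloseEventExample {
    static void demo(PooledConnection pc) throws SQLException {
        pc.addConnectionEventListener(new ConnectionEventListener() {
            @Override
            public void connectionClosed(ConnectionEvent event) {
                // Fired when the *logical* connection is closed; a pool would
                // return the PooledConnection to service (or reuse it) here.
            }

            @Override
            public void connectionErrorOccurred(ConnectionEvent event) {
                // Fatal error: a pool would discard the physical connection.
            }
        });

        Connection logical = pc.getConnection(); // hand out a logical connection
        logical.close();                         // triggers connectionClosed above
    }
}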
@Override
public void setNormalBuffer(FloatBuffer buff) {
logger.warning("SharedMesh does not allow the manipulation"
+ "of the the mesh data.");
} | void function(FloatBuffer buff) { logger.warning(STR + STR); } | /**
* <code>setNormalBuffer</code> is not supported by SharedMesh.
*
* @param buff
* the new normal buffer.
*/ | <code>setNormalBuffer</code> is not supported by SharedMesh | setNormalBuffer | {
"repo_name": "accelazh/ThreeBodyProblem",
"path": "lib/jME2_0_1-Stable/src/com/jme/scene/SharedMesh.java",
"license": "mit",
"size": 21654
} | [
"java.nio.FloatBuffer"
] | import java.nio.FloatBuffer; | import java.nio.*; | [
"java.nio"
] | java.nio; | 475,486 |
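setNormalBuffer above rejects mutation of shared mesh data by logging a warning instead of throwing. A minimal sketch of that read-only-view convention (SharedView is a hypothetical class):

import java.nio.FloatBuffer;
import java.util.logging.Logger;

// Hypothetical shared view that ignores mutators, matching the record above.
class SharedView {
    private static final Logger logger = Logger.getLogger(SharedView.class.getName());
    private final FloatBuffer normals; // owned by the shared target mesh

    SharedView(FloatBuffer normals) { this.normals = normals; }

    FloatBuffer getNormalBuffer() { return normals; }

    void setNormalBuffer(FloatBuffer buff) {
        // Deliberately a no-op: shared views must not mutate the target's data.
        logger.warning("SharedView does not allow the manipulation of the mesh data.");
    }
}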