name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68) |
---|---|---|
hbase_PrivateCellUtil_compare | /**
* Used when a cell needs to be compared with a key byte[], such as finding the index from
* the index block or bloom keys from the bloom blocks. This byte[] is expected to be serialized in
* the KeyValue serialization format; if the KeyValue (Cell's) serialization format changes, this
* method cannot be used.
* @param comparator the {@link CellComparator} to use for comparison
* @param left the cell to be compared
* @param key the serialized key part of a KeyValue
* @param offset the offset in the key byte[]
* @param length the length of the key byte[]
* @return an int greater than 0 if left is greater than right, less than 0 if left is less
* than right, and 0 if left is equal to right
*/
public static final int compare(CellComparator comparator, Cell left, byte[] key, int offset,
int length) {
// row
short rrowlength = Bytes.toShort(key, offset);
int c = comparator.compareRows(left, key, offset + Bytes.SIZEOF_SHORT, rrowlength);
if (c != 0) return c;
// Compare the rest of the two KVs without making any assumptions about
// the common prefix. This function will not compare rows anyway, so we
// don't need to tell it that the common prefix includes the row.
return compareWithoutRow(comparator, left, key, offset, length, rrowlength);
} | 3.68 |
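The row comparison above relies on the KeyValue key layout starting with a 2-byte, big-endian row length (what `Bytes.toShort` reads). A minimal standalone sketch of just that decoding step, using plain JDK classes; the helper and sample bytes are hypothetical:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class RowLengthDecoding {
    /** Reads a big-endian short at the given offset, as a KeyValue row length would be read. */
    static short readRowLength(byte[] key, int offset) {
        return ByteBuffer.wrap(key, offset, Short.BYTES).order(ByteOrder.BIG_ENDIAN).getShort();
    }

    public static void main(String[] args) {
        // A 2-byte row length of 3 followed by the row bytes "abc".
        byte[] serialized = {0x00, 0x03, 'a', 'b', 'c'};
        short rowLength = readRowLength(serialized, 0);
        System.out.println("row length = " + rowLength);                 // 3
        System.out.println("row = " + new String(serialized, 2, rowLength)); // abc
    }
}
```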
hadoop_RBFMetrics_locateGetter | /**
* Finds the appropriate getter for a field name.
*
* @param record The record whose class is searched for a matching getter.
* @param fieldName The legacy name of the field.
* @return The matching getter or null if not found.
*/
private static Method locateGetter(BaseRecord record, String fieldName) {
for (Method m : record.getClass().getMethods()) {
if (m.getName().equalsIgnoreCase("get" + fieldName)) {
return m;
}
}
return null;
} | 3.68 |
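As a rough illustration of the lookup above, here is a self-contained sketch using plain JDK reflection; the Person bean and its field name are invented for the example:

```java
import java.lang.reflect.Method;

public class GetterLookupDemo {
    /** Example bean; class and field names are hypothetical. */
    public static class Person {
        public String getDateCreated() { return "2024-01-01"; }
    }

    /** Mirrors the lookup: match any public method named "get" + fieldName, ignoring case. */
    static Method locateGetter(Object record, String fieldName) {
        for (Method m : record.getClass().getMethods()) {
            if (m.getName().equalsIgnoreCase("get" + fieldName)) {
                return m;
            }
        }
        return null;
    }

    public static void main(String[] args) throws Exception {
        Method getter = locateGetter(new Person(), "datecreated");
        System.out.println(getter.getName() + " -> " + getter.invoke(new Person()));
    }
}
```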
flink_RunLengthDecoder_readNextGroup | /** Reads the next group. */
void readNextGroup() {
try {
int header = readUnsignedVarInt();
this.mode = (header & 1) == 0 ? MODE.RLE : MODE.PACKED;
switch (mode) {
case RLE:
this.currentCount = header >>> 1;
this.currentValue = readIntLittleEndianPaddedOnBitWidth();
return;
case PACKED:
int numGroups = header >>> 1;
this.currentCount = numGroups * 8;
if (this.currentBuffer.length < this.currentCount) {
this.currentBuffer = new int[this.currentCount];
}
currentBufferIdx = 0;
int valueIndex = 0;
while (valueIndex < this.currentCount) {
// values are bit packed 8 at a time, so reading bitWidth will always work
ByteBuffer buffer = in.slice(bitWidth);
if (buffer.hasArray()) {
// byte array has better performance than ByteBuffer
this.packer.unpack8Values(
buffer.array(),
buffer.arrayOffset() + buffer.position(),
this.currentBuffer,
valueIndex);
} else {
this.packer.unpack8Values(
buffer, buffer.position(), this.currentBuffer, valueIndex);
}
valueIndex += 8;
}
return;
default:
throw new ParquetDecodingException("not a valid mode " + this.mode);
}
} catch (IOException e) {
throw new ParquetDecodingException("Failed to read from input stream", e);
}
} | 3.68 |
framework_RangeValidator_setMinValueIncluded | /**
* Sets whether the minimum value is part of the accepted range.
*
* @param minValueIncluded
* true if the minimum value should be part of the range, false
* otherwise
*/
public void setMinValueIncluded(boolean minValueIncluded) {
this.minValueIncluded = minValueIncluded;
} | 3.68 |
hbase_BalancerClusterState_registerRegion | /** Helper for Cluster constructor to handle a region */
private void registerRegion(RegionInfo region, int regionIndex, int serverIndex,
Map<String, Deque<BalancerRegionLoad>> loads, RegionHDFSBlockLocationFinder regionFinder) {
String tableName = region.getTable().getNameAsString();
if (!tablesToIndex.containsKey(tableName)) {
tables.add(tableName);
tablesToIndex.put(tableName, tablesToIndex.size());
}
int tableIndex = tablesToIndex.get(tableName);
regionsToIndex.put(region, regionIndex);
regions[regionIndex] = region;
regionIndexToServerIndex[regionIndex] = serverIndex;
initialRegionIndexToServerIndex[regionIndex] = serverIndex;
regionIndexToTableIndex[regionIndex] = tableIndex;
// region load
if (loads != null) {
Deque<BalancerRegionLoad> rl = loads.get(region.getRegionNameAsString());
// The lookup may have failed if the RegionLoad is keyed by the other region name
if (rl == null) {
// Try getting the region load using encoded name.
rl = loads.get(region.getEncodedName());
}
regionLoads[regionIndex] = rl;
}
if (regionFinder != null) {
// region location
List<ServerName> loc = regionFinder.getTopBlockLocations(region);
regionLocations[regionIndex] = new int[loc.size()];
for (int i = 0; i < loc.size(); i++) {
regionLocations[regionIndex][i] = loc.get(i) == null
? -1
: (serversToIndex.get(loc.get(i).getAddress()) == null
? -1
: serversToIndex.get(loc.get(i).getAddress()));
}
}
} | 3.68 |
flink_StreamExecutionEnvironment_getConfig | /** Gets the config object. */
public ExecutionConfig getConfig() {
return config;
} | 3.68 |
hadoop_AbfsClientThrottlingAnalyzer_addBytesTransferred | /**
* Updates metrics with results from the current storage operation.
*
* @param count The count of bytes transferred.
* @param isFailedOperation True if the operation failed; otherwise false.
*/
public void addBytesTransferred(long count, boolean isFailedOperation) {
AbfsOperationMetrics metrics = blobMetrics.get();
if (isFailedOperation) {
metrics.addBytesFailed(count);
metrics.incrementOperationsFailed();
} else {
metrics.addBytesSuccessful(count);
metrics.incrementOperationsSuccessful();
}
blobMetrics.set(metrics);
} | 3.68 |
morf_SqlServerMetaDataProvider_isPrimaryKeyIndex | /**
* @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#isPrimaryKeyIndex(RealName)
*/
@Override
protected boolean isPrimaryKeyIndex(RealName indexName) {
return indexName.getDbName().endsWith("_PK");
} | 3.68 |
framework_AbstractComponent_setVisible | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Component#setVisible(boolean)
*/
@Override
public void setVisible(boolean visible) {
if (isVisible() == visible) {
return;
}
this.visible = visible;
if (visible) {
/*
* If the visibility state is toggled from invisible to visible it
* affects all children (the whole hierarchy) in addition to this
* component.
*/
markAsDirtyRecursive();
}
if (getParent() != null) {
// Must always repaint the parent (at least the hierarchy) when
// visibility of a child component changes.
getParent().markAsDirty();
}
} | 3.68 |
morf_FieldLiteral_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return (dataType.equals(DataType.STRING)
? "\"" + value + "\""
: value == null ? "NULL" : value) + super.toString();
} | 3.68 |
framework_FileUploadHandler_removePath | /**
* Removes any possible path information from the filename and returns the
* plain filename. Both / and \\ are treated as separators.
*
* @param filename the filename, possibly including path information
* @return the filename without any leading path
*/
private static String removePath(String filename) {
if (filename != null) {
filename = filename.replaceAll("^.*[/\\\\]", "");
}
return filename;
} | 3.68 |
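A quick standalone check of the regex used above (`^.*[/\\]` in regex terms), which greedily strips everything up to and including the last / or \ separator; the sample filenames are invented:

```java
public class RemovePathDemo {
    /** Same idea as removePath: drop everything up to the last / or \ separator. */
    static String removePath(String filename) {
        return filename == null ? null : filename.replaceAll("^.*[/\\\\]", "");
    }

    public static void main(String[] args) {
        System.out.println(removePath("C:\\Users\\alice\\report.pdf")); // report.pdf
        System.out.println(removePath("/tmp/uploads/photo.png"));       // photo.png
        System.out.println(removePath("plain.txt"));                    // plain.txt
    }
}
```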
Activiti_BaseEntityEventListener_onEntityEvent | /**
* Called when an event is received that is not a create, an update, or a delete.
*/
protected void onEntityEvent(ActivitiEvent event) {
// Default implementation is a NO-OP
} | 3.68 |
hudi_HoodieAvroUtils_wrapValueIntoAvro | /**
* Wraps a value into Avro type wrapper.
*
* @param value Java value.
* @return A wrapped value with Avro type wrapper.
*/
public static Object wrapValueIntoAvro(Comparable<?> value) {
if (value == null) {
return null;
} else if (value instanceof Date || value instanceof LocalDate) {
// NOTE: Due to breaking changes in code-gen b/w Avro 1.8.2 and 1.10, we can't
// rely on logical types to do proper encoding of the native Java types,
// and hereby have to encode value manually
LocalDate localDate = value instanceof LocalDate
? (LocalDate) value
: ((Date) value).toLocalDate();
return DateWrapper.newBuilder(DATE_WRAPPER_BUILDER_STUB.get())
.setValue((int) localDate.toEpochDay())
.build();
} else if (value instanceof BigDecimal) {
Schema valueSchema = DecimalWrapper.SCHEMA$.getField("value").schema();
BigDecimal upcastDecimal = tryUpcastDecimal((BigDecimal) value, (LogicalTypes.Decimal) valueSchema.getLogicalType());
return DecimalWrapper.newBuilder(DECIMAL_WRAPPER_BUILDER_STUB.get())
.setValue(AVRO_DECIMAL_CONVERSION.toBytes(upcastDecimal, valueSchema, valueSchema.getLogicalType()))
.build();
} else if (value instanceof Timestamp) {
// NOTE: Due to breaking changes in code-gen b/w Avro 1.8.2 and 1.10, we can't
// rely on logical types to do proper encoding of the native Java types,
// and hereby have to encode value manually
Instant instant = ((Timestamp) value).toInstant();
return TimestampMicrosWrapper.newBuilder(TIMESTAMP_MICROS_WRAPPER_BUILDER_STUB.get())
.setValue(instantToMicros(instant))
.build();
} else if (value instanceof Boolean) {
return BooleanWrapper.newBuilder(BOOLEAN_WRAPPER_BUILDER_STUB.get()).setValue((Boolean) value).build();
} else if (value instanceof Integer) {
return IntWrapper.newBuilder(INT_WRAPPER_BUILDER_STUB.get()).setValue((Integer) value).build();
} else if (value instanceof Long) {
return LongWrapper.newBuilder(LONG_WRAPPER_BUILDER_STUB.get()).setValue((Long) value).build();
} else if (value instanceof Float) {
return FloatWrapper.newBuilder(FLOAT_WRAPPER_BUILDER_STUB.get()).setValue((Float) value).build();
} else if (value instanceof Double) {
return DoubleWrapper.newBuilder(DOUBLE_WRAPPER_BUILDER_STUB.get()).setValue((Double) value).build();
} else if (value instanceof ByteBuffer) {
return BytesWrapper.newBuilder(BYTES_WRAPPER_BUILDER_STUB.get()).setValue((ByteBuffer) value).build();
} else if (value instanceof String || value instanceof Utf8) {
return StringWrapper.newBuilder(STRING_WRAPPER_BUILDER_STUB.get()).setValue(value.toString()).build();
} else {
throw new UnsupportedOperationException(String.format("Unsupported type of the value (%s)", value.getClass()));
}
} | 3.68 |
hbase_SplitTableRegionProcedure_splitStoreFiles | /**
* Creates the split directory and splits the parent region's store files.
* @param env MasterProcedureEnv
* @return a pair of reference file paths for daughter region A and daughter region B
*/
private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv env,
final HRegionFileSystem regionFs) throws IOException {
final Configuration conf = env.getMasterConfiguration();
TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
// The following code sets up a thread pool executor with as many slots as
// there's files to split. It then fires up everything, waits for
// completion and finally checks for any exception
//
// Note: From HBASE-26187, splitStoreFiles now creates daughter region dirs straight under the
// table dir. In case of failure, the proc would go through this again, already existing
// region dirs and split files would just be ignored, new split files should get created.
int nbFiles = 0;
final Map<String, Collection<StoreFileInfo>> files =
new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
String family = cfd.getNameAsString();
StoreFileTracker tracker =
StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs);
Collection<StoreFileInfo> sfis = tracker.load();
if (sfis == null) {
continue;
}
Collection<StoreFileInfo> filteredSfis = null;
for (StoreFileInfo sfi : sfis) {
// Filter. There is a lag cleaning up compacted reference files. They get cleared
// after a delay in case outstanding Scanners still have references. Because of this,
// the listing of the Store content may have straggler reference files. Skip these.
// It should be safe to skip references at this point because we checked above with
// the region whether it thinks it is splittable, and if we are here, it thinks it is
// splittable.
if (sfi.isReference()) {
LOG.info("Skipping split of " + sfi + "; presuming ready for archiving.");
continue;
}
if (filteredSfis == null) {
filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
files.put(family, filteredSfis);
}
filteredSfis.add(sfi);
nbFiles++;
}
}
if (nbFiles == 0) {
// no file needs to be split.
return new Pair<>(Collections.emptyList(), Collections.emptyList());
}
// Max #threads is the smaller of the number of storefiles or the default max determined above.
int maxThreads = Math.min(
conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX,
conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)),
nbFiles);
LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region="
+ getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d").setDaemon(true)
.setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
// Split each store file.
for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
byte[] familyName = Bytes.toBytes(e.getKey());
final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
final Collection<StoreFileInfo> storeFiles = e.getValue();
if (storeFiles != null && storeFiles.size() > 0) {
final Configuration storeConfiguration =
StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd);
for (StoreFileInfo storeFileInfo : storeFiles) {
// As this procedure is running on master, use CacheConfig.DISABLED means
// don't cache any block.
// We also need to pass through a suitable CompoundConfiguration as if this
// is running in a regionserver's Store context, or we might not be able
// to read the hfiles.
storeFileInfo.setConf(storeConfiguration);
StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName,
new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED));
futures.add(threadPool.submit(sfs));
}
}
}
// Shutdown the pool
threadPool.shutdown();
// Wait for all the tasks to finish.
// When splits ran on the RegionServer, the how-long-to-wait configuration was named
// hbase.regionserver.fileSplitTimeout. If set, use its value.
long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout",
conf.getLong("hbase.regionserver.fileSplitTimeout", 600000));
try {
boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
if (stillRunning) {
threadPool.shutdownNow();
// wait for the thread to shutdown completely.
while (!threadPool.isTerminated()) {
Thread.sleep(50);
}
throw new IOException(
"Took too long to split the" + " files and create the references, aborting split");
}
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
List<Path> daughterA = new ArrayList<>();
List<Path> daughterB = new ArrayList<>();
// Look for any exception
for (Future<Pair<Path, Path>> future : futures) {
try {
Pair<Path, Path> p = future.get();
if (p.getFirst() != null) {
daughterA.add(p.getFirst());
}
if (p.getSecond() != null) {
daughterB.add(p.getSecond());
}
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("pid=" + getProcId() + " split storefiles for region "
+ getParentRegion().getShortNameToLog() + " Daughter A: " + daughterA
+ " storefiles, Daughter B: " + daughterB + " storefiles.");
}
return new Pair<>(daughterA, daughterB);
} | 3.68 |
framework_VLayoutSlot_positionInDirection | /**
* Position the slot vertically and set the height and the bottom margin, or
* horizontally and set the width and the right margin, depending on the
* indicated direction.
*
* @param currentLocation
* the top position or the left position for this slot depending
* on the indicated direction
* @param allocatedSpace
* how much space is available for this slot in the indicated
* direction
* @param endingMargin
* the bottom margin or the right margin this slot should have
* depending on the indicated direction (removed if negative)
* @param isVertical
* {@code true} if the positioning should be done vertically,
* {@code false} if horizontally
*/
public void positionInDirection(double currentLocation,
double allocatedSpace, double endingMargin, boolean isVertical) {
if (isVertical) {
positionVertically(currentLocation, allocatedSpace, endingMargin);
} else {
positionHorizontally(currentLocation, allocatedSpace, endingMargin);
}
} | 3.68 |
hadoop_ReencryptionHandler_reencryptEncryptionZone | /**
* Re-encrypts a zone by recursively iterating all paths inside the zone,
* in lexicographic order.
* Files are re-encrypted, and subdirs are processed during iteration.
*
* @param zoneId the Zone's id.
* @throws IOException
* @throws InterruptedException
*/
void reencryptEncryptionZone(final long zoneId)
throws IOException, InterruptedException {
throttleTimerAll.reset().start();
throttleTimerLocked.reset();
final INode zoneNode;
final ZoneReencryptionStatus zs;
traverser.readLock();
try {
zoneNode = dir.getInode(zoneId);
// start re-encrypting the zone from the beginning
if (zoneNode == null) {
LOG.info("Directory with id {} removed during re-encrypt, skipping",
zoneId);
return;
}
if (!zoneNode.isDirectory()) {
LOG.info("Cannot re-encrypt directory with id {} because it's not a"
+ " directory.", zoneId);
return;
}
zs = getReencryptionStatus().getZoneStatus(zoneId);
assert zs != null;
// Only log the costly FullPathName here once, and use the id elsewhere.
LOG.info("Re-encrypting zone {}(id={})", zoneNode.getFullPathName(),
zoneId);
if (zs.getLastCheckpointFile() == null) {
// new re-encryption
traverser.traverseDir(zoneNode.asDirectory(), zoneId,
HdfsFileStatus.EMPTY_NAME,
new ZoneTraverseInfo(zs.getEzKeyVersionName()));
} else {
// resuming from a past re-encryption
restoreFromLastProcessedFile(zoneId, zs);
}
// save the last batch and mark complete
traverser.submitCurrentBatch(zoneId);
LOG.info("Submission completed of zone {} for re-encryption.", zoneId);
reencryptionUpdater.markZoneSubmissionDone(zoneId);
} finally {
traverser.readUnlock();
}
} | 3.68 |
dubbo_MessageFormatter_arrayFormat | /**
* Same principle as the {@link #format(String, Object)} and
* {@link #format(String, Object, Object)} methods except that any number of
* arguments can be passed in an array.
*
* @param messagePattern The message pattern which will be parsed and formatted
* @param argArray An array of arguments to be substituted in place of formatting
* anchors
* @return The formatted message
*/
static FormattingTuple arrayFormat(final String messagePattern, final Object[] argArray) {
Throwable throwableCandidate = getThrowableCandidate(argArray);
if (messagePattern == null) {
return new FormattingTuple(null, argArray, throwableCandidate);
}
if (argArray == null) {
return new FormattingTuple(messagePattern);
}
int i = 0;
int j;
StringBuffer sbuf = new StringBuffer(messagePattern.length() + 50);
int l;
for (l = 0; l < argArray.length; l++) {
j = messagePattern.indexOf(DELIM_STR, i);
if (j == -1) {
// no more variables
if (i == 0) { // this is a simple string
return new FormattingTuple(messagePattern, argArray, throwableCandidate);
} else { // add the tail string which contains no variables and return
// the result.
sbuf.append(messagePattern.substring(i));
return new FormattingTuple(sbuf.toString(), argArray, throwableCandidate);
}
} else {
if (isEscapedDelimeter(messagePattern, j)) {
if (!isDoubleEscaped(messagePattern, j)) {
l--; // DELIM_START was escaped, thus should not be incremented
sbuf.append(messagePattern, i, j - 1);
sbuf.append(DELIM_START);
i = j + 1;
} else {
// The escape character preceding the delimiter start is
// itself escaped: "abc x:\\{}"
// we have to consume one backward slash
sbuf.append(messagePattern, i, j - 1);
deeplyAppendParameter(sbuf, argArray[l], new HashMap<Object[], Void>());
i = j + 2;
}
} else {
// normal case
sbuf.append(messagePattern, i, j);
deeplyAppendParameter(sbuf, argArray[l], new HashMap<Object[], Void>());
i = j + 2;
}
}
}
// append the characters following the last {} pair.
sbuf.append(messagePattern.substring(i));
if (l < argArray.length - 1) {
return new FormattingTuple(sbuf.toString(), argArray, throwableCandidate);
} else {
return new FormattingTuple(sbuf.toString(), argArray, null);
}
} | 3.68 |
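For intuition, here is a much-reduced sketch of the `{}` anchor substitution; it deliberately ignores the escaping rules and throwable handling the real method covers, and it is not the actual MessageFormatter API:

```java
public class SimpleMessageFormat {
    /** Replaces each "{}" anchor with the next argument, left to right; no escape handling. */
    static String format(String pattern, Object... args) {
        StringBuilder sb = new StringBuilder(pattern.length() + 32);
        int from = 0;
        for (Object arg : args) {
            int anchor = pattern.indexOf("{}", from);
            if (anchor == -1) {
                break; // more arguments than anchors: ignore the rest
            }
            sb.append(pattern, from, anchor).append(arg);
            from = anchor + 2;
        }
        return sb.append(pattern.substring(from)).toString();
    }

    public static void main(String[] args) {
        System.out.println(format("Hi {}, you have {} new messages", "alice", 3));
        // Hi alice, you have 3 new messages
    }
}
```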
framework_ConnectorTracker_getCurrentSyncId | /**
* Gets the most recently generated server sync id.
* <p>
* The sync id is incremented by one whenever a new response is being
* written. This id is then sent over to the client. The client then adds
* the most recent sync id to each communication packet it sends back to the
* server. This way, the server knows what state the client was in when the
* packet was sent. If the state has changed on the server side since then,
* the server can try to adjust the way it handles the actions from the
* client side.
* <p>
* The sync id value <code>-1</code> is ignored to facilitate testing with
* pre-recorded requests.
*
* @see #setWritingResponse(boolean)
* @see #connectorWasPresentAsRequestWasSent(String, long)
* @since 7.2
* @return the current sync id
*/
public int getCurrentSyncId() {
return currentSyncId;
} | 3.68 |
hadoop_HadoopLogsAnalyzer_readBalancedLine | // This can return either the Pair of the !!file line and the XMLconf
// file, or null and an ordinary line. Returns just null if there's
// no more input.
private Pair<String, String> readBalancedLine() throws IOException {
String line = readCountedLine();
if (line == null) {
return null;
}
while (line.indexOf('\f') > 0) {
line = line.substring(line.indexOf('\f'));
}
if (line.length() != 0 && line.charAt(0) == '\f') {
String subjectLine = readCountedLine();
if (subjectLine != null && subjectLine.length() != 0
&& apparentConfFileHeader(line) && apparentXMLFileStart(subjectLine)) {
StringBuilder sb = new StringBuilder();
while (subjectLine != null && subjectLine.indexOf('\f') > 0) {
subjectLine = subjectLine.substring(subjectLine.indexOf('\f'));
}
while (subjectLine != null
&& (subjectLine.length() == 0 || subjectLine.charAt(0) != '\f')) {
sb.append(subjectLine);
subjectLine = readCountedLine();
}
if (subjectLine != null) {
unreadCountedLine(subjectLine);
}
return new Pair<String, String>(line, sb.toString());
}
// here we had a file line, but it introduced a log segment, not
// a conf file. We want to just ignore the file line.
return readBalancedLine();
}
String endlineString = (version == 0 ? " " : " .");
if (line.length() < endlineString.length()) {
return new Pair<String, String>(null, line);
}
if (!endlineString.equals(line.substring(line.length()
- endlineString.length()))) {
StringBuilder sb = new StringBuilder(line);
String addedLine;
do {
addedLine = readCountedLine();
if (addedLine == null) {
return new Pair<String, String>(null, sb.toString());
}
while (addedLine.indexOf('\f') > 0) {
addedLine = addedLine.substring(addedLine.indexOf('\f'));
}
if (addedLine.length() > 0 && addedLine.charAt(0) == '\f') {
unreadCountedLine(addedLine);
return new Pair<String, String>(null, sb.toString());
}
sb.append("\n");
sb.append(addedLine);
} while (!endlineString.equals(addedLine.substring(addedLine.length()
- endlineString.length())));
line = sb.toString();
}
return new Pair<String, String>(null, line);
} | 3.68 |
framework_RefreshRenderedCellsOnlyIfAttached_removeTableParent | /**
* Removes the Table's parent component.
*
*/
protected void removeTableParent() {
removeComponent(layout);
} | 3.68 |
hbase_DumpRegionServerMetrics_dumpMetrics | /**
* Dump out a subset of regionserver mbeans only, not all of them, as json on System.out.
*/
public static String dumpMetrics() throws MalformedObjectNameException, IOException {
StringWriter sw = new StringWriter(1024 * 100); // Guess this size
try (PrintWriter writer = new PrintWriter(sw)) {
JSONBean dumper = new JSONBean();
try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
jsonBeanWriter.write(mbeanServer, new ObjectName("java.lang:type=Memory"), null, false);
jsonBeanWriter.write(mbeanServer,
new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=IPC"), null, false);
jsonBeanWriter.write(mbeanServer,
new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=Replication"), null, false);
jsonBeanWriter.write(mbeanServer,
new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=Server"), null, false);
}
}
sw.close();
return sw.toString();
} | 3.68 |
hadoop_OBSFileSystem_getUri | /**
* Return a URI whose scheme and authority identify this FileSystem.
*
* @return the URI of this filesystem.
*/
@Override
public URI getUri() {
return uri;
} | 3.68 |
hbase_SimpleRpcServerResponder_processAllResponses | /**
* Process all the responses for this connection
* @return true if all the calls were processed or someone else is doing it; false if there
* is still some work to do. In this case, we expect the caller to delay us.
*/
private boolean processAllResponses(final SimpleServerRpcConnection connection)
throws IOException {
// We want only one writer on the channel for a connection at a time.
connection.responseWriteLock.lock();
try {
for (int i = 0; i < 20; i++) {
// protection if some handlers manage to need all the responder
RpcResponse resp = connection.responseQueue.pollFirst();
if (resp == null) {
return true;
}
if (!processResponse(connection, resp)) {
connection.responseQueue.addFirst(resp);
return false;
}
}
} finally {
connection.responseWriteLock.unlock();
}
return connection.responseQueue.isEmpty();
} | 3.68 |
hadoop_HsNavBlock_render | /*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
*/
@Override protected void render(Block html) {
DIV<Hamlet> nav = html.
div("#nav").
h3("Application").
ul().
li().a(url("about"), "About").__().
li().a(url("app"), "Jobs").__().__();
if (app.getJob() != null) {
String jobid = MRApps.toString(app.getJob().getID());
nav.
h3("Job").
ul().
li().a(url("job", jobid), "Overview").__().
li().a(url("jobcounters", jobid), "Counters").__().
li().a(url("conf", jobid), "Configuration").__().
li().a(url("tasks", jobid, "m"), "Map tasks").__().
li().a(url("tasks", jobid, "r"), "Reduce tasks").__().__();
if (app.getTask() != null) {
String taskid = MRApps.toString(app.getTask().getID());
nav.
h3("Task").
ul().
li().a(url("task", taskid), "Task Overview").__().
li().a(url("taskcounters", taskid), "Counters").__().__();
}
}
Hamlet.UL<DIV<Hamlet>> tools = WebPageUtils.appendToolSection(nav, conf);
if (tools != null) {
tools.__().__();
}
} | 3.68 |
hadoop_JobMetaData_createSkyline | /**
* Normalizes container launch/release times and generates the
* {@link ResourceSkyline}.
*/
public final void createSkyline() {
final long jobSubmissionTime = resourceSkyline.getJobSubmissionTime();
Resource containerSpec = resourceSkyline.getContainerSpec();
final TreeMap<Long, Resource> resourceOverTime = new TreeMap<>();
final RLESparseResourceAllocation skylineList =
new RLESparseResourceAllocation(resourceOverTime,
new DefaultResourceCalculator());
resourceSkyline.setSkylineList(skylineList);
if (containerSpec == null) {
// if RmParser fails to extract container resource spec from logs, we will
// statically set
// it to be <1core, 1GB>
containerSpec = Resource.newInstance(1024, 1);
}
resourceSkyline.setContainerSpec(containerSpec);
for (final Map.Entry<String, Long> entry : rawStart.entrySet()) {
final long timeStart = entry.getValue();
final Long timeEnd = rawEnd.get(entry.getKey());
if (timeEnd == null) {
LOGGER.warn("container release time not found for {}.", entry.getKey());
} else {
final ReservationInterval riAdd =
new ReservationInterval((timeStart - jobSubmissionTime) / 1000,
(timeEnd - jobSubmissionTime) / 1000);
resourceSkyline.getSkylineList().addInterval(riAdd, containerSpec);
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertWithAutoGeneratedId | /**
* Tests that an insert from a select works when no defaults are supplied.
*/
@Test
public void testInsertWithAutoGeneratedId() {
SelectStatement sourceStmt = new SelectStatement(new FieldReference("version"),
new FieldReference(STRING_FIELD))
.from(new TableReference(OTHER_TABLE));
InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE))
.fields(new FieldReference("version"),
new FieldReference(STRING_FIELD))
.from(sourceStmt);
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertSQLEquals("Insert from a select with no default for id", expectedAutoGenerateIdStatement(), sql);
} | 3.68 |
morf_AddIndex_getNewIndex | /**
* @return The new index.
*/
public Index getNewIndex() {
return newIndex;
} | 3.68 |
flink_ResourceCounter_withResources | /**
* Creates a resource counter with the specified set of resources.
*
* @param resources resources with which to initialize the resource counter
* @return ResourceCounter which contains the specified set of resources
*/
public static ResourceCounter withResources(Map<ResourceProfile, Integer> resources) {
return new ResourceCounter(new HashMap<>(resources));
} | 3.68 |
hadoop_Retryer_updateStatus | /**
* Returns true if status update interval has been reached.
*
* @return true if status update interval has been reached.
*/
public boolean updateStatus() {
return (this.delay > 0) && this.delay % this.statusUpdateInterval == 0;
} | 3.68 |
dubbo_MetadataInfo_getServiceInfo | /**
* Get service info of an interface with specified group, version and protocol
* @param protocolServiceKey key is of format '{group}/{interface name}:{version}:{protocol}'
* @return the specific service info related to protocolServiceKey
*/
public ServiceInfo getServiceInfo(String protocolServiceKey) {
return services.get(protocolServiceKey);
} | 3.68 |
flink_UserDefinedFunctionHelper_instantiateFunction | /**
* Instantiates a {@link UserDefinedFunction} assuming a JVM function with default constructor.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public static UserDefinedFunction instantiateFunction(Class<?> functionClass) {
if (!UserDefinedFunction.class.isAssignableFrom(functionClass)) {
throw new ValidationException(
String.format(
"Function '%s' does not extend from '%s'.",
functionClass.getName(), UserDefinedFunction.class.getName()));
}
validateClass((Class) functionClass, true);
try {
return (UserDefinedFunction) functionClass.newInstance();
} catch (Exception e) {
throw new ValidationException(
String.format(
"Cannot instantiate user-defined function class '%s'.",
functionClass.getName()),
e);
}
} | 3.68 |
morf_SchemaValidator_equals | /**
* {@inheritDoc}
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (!(obj instanceof IndexSignature)) return false;
IndexSignature other = (IndexSignature) obj;
return Objects.equals(other.index.columnNames(), index.columnNames()) && other.index.isUnique() == index.isUnique();
} | 3.68 |
dubbo_StringUtils_hasText | /**
* Checks whether the given CharSequence contains any non-whitespace characters.
*
* @param cs the CharSequence to check
* @return true if cs contains at least one non-whitespace character, false otherwise
*/
public static boolean hasText(CharSequence cs) {
return !isBlank(cs);
} | 3.68 |
hmily_HmilyTccTransactionExecutor_participantConfirm | /**
* Executes the confirm phase for each of the given participants.
*
* @param hmilyParticipantList the hmily participant list
* @param selfParticipantId the self participant id
* @return the object
*/
public Object participantConfirm(final List<HmilyParticipant> hmilyParticipantList, final Long selfParticipantId) {
if (CollectionUtils.isEmpty(hmilyParticipantList)) {
return null;
}
List<Object> results = Lists.newArrayListWithCapacity(hmilyParticipantList.size());
for (HmilyParticipant hmilyParticipant : hmilyParticipantList) {
try {
if (hmilyParticipant.getParticipantId().equals(selfParticipantId)) {
final Object result = HmilyReflector.executor(HmilyActionEnum.CONFIRMING, ExecutorTypeEnum.LOCAL, hmilyParticipant);
results.add(result);
HmilyRepositoryStorage.removeHmilyParticipant(hmilyParticipant);
} else {
final Object result = HmilyReflector.executor(HmilyActionEnum.CONFIRMING, ExecutorTypeEnum.RPC, hmilyParticipant);
results.add(result);
}
} catch (Throwable throwable) {
throw new HmilyRuntimeException(" hmilyParticipant execute confirm exception:" + hmilyParticipant.toString());
} finally {
HmilyContextHolder.remove();
}
}
HmilyParticipantCacheManager.getInstance().removeByKey(selfParticipantId);
return results.get(0);
} | 3.68 |
hbase_RpcServer_unsetCurrentCall | /**
* Used by {@link org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore}. For
* master's rpc call, it may generate new procedure and mutate the region which store procedure.
* There are some checks on the rpc when mutating the region, such as the rpc timeout check, so
* unset the rpc call to avoid those checks.
* @return the currently ongoing rpc call
*/
public static Optional<RpcCall> unsetCurrentCall() {
Optional<RpcCall> rpcCall = getCurrentCall();
CurCall.set(null);
return rpcCall;
} | 3.68 |
flink_ParameterTool_get | /**
* Returns the String value for the given key. If the key does not exist it will return null.
*/
@Override
public String get(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
return data.get(key);
} | 3.68 |
framework_VColorPickerArea_isOpen | /**
* Checks whether the popup has been marked as open.
*
* @return true if the popup has been marked as open, false otherwise.
*/
public boolean isOpen() {
return isOpen;
} | 3.68 |
hadoop_RouterRMAdminService_init | /**
* Initializes the wrapper with the specified parameters.
*
* @param interceptor the first interceptor in the pipeline
*/
public synchronized void init(RMAdminRequestInterceptor interceptor) {
this.rootInterceptor = interceptor;
} | 3.68 |
morf_H2Dialect_getSqlForDateToYyyymmddHHmmss | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmddHHmmss(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForDateToYyyymmddHHmmss(Function function) {
String sqlExpression = getSqlFrom(function.getArguments().get(0));
// Example for CURRENT_TIMESTAMP() -> 2015-06-23 11:25:08.11
return String.format("CAST(SUBSTRING(%1$s, 1, 4)||SUBSTRING(%1$s, 6, 2)||SUBSTRING(%1$s, 9, 2)||SUBSTRING(%1$s, 12, 2)||SUBSTRING(%1$s, 15, 2)||SUBSTRING(%1$s, 18, 2) AS DECIMAL(14))", sqlExpression);
} | 3.68 |
shardingsphere-elasticjob_ExecutionService_getDisabledItems | /**
* Get disabled sharding items.
*
* @param items the sharding items to check
* @return disabled sharding items
*/
public List<Integer> getDisabledItems(final List<Integer> items) {
List<Integer> result = new ArrayList<>(items.size());
for (int each : items) {
if (jobNodeStorage.isJobNodeExisted(ShardingNode.getDisabledNode(each))) {
result.add(each);
}
}
return result;
} | 3.68 |
framework_ServiceInitEvent_getAddedConnectorIdGenerators | /**
* Gets an unmodifiable list of all connector id generators that have been
* added for the service.
*
* @return the current list of added connector id generators
*
* @since 8.1
*/
public List<ConnectorIdGenerator> getAddedConnectorIdGenerators() {
return Collections.unmodifiableList(addedConnectorIdGenerators);
} | 3.68 |
hbase_SpaceQuotaRefresherChore_checkQuotaTableExists | /**
* Checks if hbase:quota exists in hbase:meta
* @return true if hbase:quota table is in meta, else returns false.
* @throws IOException if the existence check fails
*/
boolean checkQuotaTableExists() throws IOException {
try (Admin admin = getConnection().getAdmin()) {
return admin.tableExists(QuotaUtil.QUOTA_TABLE_NAME);
}
} | 3.68 |
rocketmq-connect_BufferedRecords_executeUpdates | /**
* @return an optional count of all updated rows or an empty optional if no info is available
*/
private Optional<Long> executeUpdates() throws DorisException {
Optional<Long> count = Optional.empty();
if (updatePreparedRecords.isEmpty()) {
return count;
}
for (ConnectRecord record : updatePreparedRecords) {
String jsonData = DorisDialect.convertToUpdateJsonString(record);
try {
log.info("[executeUpdates]" + jsonData);
loader.loadJson(jsonData, record.getSchema().getName());
} catch (DorisException e) {
log.error("executeUpdates failed");
throw e;
} catch (Exception e) {
throw new DorisException("doris error");
}
count = count.isPresent()
? count.map(total -> total + 1)
: Optional.of(1L);
}
return count;
} | 3.68 |
framework_Overlay_getOwner | /**
* Gets the owner (the Widget that created this Overlay, not the layout parent)
* of the Overlay.
*
* @return Owner (creator) or null if not defined
*/
public Widget getOwner() {
return owner;
} | 3.68 |
flink_ExtractionUtils_getStructuredField | /**
* Returns the field of a structured type. The logic is as broad as possible to support both
* Java and Scala in different flavors.
*/
public static Field getStructuredField(Class<?> clazz, String fieldName) {
final String normalizedFieldName = fieldName.toUpperCase();
final List<Field> fields = collectStructuredFields(clazz);
for (Field field : fields) {
if (field.getName().toUpperCase().equals(normalizedFieldName)) {
return field;
}
}
throw extractionError(
"Could not find a field named '%s' in class '%s' for structured type.",
fieldName, clazz.getName());
} | 3.68 |
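A minimal sketch of the same broad, case-insensitive matching idea using plain JDK reflection over declared fields; the sample POJO is invented, and the real utility also walks superclasses and handles Scala flavors:

```java
import java.lang.reflect.Field;

public class StructuredFieldLookup {
    /** Example structured type; the class and its fields are hypothetical. */
    public static class Order {
        public long orderId;
        public String customerName;
    }

    /** Case-insensitive lookup over the declared fields of a class. */
    static Field findField(Class<?> clazz, String fieldName) {
        for (Field field : clazz.getDeclaredFields()) {
            if (field.getName().equalsIgnoreCase(fieldName)) {
                return field;
            }
        }
        throw new IllegalArgumentException(
                "No field named '" + fieldName + "' in " + clazz.getName());
    }

    public static void main(String[] args) {
        System.out.println(findField(Order.class, "ORDERID"));       // orderId
        System.out.println(findField(Order.class, "customername")); // customerName
    }
}
```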
hbase_HBaseRpcController_setTableName | /** Sets Region's table name. */
default void setTableName(TableName tableName) {
} | 3.68 |
flink_HiveParserRexNodeConverter_convertIN | // converts IN for constant value list, RexSubQuery won't get here
private RexNode convertIN(ExprNodeGenericFuncDesc func) throws SemanticException {
List<RexNode> childRexNodes = new ArrayList<>();
for (ExprNodeDesc childExpr : func.getChildren()) {
childRexNodes.add(convert(childExpr));
}
if (funcConverter.hasOverloadedOp(
HiveParserIN.INSTANCE, SqlFunctionCategory.USER_DEFINED_FUNCTION)) {
return cluster.getRexBuilder().makeCall(HiveParserIN.INSTANCE, childRexNodes);
} else {
// hive module is not loaded, calcite converts IN using either OR or inline table
// (LogicalValues), we do the same here but only support OR for now
RexNode leftKey = childRexNodes.get(0);
Preconditions.checkState(
leftKey instanceof RexInputRef,
"Expecting LHS key of IN to be a RexInputRef, actually got " + leftKey);
final List<RexNode> comparisons = new ArrayList<>();
for (int i = 1; i < childRexNodes.size(); i++) {
comparisons.add(
cluster.getRexBuilder()
.makeCall(
SqlStdOperatorTable.EQUALS, leftKey, childRexNodes.get(i)));
}
return RexUtil.composeDisjunction(cluster.getRexBuilder(), comparisons, true);
}
} | 3.68 |
hadoop_PlacementConstraintManager_validateConstraint | /**
* Validate a placement constraint and the set of allocation tags that will
* enable it.
*
* @param sourceTags the associated allocation tags
* @param placementConstraint the constraint
* @return true if constraint and tags are valid
*/
default boolean validateConstraint(Set<String> sourceTags,
PlacementConstraint placementConstraint) {
return true;
} | 3.68 |
hbase_SimpleRpcServer_setSocketSendBufSize | /**
* Sets the socket buffer size used for responding to RPCs.
* @param size send size
*/
@Override
public void setSocketSendBufSize(int size) {
this.socketSendBufferSize = size;
} | 3.68 |
flink_MutableHashTable_moveToNextBucket | /**
* Moves to the next bucket. Returns true when moving to an in-memory bucket, and false when
* moving to a spilled bucket or when there are no more buckets.
*/
private boolean moveToNextBucket() {
scanCount++;
if (scanCount > totalBucketNumber - 1) {
return false;
}
// move to next bucket, update all the current bucket status with new bucket
// information.
final int bucketArrayPos = scanCount >> this.bucketsPerSegmentBits;
final int currentBucketInSegmentOffset =
(scanCount & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
MemorySegment currentBucket = this.buckets[bucketArrayPos];
final int partitionNumber =
currentBucket.get(currentBucketInSegmentOffset + HEADER_PARTITION_OFFSET);
final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
if (p.isInMemory()) {
setBucket(currentBucket, p.overflowSegments, p, currentBucketInSegmentOffset);
return true;
} else {
return false;
}
} | 3.68 |
morf_AbstractSqlDialectTest_testCreateTableStatements | /**
* Tests the SQL for creating tables.
*/
@SuppressWarnings("unchecked")
@Test
public void testCreateTableStatements() {
Table table = metadata.getTable(TEST_TABLE);
Table alternate = metadata.getTable(ALTERNATE_TABLE);
Table nonNull = metadata.getTable(NON_NULL_TABLE);
Table compositePrimaryKey = metadata.getTable(COMPOSITE_PRIMARY_KEY_TABLE);
Table autoNumber = metadata.getTable(AUTO_NUMBER_TABLE);
compareStatements(
expectedCreateTableStatements(),
testDialect.tableDeploymentStatements(table),
testDialect.tableDeploymentStatements(alternate),
testDialect.tableDeploymentStatements(nonNull),
testDialect.tableDeploymentStatements(compositePrimaryKey),
testDialect.tableDeploymentStatements(autoNumber)
);
} | 3.68 |
hadoop_AllocateRequest_schedulingRequests | /**
* Set the <code>schedulingRequests</code> of the request.
* @see AllocateRequest#setSchedulingRequests(List)
* @param schedulingRequests <code>SchedulingRequest</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Unstable
public AllocateRequestBuilder schedulingRequests(
List<SchedulingRequest> schedulingRequests) {
allocateRequest.setSchedulingRequests(schedulingRequests);
return this;
} | 3.68 |
hudi_HoodieWriteCommitKafkaCallbackConfig_setCallbackKafkaConfigIfNeeded | /**
* Set default value for {@link HoodieWriteCommitKafkaCallbackConfig} if needed.
*/
public static void setCallbackKafkaConfigIfNeeded(HoodieConfig config) {
config.setDefaultValue(ACKS);
config.setDefaultValue(RETRIES);
} | 3.68 |
flink_SourceBuilder_fromFormat | /**
* Creates a new source that is bounded.
*
* @param env The stream execution environment.
* @param inputFormat The input source to consume.
* @param typeInfo The type of the output.
* @param <OUT> The output type.
* @return A source that is bounded.
*/
public static <OUT> DataStreamSource<OUT> fromFormat(
StreamExecutionEnvironment env,
InputFormat<OUT, ?> inputFormat,
TypeInformation<OUT> typeInfo) {
InputFormatSourceFunction<OUT> function =
new InputFormatSourceFunction<>(inputFormat, typeInfo);
env.clean(function);
final StreamSource<OUT, ?> sourceOperator = new StreamSource<>(function);
return new DataStreamSource<>(
env, typeInfo, sourceOperator, true, SOURCE_NAME, Boundedness.BOUNDED);
} | 3.68 |
hibernate-validator_ConstraintAnnotationVisitor_visitExecutableAsMethod | /**
* <p>
* Checks whether the given annotations are correctly specified at the given
* method. The following checks are performed:
* </p>
* <ul>
* <li>
* Constraint annotations may only be given at non-static, JavaBeans getter
* methods whose return type is supported by the constraints.</li>
* <li>
* The {@code @Valid} annotation may only be given at non-static,
* non-primitive JavaBeans getter methods.</li>
* </ul>
*/
@Override
public Void visitExecutableAsMethod(
ExecutableElement method,
List<AnnotationMirror> mirrors) {
checkConstraints( method, mirrors );
return null;
} | 3.68 |
AreaShop_GeneralRegion_notifyAndUpdate | /**
* Broadcast the given event and update the region status.
* @param event The update event that should be broadcast
*/
public void notifyAndUpdate(NotifyRegionEvent event) {
Bukkit.getPluginManager().callEvent(event);
update();
} | 3.68 |
flink_Transformation_getName | /** Returns the name of this {@code Transformation}. */
public String getName() {
return name;
} | 3.68 |
hadoop_VersionInfoMojo_computeMD5 | /**
* Computes and returns an MD5 checksum of the contents of all files in the
* input Maven FileSet.
*
* @return String containing hexadecimal representation of MD5 checksum
* @throws Exception if there is any error while computing the MD5 checksum
*/
private String computeMD5() throws Exception {
List<File> files = FileSetUtils.convertFileSetToFiles(source);
// File order of MD5 calculation is significant. Sorting is done on
// unix-format names, case-folded, in order to get a platform-independent
// sort and calculate the same MD5 on all platforms.
Collections.sort(files, new MD5Comparator());
byte[] md5 = computeMD5(files);
String md5str = byteArrayToString(md5);
getLog().info("Computed MD5: " + md5str);
return md5str;
} | 3.68 |
zxing_RSSExpandedReader_isPartialRow | // Returns true when one of the rows already contains all the pairs
private static boolean isPartialRow(Iterable<ExpandedPair> pairs, Iterable<ExpandedRow> rows) {
for (ExpandedRow r : rows) {
boolean allFound = true;
for (ExpandedPair p : pairs) {
boolean found = false;
for (ExpandedPair pp : r.getPairs()) {
if (p.equals(pp)) {
found = true;
break;
}
}
if (!found) {
allFound = false;
break;
}
}
if (allFound) {
// the row 'r' contain all the pairs from 'pairs'
return true;
}
}
return false;
} | 3.68 |
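When the element type has a well-behaved equals(), as ExpandedPair does here, the nested membership loops above collapse to Collection#containsAll. A small generic sketch of that simplification, not part of the ZXing code:

```java
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class PartialRowCheck {
    /** Returns true when any row already contains every element of 'pairs'. */
    static <T> boolean anyRowContainsAll(Iterable<? extends Collection<T>> rows, Collection<T> pairs) {
        for (Collection<T> row : rows) {
            if (row.containsAll(pairs)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        List<List<String>> rows = Arrays.asList(
                Arrays.asList("A", "B"),
                Arrays.asList("A", "B", "C"));
        System.out.println(anyRowContainsAll(rows, Arrays.asList("B", "C"))); // true
        System.out.println(anyRowContainsAll(rows, Arrays.asList("D")));      // false
    }
}
```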
flink_OperatorTransformation_bootstrapWith | /**
* Create a new {@link OneInputStateTransformation} from a {@link DataStream}.
*
* @param stream A data stream of elements.
* @param <T> The type of the input.
* @return A {@link OneInputStateTransformation}.
*/
public static <T> OneInputStateTransformation<T> bootstrapWith(DataStream<T> stream) {
return new OneInputStateTransformation<>(stream);
} | 3.68 |
framework_AbstractDateField_setDateOutOfRangeMessage | /**
* Sets the current error message if the range validation fails.
*
* @param dateOutOfRangeMessage
* - Localizable message which is shown when value (the date) is
* set outside allowed range
*/
public void setDateOutOfRangeMessage(String dateOutOfRangeMessage) {
this.dateOutOfRangeMessage = dateOutOfRangeMessage;
} | 3.68 |
querydsl_NumberExpression_gt | /**
* Create a {@code this > right} expression
*
* @param <A>
* @param right rhs of the comparison
* @return {@code this > right}
* @see java.lang.Comparable#compareTo(Object)
*/
public final <A extends Number & Comparable<?>> BooleanExpression gt(Expression<A> right) {
return Expressions.booleanOperation(Ops.GT, mixin, right);
} | 3.68 |
hadoop_AuthenticationToken_isExpired | /**
* Returns true if the token has expired.
*
* @return true if the token has expired.
*/
public boolean isExpired() {
return super.isExpired();
} | 3.68 |
hbase_SingleColumnValueFilter_getQualifier | /** Returns the qualifier */
public byte[] getQualifier() {
return columnQualifier;
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionMaximum | /**
* Add a new evaluator to the maximum statistics.
* @param key key of this statistic
* @param eval evaluator for the statistic
* @return the builder.
*/
public DynamicIOStatisticsBuilder withLongFunctionMaximum(String key,
ToLongFunction<String> eval) {
activeInstance().addMaximumFunction(key, eval::applyAsLong);
return this;
} | 3.68 |
framework_DragSourceExtension_getDragData | /**
* Get server side drag data. This data is available in the drop event and
* can be used to transfer data between drag source and drop target if they
* are in the same UI.
*
* @return Server side drag data if set, otherwise {@literal null}.
*/
public Object getDragData() {
return dragData;
} | 3.68 |
hadoop_BCFile_getInputStream | /**
* Get the input stream for reading block data.
*
* @return the input stream suitable for reading block data.
*/
public InputStream getInputStream() {
return in;
} | 3.68 |
flink_InMemoryPartition_isCompacted | /** @return true if garbage exists in partition */
public boolean isCompacted() {
return this.compacted;
} | 3.68 |
hadoop_Validate_checkWithinRange | /**
* Validates that the given value is within the given range of values.
* @param value the value to check.
* @param valueName the name of the argument.
* @param minValueInclusive inclusive lower limit for the value.
* @param maxValueInclusive inclusive upper limit for the value.
*/
public static void checkWithinRange(
double value,
String valueName,
double minValueInclusive,
double maxValueInclusive) {
checkArgument(
(value >= minValueInclusive) && (value <= maxValueInclusive),
"'%s' (%s) must be within the range [%s, %s].",
valueName,
value,
minValueInclusive,
maxValueInclusive);
} | 3.68 |
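A minimal standalone version of the same inclusive-range precondition, throwing IllegalArgumentException directly instead of going through the project's checkArgument helper:

```java
public class RangeCheckDemo {
    /** Throws if value is outside the inclusive range [min, max]. */
    static void checkWithinRange(double value, String valueName, double min, double max) {
        if (value < min || value > max) {
            throw new IllegalArgumentException(String.format(
                    "'%s' (%s) must be within the range [%s, %s].", valueName, value, min, max));
        }
    }

    public static void main(String[] args) {
        checkWithinRange(0.5, "ratio", 0.0, 1.0); // passes silently
        try {
            checkWithinRange(1.5, "ratio", 0.0, 1.0);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```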
hbase_HRegion_cacheSkipWALMutationForRegionReplication | /**
* Here is for HBASE-26993: in order to make the new framework for region replication work for
* SKIP_WAL, we save the {@link Mutation} whose {@link Mutation#getDurability} is
* {@link Durability#SKIP_WAL} in miniBatchOp.
*/
@Override
protected void cacheSkipWALMutationForRegionReplication(
MiniBatchOperationInProgress<Mutation> miniBatchOp,
List<Pair<NonceKey, WALEdit>> nonceKeyAndWALEdits, Map<byte[], List<Cell>> familyCellMap) {
if (!this.regionReplicateEnable) {
return;
}
WALEdit walEditForReplicateIfExistsSkipWAL =
miniBatchOp.getWalEditForReplicateIfExistsSkipWAL();
/**
* When there is a SKIP_WAL {@link Mutation}, we create a new {@link WALEdit} for replicating
* to the region replica: first we fill it with the existing {@link WALEdit} and then add the
* SKIP_WAL {@link Mutation} to it.
*/
if (walEditForReplicateIfExistsSkipWAL == null) {
walEditForReplicateIfExistsSkipWAL =
this.createWALEditForReplicateSkipWAL(miniBatchOp, nonceKeyAndWALEdits);
miniBatchOp.setWalEditForReplicateIfExistsSkipWAL(walEditForReplicateIfExistsSkipWAL);
}
walEditForReplicateIfExistsSkipWAL.add(familyCellMap);
} | 3.68 |
hudi_ClientIds_getHeartbeatFolderPath | // -------------------------------------------------------------------------
// Utilities
// -------------------------------------------------------------------------
private String getHeartbeatFolderPath(String basePath) {
return basePath + Path.SEPARATOR + AUXILIARYFOLDER_NAME + Path.SEPARATOR + HEARTBEAT_FOLDER_NAME;
} | 3.68 |
druid_MySqlStatementParser_parserParameters | /**
* parse create procedure parameters
*
* @param parameters
*/
private void parserParameters(List<SQLParameter> parameters, SQLObject parent) {
if (lexer.token() == Token.RPAREN) {
return;
}
for (; ; ) {
SQLParameter parameter = new SQLParameter();
if (lexer.token() == Token.CURSOR) {
lexer.nextToken();
parameter.setName(this.exprParser.name());
accept(Token.IS);
SQLSelect select = this.createSQLSelectParser().select();
SQLDataTypeImpl dataType = new SQLDataTypeImpl();
dataType.setName("CURSOR");
parameter.setDataType(dataType);
parameter.setDefaultValue(new SQLQueryExpr(select));
} else if (lexer.token() == Token.IN || lexer.token() == Token.OUT || lexer.token() == Token.INOUT) {
if (lexer.token() == Token.IN) {
parameter.setParamType(ParameterType.IN);
} else if (lexer.token() == Token.OUT) {
parameter.setParamType(ParameterType.OUT);
} else if (lexer.token() == Token.INOUT) {
parameter.setParamType(ParameterType.INOUT);
}
lexer.nextToken();
parameter.setName(this.exprParser.name());
parameter.setDataType(this.exprParser.parseDataType());
} else {
// default parameter type is in
parameter.setParamType(ParameterType.DEFAULT);
parameter.setName(this.exprParser.name());
parameter.setDataType(this.exprParser.parseDataType());
if (lexer.token() == Token.COLONEQ) {
lexer.nextToken();
parameter.setDefaultValue(this.exprParser.expr());
}
}
parameters.add(parameter);
if (lexer.token() == Token.COMMA || lexer.token() == Token.SEMI) {
lexer.nextToken();
}
if (lexer.token() != Token.BEGIN && lexer.token() != Token.RPAREN) {
continue;
}
break;
}
} | 3.68 |
framework_Form_setImmediate | /**
* Setting the form to be immediate also sets all the fields of the form to
* the same state.
*/
@Override
public void setImmediate(boolean immediate) {
super.setImmediate(immediate);
for (Field<?> f : fields.values()) {
if (f instanceof AbstractLegacyComponent) {
((AbstractLegacyComponent) f).setImmediate(immediate);
}
}
} | 3.68 |
morf_TableNameDecorator_isTemporary | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.metadata.Table#isTemporary()
*/
@Override
public boolean isTemporary() {
return false;
} | 3.68 |
framework_Calendar_getLastDateForWeek | /**
* Gets the date that is the last day of the week that the given target date
* belongs to.
*
* @param date
* Target date
* @return Date that is last date in same week that given date is.
*/
protected Date getLastDateForWeek(Date date) {
currentCalendar.setTime(date);
currentCalendar.add(java.util.Calendar.DATE, 1);
int firstDayOfWeek = currentCalendar.getFirstDayOfWeek();
// Roll to the week's last day using firstDayOfWeek: roll forward until the first day
// of the week is found, then roll back one day.
while (firstDayOfWeek != currentCalendar
.get(java.util.Calendar.DAY_OF_WEEK)) {
currentCalendar.add(java.util.Calendar.DATE, 1);
}
currentCalendar.add(java.util.Calendar.DATE, -1);
return currentCalendar.getTime();
} | 3.68 |
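For comparison, the same "last day of the week containing a date" idea can be expressed with java.time; this is only an illustration under the stated week-start assumption, not a drop-in for the Calendar-based Vaadin code:

```java
import java.time.DayOfWeek;
import java.time.LocalDate;
import java.time.temporal.TemporalAdjusters;

public class LastDayOfWeekDemo {
    /** Last day of the week containing 'date', for a week that starts on 'firstDayOfWeek'. */
    static LocalDate lastDateForWeek(LocalDate date, DayOfWeek firstDayOfWeek) {
        DayOfWeek lastDay = firstDayOfWeek.minus(1); // the day just before the week starts again
        return date.with(TemporalAdjusters.nextOrSame(lastDay));
    }

    public static void main(String[] args) {
        LocalDate wednesday = LocalDate.of(2024, 5, 15);
        System.out.println(lastDateForWeek(wednesday, DayOfWeek.MONDAY)); // 2024-05-19 (Sunday)
        System.out.println(lastDateForWeek(wednesday, DayOfWeek.SUNDAY)); // 2024-05-18 (Saturday)
    }
}
```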
hadoop_ReferenceCountMap_put | /**
* Add the reference. If the instance is already present, just increase the
* reference count.
*
* @param key Key to put in reference map
* @return Referenced instance
*/
public E put(E key) {
E value = referenceMap.putIfAbsent(key, key);
if (value == null) {
value = key;
}
value.incrementAndGetRefCount();
return value;
} | 3.68 |
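A small sketch of the same interning pattern built from plain java.util.concurrent types, where the reference count lives next to the canonical instance rather than inside it; this is not the HDFS class itself, and the key values are made up:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountInterner<E> {
    private final ConcurrentMap<E, AtomicInteger> counts = new ConcurrentHashMap<>();

    /** Interns the key and bumps its reference count, returning the current count. */
    public int retain(E key) {
        return counts.computeIfAbsent(key, k -> new AtomicInteger()).incrementAndGet();
    }

    public static void main(String[] args) {
        RefCountInterner<String> interner = new RefCountInterner<>();
        System.out.println(interner.retain("acl-1")); // 1
        System.out.println(interner.retain("acl-1")); // 2
        System.out.println(interner.retain("acl-2")); // 1
    }
}
```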
framework_BootstrapResponse_setUriResolver | /**
* Sets the URI resolver used in the bootstrap process.
*
* @param uriResolver
* the uri resolver which is used
* @since 8.1
*/
public void setUriResolver(VaadinUriResolver uriResolver) {
assert this.uriResolver == null : "URI resolver should never be changed";
assert uriResolver != null : "URI resolver should never be null";
this.uriResolver = uriResolver;
} | 3.68 |
flink_JobExceptionsInfoWithHistory_equals | // hashCode and equals are necessary for the test classes deriving from
// RestResponseMarshallingTestBase
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass() || !super.equals(o)) {
return false;
}
RootExceptionInfo that = (RootExceptionInfo) o;
return getConcurrentExceptions().equals(that.getConcurrentExceptions());
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertWithNullLiterals | /**
* Test that an Insert statement is generated with a null value
*/
@Test
public void testInsertWithNullLiterals() {
InsertStatement stmt = new InsertStatement().into(new TableReference(ALTERNATE_TABLE))
.fields(
literal(1).as("id"),
literal(0).as("version"),
new NullFieldLiteral().as(STRING_FIELD)
);
String expectedSql = "INSERT INTO " + tableName(ALTERNATE_TABLE) + " (id, version, stringField) VALUES (1, 0, NULL)";
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertEquals("Insert with null literals", ImmutableList.of(expectedSql).toString().toLowerCase(), sql.toString().replaceAll("/\\*.*?\\*/ ", "").toLowerCase());
} | 3.68 |
morf_SqlDialect_getSubstringFunctionName | /**
* Gets the function name required to perform a substring command.
* <p>
* The default is provided here and should be overridden in child classes as
* necessary.
* </p>
*
* @return The substring function name.
*/
protected String getSubstringFunctionName() {
return "SUBSTRING";
} | 3.68 |
graphhopper_TourStrategy_slightlyModifyDistance | /**
* Modifies the distance by up to +-10%.
*/
protected double slightlyModifyDistance(double distance) {
double distanceModification = random.nextDouble() * .1 * distance;
if (random.nextBoolean())
distanceModification = -distanceModification;
return distance + distanceModification;
} | 3.68 |
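A self-contained version of the +-10% perturbation for quick experimentation; the random seed and sample distance are arbitrary:

```java
import java.util.Random;

public class DistanceJitterDemo {
    /** Returns the distance changed by a random amount of up to +-10%. */
    static double slightlyModifyDistance(Random random, double distance) {
        double modification = random.nextDouble() * 0.1 * distance;
        return random.nextBoolean() ? distance - modification : distance + modification;
    }

    public static void main(String[] args) {
        Random random = new Random(42);
        for (int i = 0; i < 3; i++) {
            System.out.printf("%.1f -> %.1f%n", 1000.0, slightlyModifyDistance(random, 1000.0));
        }
    }
}
```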
framework_SortOrderBuilder_build | /**
* Returns an unmodifiable copy of the list of current sort orders in this
* sort builder.
*
* @return an unmodifiable sort order list
*/
public final List<T> build() {
return Collections.unmodifiableList(new ArrayList<>(sortOrders));
} | 3.68 |
flink_PackagingTestUtils_assertJarContainsOnlyFilesMatching | /**
* Verifies that all files in the jar match one of the provided allow strings.
*
* <p>An allow item ending on a {@code "/"} is treated as an allowed parent directory.
* Otherwise, it is treated as an allowed file.
*
* <p>For example, given a jar containing a file {@code META-INF/NOTICES}:
*
* <p>These would pass:
*
* <ul>
* <li>{@code "META-INF/"}
* <li>{@code "META-INF/NOTICES"}
* </ul>
*
* <p>These would fail:
*
* <ul>
* <li>{@code "META-INF"}
* <li>{@code "META-INF/NOTICE"}
* <li>{@code "META-INF/NOTICES/"}
* </ul>
*/
public static void assertJarContainsOnlyFilesMatching(
Path jarPath, Collection<String> allowedPaths) throws Exception {
final URI jar = jarPath.toUri();
try (final FileSystem fileSystem =
FileSystems.newFileSystem(
new URI("jar:file", jar.getHost(), jar.getPath(), jar.getFragment()),
Collections.emptyMap())) {
try (Stream<Path> walk = Files.walk(fileSystem.getPath("/"))) {
walk.filter(file -> !Files.isDirectory(file))
.map(file -> file.toAbsolutePath().toString())
.map(file -> file.startsWith("/") ? file.substring(1) : file)
.forEach(
file ->
assertThat(allowedPaths)
.as("Bad file in JAR: %s", file)
.anySatisfy(
allowedPath -> {
if (allowedPath.endsWith("/")) {
assertThat(file)
.startsWith(allowedPath);
} else {
assertThat(file)
.isEqualTo(allowedPath);
}
}));
}
}
} | 3.68 |
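A hedged usage sketch of the matching rules above; the jar path, the allow-list entries, and the import location of PackagingTestUtils are assumptions, not taken from the source.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;

// Import location assumed for illustration.
import org.apache.flink.packaging.PackagingTestUtils;

class PackagingCheckSketch {
    static void verifyConnectorJar() throws Exception {
        Path jar = Paths.get("target/my-connector.jar"); // hypothetical artifact
        PackagingTestUtils.assertJarContainsOnlyFilesMatching(
                jar,
                Arrays.asList(
                        "META-INF/",                        // trailing "/" allows the whole directory
                        "org/example/MyConnector.class"));  // exact file match
    }
}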
hadoop_CachingGetSpaceUsed_incDfsUsed | /**
* Increment the cached value of used space.
*
* @param value dfs used value.
*/
public void incDfsUsed(long value) {
used.addAndGet(value);
} | 3.68 |
framework_VCalendar_getDateTimeFormat | /**
 * Gets the format used to format dates that include both a date and a time
 * part.
 *
 * @return the date and time format
*/
public DateTimeFormat getDateTimeFormat() {
return dateformat_datetime;
} | 3.68 |
flink_SourceOperator_initReader | /**
 * Initializes the reader. The code in this method would ideally live in the constructor, or even
 * in the operator factory. It has to run here, at a slightly later stage, because of the lazy
 * metric initialization.
*
* <p>Calling this method explicitly is an optional way to have the reader initialization a bit
* earlier than in open(), as needed by the {@link
* org.apache.flink.streaming.runtime.tasks.SourceOperatorStreamTask}
*
* <p>This code should move to the constructor once the metric groups are available at task
* setup time.
*/
public void initReader() throws Exception {
if (sourceReader != null) {
return;
}
final int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
final SourceReaderContext context =
new SourceReaderContext() {
@Override
public SourceReaderMetricGroup metricGroup() {
return sourceMetricGroup;
}
@Override
public Configuration getConfiguration() {
return configuration;
}
@Override
public String getLocalHostName() {
return localHostname;
}
@Override
public int getIndexOfSubtask() {
return subtaskIndex;
}
@Override
public void sendSplitRequest() {
operatorEventGateway.sendEventToCoordinator(
new RequestSplitEvent(getLocalHostName()));
}
@Override
public void sendSourceEventToCoordinator(SourceEvent event) {
operatorEventGateway.sendEventToCoordinator(new SourceEventWrapper(event));
}
@Override
public UserCodeClassLoader getUserCodeClassLoader() {
return new UserCodeClassLoader() {
@Override
public ClassLoader asClassLoader() {
return getRuntimeContext().getUserCodeClassLoader();
}
@Override
public void registerReleaseHookIfAbsent(
String releaseHookName, Runnable releaseHook) {
getRuntimeContext()
.registerUserCodeClassLoaderReleaseHookIfAbsent(
releaseHookName, releaseHook);
}
};
}
@Override
public int currentParallelism() {
return getRuntimeContext().getNumberOfParallelSubtasks();
}
};
sourceReader = readerFactory.apply(context);
} | 3.68 |
framework_SortOrderBuilder_thenDesc | /**
* Appends sorting with descending sort direction.
*
* @param by
* the object to sort by
* @return this sort builder
*/
public SortOrderBuilder<T, V> thenDesc(V by) {
return append(createSortOrder(by, SortDirection.DESCENDING));
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_isActiveAndEnabled | /**
 * Returns true if the target subcluster is both active and enabled.
*/
private boolean isActiveAndEnabled(SubClusterId targetId) {
if (targetId == null) {
return false;
} else {
return getActiveAndEnabledSC().contains(targetId);
}
} | 3.68 |
flink_MetricQueryService_createMetricQueryService | /**
 * Creates the MetricQueryService endpoint on the given RpcService.
 *
 * @param rpcService The rpcService running the MetricQueryService
 * @param resourceID resource ID to disambiguate the endpoint name
 * @return the MetricQueryService instance
*/
public static MetricQueryService createMetricQueryService(
RpcService rpcService, ResourceID resourceID, long maximumFrameSize) {
String endpointId =
resourceID == null
? METRIC_QUERY_SERVICE_NAME
: METRIC_QUERY_SERVICE_NAME + "_" + resourceID.getResourceIdString();
return new MetricQueryService(rpcService, endpointId, maximumFrameSize);
} | 3.68 |
hbase_MasterObserver_postMoveServers | /**
* Called after servers are moved to target region server group
* @param ctx the environment to interact with the framework and master
* @param servers set of servers to move
* @param targetGroup name of group
*/
default void postMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<Address> servers, String targetGroup) throws IOException {
} | 3.68 |
querydsl_Expressions_comparableTemplate | /**
* Create a new Template expression
*
* @param cl type of expression
* @param template template
* @param args template parameters
* @return template expression
*/
public static <T extends Comparable<?>> ComparableTemplate<T> comparableTemplate(Class<? extends T> cl, Template template, List<?> args) {
return new ComparableTemplate<T>(cl, template, args);
} | 3.68 |
flink_FactoryUtil_createTableFactoryHelper | /**
* Creates a utility that helps in discovering formats, merging options with {@link
* DynamicTableFactory.Context#getEnrichmentOptions()} and validating them all for a {@link
* DynamicTableFactory}.
*
* <p>The following example sketches the usage:
*
* <pre>{@code
* // in createDynamicTableSource()
* helper = FactoryUtil.createTableFactoryHelper(this, context);
*
* keyFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, KEY_FORMAT);
* valueFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, VALUE_FORMAT);
*
* helper.validate();
*
* ... // construct connector with discovered formats
* }</pre>
*
* <p>Note: The format option parameter of {@link
* TableFactoryHelper#discoverEncodingFormat(Class, ConfigOption)} and {@link
* TableFactoryHelper#discoverDecodingFormat(Class, ConfigOption)} must be {@link #FORMAT} or
* end with {@link #FORMAT_SUFFIX}. The discovery logic will replace 'format' with the factory
* identifier value as the format prefix. For example, assuming the identifier is 'json', if the
* format option key is 'format', then the format prefix is 'json.'. If the format option key is
 * 'value.format', then the format prefix is 'value.json.'. The format prefix is used to project
* the options for the format factory.
*
* <p>Note: When created, this utility merges the options from {@link
* DynamicTableFactory.Context#getEnrichmentOptions()} using {@link
* DynamicTableFactory#forwardOptions()}. When invoking {@link TableFactoryHelper#validate()},
* this utility checks for left-over options in the final step.
*/
public static TableFactoryHelper createTableFactoryHelper(
DynamicTableFactory factory, DynamicTableFactory.Context context) {
return new TableFactoryHelper(factory, context);
} | 3.68 |
hudi_HoodieMetadataTableValidator_validateLatestFileSlices | /**
 * Compare getLatestFileSlices between the metadata table and the file system.
*/
private void validateLatestFileSlices(
HoodieMetadataValidationContext metadataTableBasedContext,
HoodieMetadataValidationContext fsBasedContext,
String partitionPath,
Set<String> baseDataFilesForCleaning) {
List<FileSlice> latestFileSlicesFromMetadataTable;
List<FileSlice> latestFileSlicesFromFS;
if (!baseDataFilesForCleaning.isEmpty()) {
latestFileSlicesFromMetadataTable = filterFileSliceBasedOnInflightCleaning(metadataTableBasedContext.getSortedLatestFileSliceList(partitionPath), baseDataFilesForCleaning);
latestFileSlicesFromFS = filterFileSliceBasedOnInflightCleaning(fsBasedContext.getSortedLatestFileSliceList(partitionPath), baseDataFilesForCleaning);
} else {
latestFileSlicesFromMetadataTable = metadataTableBasedContext.getSortedLatestFileSliceList(partitionPath);
latestFileSlicesFromFS = fsBasedContext.getSortedLatestFileSliceList(partitionPath);
}
LOG.debug("Latest file list from metadata: " + latestFileSlicesFromMetadataTable + ". For partition " + partitionPath);
LOG.debug("Latest file list from direct listing: " + latestFileSlicesFromFS + ". For partition " + partitionPath);
validateFileSlices(
latestFileSlicesFromMetadataTable, latestFileSlicesFromFS, partitionPath,
fsBasedContext.getMetaClient(), "latest file slices");
} | 3.68 |
flink_TemplateUtils_findResultOnlyTemplates | /** Finds templates that only specify a result. */
static Set<FunctionResultTemplate> findResultOnlyTemplates(
Set<FunctionTemplate> functionTemplates,
Function<FunctionTemplate, FunctionResultTemplate> accessor) {
return functionTemplates.stream()
.filter(t -> t.getSignatureTemplate() == null && accessor.apply(t) != null)
.map(accessor)
.collect(Collectors.toCollection(LinkedHashSet::new));
} | 3.68 |
graphhopper_Helper_parseList | /**
* parses a string like [a,b,c]
*/
public static List<String> parseList(String listStr) {
String trimmed = listStr.trim();
if (trimmed.length() < 2)
return Collections.emptyList();
String[] items = trimmed.substring(1, trimmed.length() - 1).split(",");
List<String> result = new ArrayList<>();
for (String item : items) {
String s = item.trim();
if (!s.isEmpty()) {
result.add(s);
}
}
return result;
} | 3.68 |
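Expected behaviour, restated as a usage sketch (the inputs are illustrative and the import path of Helper is assumed):

import java.util.List;

import com.graphhopper.util.Helper;

class ParseListSketch {
    static void demo() {
        List<String> modes = Helper.parseList("[car, bike, foot]"); // -> ["car", "bike", "foot"]
        List<String> none = Helper.parseList("[]");                 // -> empty list
        List<String> blank = Helper.parseList("");                  // -> empty list (too short to parse)
    }
}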
hadoop_FSBuilder_opt | /**
* Pass an optional double parameter for the Builder.
* This parameter is converted to a long and passed
 * to {@link #optLong(String, long)}; all
* decimal precision is lost.
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
* @deprecated use {@link #optDouble(String, double)}
*/
@Deprecated
default B opt(@Nonnull String key, double value) {
return optLong(key, (long) value);
} | 3.68 |
hbase_Append_setTimeRange | /**
* Sets the TimeRange to be used on the Get for this append.
* <p>
* This is useful for when you have counters that only last for specific periods of time (ie.
* counters that are partitioned by time). By setting the range of valid times for this append,
* you can potentially gain some performance with a more optimal Get operation. Be careful adding
* the time range to this class as you will update the old cell if the time range doesn't include
* the latest cells.
* <p>
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
*/
public Append setTimeRange(long minStamp, long maxStamp) {
tr = TimeRange.between(minStamp, maxStamp);
return this;
} | 3.68 |
dubbo_ReferenceBeanSupport_convertPropertyValues | /**
* Convert to raw props, without parsing nested config objects
*/
public static Map<String, Object> convertPropertyValues(MutablePropertyValues propertyValues) {
Map<String, Object> referenceProps = new LinkedHashMap<>();
for (PropertyValue propertyValue : propertyValues.getPropertyValueList()) {
String propertyName = propertyValue.getName();
Object value = propertyValue.getValue();
if (ReferenceAttributes.METHODS.equals(propertyName)
|| ReferenceAttributes.ARGUMENTS.equals(propertyName)) {
ManagedList managedList = (ManagedList) value;
List<Map<String, Object>> elementList = new ArrayList<>();
for (Object el : managedList) {
Map<String, Object> element = convertPropertyValues(
((BeanDefinitionHolder) el).getBeanDefinition().getPropertyValues());
element.remove(ReferenceAttributes.ID);
elementList.add(element);
}
value = elementList.toArray(new Object[0]);
} else if (ReferenceAttributes.PARAMETERS.equals(propertyName)) {
value = createParameterMap((ManagedMap) value);
}
// convert ref
if (value instanceof RuntimeBeanReference) {
RuntimeBeanReference beanReference = (RuntimeBeanReference) value;
value = beanReference.getBeanName();
}
if (value == null || (value instanceof String && StringUtils.isBlank((String) value))) {
// ignore null or blank string
continue;
}
referenceProps.put(propertyName, value);
}
return referenceProps;
} | 3.68 |
flink_ColumnSummary_containsNull | /** True if this column contains any null values. */
public boolean containsNull() {
return getNullCount() > 0L;
} | 3.68 |
hadoop_ContainerReapContext_getContainer | /**
 * Gets the container that was set on this context.
*
* @return the {@link Container} set in the context.
*/
public Container getContainer() {
return container;
} | 3.68 |