name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hbase_KeyValue_getShortMidpointKey | /**
* This is a HFile block index key optimization.
* @param leftKey byte array for left Key
* @param rightKey byte array for right Key
 * @return a shortened byte[] key that sorts between leftKey and rightKey
* @deprecated Since 0.99.2;
*/
@Deprecated
public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) {
if (rightKey == null) {
throw new IllegalArgumentException("rightKey can not be null");
}
if (leftKey == null) {
return Arrays.copyOf(rightKey, rightKey.length);
}
if (compareFlatKey(leftKey, rightKey) >= 0) {
throw new IllegalArgumentException("Unexpected input, leftKey:" + Bytes.toString(leftKey)
+ ", rightKey:" + Bytes.toString(rightKey));
}
short leftRowLength = Bytes.toShort(leftKey, 0);
short rightRowLength = Bytes.toShort(rightKey, 0);
int leftCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + leftRowLength;
int rightCommonLength = ROW_LENGTH_SIZE + FAMILY_LENGTH_SIZE + rightRowLength;
int leftCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + leftCommonLength;
int rightCommonLengthWithTSAndType = TIMESTAMP_TYPE_SIZE + rightCommonLength;
int leftColumnLength = leftKey.length - leftCommonLengthWithTSAndType;
int rightColumnLength = rightKey.length - rightCommonLengthWithTSAndType;
// rows are equal
if (
leftRowLength == rightRowLength && compareRows(leftKey, ROW_LENGTH_SIZE, leftRowLength,
rightKey, ROW_LENGTH_SIZE, rightRowLength) == 0
) {
// Compare family & qualifier together.
int comparison = Bytes.compareTo(leftKey, leftCommonLength, leftColumnLength, rightKey,
rightCommonLength, rightColumnLength);
// if "row + family + qualifier" are the same, return rightKey directly
if (comparison == 0) {
return Arrays.copyOf(rightKey, rightKey.length);
}
// "family + qualifier" are different, generate a faked key per rightKey
byte[] newKey = Arrays.copyOf(rightKey, rightKey.length);
Bytes.putLong(newKey, rightKey.length - TIMESTAMP_TYPE_SIZE, HConstants.LATEST_TIMESTAMP);
Bytes.putByte(newKey, rightKey.length - TYPE_SIZE, Type.Maximum.getCode());
return newKey;
}
// rows are different
short minLength = leftRowLength < rightRowLength ? leftRowLength : rightRowLength;
short diffIdx = 0;
while (
diffIdx < minLength
&& leftKey[ROW_LENGTH_SIZE + diffIdx] == rightKey[ROW_LENGTH_SIZE + diffIdx]
) {
diffIdx++;
}
byte[] newRowKey = null;
if (diffIdx >= minLength) {
// leftKey's row is prefix of rightKey's.
newRowKey = new byte[diffIdx + 1];
System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1);
} else {
int diffByte = leftKey[ROW_LENGTH_SIZE + diffIdx];
if (
(0xff & diffByte) < 0xff && (diffByte + 1) < (rightKey[ROW_LENGTH_SIZE + diffIdx] & 0xff)
) {
newRowKey = new byte[diffIdx + 1];
System.arraycopy(leftKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx);
newRowKey[diffIdx] = (byte) (diffByte + 1);
} else {
newRowKey = new byte[diffIdx + 1];
System.arraycopy(rightKey, ROW_LENGTH_SIZE, newRowKey, 0, diffIdx + 1);
}
}
return new KeyValue(newRowKey, null, null, HConstants.LATEST_TIMESTAMP, Type.Maximum)
.getKey();
} | 3.68 |
flink_Router_toString | /** Returns visualized routing rules. */
@Override
public String toString() {
// Step 1/2: Dump routers and anyMethodRouter in order
int numRoutes = size();
List<String> methods = new ArrayList<String>(numRoutes);
List<String> patterns = new ArrayList<String>(numRoutes);
List<String> targets = new ArrayList<String>(numRoutes);
// For router
for (Entry<HttpMethod, MethodlessRouter<T>> e : routers.entrySet()) {
HttpMethod method = e.getKey();
MethodlessRouter<T> router = e.getValue();
aggregateRoutes(method.toString(), router.routes(), methods, patterns, targets);
}
// For anyMethodRouter
aggregateRoutes("*", anyMethodRouter.routes(), methods, patterns, targets);
// For notFound
if (notFound != null) {
methods.add("*");
patterns.add("*");
targets.add(targetToString(notFound));
}
// Step 2/2: Format the List into aligned columns: <method> <patterns> <target>
int maxLengthMethod = maxLength(methods);
int maxLengthPattern = maxLength(patterns);
String format = "%-" + maxLengthMethod + "s %-" + maxLengthPattern + "s %s\n";
int initialCapacity = (maxLengthMethod + 1 + maxLengthPattern + 1 + 20) * methods.size();
StringBuilder b = new StringBuilder(initialCapacity);
for (int i = 0; i < methods.size(); i++) {
String method = methods.get(i);
String pattern = patterns.get(i);
String target = targets.get(i);
b.append(String.format(format, method, pattern, target));
}
return b.toString();
} | 3.68 |
flink_HeapPriorityQueueSet_remove | /**
* In contrast to the superclass and to maintain set semantics, removal here is based on
* comparing the given element via {@link #equals(Object)}.
*
 * @return <code>true</code> if the operation changed the head element or if it is unclear
 *     whether the head element changed. Only returns <code>false</code> iff the head element
 *     was not changed by this operation.
*/
@Override
public boolean remove(@Nonnull T toRemove) {
T storedElement = getDedupMapForElement(toRemove).remove(toRemove);
return storedElement != null && super.remove(storedElement);
} | 3.68 |
pulsar_ProducerImpl_cnx | // wrapper for connection methods
ClientCnx cnx() {
return this.connectionHandler.cnx();
} | 3.68 |
hbase_HFileReaderImpl_getKVBufSize | // From non-encoded HFiles, we always read back KeyValue or its descendant. (Note: When HFile
// block is in DBB, it will be OffheapKV). So all parts of the Cell are in a contiguous
// array/buffer. How many bytes we should wrap to make the KV is what this method returns.
private int getKVBufSize() {
int kvBufSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen;
if (currTagsLen > 0) {
kvBufSize += Bytes.SIZEOF_SHORT + currTagsLen;
}
return kvBufSize;
} | 3.68 |
framework_BasicEvent_getStyleName | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.calendar.event.CalendarEvent#getStyleName()
*/
@Override
public String getStyleName() {
return styleName;
} | 3.68 |
hadoop_RouterStateIdContext_getRouterFederatedStateMap | /**
* Utility function to parse routerFederatedState field in RPC headers.
*/
public static Map<String, Long> getRouterFederatedStateMap(ByteString byteString) {
if (byteString != null) {
RouterFederatedStateProto federatedState;
try {
federatedState = RouterFederatedStateProto.parseFrom(byteString);
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
return federatedState.getNamespaceStateIdsMap();
} else {
return Collections.emptyMap();
}
} | 3.68 |
hbase_HFileSystem_newInstanceFileSystem | /**
* Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer
* versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration).
* @param conf Configuration
* @return A new instance of the filesystem
*/
private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
URI uri = FileSystem.getDefaultUri(conf);
FileSystem fs = null;
Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
if (clazz != null) {
// This will be true for Hadoop 1.0, or 0.20.
fs = (FileSystem) org.apache.hadoop.util.ReflectionUtils.newInstance(clazz, conf);
fs.initialize(uri, conf);
} else {
// For Hadoop 2.0, we have to go through FileSystem for the filesystem
// implementation to be loaded by the service loader in case it has not
// been loaded yet.
Configuration clone = new Configuration(conf);
clone.setBoolean("fs." + uri.getScheme() + ".impl.disable.cache", true);
fs = FileSystem.get(uri, clone);
}
if (fs == null) {
throw new IOException("No FileSystem for scheme: " + uri.getScheme());
}
return fs;
} | 3.68 |
hadoop_ListResultEntrySchema_withOwner | /**
* Set the owner value.
*
* @param owner the owner value to set
 * @return the ListResultEntrySchema object itself.
*/
public ListResultEntrySchema withOwner(final String owner) {
this.owner = owner;
return this;
} | 3.68 |
hudi_BaseHoodieTableServiceClient_scheduleLogCompactionAtInstant | /**
* Schedules a new log compaction instant with passed-in instant time.
*
* @param instantTime Log Compaction Instant Time
* @param extraMetadata Extra Metadata to be stored
*/
public boolean scheduleLogCompactionAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.LOG_COMPACT).isPresent();
} | 3.68 |
open-banking-gateway_FintechPsuAspspTuple_toDatasafePathWithoutParent | /**
* Converts current tuple to Datasafe storage path.
* @return Datasafe path corresponding to current tuple
*/
public String toDatasafePathWithoutParent() {
return this.psuId + "/" + this.aspspId;
} | 3.68 |
pulsar_BrokerMonitor_printData | // Decide whether this broker is running SimpleLoadManagerImpl or ModularLoadManagerImpl and then print the data
// accordingly.
private synchronized void printData(final String path) {
final String broker = brokerNameFromPath(path);
String jsonString;
try {
jsonString = new String(zkClient.getData(path, this, null));
} catch (Exception ex) {
throw new RuntimeException(ex);
}
// Use presence of the String "allocated" to determine if this is using SimpleLoadManagerImpl.
if (jsonString.contains("allocated")) {
printLoadReport(broker, gson.fromJson(jsonString, LoadReport.class));
} else {
final LocalBrokerData localBrokerData = gson.fromJson(jsonString, LocalBrokerData.class);
final String timeAveragePath = BROKER_TIME_AVERAGE_BASE_PATH + "/" + broker;
try {
final TimeAverageBrokerData timeAverageData = gson.fromJson(
new String(zkClient.getData(timeAveragePath, false, null)), TimeAverageBrokerData.class);
printBrokerData(broker, localBrokerData, timeAverageData);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | 3.68 |
zxing_ShareActivity_showContactAsBarcode | /**
* Takes a contact Uri and does the necessary database lookups to retrieve that person's info,
* then sends an Encode intent to render it as a QR Code.
*
* @param contactUri A Uri of the form content://contacts/people/17
*/
private void showContactAsBarcode(Uri contactUri) {
if (contactUri == null) {
return; // Show error?
}
ContentResolver resolver = getContentResolver();
String id;
String name;
boolean hasPhone;
try (Cursor cursor = resolver.query(contactUri, null, null, null, null)) {
if (cursor == null || !cursor.moveToFirst()) {
return;
}
id = cursor.getString(cursor.getColumnIndex(BaseColumns._ID));
name = cursor.getString(cursor.getColumnIndex(ContactsContract.Contacts.DISPLAY_NAME));
hasPhone = cursor.getInt(cursor.getColumnIndex(ContactsContract.Contacts.HAS_PHONE_NUMBER)) > 0;
}
// Don't require a name to be present, this contact might be just a phone number.
Bundle bundle = new Bundle();
if (name != null && !name.isEmpty()) {
bundle.putString(ContactsContract.Intents.Insert.NAME, massageContactData(name));
}
if (hasPhone) {
try (Cursor phonesCursor = resolver.query(ContactsContract.CommonDataKinds.Phone.CONTENT_URI,
null,
ContactsContract.CommonDataKinds.Phone.CONTACT_ID + '=' + id,
null,
null)) {
if (phonesCursor != null) {
int foundPhone = 0;
int phonesNumberColumn = phonesCursor.getColumnIndex(ContactsContract.CommonDataKinds.Phone.NUMBER);
int phoneTypeColumn = phonesCursor.getColumnIndex(ContactsContract.CommonDataKinds.Phone.TYPE);
while (phonesCursor.moveToNext() && foundPhone < Contents.PHONE_KEYS.length) {
String number = phonesCursor.getString(phonesNumberColumn);
if (number != null && !number.isEmpty()) {
bundle.putString(Contents.PHONE_KEYS[foundPhone], massageContactData(number));
}
int type = phonesCursor.getInt(phoneTypeColumn);
bundle.putInt(Contents.PHONE_TYPE_KEYS[foundPhone], type);
foundPhone++;
}
}
}
}
try (Cursor methodsCursor = resolver.query(ContactsContract.CommonDataKinds.StructuredPostal.CONTENT_URI,
null,
ContactsContract.CommonDataKinds.StructuredPostal.CONTACT_ID + '=' + id,
null,
null)) {
if (methodsCursor != null && methodsCursor.moveToNext()) {
String data = methodsCursor.getString(
methodsCursor.getColumnIndex(ContactsContract.CommonDataKinds.StructuredPostal.FORMATTED_ADDRESS));
if (data != null && !data.isEmpty()) {
bundle.putString(ContactsContract.Intents.Insert.POSTAL, massageContactData(data));
}
}
}
try (Cursor emailCursor = resolver.query(ContactsContract.CommonDataKinds.Email.CONTENT_URI,
null,
ContactsContract.CommonDataKinds.Email.CONTACT_ID + '=' + id,
null,
null)) {
if (emailCursor != null) {
int foundEmail = 0;
int emailColumn = emailCursor.getColumnIndex(ContactsContract.CommonDataKinds.Email.DATA);
while (emailCursor.moveToNext() && foundEmail < Contents.EMAIL_KEYS.length) {
String email = emailCursor.getString(emailColumn);
if (email != null && !email.isEmpty()) {
bundle.putString(Contents.EMAIL_KEYS[foundEmail], massageContactData(email));
}
foundEmail++;
}
}
}
Intent intent = buildEncodeIntent(Contents.Type.CONTACT);
intent.putExtra(Intents.Encode.DATA, bundle);
startActivity(intent);
} | 3.68 |
framework_VComboBox_reactOnInputWhenReady | /**
* Perform filtering with the user entered string and when the results
* are received, perform any action appropriate for the user input
* (select an item or create a new one).
*
* @param value
* user input
*/
public void reactOnInputWhenReady(String value) {
pendingUserInput = value;
showPopup = false;
filterOptions(0, value);
} | 3.68 |
flink_PackagedProgram_deleteExtractedLibraries | /** Deletes all temporary files created for contained packaged libraries. */
private void deleteExtractedLibraries() {
deleteExtractedLibraries(this.extractedTempLibraries);
this.extractedTempLibraries.clear();
} | 3.68 |
hadoop_LoggingAuditor_getDescription | /**
* Get the span description built in the constructor.
* @return description text.
*/
protected String getDescription() {
return description;
} | 3.68 |
hadoop_ByteBufferDecodingState_checkInputBuffers | /**
* Check and ensure the buffers are of the desired length and type, direct
* buffers or not.
* @param buffers the buffers to check
*/
void checkInputBuffers(ByteBuffer[] buffers) {
int validInputs = 0;
for (ByteBuffer buffer : buffers) {
if (buffer == null) {
continue;
}
if (buffer.remaining() != decodeLength) {
throw new HadoopIllegalArgumentException(
"Invalid buffer, not of length " + decodeLength);
}
if (buffer.isDirect() != usingDirectBuffer) {
throw new HadoopIllegalArgumentException(
"Invalid buffer, isDirect should be " + usingDirectBuffer);
}
validInputs++;
}
if (validInputs < decoder.getNumDataUnits()) {
throw new HadoopIllegalArgumentException(
"No enough valid inputs are provided, not recoverable");
}
} | 3.68 |
framework_VMenuBar_itemClick | /**
* When an item is clicked.
*
* @param item
*/
public void itemClick(CustomMenuItem item) {
boolean triggered = triggerEventIfNeeded(item);
if (item.getCommand() != null || triggered) {
try {
if (item.getCommand() != null) {
item.getCommand().execute();
}
} finally {
setSelected(null);
if (visibleChildMenu != null) {
visibleChildMenu.hideChildren();
}
hideParents(true);
menuVisible = false;
}
} else {
if (item.getSubMenu() != null
&& item.getSubMenu() != visibleChildMenu) {
setSelected(item);
showChildMenu(item);
menuVisible = true;
} else if (!subMenu) {
setSelected(null);
hideChildren();
menuVisible = false;
}
}
} | 3.68 |
flink_IOUtils_closeSocket | /**
* Closes the socket ignoring {@link IOException}.
*
* @param sock the socket to close
*/
public static void closeSocket(final Socket sock) {
// avoids try { close() } dance
if (sock != null) {
try {
sock.close();
} catch (IOException ignored) {
}
}
} | 3.68 |
graphhopper_Snap_getQueryDistance | /**
* @return the distance of the query to the snapped coordinates. In meter
*/
public double getQueryDistance() {
return queryDistance;
} | 3.68 |
hbase_RSAnnotationReadingPriorityFunction_getDeadline | /**
* Based on the request content, returns the deadline of the request.
 * @return Deadline of this request: 0 means now, otherwise the delay in msec
*/
@Override
public long getDeadline(RequestHeader header, Message param) {
if (param instanceof ScanRequest) {
ScanRequest request = (ScanRequest) param;
if (!request.hasScannerId()) {
return 0;
}
// get the 'virtual time' of the scanner, and applies sqrt() to get a
// nice curve for the delay. More a scanner is used the less priority it gets.
// The weight is used to have more control on the delay.
long vtime = rpcServices.getScannerVirtualTime(request.getScannerId());
return Math.round(Math.sqrt(vtime * scanVirtualTimeWeight));
}
return 0;
} | 3.68 |
hbase_Procedure_releaseLock | /**
* The user should override this method, and release lock if necessary.
*/
protected void releaseLock(TEnvironment env) {
// no-op
} | 3.68 |
hadoop_FederationNamespaceInfo_getBlockPoolId | /**
* The HDFS block pool id for this namespace.
*
* @return Block pool identifier.
*/
public String getBlockPoolId() {
return this.blockPoolId;
} | 3.68 |
framework_Page_getStyles | /**
 * Returns the stylesheet associated with this Page. The stylesheet
* contains additional styles injected at runtime into the HTML document.
*
* @since 7.1
*/
public Styles getStyles() {
if (styles == null) {
styles = new Styles(uI);
}
return styles;
} | 3.68 |
flink_IntegerResourceVersion_valueOf | /**
* Create a {@link IntegerResourceVersion} with given integer value.
*
* @param value resource version integer value. The value should not be negative.
* @return {@link IntegerResourceVersion} with given value.
*/
public static IntegerResourceVersion valueOf(int value) {
Preconditions.checkArgument(value >= 0);
return new IntegerResourceVersion(value);
} | 3.68 |
querydsl_DateTimeExpression_max | /**
* Get the maximum value of this expression (aggregation)
*
* @return max(this)
*/
@Override
public DateTimeExpression<T> max() {
if (max == null) {
max = Expressions.dateTimeOperation(getType(), Ops.AggOps.MAX_AGG, mixin);
}
return max;
} | 3.68 |
hadoop_AppReportFetcher_getApplicationReport | /**
* Get an application report for the specified application id from the RM and
* fall back to the Application History Server if not found in RM.
*
* @param applicationsManager what to use to get the RM reports.
* @param appId id of the application to get.
* @return the ApplicationReport for the appId.
* @throws YarnException on any error.
* @throws IOException connection exception.
*/
protected FetchedAppReport getApplicationReport(ApplicationClientProtocol applicationsManager,
ApplicationId appId) throws YarnException, IOException {
GetApplicationReportRequest request =
this.recordFactory.newRecordInstance(GetApplicationReportRequest.class);
request.setApplicationId(appId);
ApplicationReport appReport;
FetchedAppReport fetchedAppReport;
try {
appReport = applicationsManager.getApplicationReport(request).getApplicationReport();
fetchedAppReport = new FetchedAppReport(appReport, AppReportSource.RM);
} catch (ApplicationNotFoundException e) {
if (!isAHSEnabled) {
// Just throw it as usual if historyService is not enabled.
throw e;
}
//Fetch the application report from AHS
appReport = historyManager.getApplicationReport(request).getApplicationReport();
fetchedAppReport = new FetchedAppReport(appReport, AppReportSource.AHS);
}
return fetchedAppReport;
} | 3.68 |
hadoop_Quota_orByStorageType | /**
 * Invoke the predicate for each storage type and bitwise inclusive OR the results.
 *
 * @param predicate the function to test each storage type.
 * @return true if the bitwise OR over all storage types is true, false otherwise.
*/
public static boolean orByStorageType(Predicate<StorageType> predicate) {
boolean res = false;
for (StorageType type : StorageType.values()) {
res |= predicate.test(type);
}
return res;
} | 3.68 |
pulsar_SubscribeRateLimiter_addSubscribeLimiterIfAbsent | /**
 * Update subscribe-throttling-rate. Gives first priority to the
 * namespace-policy configured subscribe rate, else applies the
 * default broker subscribe-throttling-rate.
*/
private synchronized void addSubscribeLimiterIfAbsent(ConsumerIdentifier consumerIdentifier) {
if (subscribeRateLimiter.get(consumerIdentifier) != null || !isSubscribeRateEnabled(this.subscribeRate)) {
return;
}
updateSubscribeRate(consumerIdentifier, this.subscribeRate);
} | 3.68 |
AreaShop_AreaShop_info | /**
* Print an information message to the console.
* @param message The message to print
*/
public static void info(Object... message) {
AreaShop.getInstance().getLogger().info(StringUtils.join(message, " "));
} | 3.68 |
flink_DecimalData_scale | /** Returns the <i>scale</i> of this {@link DecimalData}. */
public int scale() {
return scale;
} | 3.68 |
flink_MemorySegment_putCharLittleEndian | /**
* Writes the given character (16 bit, 2 bytes) to the given position in little-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putChar(int, char)}. For most cases (such as transient storage in memory
* or serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putChar(int, char)}
* is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The char value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putCharLittleEndian(int index, char value) {
if (LITTLE_ENDIAN) {
putChar(index, value);
} else {
putChar(index, Character.reverseBytes(value));
}
} | 3.68 |
framework_BinderValidationStatus_getFieldValidationErrors | /**
* Gets the failed field level validation statuses.
* <p>
* The field level validators have been added with
* {@link BindingBuilder#withValidator(Validator)}.
*
* @return a list of failed field level validation statuses
*/
public List<BindingValidationStatus<?>> getFieldValidationErrors() {
return bindingStatuses.stream().filter(BindingValidationStatus::isError)
.collect(Collectors.toList());
} | 3.68 |
dubbo_LfuCache_put | /**
* API to store value against a key in the calling thread scope.
* @param key Unique identifier for the object being store.
* @param value Value getting store
*/
@SuppressWarnings("unchecked")
@Override
public void put(Object key, Object value) {
store.put(key, value);
} | 3.68 |
hadoop_HttpUserGroupInformation_get | /**
* Returns the remote {@link UserGroupInformation} in context for the current
* HTTP request, taking into account proxy user requests.
*
* @return the remote {@link UserGroupInformation}, <code>NULL</code> if none.
*/
public static UserGroupInformation get() {
return DelegationTokenAuthenticationFilter.
getHttpUserGroupInformationInContext();
} | 3.68 |
pulsar_ModularLoadManagerImpl_getBundleStats | // Use the Pulsar client to acquire the namespace bundle stats.
private Map<String, NamespaceBundleStats> getBundleStats() {
return pulsar.getBrokerService().getBundleStats();
} | 3.68 |
hadoop_RollingWindow_inc | /**
* Increment the bucket. It assumes that staleness check is already
* performed. We do not need to update the {@link #updateTime} because as
* long as the {@link #updateTime} belongs to the current view of the
* rolling window, the algorithm works fine.
* @param delta
*/
void inc(long delta) {
value.addAndGet(delta);
} | 3.68 |
hadoop_CommitUtilsWithMR_jobName | /**
* Get a job name; returns meaningful text if there is no name.
* @param context job context
* @return a string for logs
*/
public static String jobName(JobContext context) {
String name = context.getJobName();
return (name != null && !name.isEmpty()) ? name : "(anonymous)";
} | 3.68 |
dubbo_StringUtils_parseLong | /**
 * parse str to Long (if str is not a number or n < 0, then return 0)
*
* @param str a number str
* @return positive long or zero
*/
public static long parseLong(String str) {
return isNumber(str) ? Long.parseLong(str) : 0;
} | 3.68 |
flink_ZooKeeperCheckpointStoreUtil_nameToCheckpointID | /**
* Converts a path to the checkpoint id.
*
* @param path in ZooKeeper
* @return Checkpoint id parsed from the path
*/
@Override
public long nameToCheckpointID(String path) {
try {
String numberString;
// check if we have a leading slash
if ('/' == path.charAt(0)) {
numberString = path.substring(1);
} else {
numberString = path;
}
return Long.parseLong(numberString);
} catch (NumberFormatException e) {
LOG.warn(
"Could not parse checkpoint id from {}. This indicates that the "
+ "checkpoint id to path conversion has changed.",
path);
return INVALID_CHECKPOINT_ID;
}
} | 3.68 |
flink_OperationTreeBuilder_addAliasToTheCallInAggregate | /**
* Add a default name to the call in the grouping expressions, e.g., groupBy(a % 5) to groupBy(a
* % 5 as TMP_0) or make aggregate a named aggregate.
*/
private List<Expression> addAliasToTheCallInAggregate(
List<String> inputFieldNames, List<Expression> expressions) {
int attrNameCntr = 0;
Set<String> usedFieldNames = new HashSet<>(inputFieldNames);
List<Expression> result = new ArrayList<>();
for (Expression groupingExpression : expressions) {
if (groupingExpression instanceof UnresolvedCallExpression
&& !ApiExpressionUtils.isFunction(
groupingExpression, BuiltInFunctionDefinitions.AS)) {
String tempName = getUniqueName("TMP_" + attrNameCntr, usedFieldNames);
attrNameCntr += 1;
usedFieldNames.add(tempName);
result.add(
unresolvedCall(
BuiltInFunctionDefinitions.AS,
groupingExpression,
valueLiteral(tempName)));
} else {
result.add(groupingExpression);
}
}
return result;
} | 3.68 |
flink_ThreadSafeSimpleCounter_dec | /**
* Decrement the current count by the given value.
*
* @param n value to decrement the current count by
*/
@Override
public void dec(long n) {
longAdder.add(-n);
} | 3.68 |
flink_TaskIOMetricGroup_reuseRecordsInputCounter | // ============================================================================================
// Metric Reuse
// ============================================================================================
public void reuseRecordsInputCounter(Counter numRecordsInCounter) {
this.numRecordsIn.addCounter(numRecordsInCounter);
} | 3.68 |
hadoop_ResourceRequest_allocationRequestId | /**
* Set the <code>allocationRequestId</code> of the request.
* @see ResourceRequest#setAllocationRequestId(long)
* @param allocationRequestId
* <code>allocationRequestId</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Evolving
public ResourceRequestBuilder allocationRequestId(
long allocationRequestId) {
resourceRequest.setAllocationRequestId(allocationRequestId);
return this;
} | 3.68 |
flink_BytesMap_calcSecondHashCode | // M(the num of buckets) is the nth power of 2, so the second hash code must be odd, and always
// is
// H2(K) = 1 + 2 * ((H1(K)/M) mod (M-1))
protected int calcSecondHashCode(final int firstHashCode) {
return ((((firstHashCode >> log2NumBuckets)) & numBucketsMask2) << 1) + 1;
} | 3.68 |
framework_CurrentInstance_restoreInstances | /**
* Restores the given instances to the given values. Note that this should
* only be used internally to restore Vaadin classes.
*
* @since 7.1
*
* @param old
* A Class -> CurrentInstance map to set as current instances
*/
public static void restoreInstances(Map<Class<?>, CurrentInstance> old) {
boolean removeStale = false;
for (Class c : old.keySet()) {
CurrentInstance ci = old.get(c);
Object v = ci.instance.get();
if (v == null) {
removeStale = true;
} else if (v == NULL_OBJECT) {
/*
* NULL_OBJECT is used to identify objects that are null when
* #setCurrent(UI) or #setCurrent(VaadinSession) are called on a
* CurrentInstance. Without this a reference to an already
* collected instance may be left in the CurrentInstance when it
* really should be restored to null.
*
* One example case that this fixes:
* VaadinService.runPendingAccessTasks() clears all current
* instances and then sets everything but the UI. This makes
* UI.accessSynchronously() save these values before calling
* setCurrent(UI), which stores UI=null in the map it returns.
* This map will be restored after UI.accessSync(), which,
* unless it respects null values, will just leave the wrong UI
* instance registered.
*/
v = null;
}
set(c, v);
}
if (removeStale) {
removeStaleInstances(old);
}
} | 3.68 |
flink_ConfigurationUtils_getPrefixedKeyValuePairs | /**
* Extract and parse Flink configuration properties with a given name prefix and return the
* result as a Map.
*/
public static Map<String, String> getPrefixedKeyValuePairs(
String prefix, Configuration configuration) {
Map<String, String> result = new HashMap<>();
for (Map.Entry<String, String> entry : configuration.toMap().entrySet()) {
if (entry.getKey().startsWith(prefix) && entry.getKey().length() > prefix.length()) {
String key = entry.getKey().substring(prefix.length());
result.put(key, entry.getValue());
}
}
return result;
} | 3.68 |
framework_DropTargetExtension_setDropCriterion | /**
* Set a drop criterion to allow drop on this drop target. When data is
* dragged on top of the drop target, the given value is compared to the
* drag source's payload with the same key. The drag passes this criterion
* if the value of the payload compared to the given value using the given
* operator holds.
* <p>
* Note that calling this method will overwrite the previously set criteria.
* To set multiple criteria, call the
* {@link #setDropCriteria(Criterion.Match, Criterion...)} method.
* <p>
* To handle more complex criteria, define a custom script with
* {@link #setDropCriteriaScript(String)}. Drop will be allowed if both this
* criterion and the criteria script are passed.
*
* @param key
* key of the payload to be compared
* @param operator
* comparison operator to be used
* @param value
* value to be compared to the payload's value
* @see DragSourceExtension#setPayload(String, double)
*/
public void setDropCriterion(String key, ComparisonOperator operator,
double value) {
setDropCriteria(Criterion.Match.ANY,
new Criterion(key, operator, value));
} | 3.68 |
MagicPlugin_MaterialSets_wildcard | /**
* @return A material set that matches all materials.
*/
public static MaterialSet wildcard() {
return WildcardMaterialSet.INSTANCE;
} | 3.68 |
hbase_RequestConverter_buildSlowLogResponseRequest | /**
* Build RPC request payload for getLogEntries
* @param filterParams map of filter params
* @param limit limit for no of records that server returns
* @param logType type of the log records
* @return request payload {@link HBaseProtos.LogRequest}
*/
public static HBaseProtos.LogRequest buildSlowLogResponseRequest(
final Map<String, Object> filterParams, final int limit, final String logType) {
SlowLogResponseRequest.Builder builder = SlowLogResponseRequest.newBuilder();
builder.setLimit(limit);
if (logType.equals("SLOW_LOG")) {
builder.setLogType(SlowLogResponseRequest.LogType.SLOW_LOG);
} else if (logType.equals("LARGE_LOG")) {
builder.setLogType(SlowLogResponseRequest.LogType.LARGE_LOG);
}
boolean filterByAnd = false;
if (MapUtils.isNotEmpty(filterParams)) {
if (filterParams.containsKey("clientAddress")) {
final String clientAddress = (String) filterParams.get("clientAddress");
if (StringUtils.isNotEmpty(clientAddress)) {
builder.setClientAddress(clientAddress);
}
}
if (filterParams.containsKey("regionName")) {
final String regionName = (String) filterParams.get("regionName");
if (StringUtils.isNotEmpty(regionName)) {
builder.setRegionName(regionName);
}
}
if (filterParams.containsKey("tableName")) {
final String tableName = (String) filterParams.get("tableName");
if (StringUtils.isNotEmpty(tableName)) {
builder.setTableName(tableName);
}
}
if (filterParams.containsKey("userName")) {
final String userName = (String) filterParams.get("userName");
if (StringUtils.isNotEmpty(userName)) {
builder.setUserName(userName);
}
}
if (filterParams.containsKey("filterByOperator")) {
final String filterByOperator = (String) filterParams.get("filterByOperator");
if (StringUtils.isNotEmpty(filterByOperator)) {
if (filterByOperator.toUpperCase().equals("AND")) {
filterByAnd = true;
}
}
}
}
if (filterByAnd) {
builder.setFilterByOperator(SlowLogResponseRequest.FilterByOperator.AND);
} else {
builder.setFilterByOperator(SlowLogResponseRequest.FilterByOperator.OR);
}
SlowLogResponseRequest slowLogResponseRequest = builder.build();
return HBaseProtos.LogRequest.newBuilder()
.setLogClassName(slowLogResponseRequest.getClass().getName())
.setLogMessage(slowLogResponseRequest.toByteString()).build();
} | 3.68 |
open-banking-gateway_ConsentAccess_getFirstByCurrentSession | /**
 * Returns the available consent for the current session execution, throwing an exception if none is found.
*/
default ProtocolFacingConsent getFirstByCurrentSession() {
List<ProtocolFacingConsent> consents = findByCurrentServiceSessionOrderByModifiedDesc();
if (consents.isEmpty()) {
throw new IllegalStateException("Context not found");
}
return consents.get(0);
} | 3.68 |
flink_RpcEndpoint_unregisterResource | /**
* Unregister the given closeable resource from {@link CloseableRegistry}.
*
* @param closeableResource the given closeable resource
* @return true if the given resource unregister successful, otherwise false
*/
protected boolean unregisterResource(Closeable closeableResource) {
return resourceRegistry.unregisterCloseable(closeableResource);
} | 3.68 |
morf_NamedParameterPreparedStatement_parseSql | /**
* Parses the SQL string containing named parameters in such a form that
* can be cached, so that prepared statements using the parsed result
* can be created rapidly.
*
* @param sql the SQL
* @param sqlDialect Dialect of the SQL.
* @return the parsed result
*/
public static ParseResult parseSql(String sql, SqlDialect sqlDialect) {
return new ParseResult(sql, sqlDialect);
} | 3.68 |
framework_DropTargetExtension_setDropEffect | /**
* Sets the drop effect for the current drop target. This is set to the
* dropEffect on {@code dragenter} and {@code dragover} events.
* <p>
 * <em>NOTE: If the drop effect doesn't match the dropEffect /
 * effectAllowed of the drag source, it DOES NOT prevent drop on IE and
 * Safari! For FireFox and Chrome the drop is prevented if they don't
 * match.</em>
* <p>
* Default value is browser dependent and can depend on e.g. modifier keys.
* <p>
* From Moz Foundation: "You can modify the dropEffect property during the
* dragenter or dragover events, if for example, a particular drop target
* only supports certain operations. You can modify the dropEffect property
* to override the user effect, and enforce a specific drop operation to
* occur. Note that this effect must be one listed within the effectAllowed
* property. Otherwise, it will be set to an alternate value that is
* allowed."
*
* @param dropEffect
* the drop effect to be set or {@code null} to not modify
*/
public void setDropEffect(DropEffect dropEffect) {
if (!Objects.equals(getState(false).dropEffect, dropEffect)) {
getState().dropEffect = dropEffect;
}
} | 3.68 |
flink_FromJarEntryClassInformationProvider_getJarFile | /**
* Returns the specified {@code jarFile}.
*
* @return The specified {@code jarFile}.
* @see #getJobClassName()
*/
@Override
public Optional<File> getJarFile() {
return Optional.of(jarFile);
} | 3.68 |
hadoop_PersistentLongFile_writeFile | /**
* Atomically write the given value to the given file, including fsyncing.
*
* @param file destination file
* @param val value to write
* @throws IOException if the file cannot be written
*/
public static void writeFile(File file, long val) throws IOException {
AtomicFileOutputStream fos = new AtomicFileOutputStream(file);
try {
fos.write(String.valueOf(val).getBytes(StandardCharsets.UTF_8));
fos.write('\n');
fos.close();
fos = null;
} finally {
if (fos != null) {
fos.abort();
}
}
} | 3.68 |
morf_SelectStatementBuilder_allowParallelDml | /**
* Request that this query can contribute towards a parallel DML execution plan.
* If a select statement is used within a DML statement, some dialects require DML parallelisation to be enabled via the select statement.
* If the database implementation does not support, or is configured to disable parallel query execution, then this request will have no effect.
*
* <p>For queries that are likely to change a lot of data, a parallel execution plan may result in the results being written faster, although the exact effect depends on
 * the underlying database and the nature of the data.</p>
*
* <p>Note that the use cases of this are rare. Caution is needed because if multiple requests are made by the application to run parallel queries, the resulting resource contention may result in worse performance - this is not intended for queries that are submitted in parallel by the application.</p>
*
* @return this, for method chaining.
* @see #withParallelQueryPlan()
*/
public SelectStatementBuilder allowParallelDml() {
this.hints.add(AllowParallelDmlHint.INSTANCE);
return this;
} | 3.68 |
hudi_UpsertPartitioner_filterSmallFilesInClustering | /**
* Exclude small file handling for clustering since update path is not supported.
* @param pendingClusteringFileGroupsId pending clustering file groups id of partition
* @param smallFiles small files of partition
* @return smallFiles not in clustering
*/
private List<SmallFile> filterSmallFilesInClustering(final Set<String> pendingClusteringFileGroupsId, final List<SmallFile> smallFiles) {
if (!pendingClusteringFileGroupsId.isEmpty()) {
return smallFiles.stream()
.filter(smallFile -> !pendingClusteringFileGroupsId.contains(smallFile.location.getFileId())).collect(Collectors.toList());
} else {
return smallFiles;
}
} | 3.68 |
flink_KerberosLoginProvider_doLoginAndReturnUGI | /**
* Does kerberos login and doesn't set current user, just returns a new UGI instance. Must be
* called when isLoginPossible returns true.
*/
public UserGroupInformation doLoginAndReturnUGI() throws IOException {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
if (principal != null) {
LOG.info(
"Attempting to login to KDC using principal: {} keytab: {}", principal, keytab);
UserGroupInformation ugi =
UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
LOG.info("Successfully logged into KDC");
return ugi;
} else if (!HadoopUserUtils.isProxyUser(currentUser)) {
LOG.info("Attempting to load user's ticket cache");
final String ccache = System.getenv("KRB5CCNAME");
final String user =
Optional.ofNullable(System.getenv("KRB5PRINCIPAL"))
.orElse(currentUser.getUserName());
UserGroupInformation ugi = UserGroupInformation.getUGIFromTicketCache(ccache, user);
LOG.info("Loaded user's ticket cache successfully");
return ugi;
} else {
throwProxyUserNotSupported();
return currentUser;
}
} | 3.68 |
framework_ContainerHierarchicalWrapper_removeItemRecursively | /**
* Removes the Item identified by given itemId and all its children.
*
* @see #removeItem(Object)
* @param itemId
* the identifier of the Item to be removed
* @return true if the operation succeeded
*/
public boolean removeItemRecursively(Object itemId) {
return HierarchicalContainer.removeItemRecursively(this, itemId);
} | 3.68 |
hbase_ServerRegionReplicaUtil_shouldReplayRecoveredEdits | /**
* Returns whether to replay the recovered edits to flush the results. Currently secondary region
* replicas do not replay the edits, since it would cause flushes which might affect the primary
* region. Primary regions even opened in read only mode should replay the edits.
* @param region the HRegion object
* @return whether recovered edits should be replayed.
*/
public static boolean shouldReplayRecoveredEdits(HRegion region) {
return isDefaultReplica(region.getRegionInfo());
} | 3.68 |
morf_CaseInsensitiveString_toString | /**
* Returns the internal representation of the string value. Note that the
* case of this is unpredictable. Do not use to compare to other strings;
* instead use {@link #equalsString(String)}.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return string;
} | 3.68 |
hbase_ReplicationSourceManager_cleanOldLogs | /**
* Cleans a log file and all older logs from replication queue. Called when we are sure that a log
* file is closed and has no more entries.
* @param log Path to the log
* @param inclusive whether we should also remove the given log file
* @param source the replication source
*/
void cleanOldLogs(String log, boolean inclusive, ReplicationSourceInterface source) {
String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log);
if (source.isRecovered()) {
NavigableSet<Path> wals = walsByIdRecoveredQueues.get(source.getQueueId()).get(logPrefix);
if (wals != null) {
// here we just want to compare the timestamp, so it is OK to just create a fake WAL path
NavigableSet<String> walsToRemove = wals.headSet(new Path(oldLogDir, log), inclusive)
.stream().map(Path::getName).collect(Collectors.toCollection(TreeSet::new));
if (walsToRemove.isEmpty()) {
return;
}
cleanOldLogs(walsToRemove, source);
walsToRemove.clear();
}
} else {
NavigableSet<String> wals;
NavigableSet<String> walsToRemove;
// synchronized on walsById to avoid race with postLogRoll
synchronized (this.walsById) {
wals = walsById.get(source.getQueueId()).get(logPrefix);
if (wals == null) {
return;
}
walsToRemove = wals.headSet(log, inclusive);
if (walsToRemove.isEmpty()) {
return;
}
walsToRemove = new TreeSet<>(walsToRemove);
}
// cleanOldLogs may spend some time, especially for sync replication where we may want to
// remove remote wals as the remote cluster may have already been down, so we do it outside
// the lock to avoid block preLogRoll
cleanOldLogs(walsToRemove, source);
// now let's remove the files in the set
synchronized (this.walsById) {
wals.removeAll(walsToRemove);
}
}
} | 3.68 |
hadoop_StageConfig_getTaskId | /**
* ID of the task.
*/
public String getTaskId() {
return taskId;
} | 3.68 |
incubator-hugegraph-toolchain_StringEncoding_writeAsciiString | // Similar to {@link StringSerializer}
public static int writeAsciiString(byte[] array, int offset, String value) {
E.checkArgument(CharMatcher.ascii().matchesAllOf(value),
"'%s' must be ASCII string", value);
int len = value.length();
if (len == 0) {
array[offset++] = (byte) 0x80;
return offset;
}
int i = 0;
do {
int c = value.charAt(i);
assert c <= 127;
byte b = (byte) c;
if (++i == len) {
b |= 0x80; // End marker
}
array[offset++] = b;
} while (i < len);
return offset;
} | 3.68 |
hbase_PrivateCellUtil_estimatedSerializedSizeOfKey | /**
* Calculates the serialized key size. We always serialize in the KeyValue's serialization format.
* @param cell the cell for which the key size has to be calculated.
* @return the key size
*/
public static int estimatedSerializedSizeOfKey(final Cell cell) {
if (cell instanceof KeyValue) return ((KeyValue) cell).getKeyLength();
return cell.getRowLength() + cell.getFamilyLength() + cell.getQualifierLength()
+ KeyValue.KEY_INFRASTRUCTURE_SIZE;
} | 3.68 |
flink_MemorySegment_putLongLittleEndian | /**
* Writes the given long value (64bit, 8 bytes) to the given position in little endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putLong(int, long)}. For most cases (such as transient storage in memory
* or serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putLong(int, long)}
* is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putLongLittleEndian(int index, long value) {
if (LITTLE_ENDIAN) {
putLong(index, value);
} else {
putLong(index, Long.reverseBytes(value));
}
} | 3.68 |
framework_VLoadingIndicator_getConnection | /**
* Returns the {@link ApplicationConnection} which uses this loading
* indicator.
*
* @return The ApplicationConnection for this loading indicator
*/
public ApplicationConnection getConnection() {
return connection;
} | 3.68 |
hbase_MemStoreCompactor_doCompaction | /**
* ---------------------------------------------------------------------- The worker thread
* performs the compaction asynchronously. The solo (per compactor) thread only reads the
* compaction pipeline. There is at most one thread per memstore instance.
*/
private void doCompaction() {
ImmutableSegment result = null;
boolean resultSwapped = false;
MemStoreCompactionStrategy.Action nextStep = strategy.getAction(versionedList);
boolean merge = (nextStep == MemStoreCompactionStrategy.Action.MERGE
|| nextStep == MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS);
try {
if (isInterrupted.get()) { // if the entire process is interrupted cancel flattening
return; // the compaction also doesn't start when interrupted
}
if (nextStep == MemStoreCompactionStrategy.Action.NOOP) {
return;
}
if (
nextStep == MemStoreCompactionStrategy.Action.FLATTEN
|| nextStep == MemStoreCompactionStrategy.Action.FLATTEN_COUNT_UNIQUE_KEYS
) {
// some Segment in the pipeline is with SkipList index, make it flat
compactingMemStore.flattenOneSegment(versionedList.getVersion(), nextStep);
return;
}
// Create one segment representing all segments in the compaction pipeline,
// either by compaction or by merge
if (!isInterrupted.get()) {
result = createSubstitution(nextStep);
}
// Substitute the pipeline with one segment
if (!isInterrupted.get()) {
resultSwapped = compactingMemStore.swapCompactedSegments(versionedList, result, merge);
if (resultSwapped) {
// update compaction strategy
strategy.updateStats(result);
// update the wal so it can be truncated and not get too long
compactingMemStore.updateLowestUnflushedSequenceIdInWAL(true); // only if greater
}
}
} catch (IOException e) {
LOG.trace("Interrupting in-memory compaction for store={}",
compactingMemStore.getFamilyName());
Thread.currentThread().interrupt();
} finally {
// For the MERGE case, if the result was created, but swap didn't happen,
// we DON'T need to close the result segment (meaning its MSLAB)!
// Because closing the result segment means closing the chunks of all segments
// in the compaction pipeline, which still have ongoing scans.
if (!merge && (result != null) && !resultSwapped) {
result.close();
}
releaseResources();
compactingMemStore.setInMemoryCompactionCompleted();
}
} | 3.68 |
hbase_LruAdaptiveBlockCache_evictBlock | /**
 * Evict the block; it will be cached by the victim handler if one exists and the block may be
 * read again later.
* @param evictedByEvictionProcess true if the given block is evicted by EvictionThread
* @return the heap size of evicted block
*/
protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) {
LruCachedBlock previous = map.remove(block.getCacheKey());
if (previous == null) {
return 0;
}
updateSizeMetrics(block, true);
long val = elements.decrementAndGet();
if (LOG.isTraceEnabled()) {
long size = map.size();
assertCounterSanity(size, val);
}
if (block.getBuffer().getBlockType().isData()) {
dataBlockElements.decrement();
}
if (evictedByEvictionProcess) {
// When the eviction of the block happened because of invalidation of HFiles, no need to
// update the stats counter.
stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary());
if (victimHandler != null) {
victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer());
}
}
// Decrease the block's reference count, and if refCount is 0, then it'll auto-deallocate. DO
// NOT move this up because if do that then the victimHandler may access the buffer with
// refCnt = 0 which is disallowed.
previous.getBuffer().release();
return block.heapSize();
} | 3.68 |
hbase_MasterObserver_postGetClusterMetrics | /**
* Called after get cluster status.
*/
default void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
ClusterMetrics status) throws IOException {
} | 3.68 |
framework_VAbstractPopupCalendar_getDescriptionForAssistiveDevices | /**
* Get the description that explains the usage of the Widget for users of
* assistive devices.
*
* @return String with the description
*/
public String getDescriptionForAssistiveDevices() {
return descriptionForAssistiveDevicesElement.getInnerText();
} | 3.68 |
dubbo_ClassSourceScanner_spiClassesWithAdaptive | /**
* Filter out the spi classes with adaptive annotations
* from all the class collections that can be loaded.
* @return All spi classes with adaptive annotations
*/
public List<Class<?>> spiClassesWithAdaptive() {
Map<String, Class<?>> allClasses = getClasses();
List<Class<?>> spiClasses = new ArrayList<>(allClasses.values())
.stream()
.filter(it -> {
if (null == it) {
return false;
}
Annotation anno = it.getAnnotation(SPI.class);
if (null == anno) {
return false;
}
Optional<Method> optional = Arrays.stream(it.getMethods())
.filter(it2 -> it2.getAnnotation(Adaptive.class) != null)
.findAny();
return optional.isPresent();
})
.collect(Collectors.toList());
return spiClasses;
} | 3.68 |
framework_PerformanceTestSubTreeCaching_populateContainer | /**
 * Adds n Table components to the given container.
 *
 * @param container the layout to add tables to
 * @param n the number of tables to add
*/
private void populateContainer(VerticalLayout container, int n) {
for (int i = 0; i < n; i++) {
// array_type array_element = [i];
final Table t = TestForTablesInitialColumnWidthLogicRendering
.getTestTable(5, 100);
container.addComponent(t);
}
} | 3.68 |
hadoop_BoundedResourcePool_close | /**
 * Derived classes may implement a way to clean up each item.
*/
@Override
protected synchronized void close(T item) {
// Do nothing in this class. Allow overriding classes to take any cleanup action.
} | 3.68 |
querydsl_GenericExporter_setSupertypeAnnotation | /**
* Set the supertype annotation
*
* @param supertypeAnnotation supertype annotation
*/
public void setSupertypeAnnotation(
Class<? extends Annotation> supertypeAnnotation) {
this.supertypeAnnotation = supertypeAnnotation;
} | 3.68 |
framework_VUpload_isImmediateMode | /**
* Returns whether this component is in immediate mode or not.
*
* @return {@code true} for immediate mode, {@code false} for not
*/
public boolean isImmediateMode() {
return immediateMode;
} | 3.68 |
hbase_Scan_getStopRow | /** Returns the stoprow */
public byte[] getStopRow() {
return this.stopRow;
} | 3.68 |
morf_NamedParameterPreparedStatement_close | /**
* @see PreparedStatement#close()
* @exception SQLException if a database access error occurs
*/
@Override
public void close() throws SQLException {
statement.close();
} | 3.68 |
hadoop_AbstractS3ACommitter_jobCompleted | /**
* Job completion outcome; this may be subclassed in tests.
* @param success did the job succeed.
*/
protected void jobCompleted(boolean success) {
getCommitOperations().jobCompleted(success);
} | 3.68 |
hbase_HFileBlockIndex_getNonRootSize | /** Returns the size of this chunk if stored in the non-root index block format */
@Override
public int getNonRootSize() {
return Bytes.SIZEOF_INT // Number of entries
+ Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index
+ curTotalNonRootEntrySize; // All entries
} | 3.68 |
flink_DataSet_printOnTaskManager | /**
* Writes a DataSet to the standard output streams (stdout) of the TaskManagers that execute the
* program (or more specifically, the data sink operators). On a typical cluster setup, the data
* will appear in the TaskManagers' <i>.out</i> files.
*
* <p>To print the data to the console or stdout stream of the client process instead, use the
* {@link #print()} method.
*
* <p>For each element of the DataSet the result of {@link Object#toString()} is written.
*
* @param prefix The string to prefix each line of the output with. This helps identifying
* outputs from different printing sinks.
* @return The DataSink operator that writes the DataSet.
* @see #print()
*/
public DataSink<T> printOnTaskManager(String prefix) {
return output(new PrintingOutputFormat<T>(prefix, false));
} | 3.68 |
hudi_InternalSchemaBuilder_refreshNewId | /**
* Assigns new ids for all fields in a Type, based on initial id.
*
* @param type a type.
* @param nextId initial id which used to fresh ids for all fields in a type
* @return a new type with new ids
*/
public Type refreshNewId(Type type, AtomicInteger nextId) {
switch (type.typeId()) {
case RECORD:
Types.RecordType record = (Types.RecordType) type;
List<Types.Field> oldFields = record.fields();
int currentId = nextId.get();
nextId.set(currentId + record.fields().size());
List<Types.Field> internalFields = new ArrayList<>();
for (int i = 0; i < oldFields.size(); i++) {
Types.Field oldField = oldFields.get(i);
Type fieldType = refreshNewId(oldField.type(), nextId);
internalFields.add(Types.Field.get(currentId++, oldField.isOptional(), oldField.name(), fieldType, oldField.doc()));
}
return Types.RecordType.get(internalFields);
case ARRAY:
Types.ArrayType array = (Types.ArrayType) type;
int elementId = nextId.get();
nextId.set(elementId + 1);
Type elementType = refreshNewId(array.elementType(), nextId);
return Types.ArrayType.get(elementId, array.isElementOptional(), elementType);
case MAP:
Types.MapType map = (Types.MapType) type;
int keyId = nextId.get();
int valueId = keyId + 1;
nextId.set(keyId + 2);
Type keyType = refreshNewId(map.keyType(), nextId);
Type valueType = refreshNewId(map.valueType(), nextId);
return Types.MapType.get(keyId, valueId, keyType, valueType, map.isValueOptional());
default:
return type;
}
} | 3.68 |
streampipes_PropertyRequirementsBuilder_create | /**
* Creates new requirements for a data processor or a data sink at a property level. A matching event property
* needs to provide all requirements assigned by this class.
*
* @return {@link PropertyRequirementsBuilder}
*/
public static PropertyRequirementsBuilder create(Datatypes propertyDatatype) {
return new PropertyRequirementsBuilder(propertyDatatype);
} | 3.68 |
framework_LayoutManager_getBorderHeight | /**
* Gets the border height (top border + bottom border) of the given element,
* provided that it has been measured. These elements are guaranteed to be
* measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured border height (top border + bottom border) of the
* element in pixels.
*/
public final int getBorderHeight(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getBorderHeight();
} | 3.68 |
hbase_MetricsTableRequests_updateDelete | /**
* Update the Delete time histogram
* @param t time it took
*/
public void updateDelete(long t) {
if (isEnableTableLatenciesMetrics()) {
deleteTimeHistogram.update(t);
}
} | 3.68 |
flink_TaskSlot_closeAsync | /**
* Close the task slot asynchronously.
*
* <p>Slot is moved to {@link TaskSlotState#RELEASING} state and only once. If there are active
* tasks running in the slot then they are failed. The future of all tasks terminated and slot
* cleaned up is initiated only once and always returned in case of multiple attempts to close
* the slot.
*
* @param cause cause of closing
* @return future of all running task if any being done and slot cleaned up.
*/
CompletableFuture<Void> closeAsync(Throwable cause) {
if (!isReleasing()) {
state = TaskSlotState.RELEASING;
if (!isEmpty()) {
// we couldn't free the task slot because it still contains task, fail the tasks
// and set the slot state to releasing so that it gets eventually freed
tasks.values().forEach(task -> task.failExternally(cause));
}
final CompletableFuture<Void> shutdownFuture =
FutureUtils.waitForAll(
tasks.values().stream()
.map(TaskSlotPayload::getTerminationFuture)
.collect(Collectors.toList()))
.thenRun(memoryManager::shutdown);
verifyAllManagedMemoryIsReleasedAfter(shutdownFuture);
FutureUtils.forward(shutdownFuture, closingFuture);
}
return closingFuture;
} | 3.68 |
flink_MessageSerializer_deserializeResponse | /**
* De-serializes the response sent to the {@link
* org.apache.flink.queryablestate.network.Client}.
*
* <pre>
* <b>The buffer is expected to be at the response position.</b>
* </pre>
*
* @param buf The {@link ByteBuf} containing the serialized response.
* @return The response.
*/
public RESP deserializeResponse(final ByteBuf buf) {
Preconditions.checkNotNull(buf);
return responseDeserializer.deserializeMessage(buf);
} | 3.68 |
morf_MergeStatementBuilder_into | /**
* Merges into a specific table.
*
* <blockquote><pre>
* merge().into(new TableReference("agreement"));</pre></blockquote>
*
* @param intoTable the table to merge into.
* @return this, for method chaining.
*/
public MergeStatementBuilder into(TableReference intoTable) {
this.table = intoTable;
return this;
} | 3.68 |
morf_Oracle_getErrorCodeFromOracleXAException | /**
* Recursively try and extract the error code from any nested OracleXAException
*/
private Optional<Integer> getErrorCodeFromOracleXAException(Throwable exception) {
try {
if ("oracle.jdbc.xa.OracleXAException".equals(exception.getClass().getName())) {
return Optional.of((Integer) exception.getClass().getMethod("getOracleError").invoke(exception));
} else if (exception.getCause() != null) {
return getErrorCodeFromOracleXAException(exception.getCause());
}
return Optional.empty();
} catch (Exception e) {
log.error("Exception when trying to extract error code", exception);
throw new RuntimeException(e);
}
} | 3.68 |
hbase_SnapshotReferenceUtil_visitTableStoreFiles | /**
 * Iterate over the snapshot store files
* @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @param desc the {@link SnapshotDescription} of the snapshot to verify
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
static void visitTableStoreFiles(final Configuration conf, final FileSystem fs,
final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor)
throws IOException {
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
if (regionManifests == null || regionManifests.isEmpty()) {
LOG.debug("No manifest files present: " + snapshotDir);
return;
}
for (SnapshotRegionManifest regionManifest : regionManifests) {
visitRegionStoreFiles(regionManifest, visitor);
}
} | 3.68 |
hbase_TimeoutExceptionInjector_start | /**
* Start a timer to fail a process if it takes longer than the expected time to complete.
* <p>
* Non-blocking.
* @throws IllegalStateException if the timer has already been marked done via {@link #complete()}
* or {@link #trigger()}
*/
public synchronized void start() throws IllegalStateException {
if (this.start >= 0) {
LOG.warn("Timer already started, can't be started again. Ignoring second request.");
return;
}
LOG.debug("Scheduling process timer to run in: " + maxTime + " ms");
timer.schedule(timerTask, maxTime);
this.start = EnvironmentEdgeManager.currentTime();
} | 3.68 |
hadoop_WordList_contains | /**
* Returns 'true' if the list contains the specified word.
*/
public boolean contains(String word) {
return list.containsKey(word);
} | 3.68 |
flink_ZooKeeperUtils_isZooKeeperRecoveryMode | /** Returns whether {@link HighAvailabilityMode#ZOOKEEPER} is configured. */
public static boolean isZooKeeperRecoveryMode(Configuration flinkConf) {
return HighAvailabilityMode.fromConfig(flinkConf).equals(HighAvailabilityMode.ZOOKEEPER);
} | 3.68 |
hadoop_BaseRecord_initDefaultTimes | /**
* Initialize default times. The driver may update these timestamps on insert
* and/or update. This should only be called when initializing an object that
* is not backed by a data store.
*/
private void initDefaultTimes() {
long now = Time.now();
this.setDateCreated(now);
this.setDateModified(now);
} | 3.68 |
hmily_CreateSQLUtil_getKeyValueClause | /**
* Get key value SQL clause.
*
* @param keySet key set
* @param separator separator
* @return key value SQL clause
*/
public static String getKeyValueClause(final Set<String> keySet, final String separator) {
return Joiner.on(separator).withKeyValueSeparator("=").join(Maps.asMap(keySet, input -> "?"));
} | 3.68 |
hbase_CachedEntryQueue_poll | /** Returns The next element in this queue, or {@code null} if the queue is empty. */
public Map.Entry<BlockCacheKey, BucketEntry> poll() {
return queue.poll();
} | 3.68 |
hadoop_DeviceMappingManager_defaultScheduleAction | // Default scheduling logic
private void defaultScheduleAction(Set<Device> allowed,
Map<Device, ContainerId> used, Set<Device> assigned,
ContainerId containerId, int count) {
LOG.debug("Using default scheduler. Allowed:" + allowed
+ ",Used:" + used + ", containerId:" + containerId);
for (Device device : allowed) {
if (!used.containsKey(device)) {
used.put(device, containerId);
assigned.add(device);
if (assigned.size() == count) {
return;
}
}
} // end for
} | 3.68 |
flink_TimestampData_fromTimestamp | /**
* Creates an instance of {@link TimestampData} from an instance of {@link Timestamp}.
*
* @param timestamp an instance of {@link Timestamp}
*/
public static TimestampData fromTimestamp(Timestamp timestamp) {
return fromLocalDateTime(timestamp.toLocalDateTime());
} | 3.68 |
flink_MiniCluster_executeJobBlocking | /**
* This method runs a job in blocking mode. The method returns only after the job completed
* successfully, or after it failed terminally.
*
* @param job The Flink job to execute
* @return The result of the job execution
* @throws JobExecutionException Thrown if anything went amiss during initial job launch, or if
* the job terminally failed.
*/
public JobExecutionResult executeJobBlocking(JobGraph job)
throws JobExecutionException, InterruptedException {
checkNotNull(job, "job is null");
final CompletableFuture<JobSubmissionResult> submissionFuture = submitJob(job);
final CompletableFuture<JobResult> jobResultFuture =
submissionFuture.thenCompose(
(JobSubmissionResult ignored) -> requestJobResult(job.getJobID()));
final JobResult jobResult;
try {
jobResult = jobResultFuture.get();
} catch (ExecutionException e) {
throw new JobExecutionException(
job.getJobID(),
"Could not retrieve JobResult.",
ExceptionUtils.stripExecutionException(e));
}
try {
return jobResult.toJobExecutionResult(Thread.currentThread().getContextClassLoader());
} catch (IOException | ClassNotFoundException e) {
throw new JobExecutionException(job.getJobID(), e);
}
} | 3.68 |