name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68) |
---|---|---|
hadoop_StateStoreSerializableImpl_getOriginalPrimaryKey | /**
* Get the original primary key for the given state store record key. The returned
* key is readable as it is the original key.
*
* @param stateStoreRecordKey The record primary key stored by the state store implementations.
* @return The original primary key for the given record key.
*/
protected static String getOriginalPrimaryKey(String stateStoreRecordKey) {
Objects.requireNonNull(stateStoreRecordKey,
"state store record key provided to getOriginalPrimaryKey should not be null");
stateStoreRecordKey = stateStoreRecordKey.replaceAll(SLASH_MARK, "/");
stateStoreRecordKey = stateStoreRecordKey.replaceAll(COLON_MARK, ":");
return stateStoreRecordKey;
} | 3.68 |
flink_ModuleManager_getFactory | /**
* Returns the first factory found in the loaded modules given a selector.
*
* <p>Modules are checked in the order in which they have been loaded. The first factory
* returned by a module will be used. If no loaded module provides a factory, {@link
* Optional#empty()} is returned.
*/
@SuppressWarnings("unchecked")
public <T extends Factory> Optional<T> getFactory(Function<Module, Optional<T>> selector) {
for (final String moduleName : usedModules) {
final Optional<T> factory = selector.apply(loadedModules.get(moduleName));
if (factory.isPresent()) {
return factory;
}
}
return Optional.empty();
} | 3.68 |
pulsar_KerberosName_replaceParameters | /**
* Replace the numbered parameters of the form $n where n is from 1 to
* the length of params. Normal text is copied directly and $n is replaced
* by the corresponding parameter.
* @param format the format string in which to replace parameters
* @param params the list of parameters
* @return the generated string with the parameter references replaced.
* @throws BadFormatString
*/
static String replaceParameters(String format,
String[] params) throws BadFormatString {
Matcher match = parameterPattern.matcher(format);
int start = 0;
StringBuilder result = new StringBuilder();
while (start < format.length() && match.find(start)) {
result.append(match.group(1));
String paramNum = match.group(3);
if (paramNum != null) {
try {
int num = Integer.parseInt(paramNum);
if (num < 0 || num >= params.length) {
throw new BadFormatString("index " + num + " from " + format
+ " is outside of the valid range 0 to " + (params.length - 1));
}
result.append(params[num]);
} catch (NumberFormatException nfe) {
throw new BadFormatString("bad format in username mapping in "
+ paramNum, nfe);
}
}
start = match.end();
}
return result.toString();
} | 3.68 |
hadoop_FlowRunRowKey_getRowKey | /**
* Constructs a row key for the entity table as follows: {
* clusterId!userId!flowName!Inverted Flow Run Id}.
*
* @return byte array with the row key
*/
public byte[] getRowKey() {
return flowRunRowKeyConverter.encode(this);
} | 3.68 |
hadoop_LogAggregationWebUtils_getLogStartTime | /**
* Parse the log start time from HTML.
* @param startStr the start time string
* @return the parsed log start time
*/
public static long getLogStartTime(String startStr)
throws NumberFormatException {
long start = 0;
if (startStr != null && !startStr.isEmpty()) {
start = Long.parseLong(startStr);
}
return start;
} | 3.68 |
flink_Transformation_getParallelism | /** Returns the parallelism of this {@code Transformation}. */
public int getParallelism() {
return parallelism;
} | 3.68 |
framework_CustomizedSystemMessages_setSessionExpiredNotificationEnabled | /**
* Enables or disables the notification. If disabled, the configured URL (or
* the current one) is loaded directly when the next transaction between the
* server and the client happens.
*
* @param sessionExpiredNotificationEnabled
* true = enabled, false = disabled
*/
public void setSessionExpiredNotificationEnabled(
boolean sessionExpiredNotificationEnabled) {
this.sessionExpiredNotificationEnabled = sessionExpiredNotificationEnabled;
} | 3.68 |
AreaShop_GeneralRegion_removelandlord | /**
* Remove the landlord from this region.
*/
public void removelandlord() {
setSetting("general.landlord", null);
setSetting("general.landlordName", null);
} | 3.68 |
hadoop_SchedulerNodeReport_getUsedResource | /**
* @return the amount of resources currently used by the node.
*/
public Resource getUsedResource() {
return used;
} | 3.68 |
streampipes_InfluxDbClient_loadColumns | // Client must be connected before calling this method
void loadColumns() throws AdapterException {
if (!connected) {
throw new AdapterException("Client must be connected to the server in order to load the columns.");
}
List<List<Object>> fieldKeys = query("SHOW FIELD KEYS FROM " + measureName);
List<List<Object>> tagKeys = query("SHOW TAG KEYS FROM " + measureName);
// if (fieldKeys.size() == 0 || tagKeys.size() == 0) {
if (fieldKeys.size() == 0) {
throw new AdapterException("Error while checking the Schema (does the measurement exist?)");
}
columns = new ArrayList<>();
columns.add(new Column("time", Datatypes.Long));
for (List o : fieldKeys) {
// o.get(0): Name, o.get(1): Datatype
// Data types: https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_reference/#data-types
String name = o.get(0).toString();
Datatypes datatype;
switch (o.get(1).toString()) {
case "float":
datatype = Datatypes.Float;
break;
case "boolean":
datatype = Datatypes.Boolean;
break;
case "integer":
datatype = Datatypes.Integer;
break;
default:
datatype = Datatypes.String;
break;
}
columns.add(new Column(name, datatype));
}
for (List o : tagKeys) {
// All tag keys are strings
String name = o.get(0).toString();
columns.add(new Column(name, Datatypes.String));
}
// Update the column String
// Do it only here, because it is needed every time for the query (performance)
StringBuilder sb = new StringBuilder();
for (Column column : columns) {
sb.append(column.getName()).append(", ");
} | 3.68 |
framework_VaadinService_removeSession | /**
* Called when the VaadinSession should be removed from the underlying HTTP
* session.
*
* @since 7.6
* @param wrappedSession
* the underlying HTTP session
*/
public void removeSession(WrappedSession wrappedSession) {
assert VaadinSession.hasLock(this, wrappedSession);
removeFromHttpSession(wrappedSession);
} | 3.68 |
AreaShop_GeneralRegion_handleSchematicEvent | /**
* Checks an event and handles saving to and restoring from schematic for it.
* @param type The type of event
*/
public void handleSchematicEvent(RegionEvent type) {
// Check the individual>group>default setting
if(!isRestoreEnabled()) {
AreaShop.debug("Schematic operations for " + getName() + " not enabled, skipped");
return;
}
// Get the save and restore names
ConfigurationSection profileSection = getConfigurationSectionSetting("general.schematicProfile", "schematicProfiles");
if(profileSection == null) {
return;
}
String save = profileSection.getString(type.getValue() + ".save");
String restore = profileSection.getString(type.getValue() + ".restore");
// Save the region if needed
if(save != null && !save.isEmpty()) {
save = Message.fromString(save).replacements(this).getSingle();
saveRegionBlocks(save);
}
// Restore the region if needed
if(restore != null && !restore.isEmpty()) {
restore = Message.fromString(restore).replacements(this).getSingle();
restoreRegionBlocks(restore);
}
} | 3.68 |
hbase_MasterFileSystem_getClusterId | /** Returns The unique identifier generated for this cluster */
public ClusterId getClusterId() {
return clusterId;
} | 3.68 |
hbase_ImmutableBytesWritable_copyBytes | /** Returns a copy of the bytes referred to by this writable */
public byte[] copyBytes() {
return Arrays.copyOfRange(bytes, offset, offset + length);
} | 3.68 |
hadoop_FileSetUtils_getCommaSeparatedList | /**
* Returns a string containing every element of the given list, with each
* element separated by a comma.
*
* @param list List of all elements
* @return String containing every element, comma-separated
*/
private static String getCommaSeparatedList(List<String> list) {
StringBuilder buffer = new StringBuilder();
String separator = "";
for (Object e : list) {
buffer.append(separator).append(e);
separator = ",";
}
return buffer.toString();
} | 3.68 |
flink_ImperativeAggregateFunction_getAccumulatorType | /**
* Returns the {@link TypeInformation} of the {@link ImperativeAggregateFunction}'s accumulator.
*
* @return The {@link TypeInformation} of the {@link ImperativeAggregateFunction}'s accumulator
* or <code>null</code> if the accumulator type should be automatically inferred.
* @deprecated This method uses the old type system and is based on the old reflective
* extraction logic. The method will be removed in future versions and is only called when
* using the deprecated {@code TableEnvironment.registerFunction(...)} method. The new
* reflective extraction logic (possibly enriched with {@link DataTypeHint} and {@link
* FunctionHint}) should be powerful enough to cover most use cases. For advanced users, it
* is possible to override {@link UserDefinedFunction#getTypeInference(DataTypeFactory)}.
*/
@Deprecated
public TypeInformation<ACC> getAccumulatorType() {
return null;
} | 3.68 |
hbase_AuthManager_authorizeUserGlobal | /**
* Check if the user has the given action privilege in global scope.
* @param user user name
* @param action one of action in [Read, Write, Create, Exec, Admin]
* @return true if user has, false otherwise
*/
public boolean authorizeUserGlobal(User user, Permission.Action action) {
if (user == null) {
return false;
}
if (Superusers.isSuperUser(user)) {
return true;
}
if (authorizeGlobal(globalCache.get(user.getShortName()), action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (authorizeGlobal(globalCache.get(AuthUtil.toGroupEntry(group)), action)) {
return true;
}
}
return false;
} | 3.68 |
hadoop_DiskBalancerWorkItem_setStartTime | /**
* Sets the Start time.
* @param startTime - Time stamp for start of execution.
*/
public void setStartTime(long startTime) {
this.startTime = startTime;
} | 3.68 |
graphhopper_VectorTile_setId | /**
* <code>optional uint64 id = 1 [default = 0];</code>
*/
public Builder setId(long value) {
bitField0_ |= 0x00000001;
id_ = value;
onChanged();
return this;
} | 3.68 |
zxing_CodaBarReader_setCounters | /**
* Records the size of all runs of white and black pixels, starting with white.
* This is just like recordPattern, except it records all the counters, and
* uses our builtin "counters" member for storage.
* @param row row to count from
*/
private void setCounters(BitArray row) throws NotFoundException {
counterLength = 0;
// Start from the first white bit.
int i = row.getNextUnset(0);
int end = row.getSize();
if (i >= end) {
throw NotFoundException.getNotFoundInstance();
}
boolean isWhite = true;
int count = 0;
while (i < end) {
if (row.get(i) != isWhite) {
count++;
} else {
counterAppend(count);
count = 1;
isWhite = !isWhite;
}
i++;
}
counterAppend(count);
} | 3.68 |
morf_RecreateOracleSequences_getJiraId | /**
* @see org.alfasoftware.morf.upgrade.UpgradeStep#getJiraId()
*/
@Override
public String getJiraId() {
return "WEB-51306";
} | 3.68 |
flink_DecodingFormat_listReadableMetadata | /**
* Returns the map of metadata keys and their corresponding data types that can be produced by
* this format for reading. By default, this method returns an empty map.
*
* <p>Metadata columns add additional columns to the table's schema. A decoding format is
* responsible to add requested metadata columns at the end of produced rows.
*
* <p>See {@link SupportsReadingMetadata} for more information.
*
* <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link
* SupportsReadingMetadata} and calls this method in {@link
* SupportsReadingMetadata#listReadableMetadata()}.
*/
default Map<String, DataType> listReadableMetadata() {
return Collections.emptyMap();
} | 3.68 |
flink_SlotPoolService_castInto | /**
* Tries to cast this slot pool service into the given clazz.
*
* @param clazz to cast the slot pool service into
* @param <T> type of clazz
* @return {@link Optional#of} the target type if it can be cast; otherwise {@link
* Optional#empty()}
*/
default <T> Optional<T> castInto(Class<T> clazz) {
if (clazz.isAssignableFrom(this.getClass())) {
return Optional.of(clazz.cast(this));
} else {
return Optional.empty();
}
} | 3.68 |
hbase_SingleColumnValueFilter_isFamilyEssential | /**
* The only CF this filter needs is the given column family, so it is the only essential column in
* the whole scan. If filterIfMissing == false, all families are essential, because of the
* possibility of skipping rows without any data in the filtered CF.
*/
@Override
public boolean isFamilyEssential(byte[] name) {
return !this.filterIfMissing || Bytes.equals(name, this.columnFamily);
} | 3.68 |
framework_MultiSelectionModelImpl_fetchAllDescendants | /**
* Fetch all the descendants of the given parent item from the given data
* provider.
*
* @since 8.1
* @param parent
* the parent item to fetch descendants for
* @param dataProvider
* the data provider to fetch from
* @return the stream of all descendant items
*/
private Stream<T> fetchAllDescendants(T parent,
HierarchicalDataProvider<T, ?> dataProvider) {
List<T> children = dataProvider
.fetchChildren(new HierarchicalQuery<>(null, parent))
.collect(Collectors.toList());
if (children.isEmpty()) {
return Stream.empty();
}
return children.stream()
.flatMap(child -> Stream.concat(Stream.of(child),
fetchAllDescendants(child, dataProvider)));
} | 3.68 |
hbase_Scan_getFingerprint | /**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
* and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
Map<String, Object> map = new HashMap<>();
List<String> families = new ArrayList<>();
if (this.familyMap.isEmpty()) {
map.put("families", "ALL");
return map;
} else {
map.put("families", families);
}
for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
families.add(Bytes.toStringBinary(entry.getKey()));
}
return map;
} | 3.68 |
flink_SplitFetcher_addSplits | /**
* Add splits to the split fetcher. This operation is asynchronous.
*
* @param splitsToAdd the splits to add.
*/
public void addSplits(List<SplitT> splitsToAdd) {
lock.lock();
try {
enqueueTaskUnsafe(new AddSplitsTask<>(splitReader, splitsToAdd, assignedSplits));
wakeUpUnsafe(true);
} finally {
lock.unlock();
}
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_initializeIfNeeded | /**
* Initialize the metadata table if needed.
*
* @param dataMetaClient - meta client for the data table
* @param inflightInstantTimestamp - timestamp of an instant in progress on the dataset
* @throws IOException on errors
*/
protected boolean initializeIfNeeded(HoodieTableMetaClient dataMetaClient,
Option<String> inflightInstantTimestamp) throws IOException {
HoodieTimer timer = HoodieTimer.start();
List<MetadataPartitionType> partitionsToInit = new ArrayList<>(MetadataPartitionType.values().length);
try {
boolean exists = metadataTableExists(dataMetaClient);
if (!exists) {
// FILES partition is always required
partitionsToInit.add(FILES);
}
// check if any of the enabled partition types needs to be initialized
// NOTE: It needs to be guarded by async index config because if that is enabled then initialization happens through the index scheduler.
if (!dataWriteConfig.isMetadataAsyncIndex()) {
Set<String> completedPartitions = dataMetaClient.getTableConfig().getMetadataPartitions();
LOG.info("Async metadata indexing disabled and following partitions already initialized: " + completedPartitions);
// TODO: fix the filter to check for exact partition name, e.g. completedPartitions could have func_index_datestr,
// but now the user is trying to initialize the func_index_dayhour partition.
this.enabledPartitionTypes.stream()
.filter(p -> !completedPartitions.contains(p.getPartitionPath()) && !FILES.equals(p))
.forEach(partitionsToInit::add);
}
if (partitionsToInit.isEmpty()) {
// No partitions left to initialize, since all the metadata enabled partitions are either initialized before
// or currently in the process of initialization.
initMetadataReader();
return true;
}
// If there is no commit on the dataset yet, use the SOLO_COMMIT_TIMESTAMP as the instant time for initial commit
// Otherwise, we use the timestamp of the latest completed action.
String initializationTime = dataMetaClient.getActiveTimeline().filterCompletedInstants().lastInstant().map(HoodieInstant::getTimestamp).orElse(SOLO_COMMIT_TIMESTAMP);
// Initialize partitions for the first time using data from the files on the file system
if (!initializeFromFilesystem(initializationTime, partitionsToInit, inflightInstantTimestamp)) {
LOG.error("Failed to initialize MDT from filesystem");
return false;
}
metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.INITIALIZE_STR, timer.endTimer()));
return true;
} catch (IOException e) {
LOG.error("Failed to initialize metadata table. Disabling the writer.", e);
return false;
}
} | 3.68 |
flink_FactoryUtil_validateWatermarkOptions | /**
* Validate watermark options from table options.
*
* @param factoryIdentifier identifier of table
* @param conf table options
*/
public static void validateWatermarkOptions(String factoryIdentifier, ReadableConfig conf) {
Optional<String> errMsgOptional = checkWatermarkOptions(conf);
if (errMsgOptional.isPresent()) {
throw new ValidationException(
String.format(
"Error configuring watermark for '%s', %s",
factoryIdentifier, errMsgOptional.get()));
}
} | 3.68 |
dubbo_MergerFactory_getActualTypeArgument | /**
* get merger's actual type argument (same as return type)
* @param mergerCls
* @return
*/
private Class<?> getActualTypeArgument(Class<? extends Merger> mergerCls) {
Class<?> superClass = mergerCls;
while (superClass != Object.class) {
Type[] interfaceTypes = superClass.getGenericInterfaces();
ParameterizedType mergerType;
for (Type it : interfaceTypes) {
if (it instanceof ParameterizedType
&& (mergerType = ((ParameterizedType) it)).getRawType() == Merger.class) {
Type typeArg = mergerType.getActualTypeArguments()[0];
return TypeUtils.getRawClass(typeArg);
}
}
superClass = superClass.getSuperclass();
}
return null;
} | 3.68 |
morf_SelectStatementBuilder_notDistinct | /**
* Disable DISTINCT. Although this is the default, it can be used
* to remove the DISTINCT behaviour on a copied statement.
*
*/
public SelectStatementBuilder notDistinct() {
this.distinct = false;
return this;
} | 3.68 |
flink_CheckpointConfig_setCheckpointIntervalDuringBacklog | /**
* Sets the interval in which checkpoints are periodically scheduled during backlog.
*
* <p>This setting defines the base interval. Checkpoint triggering may be delayed by the
* settings {@link #setMaxConcurrentCheckpoints(int)} and {@link
* #setMinPauseBetweenCheckpoints(long)}.
*
* <p>If not explicitly configured, checkpoint interval during backlog will be the same as that
* in normal situation(see {@link #setCheckpointInterval(long)}). If configured to zero,
* checkpoints would be disabled during backlog.
*
* @param checkpointInterval The checkpoint interval, in milliseconds.
*/
public void setCheckpointIntervalDuringBacklog(long checkpointInterval) {
if (checkpointInterval != 0 && checkpointInterval < MINIMAL_CHECKPOINT_TIME) {
throw new IllegalArgumentException(
String.format(
"Checkpoint interval must be zero or larger than or equal to %s ms",
MINIMAL_CHECKPOINT_TIME));
}
configuration.set(
ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG,
Duration.ofMillis(checkpointInterval));
} | 3.68 |
hadoop_ContentCounts_getSnapshotCount | // Get the number of snapshots
public long getSnapshotCount() {
return contents.get(Content.SNAPSHOT);
} | 3.68 |
framework_AbstractClientConnector_removeListener | /**
* <p>
* Removes one registered listener method. The given method owned by the
* given object will no longer be called when the specified events are
* generated by this component.
* </p>
*
* <p>
* This version of <code>removeListener</code> gets the name of the
* activation method as a parameter. The actual method is reflected from
* <code>target</code>, and unless exactly one match is found,
* <code>java.lang.IllegalArgumentException</code> is thrown.
* </p>
*
* <p>
* For more information on the inheritable event mechanism see the
* {@link com.vaadin.event com.vaadin.event package documentation}.
* </p>
*
* @param eventType
* the exact event type the <code>object</code> listens to.
* @param target
* the target object that has registered to listen to events of
* type <code>eventType</code> with one or more methods.
* @param methodName
* the name of the method owned by <code>target</code> that's
* registered to listen to events of type <code>eventType</code>.
* @deprecated As of 7.0. This method should be avoided. Use
* {@link #removeListener(Class, Object, Method)} instead.
*/
@Deprecated
@Override
public void removeListener(Class<?> eventType, Object target,
String methodName) {
if (eventRouter != null) {
eventRouter.removeListener(eventType, target, methodName);
}
} | 3.68 |
hadoop_TypedBytesInput_readFloat | /**
* Reads the float following a <code>Type.FLOAT</code> code.
* @return the obtained float
* @throws IOException
*/
public float readFloat() throws IOException {
return in.readFloat();
} | 3.68 |
dubbo_Bytes_bytes2double | /**
* to double.
*
* @param b byte array.
* @param off offset.
* @return double.
*/
public static double bytes2double(byte[] b, int off) {
long j = ((b[off + 7] & 0xFFL) << 0)
+ ((b[off + 6] & 0xFFL) << 8)
+ ((b[off + 5] & 0xFFL) << 16)
+ ((b[off + 4] & 0xFFL) << 24)
+ ((b[off + 3] & 0xFFL) << 32)
+ ((b[off + 2] & 0xFFL) << 40)
+ ((b[off + 1] & 0xFFL) << 48)
+ (((long) b[off + 0]) << 56);
return Double.longBitsToDouble(j);
} | 3.68 |
flink_KeyedStream_reduce | /**
* Applies a reduce transformation on the grouped data stream grouped on by the given key
* position. The {@link ReduceFunction} will receive input values based on the key value. Only
* input values with the same key will go to the same reducer.
*
* @param reducer The {@link ReduceFunction} that will be called for every element of the input
* values with the same key.
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> reduce(ReduceFunction<T> reducer) {
ReduceTransformation<T, KEY> reduce =
new ReduceTransformation<>(
"Keyed Reduce",
environment.getParallelism(),
transformation,
clean(reducer),
keySelector,
getKeyType(),
false);
getExecutionEnvironment().addOperator(reduce);
return new SingleOutputStreamOperator<>(getExecutionEnvironment(), reduce);
} | 3.68 |
framework_Window_close | /**
* Method that handles window closing (from UI).
*
* <p>
* By default, windows are removed from their respective UIs and thus
* visually closed on browser-side.
* </p>
*
* <p>
* To react to a window being closed (after it is closed), register a
* {@link CloseListener}.
* </p>
*/
public void close() {
UI uI = getUI();
// Don't do anything if not attached to a UI
if (uI != null) {
// window is removed from the UI
uI.removeWindow(this);
}
} | 3.68 |
flink_FileSystem_getDefaultFsUri | /**
* Gets the default file system URI that is used for paths and file systems that do not specify
* an explicit scheme.
*
* <p>As an example, assume the default file system URI is set to {@code
* 'hdfs://someserver:9000/'}. A file path of {@code '/user/USERNAME/in.txt'} is interpreted as
* {@code 'hdfs://someserver:9000/user/USERNAME/in.txt'}.
*
* @return The default file system URI
*/
public static URI getDefaultFsUri() {
return defaultScheme != null ? defaultScheme : LocalFileSystem.getLocalFsURI();
} | 3.68 |
hadoop_AllocateRequest_build | /**
* Return generated {@link AllocateRequest} object.
* @return {@link AllocateRequest}
*/
@Public
@Stable
public AllocateRequest build() {
return allocateRequest;
} | 3.68 |
querydsl_GeometryExpression_intersects | /**
* Returns 1 (TRUE) if this geometric object “spatially intersects” anotherGeometry.
*
* @param geometry other geometry
* @return true, if intersects
*/
public BooleanExpression intersects(Expression<? extends Geometry> geometry) {
return Expressions.booleanOperation(SpatialOps.INTERSECTS, mixin, geometry);
} | 3.68 |
zxing_Decoder_correctErrors | /**
* <p>Given data and error-correction codewords received, possibly corrupted by errors, attempts to
* correct the errors in-place using Reed-Solomon error correction.</p>
*
* @param codewordBytes data and error correction codewords
* @param numDataCodewords number of codewords that are data bytes
* @return the number of errors corrected
* @throws ChecksumException if error correction fails
*/
private int correctErrors(byte[] codewordBytes, int numDataCodewords) throws ChecksumException {
int numCodewords = codewordBytes.length;
// First read into an array of ints
int[] codewordsInts = new int[numCodewords];
for (int i = 0; i < numCodewords; i++) {
codewordsInts[i] = codewordBytes[i] & 0xFF;
}
int errorsCorrected = 0;
try {
errorsCorrected = rsDecoder.decodeWithECCount(codewordsInts, codewordBytes.length - numDataCodewords);
} catch (ReedSolomonException ignored) {
throw ChecksumException.getChecksumInstance();
}
// Copy back into array of bytes -- only need to worry about the bytes that were data
// We don't care about errors in the error-correction codewords
for (int i = 0; i < numDataCodewords; i++) {
codewordBytes[i] = (byte) codewordsInts[i];
}
return errorsCorrected;
} | 3.68 |
pulsar_FunctionRuntimeManager_getFunctionInstanceStats | /**
* Get stats of a function instance. If this worker is not running the function instance, the
* request is redirected to the worker that runs it.
* @param tenant the tenant the function belongs to
* @param namespace the namespace the function belongs to
* @param functionName the function name
* @param instanceId the function instance id
* @return jsonObject containing stats for instance
*/
public FunctionInstanceStatsDataImpl getFunctionInstanceStats(String tenant, String namespace,
String functionName, int instanceId, URI uri) {
Assignment assignment;
if (runtimeFactory.externallyManaged()) {
assignment = this.findAssignment(tenant, namespace, functionName, -1);
} else {
assignment = this.findAssignment(tenant, namespace, functionName, instanceId);
}
if (assignment == null) {
return new FunctionInstanceStatsDataImpl();
}
final String assignedWorkerId = assignment.getWorkerId();
final String workerId = this.workerConfig.getWorkerId();
// If I am running worker
if (assignedWorkerId.equals(workerId)) {
FunctionRuntimeInfo functionRuntimeInfo = this.getFunctionRuntimeInfo(
FunctionCommon.getFullyQualifiedInstanceId(assignment.getInstance()));
RuntimeSpawner runtimeSpawner = functionRuntimeInfo.getRuntimeSpawner();
if (runtimeSpawner != null) {
return (FunctionInstanceStatsDataImpl)
WorkerUtils.getFunctionInstanceStats(
FunctionCommon.getFullyQualifiedInstanceId(
assignment.getInstance()), functionRuntimeInfo, instanceId).getMetrics();
}
return new FunctionInstanceStatsDataImpl();
} else {
// query other worker
List<WorkerInfo> workerInfoList = this.membershipManager.getCurrentMembership();
WorkerInfo workerInfo = null;
for (WorkerInfo entry : workerInfoList) {
if (assignment.getWorkerId().equals(entry.getWorkerId())) {
workerInfo = entry;
}
}
if (workerInfo == null) {
return new FunctionInstanceStatsDataImpl();
}
if (uri == null) {
throw new WebApplicationException(Response.serverError().status(Status.INTERNAL_SERVER_ERROR).build());
} else {
URI redirect = UriBuilder.fromUri(uri).host(workerInfo.getWorkerHostname())
.port(workerInfo.getPort()).build();
throw new WebApplicationException(Response.temporaryRedirect(redirect).build());
}
}
} | 3.68 |
AreaShop_FileManager_getRegionNames | /**
* Get a list of names of all regions.
* @return A String list with all the names
*/
public List<String> getRegionNames() {
ArrayList<String> result = new ArrayList<>();
for(GeneralRegion region : getRegions()) {
result.add(region.getName());
}
return result;
} | 3.68 |
shardingsphere-elasticjob_ListenerNotifierManager_getInstance | /**
* Get singleton instance of ListenerNotifierManager.
* @return singleton instance of ListenerNotifierManager.
*/
public static ListenerNotifierManager getInstance() {
if (null == instance) {
synchronized (ListenerNotifierManager.class) {
if (null == instance) {
instance = new ListenerNotifierManager();
}
}
}
return instance;
} | 3.68 |
hbase_Query_setIsolationLevel | /**
* Set the isolation level for this query. If the isolation level is set to READ_UNCOMMITTED, then
* this query will return data from committed and uncommitted transactions. If the isolation level
* is set to READ_COMMITTED, then this query will return data from committed transactions only. If
* an isolation level is not explicitly set on a Query, then it is assumed to be READ_COMMITTED.
* @param level IsolationLevel for this query
*/
public Query setIsolationLevel(IsolationLevel level) {
setAttribute(ISOLATION_LEVEL, level.toBytes());
return this;
} | 3.68 |
framework_InfoSection_equalsEither | /**
* Checks if the target value equals one of the reference values
*
* @param target
* The value to compare
* @param reference1
* A reference value
* @param reference2
* A reference value
* @return true if target equals one of the references, false otherwise
*/
private boolean equalsEither(String target, String reference1,
String reference2) {
if (SharedUtil.equals(target, reference1)) {
return true;
}
if (SharedUtil.equals(target, reference2)) {
return true;
}
return false;
} | 3.68 |
hudi_CompactionCommitSink_commitIfNecessary | /**
* Condition to commit: the commit buffer has the same size as the compaction plan operations
* and all the received {@link CompactionCommitEvent}s have the same compaction instant time.
*
* @param instant Compaction commit instant time
* @param events Commit events ever received for the instant
*/
private void commitIfNecessary(String instant, Collection<CompactionCommitEvent> events) throws IOException {
HoodieCompactionPlan compactionPlan = compactionPlanCache.computeIfAbsent(instant, k -> {
try {
return CompactionUtils.getCompactionPlan(
this.writeClient.getHoodieTable().getMetaClient(), instant);
} catch (Exception e) {
throw new HoodieException(e);
}
});
boolean isReady = compactionPlan.getOperations().size() == events.size();
if (!isReady) {
return;
}
if (events.stream().anyMatch(CompactionCommitEvent::isFailed)) {
try {
// handle failure case
CompactionUtil.rollbackCompaction(table, instant);
} finally {
// remove commitBuffer to avoid obsolete metadata commit
reset(instant);
this.compactionMetrics.markCompactionRolledBack();
}
return;
}
try {
doCommit(instant, events);
} catch (Throwable throwable) {
// make it fail-safe
LOG.error("Error while committing compaction instant: " + instant, throwable);
this.compactionMetrics.markCompactionRolledBack();
} finally {
// reset the status
reset(instant);
}
} | 3.68 |
flink_FutureUtils_completedVoidFuture | /**
* Returns a completed future of type {@link Void}.
*
* @return a completed future of type {@link Void}
*/
public static CompletableFuture<Void> completedVoidFuture() {
return COMPLETED_VOID_FUTURE;
} | 3.68 |
framework_InMemoryDataProvider_addSortOrder | /**
* Adds a property and direction to the default sorting for this data
* provider. If no default sorting has been defined, then the provided sort
* order will be used as the default sorting. If a default sorting has been
* defined, then the provided sort order will be used to determine the
* ordering of items that are considered equal by the previously defined
* default sorting.
* <p>
* The default sorting is used if the query defines no sorting. The default
* sorting is also used to determine the ordering of items that are
* considered equal by the sorting defined in the query.
*
* @see #setSortOrder(ValueProvider, SortDirection)
* @see #addSortComparator(SerializableComparator)
*
* @param valueProvider
* the value provider that defines the property to sort by, not
* <code>null</code>
* @param sortDirection
* the sort direction to use, not <code>null</code>
*/
public default <V extends Comparable<? super V>> void addSortOrder(
ValueProvider<T, V> valueProvider, SortDirection sortDirection) {
addSortComparator(InMemoryDataProviderHelpers
.propertyComparator(valueProvider, sortDirection));
} | 3.68 |
querydsl_JTSGeometryExpression_intersects | /**
* Returns 1 (TRUE) if this geometric object “spatially intersects” anotherGeometry.
*
* @param geometry other geometry
* @return true, if intersects
*/
public BooleanExpression intersects(Expression<? extends Geometry> geometry) {
return Expressions.booleanOperation(SpatialOps.INTERSECTS, mixin, geometry);
} | 3.68 |
hbase_MetricsStochasticBalancer_balancerStatus | /**
* Updates the balancer status tag reported to JMX
*/
@Override
public void balancerStatus(boolean status) {
stochasticSource.updateBalancerStatus(status);
} | 3.68 |
morf_FieldFromSelectFirst_getSelectFirstStatement | /**
* @return the selectStatement
*/
public SelectFirstStatement getSelectFirstStatement() {
return selectFirstStatement;
} | 3.68 |
flink_BatchTask_closeUserCode | /**
* Closes the given stub using its {@link
* org.apache.flink.api.common.functions.RichFunction#close()} method. If the close call
* produces an exception, a new exception with a standard error message is created, using the
* encountered exception as its cause.
*
* @param stub The user code instance to be closed.
* @throws Exception Thrown, if the user code's close method produces an exception.
*/
public static void closeUserCode(Function stub) throws Exception {
try {
FunctionUtils.closeFunction(stub);
} catch (Throwable t) {
throw new Exception(
"The user defined 'close()' method caused an exception: " + t.getMessage(), t);
}
} | 3.68 |
hbase_FilterBase_areSerializedFieldsEqual | /**
* Default implementation so that writers of custom filters aren't forced to implement.
* @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {
return true;
} | 3.68 |
shardingsphere-elasticjob_JobAPIFactory_createJobOperateAPI | /**
* Create job operate API.
*
* @param connectString registry center connect string
* @param namespace registry center namespace
* @param digest registry center digest
* @return job operate API
*/
public static JobOperateAPI createJobOperateAPI(final String connectString, final String namespace, final String digest) {
return new JobOperateAPIImpl(RegistryCenterFactory.createCoordinatorRegistryCenter(connectString, namespace, digest));
} | 3.68 |
framework_FieldGroup_getFieldFactory | /**
* Gets the field factory for the {@link FieldGroup}. The field factory is
* only used when {@link FieldGroup} creates a new field.
*
* @return The field factory in use
*
*/
public FieldGroupFieldFactory getFieldFactory() {
return fieldFactory;
} | 3.68 |
flink_ParquetAvroWriters_forGenericRecord | /**
* Creates a ParquetWriterFactory that accepts and writes Avro generic types. The Parquet
* writers will use the given schema to build and write the columnar data.
*
* @param schema The schema of the generic type.
*/
public static ParquetWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
return AvroParquetWriters.forGenericRecord(schema);
} | 3.68 |
pulsar_LongHierarchicalLedgerRangeIterator_getChildrenAt | /**
* Returns all children with path as a parent. If path is non-existent,
* returns an empty list anyway (after all, there are no children there).
* Maps all exceptions (other than NoNode) to IOException in keeping with
* LedgerRangeIterator.
*
* @param path
* @return Iterator into set of all children with path as a parent
* @throws IOException
*/
List<String> getChildrenAt(String path) throws IOException {
try {
return store.getChildren(path)
.get(AbstractMetadataDriver.BLOCKING_CALL_TIMEOUT, TimeUnit.MILLISECONDS);
} catch (ExecutionException | TimeoutException e) {
if (log.isDebugEnabled()) {
log.debug("Failed to get children at {}", path);
}
throw new IOException(e);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new IOException("Interrupted while reading ledgers at path " + path, ie);
}
} | 3.68 |
morf_InsertStatement_getHints | /**
* @return all hints in the order they were declared.
*/
public List<Hint> getHints() {
return hints;
} | 3.68 |
hbase_Bytes_incrementBytes | /**
* Bytewise binary increment/deincrement of long contained in byte array on given amount.
* @param value - array of bytes containing long (length <= SIZEOF_LONG)
* @param amount value will be incremented on (deincremented if negative)
* @return array of bytes containing incremented long (length == SIZEOF_LONG)
*/
public static byte[] incrementBytes(byte[] value, long amount) {
byte[] val = value;
if (val.length < SIZEOF_LONG) {
// Hopefully this doesn't happen too often.
byte[] newvalue;
if (val[0] < 0) {
newvalue = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1 };
} else {
newvalue = new byte[SIZEOF_LONG];
}
System.arraycopy(val, 0, newvalue, newvalue.length - val.length, val.length);
val = newvalue;
} else if (val.length > SIZEOF_LONG) {
throw new IllegalArgumentException("Increment Bytes - value too big: " + val.length);
}
if (amount == 0) return val;
if (val[0] < 0) {
return binaryIncrementNeg(val, amount);
}
return binaryIncrementPos(val, amount);
} | 3.68 |
hbase_TableSplit_readFields | /**
* Reads the values of each field.
* @param in The input to read from.
* @throws IOException When reading the input fails.
*/
@Override
public void readFields(DataInput in) throws IOException {
Version version = Version.UNVERSIONED;
// TableSplit was not versioned in the beginning.
// In order to introduce it now, we make use of the fact
// that tableName was written with Bytes.writeByteArray,
// which encodes the array length as a vint which is >= 0.
// Hence if the vint is >= 0 we have an old version and the vint
// encodes the length of tableName.
// If < 0 we just read the version and the next vint is the length.
// @see Bytes#readByteArray(DataInput)
int len = WritableUtils.readVInt(in);
if (len < 0) {
// what we just read was the version
version = Version.fromCode(len);
len = WritableUtils.readVInt(in);
}
byte[] tableNameBytes = new byte[len];
in.readFully(tableNameBytes);
tableName = TableName.valueOf(tableNameBytes);
startRow = Bytes.readByteArray(in);
endRow = Bytes.readByteArray(in);
regionLocation = Bytes.toString(Bytes.readByteArray(in));
if (version.atLeast(Version.INITIAL)) {
scan = Bytes.toString(Bytes.readByteArray(in));
}
length = WritableUtils.readVLong(in);
if (version.atLeast(Version.WITH_ENCODED_REGION_NAME)) {
encodedRegionName = Bytes.toString(Bytes.readByteArray(in));
}
} | 3.68 |
pulsar_AuthenticationProviderOpenID_authenticateTokenAsync | /**
* Authenticate the parameterized {@link AuthenticationDataSource} and return the decoded JWT.
* @param authData - the authData containing the token.
* @return a completed future with the decoded JWT, if the JWT is authenticated. Otherwise, a failed future.
*/
CompletableFuture<DecodedJWT> authenticateTokenAsync(AuthenticationDataSource authData) {
String token;
try {
token = AuthenticationProviderToken.getToken(authData);
} catch (AuthenticationException e) {
incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
return CompletableFuture.failedFuture(e);
}
return authenticateToken(token)
.whenComplete((jwt, e) -> {
if (jwt != null) {
AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
}
// Failure metrics are incremented within methods above
});
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setStoragePolicy | /**
* Set the storage policy for use with this family
* @param policy the policy to set, valid setting includes: <i>"LAZY_PERSIST"</i>,
* <i>"ALL_SSD"</i>, <i>"ONE_SSD"</i>, <i>"HOT"</i>, <i>"WARM"</i>, <i>"COLD"</i>
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setStoragePolicy(String policy) {
return setValue(STORAGE_POLICY_BYTES, policy);
} | 3.68 |
flink_BuiltInSqlFunction_internal | /** @see BuiltInFunctionDefinition.Builder#internal() */
public Builder internal() {
this.isInternal = true;
return this;
} | 3.68 |
hbase_NettyHBaseRpcConnectionHeaderHandler_setupCryptoAESHandler | /**
* Remove handlers for sasl encryption and add handlers for Crypto AES encryption
*/
private void setupCryptoAESHandler(ChannelPipeline p, CryptoAES cryptoAES) {
p.replace(SaslWrapHandler.class, null, new SaslWrapHandler(cryptoAES::wrap));
p.replace(SaslUnwrapHandler.class, null, new SaslUnwrapHandler(cryptoAES::unwrap));
} | 3.68 |
hadoop_ECPolicyLoader_loadLayoutVersion | /**
* Load layoutVersion from root element in the XML configuration file.
* @param root root element
* @return layout version
*/
private int loadLayoutVersion(Element root) {
int layoutVersion;
Text text = (Text) root.getElementsByTagName("layoutversion")
.item(0).getFirstChild();
if (text != null) {
String value = text.getData().trim();
try {
layoutVersion = Integer.parseInt(value);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Bad layoutVersion value "
+ value + " is found. It should be an integer");
}
} else {
throw new IllegalArgumentException("Value of <layoutVersion> is null");
}
return layoutVersion;
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setCompactionCompressionType | /**
* Compression types supported in hbase. LZO is not bundled as part of the hbase distribution.
* See <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a> for
* how to enable it.
* @param type Compression type setting.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor
setCompactionCompressionType(Compression.Algorithm type) {
return setValue(COMPRESSION_COMPACT_BYTES, type.name());
} | 3.68 |
hbase_WALKeyImpl_addClusterId | /**
* Marks that the cluster with the given clusterId has consumed the change
*/
public void addClusterId(UUID clusterId) {
if (!clusterIds.contains(clusterId)) {
clusterIds.add(clusterId);
}
} | 3.68 |
hbase_AtomicUtils_updateMin | /**
* Updates an AtomicLong which is supposed to maintain the minimum value. This method is not
* synchronized but is thread-safe.
*/
public static void updateMin(AtomicLong min, long value) {
while (true) {
long cur = min.get();
if (value >= cur) {
break;
}
if (min.compareAndSet(cur, value)) {
break;
}
}
} | 3.68 |
framework_StringDecorator_group | /**
* Groups a string by surrounding it in parentheses.
*
* @param str
* the string to group
* @return the grouped string
*/
public String group(String str) {
return "(" + str + ")";
} | 3.68 |
flink_OrcShim_createShim | /** Create shim from hive version. */
static OrcShim<VectorizedRowBatch> createShim(String hiveVersion) {
if (hiveVersion.startsWith("2.0")) {
return new OrcShimV200();
} else if (hiveVersion.startsWith("2.1")) {
return new OrcShimV210();
} else if (hiveVersion.startsWith("2.2")
|| hiveVersion.startsWith("2.3")
|| hiveVersion.startsWith("3.")) {
return new OrcShimV230();
} else {
throw new UnsupportedOperationException(
"Unsupported hive version for orc shim: " + hiveVersion);
}
} | 3.68 |
flink_MapValue_remove | /*
* (non-Javadoc)
* @see java.util.Map#remove(java.lang.Object)
*/
@Override
public V remove(final Object key) {
return this.map.remove(key);
} | 3.68 |
hmily_HashedWheelTimer_processCancelledTasks | /**
* Processes task cancellations: polls tasks from the cancellation queue and removes them.
*/
private void processCancelledTasks() {
for (;;) {
HashedWheelTimeout timeout = cancelledTimeouts.poll();
if (timeout == null) {
// all processed
break;
}
try {
timeout.remove();
} catch (Throwable t) {
if (LOGGER.isWarnEnabled()) {
LOGGER.warn("An exception was thrown while process a cancellation task", t);
}
}
}
} | 3.68 |
hudi_OrcUtils_readAvroRecords | /**
* NOTE: This literally reads the entire file contents, thus should be used with caution.
*/
@Override
public List<GenericRecord> readAvroRecords(Configuration configuration, Path filePath, Schema avroSchema) {
List<GenericRecord> records = new ArrayList<>();
try (Reader reader = OrcFile.createReader(filePath, OrcFile.readerOptions(configuration))) {
TypeDescription orcSchema = reader.getSchema();
try (RecordReader recordReader = reader.rows(new Options(configuration).schema(orcSchema))) {
OrcReaderIterator<GenericRecord> iterator = new OrcReaderIterator<>(recordReader, avroSchema, orcSchema);
while (iterator.hasNext()) {
GenericRecord record = iterator.next();
records.add(record);
}
}
} catch (IOException io) {
throw new HoodieIOException("Unable to create an ORC reader for ORC file:" + filePath, io);
}
return records;
} | 3.68 |
hadoop_AvailableSpaceResolver_getSubclusterInfo | /**
* Get the mapping from NamespaceId to subcluster space info. It gets this
* mapping from the subclusters through expensive calls (e.g., RPC) and uses
* caching to avoid too many calls. The cache might be updated asynchronously
* to reduce latency.
*
* @return NamespaceId to {@link SubclusterAvailableSpace}.
*/
@Override
protected Map<String, SubclusterAvailableSpace> getSubclusterInfo(
MembershipStore membershipStore) {
Map<String, SubclusterAvailableSpace> mapping = new HashMap<>();
try {
// Get the Namenode's available space info from the subclusters
// from the Membership store.
GetNamenodeRegistrationsRequest request = GetNamenodeRegistrationsRequest
.newInstance();
GetNamenodeRegistrationsResponse response = membershipStore
.getNamenodeRegistrations(request);
final List<MembershipState> nns = response.getNamenodeMemberships();
for (MembershipState nn : nns) {
try {
String nsId = nn.getNameserviceId();
long availableSpace = nn.getStats().getAvailableSpace();
mapping.put(nsId, new SubclusterAvailableSpace(nsId, availableSpace));
} catch (Exception e) {
LOG.error("Cannot get stats info for {}: {}.", nn, e.getMessage());
}
}
} catch (IOException ioe) {
LOG.error("Cannot get Namenodes from the State Store.", ioe);
}
return mapping;
} | 3.68 |
hadoop_BalanceProcedureScheduler_submit | /**
* Submit the job.
*/
public synchronized void submit(BalanceJob job) throws IOException {
if (!running.get()) {
throw new IOException("Scheduler is shutdown.");
}
String jobId = allocateJobId();
job.setId(jobId);
job.setScheduler(this);
journal.saveJob(job);
jobSet.put(job, job);
runningQueue.add(job);
LOG.info("Add new job={}", job);
} | 3.68 |
framework_VColorPickerGrid_updateGrid | /**
* Updates the row and column count and creates a new grid based on them.
* The new grid replaces the old grid if one existed.
* <p>
* For internal use only. May be renamed or removed in a future release.
*
* @param rowCount
* how many rows the grid should have
* @param columnCount
* how many columns the grid should have
*/
public void updateGrid(int rowCount, int columnCount) {
rows = rowCount;
columns = columnCount;
this.remove(grid);
this.add(createGrid(), 0, 0);
} | 3.68 |
hudi_BaseHoodieQueueBasedExecutor_execute | /**
* Main API to run both production and consumption.
*/
@Override
public E execute() {
try {
checkState(this.consumer.isPresent());
setUp();
// Start consuming/producing asynchronously
this.consumingFuture = startConsumingAsync();
this.producingFuture = startProducingAsync();
// NOTE: To properly support mode when there's no consumer, we have to fall back
// to producing future as the trigger for us to shut down the queue
return allOf(Arrays.asList(producingFuture, consumingFuture))
.whenComplete((ignored, throwable) -> {
// Close the queue to release the resources
queue.close();
})
.thenApply(ignored -> consumer.get().finish())
// Block until producing and consuming both finish
.get();
} catch (Exception e) {
if (e instanceof InterruptedException) {
// In case an {@code InterruptedException} was thrown, the interrupted flag of the
// thread has been cleared; we set it (to true) again to permit subsequent handlers
// to be interrupted as well
Thread.currentThread().interrupt();
}
// Throw if we have already seen any other exception. There is a chance that cancellation/closing of producers with CompletableFuture wins before the actual exception
// is thrown.
if (this.queue.getThrowable() != null) {
throw new HoodieException(queue.getThrowable());
}
throw new HoodieException(e);
}
} | 3.68 |
flink_FromJarEntryClassInformationProvider_createFromPythonJar | /**
* Creates a {@code FromJarEntryClassInformationProvider} for a job implemented in Python.
*
* @return A {@code FromJarEntryClassInformationProvider} for a job implemented in Python
*/
public static FromJarEntryClassInformationProvider createFromPythonJar() {
return new FromJarEntryClassInformationProvider(
new File(PackagedProgramUtils.getPythonJar().getPath()),
PackagedProgramUtils.getPythonDriverClassName());
} | 3.68 |
hbase_HFileBlockIndex_locateNonRootIndexEntry | /**
* Search for one key using the secondary index in a non-root block. In case of success,
* positions the provided buffer at the entry of interest, where the file offset and the
* on-disk-size can be read.
* @param nonRootBlock a non-root block without a header; the initial position does not matter
* @param key the key to search for
* @return the index position where the given key was found, otherwise return -1 in the case the
* given key is before the first key.
*/
static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, CellComparator comparator) {
int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator);
if (entryIndex != -1) {
int numEntries = nonRootBlock.getIntAfterPosition(0);
// The end of secondary index and the beginning of entries themselves.
int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
// The offset of the entry we are interested in relative to the end of
// the secondary index.
int entryRelOffset = nonRootBlock.getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex));
nonRootBlock.position(entriesOffset + entryRelOffset);
}
return entryIndex;
} | 3.68 |
framework_VAbstractCalendarPanel_onMouseDown | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.MouseDownHandler#onMouseDown(com.google
* .gwt.event.dom.client.MouseDownEvent)
*/
@SuppressWarnings("unchecked")
@Override
public void onMouseDown(MouseDownEvent event) {
// Click-n-hold the left mouse button for fast-forward or fast-rewind.
// Timer is first used for a 500ms delay after mousedown. After that has
// elapsed, another timer is triggered to go off every 150ms. Both
// timers are cancelled on mouseup or mouseout.
if (event.getNativeButton() == NativeEvent.BUTTON_LEFT && event
.getSource() instanceof VAbstractCalendarPanel.VEventButton) {
final VEventButton sender = (VEventButton) event.getSource();
processClickEvent(sender);
mouseTimer = new Timer() {
@Override
public void run() {
mouseTimer = new Timer() {
@Override
public void run() {
processClickEvent(sender);
}
};
mouseTimer.scheduleRepeating(150);
}
};
mouseTimer.schedule(500);
}
} | 3.68 |
flink_RpcUtils_terminateRpcEndpoint | /**
* Shuts the given {@link RpcEndpoint}s down and awaits their termination.
*
* @param rpcEndpoints to terminate
* @throws ExecutionException if a problem occurred
* @throws InterruptedException if the operation has been interrupted
*/
@VisibleForTesting
public static void terminateRpcEndpoint(RpcEndpoint... rpcEndpoints)
throws ExecutionException, InterruptedException {
terminateAsyncCloseables(Arrays.asList(rpcEndpoints));
} | 3.68 |
framework_AbstractSelect_getItemIcon | /**
* Gets the item icon.
*
* @param itemId
* the id of the item to be assigned an icon.
* @return the icon for the item or null, if not specified.
*/
public Resource getItemIcon(Object itemId) {
final Resource explicit = itemIcons.get(itemId);
if (explicit != null) {
return explicit;
}
if (getItemIconPropertyId() == null) {
return null;
}
final Property<?> ip = getContainerProperty(itemId,
getItemIconPropertyId());
if (ip == null) {
return null;
}
final Object icon = ip.getValue();
if (icon instanceof Resource) {
return (Resource) icon;
}
return null;
} | 3.68 |
framework_InMemoryDataProviderHelpers_createEqualsFilter | /**
* Creates a predicate that compares equality of the given required value to
* the value the given value provider obtains.
*
* @param valueProvider
* the value provider to use
* @param requiredValue
* the required value
* @return the created predicate
*/
public static <T, V> SerializablePredicate<T> createEqualsFilter(
ValueProvider<T, V> valueProvider, V requiredValue) {
Objects.requireNonNull(valueProvider, "Value provider cannot be null");
return item -> Objects.equals(valueProvider.apply(item), requiredValue);
} | 3.68 |
dubbo_StubServiceDescriptor_getMethod | /**
* Does not use Optional as return type to avoid potential performance decrease.
*
* @param methodName
* @param paramTypes
* @return
*/
public MethodDescriptor getMethod(String methodName, Class<?>[] paramTypes) {
List<MethodDescriptor> methodModels = methods.get(methodName);
if (CollectionUtils.isNotEmpty(methodModels)) {
for (MethodDescriptor descriptor : methodModels) {
if (Arrays.equals(paramTypes, descriptor.getParameterClasses())) {
return descriptor;
}
}
}
return null;
} | 3.68 |
hbase_MasterFileSystem_getRootDir | /** Returns HBase root dir. */
public Path getRootDir() {
return this.rootdir;
} | 3.68 |
hadoop_TimelineEntityReaderFactory_createEntityTypeReader | /**
* Creates a timeline entity type reader that will read all available entity
* types within the specified context.
*
* @param context Reader context which defines the scope in which query has to
* be made. Limited to application level only.
* @return an <cite>EntityTypeReader</cite> object
*/
public static EntityTypeReader createEntityTypeReader(
TimelineReaderContext context) {
return new EntityTypeReader(context);
} | 3.68 |
hbase_AsyncTable_incrementColumnValue | /**
* Atomically increments a column value. If the column value already exists and is not a
* big-endian long, this could throw an exception. If the column value does not yet exist it is
* initialized to <code>amount</code> and written to the specified column.
* <p>
* Setting durability to {@link Durability#SKIP_WAL} means that in a fail scenario you will lose
* any increments that have not been flushed.
* @param row The row that contains the cell to increment.
* @param family The column family of the cell to increment.
* @param qualifier The column qualifier of the cell to increment.
* @param amount The amount to increment the cell with (or decrement, if the amount is
* negative).
* @param durability The persistence guarantee for this increment.
* @return The new value, post increment. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
default CompletableFuture<Long> incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
long amount, Durability durability) {
Preconditions.checkNotNull(row, "row is null");
Preconditions.checkNotNull(family, "family is null");
return increment(
new Increment(row).addColumn(family, qualifier, amount).setDurability(durability))
.thenApply(r -> Bytes.toLong(r.getValue(family, qualifier)));
} | 3.68 |
hudi_AbstractTableFileSystemView_fetchAllBaseFiles | /**
* Default implementation for fetching all base-files for a partition.
*
* @param partitionPath partition-path
*/
Stream<HoodieBaseFile> fetchAllBaseFiles(String partitionPath) {
return fetchAllStoredFileGroups(partitionPath).flatMap(HoodieFileGroup::getAllBaseFiles);
} | 3.68 |
flink_JoinOperator_projectTuple3 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2> ProjectJoin<I1, I2, Tuple3<T0, T1, T2>> projectTuple3() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes);
return new ProjectJoin<I1, I2, Tuple3<T0, T1, T2>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
hadoop_TimelinePutResponse_setErrors | /**
* Set the list to the given list of {@link TimelinePutError} instances
*
* @param errors
* a list of {@link TimelinePutError} instances
*/
public void setErrors(List<TimelinePutError> errors) {
this.errors.clear();
this.errors.addAll(errors);
} | 3.68 |
hbase_HFileArchiver_archiveRegion | /**
* Remove an entire region from the table directory via archiving the region's hfiles.
* @param fs {@link FileSystem} from which to remove the region
* @param rootdir {@link Path} to the root directory where hbase files are stored (for building
* the archive path)
* @param tableDir {@link Path} to where the table is being stored (for building the archive
* path)
* @param regionDir {@link Path} to where a region is being stored (for building the archive path)
* @return <tt>true</tt> if the region was successfully deleted. <tt>false</tt> if the filesystem
* operations could not complete.
* @throws IOException if the request cannot be completed
*/
public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
throws IOException {
// otherwise, we archive the files
// make sure we can archive
if (tableDir == null || regionDir == null) {
LOG.error("No archive directory could be found because tabledir (" + tableDir
+ ") or regiondir (" + regionDir + "was null. Deleting files instead.");
if (regionDir != null) {
deleteRegionWithoutArchiving(fs, regionDir);
}
// we should have archived, but failed to. Doesn't matter if we deleted
// the archived files correctly or not.
return false;
}
LOG.debug("ARCHIVING {}", regionDir);
// make sure the regiondir lives under the tabledir
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
CommonFSUtils.getTableName(tableDir), regionDir.getName());
FileStatusConverter getAsFile = new FileStatusConverter(fs);
// otherwise, we attempt to archive the store files
// build collection of just the store directories to archive
Collection<File> toArchive = new ArrayList<>();
final PathFilter dirFilter = new FSUtils.DirFilter(fs);
PathFilter nonHidden = new PathFilter() {
@Override
public boolean accept(Path file) {
return dirFilter.accept(file) && !file.getName().startsWith(".");
}
};
FileStatus[] storeDirs = CommonFSUtils.listStatus(fs, regionDir, nonHidden);
// if there no files, we can just delete the directory and return;
if (storeDirs == null) {
LOG.debug("Directory {} empty.", regionDir);
return deleteRegionWithoutArchiving(fs, regionDir);
}
// convert the files in the region to a File
Stream.of(storeDirs).map(getAsFile).forEachOrdered(toArchive::add);
LOG.debug("Archiving " + toArchive);
List<File> failedArchive =
resolveAndArchive(fs, regionArchiveDir, toArchive, EnvironmentEdgeManager.currentTime());
if (!failedArchive.isEmpty()) {
throw new FailedArchiveException(
"Failed to archive/delete all the files for region:" + regionDir.getName() + " into "
+ regionArchiveDir + ". Something is probably awry on the filesystem.",
failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList()));
}
// if that was successful, then we delete the region
return deleteRegionWithoutArchiving(fs, regionDir);
} | 3.68 |
flink_ResultPartition_onSubpartitionAllDataProcessed | /**
* The subpartition notifies that the corresponding downstream task has processed all the user
* records.
*
* @see EndOfData
* @param subpartition The index of the subpartition sending the notification.
*/
public void onSubpartitionAllDataProcessed(int subpartition) {} | 3.68 |
hadoop_FSDirAppendOp_computeQuotaDeltaForUCBlock | /** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
INodeFile file) {
final QuotaCounts delta = new QuotaCounts.Builder().build();
final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null) {
final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
final short repl = lastBlock.getReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = fsn.getFSDirectory()
.getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
List<StorageType> types = policy.chooseStorageTypes(repl);
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, diff);
}
}
}
return delta;
} | 3.68 |
hbase_GssSaslServerAuthenticationProvider_handle | /** {@inheritDoc} */
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback");
}
}
if (ac != null) {
String authid = ac.getAuthenticationID();
String authzid = ac.getAuthorizationID();
if (authid.equals(authzid)) {
ac.setAuthorized(true);
} else {
ac.setAuthorized(false);
}
if (ac.isAuthorized()) {
LOG.debug("SASL server GSSAPI callback: setting canonicalized client ID: {}", authzid);
ac.setAuthorizedID(authzid);
}
}
} | 3.68 |
hbase_MetricsIO_getInstance | /**
* Get a static instance for the MetricsIO so that accessors access the same instance. We want to
* lazy initialize so that correct process name is in place. See HBASE-27966 for more details.
*/
public static MetricsIO getInstance() {
if (instance == null) {
synchronized (MetricsIO.class) {
if (instance == null) {
instance = new MetricsIO(new MetricsIOWrapperImpl());
}
}
}
return instance;
} | 3.68 |
framework_CalendarTargetDetails_getTargetCalendar | /**
* @return the {@link Calendar} instance which was the target of the drop
*/
public Calendar getTargetCalendar() {
return (Calendar) getTarget();
} | 3.68 |
hadoop_CommonAuditContext_getGlobalContextEntry | /**
* Get a global entry.
* @param key key
* @return value or null
*/
public static String getGlobalContextEntry(String key) {
return GLOBAL_CONTEXT_MAP.get(key);
} | 3.68 |
hudi_DagUtils_convertDagToYaml | /**
* Converts {@link WorkflowDag} to a YAML representation.
*/
public static String convertDagToYaml(WorkflowDag dag) throws IOException {
final ObjectMapper yamlWriter = new ObjectMapper(new YAMLFactory().disable(Feature.WRITE_DOC_START_MARKER)
.enable(Feature.MINIMIZE_QUOTES).enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES));
JsonNode yamlNode = MAPPER.createObjectNode();
((ObjectNode) yamlNode).put(DAG_NAME, dag.getDagName());
((ObjectNode) yamlNode).put(DAG_ROUNDS, dag.getRounds());
((ObjectNode) yamlNode).put(DAG_INTERMITTENT_DELAY_MINS, dag.getIntermittentDelayMins());
JsonNode dagContentNode = MAPPER.createObjectNode();
convertDagToYaml(dagContentNode, dag.getNodeList());
((ObjectNode) yamlNode).put(DAG_CONTENT, dagContentNode);
return yamlWriter.writerWithDefaultPrettyPrinter().writeValueAsString(yamlNode);
} | 3.68 |