name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hbase_BackupRestoreFactory_getRestoreJob | /**
* Gets backup restore job
* @param conf configuration
* @return backup restore job instance
*/
public static RestoreJob getRestoreJob(Configuration conf) {
Class<? extends RestoreJob> cls =
conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class);
RestoreJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
} | 3.68 |
flink_StateTtlConfig_cleanupInRocksdbCompactFilter | /**
* Cleanup expired state while Rocksdb compaction is running.
*
* <p>The RocksDB compaction filter queries the current timestamp, used to check expiration,
* from Flink each time it has processed {@code queryTimeAfterNumEntries} state entries.
* Updating the timestamp more often can improve cleanup speed, but it decreases compaction
* performance because it requires a JNI call from native code.
*
* <p>Periodic compaction can speed up the cleanup of expired state entries, especially for
* entries that are rarely accessed. Files older than this value will be picked up for
* compaction and re-written to the same level they were on before, ensuring that each file
* goes through the compaction filter periodically.
*
* @param queryTimeAfterNumEntries number of state entries to process by compaction filter
* before updating current timestamp
* @param periodicCompactionTime periodic compaction which could speed up expired state
* cleanup. 0 means turning off periodic compaction.
*/
@Nonnull
public Builder cleanupInRocksdbCompactFilter(
long queryTimeAfterNumEntries, Time periodicCompactionTime) {
strategies.put(
CleanupStrategies.Strategies.ROCKSDB_COMPACTION_FILTER,
new RocksdbCompactFilterCleanupStrategy(
queryTimeAfterNumEntries, periodicCompactionTime));
return this;
} | 3.68 |
flink_Configuration_setLong | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key the option specifying the key to be added
* @param value the value of the key/value pair to be added
*/
@PublicEvolving
public void setLong(ConfigOption<Long> key, long value) {
setValueInternal(key.key(), value);
} | 3.68 |
hbase_FutureUtils_allOf | /**
* Returns a new CompletableFuture that is completed when all of the given CompletableFutures
* complete. If any of the given CompletableFutures complete exceptionally, then the returned
* CompletableFuture also does so, with a CompletionException holding this exception as its cause.
* Otherwise, the results of all the given CompletableFutures can be obtained from the returned
* CompletableFuture.
*/
public static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
.thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
} | 3.68 |
flink_PrioritizedOperatorSubtaskState_isRestored | /**
* Returns true if this was created for a restored operator, false otherwise. Restored operators
* are operators that participated in a previous checkpoint, even if they did not emit any state
* snapshots.
*/
public boolean isRestored() {
return restoredCheckpointId != null;
} | 3.68 |
nifi-maven_ExtensionClassLoaderFactory_createClassLoader | /* package visible for testing reasons */
ExtensionClassLoader createClassLoader(final Set<Artifact> artifacts, final ExtensionClassLoader parent, final Artifact narArtifact) throws MojoExecutionException {
final Set<URL> urls = new HashSet<>();
for (final Artifact artifact : artifacts) {
final Set<URL> artifactUrls = toURLs(artifact);
urls.addAll(artifactUrls);
}
getLog().debug("Creating class loader with following dependencies: " + urls);
final URL[] urlArray = urls.toArray(new URL[0]);
if (parent == null) {
return new ExtensionClassLoader(urlArray, narArtifact, artifacts);
} else {
return new ExtensionClassLoader(urlArray, parent, narArtifact, artifacts);
}
} | 3.68 |
flink_MutableHashTable_getNextBuffer | /**
* Gets the next buffer to be used with the hash-table, either for an in-memory partition, or
* for the table buckets. This method returns <tt>null</tt> if no more buffers are available.
* Spilling a partition may then free new buffers.
*
* @return The next buffer to be used by the hash-table, or null, if no buffer remains.
*/
final MemorySegment getNextBuffer() {
// check if the list directly offers memory
int s = this.availableMemory.size();
if (s > 0) {
return this.availableMemory.remove(s - 1);
}
// check if there are write behind buffers that actually are to be used for the hash table
if (this.writeBehindBuffersAvailable > 0) {
// grab at least one, no matter what
MemorySegment toReturn;
try {
toReturn = this.writeBehindBuffers.take();
} catch (InterruptedException iex) {
throw new RuntimeException(
"Hybrid Hash Join was interrupted while taking a buffer.");
}
this.writeBehindBuffersAvailable--;
// grab as many more buffers as are available directly
MemorySegment currBuff;
while (this.writeBehindBuffersAvailable > 0
&& (currBuff = this.writeBehindBuffers.poll()) != null) {
this.availableMemory.add(currBuff);
this.writeBehindBuffersAvailable--;
}
return toReturn;
} else {
// no memory available
return null;
}
} | 3.68 |
hadoop_Cluster_getAllJobs | /**
* Get all the jobs in cluster.
*
* @return array of {@link Job}
* @throws IOException
* @throws InterruptedException
* @deprecated Use {@link #getAllJobStatuses()} instead.
*/
@Deprecated
public Job[] getAllJobs() throws IOException, InterruptedException {
return getJobs(client.getAllJobs());
} | 3.68 |
morf_AbstractSqlDialectTest_testPower | /**
* Tests that POWER functionality builds the expected SQL string.
*/
@Test
public void testPower() {
SelectStatement statement = new SelectStatement(power(new FieldReference(FLOAT_FIELD), new FieldReference(INT_FIELD))).from(new TableReference(
TEST_TABLE));
String actual = testDialect.convertStatementToSQL(statement);
assertEquals("Power script should match expected", expectedPower(), actual);
} | 3.68 |
pulsar_AuthenticationProviderToken_getTokenAudience | // Get the token audience that stands for this broker from the configuration; return null if it is not configured.
private String getTokenAudience(ServiceConfiguration conf) throws IllegalArgumentException {
String tokenAudience = (String) conf.getProperty(confTokenAudienceSettingName);
if (StringUtils.isNotBlank(tokenAudience)) {
return tokenAudience;
} else {
return null;
}
} | 3.68 |
hadoop_EntityCacheItem_forceRelease | /**
* Force releasing the cache item for the given group id, even though there
* may be active references.
*/
public synchronized void forceRelease() {
try {
if (store != null) {
store.close();
}
} catch (IOException e) {
LOG.warn("Error closing timeline store", e);
}
store = null;
// reset offsets so next time logs are re-parsed
for (LogInfo log : appLogs.getDetailLogs()) {
if (log.getFilename().contains(groupId.toString())) {
log.setOffset(0);
}
}
LOG.debug("Cache for group {} released. ", groupId);
} | 3.68 |
hudi_HiveSyncTool_syncAllPartitions | /**
* Syncs all partitions on storage to the metastore, by only making incremental changes.
*
* @param tableName The table name in the metastore.
* @return {@code true} if one or more partition(s) are changed in the metastore;
* {@code false} otherwise.
*/
private boolean syncAllPartitions(String tableName) {
try {
if (config.getSplitStrings(META_SYNC_PARTITION_FIELDS).isEmpty()) {
return false;
}
List<Partition> allPartitionsInMetastore = syncClient.getAllPartitions(tableName);
List<String> allPartitionsOnStorage = syncClient.getAllPartitionPathsOnStorage();
return syncPartitions(
tableName,
syncClient.getPartitionEvents(allPartitionsInMetastore, allPartitionsOnStorage));
} catch (Exception e) {
throw new HoodieHiveSyncException("Failed to sync partitions for table " + tableName, e);
}
} | 3.68 |
flink_LeaderInformationRegister_getRegisteredComponentIds | /** Returns the {@code componentId}s for which leader information is stored. */
public Iterable<String> getRegisteredComponentIds() {
return leaderInformationPerComponentId.keySet();
} | 3.68 |
hadoop_HdfsFileStatus_build | /**
* @return An {@link HdfsFileStatus} instance from these parameters.
*/
public HdfsFileStatus build() {
if (null == locations && !isdir && null == symlink) {
return new HdfsNamedFileStatus(length, isdir, replication, blocksize,
mtime, atime, permission, flags, owner, group, symlink, path,
fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
}
return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
mtime, atime, permission, flags, owner, group, symlink, path,
fileId, childrenNum, feInfo, storagePolicy, ecPolicy, locations);
} | 3.68 |
framework_ColorPickerPopup_getColorHistory | /**
* Gets the color history.
*
* @return the color history
*/
public List<Color> getColorHistory() {
return Collections.unmodifiableList(history.getHistory());
} | 3.68 |
flink_RowtimeAttributeDescriptor_getWatermarkStrategy | /** Returns the {@link WatermarkStrategy} for the attribute. */
public WatermarkStrategy getWatermarkStrategy() {
return watermarkStrategy;
} | 3.68 |
flink_BaseHybridHashTable_hash | /**
* The level parameter is needed so that we can have different hash functions when we
* recursively apply the partitioning, so that the working set eventually fits into memory.
*/
public static int hash(int hashCode, int level) {
final int rotation = level * 11;
int code = Integer.rotateLeft(hashCode, rotation);
return code >= 0 ? code : -(code + 1);
} | 3.68 |
pulsar_SaslRoleToken_parse | /**
* Parses a string into an authentication token.
*
* @param tokenStr string representation of a token.
*
* @return the parsed authentication token.
*
* @throws AuthenticationException thrown if the string representation could not be parsed into
* an authentication token.
*/
public static SaslRoleToken parse(String tokenStr) throws AuthenticationException {
Map<String, String> map = split(tokenStr);
if (!map.keySet().equals(ATTRIBUTES)) {
throw new AuthenticationException("Invalid token string, missing attributes");
}
long expires = Long.parseLong(map.get(EXPIRES));
SaslRoleToken token = new SaslRoleToken(map.get(USER_ROLE), map.get(SESSION));
token.setExpires(expires);
return token;
} | 3.68 |
hadoop_ExecutionSummarizer_getExpectedDataSize | // Getters
protected String getExpectedDataSize() {
return expectedDataSize;
} | 3.68 |
hbase_ParseFilter_extractFilterSimpleExpression | /**
* Extracts a simple filter expression from the filter string given by the user
* <p>
* A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') The user given filter
* string can have many simpleFilterExpressions combined using operators.
* <p>
* This function extracts a simpleFilterExpression from the larger filterString given the start
* offset of the simpler expression
* <p>
* @param filterStringAsByteArray filter string given by the user
* @param filterExpressionStartOffset start index of the simple filter expression
* @return byte array containing the simple filter expression
*/
public byte[] extractFilterSimpleExpression(byte[] filterStringAsByteArray,
int filterExpressionStartOffset) throws CharacterCodingException {
int quoteCount = 0;
for (int i = filterExpressionStartOffset; i < filterStringAsByteArray.length; i++) {
if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE) {
if (isQuoteUnescaped(filterStringAsByteArray, i)) {
quoteCount++;
} else {
// To skip the next quote that has been escaped
i++;
}
}
if (filterStringAsByteArray[i] == ParseConstants.RPAREN && (quoteCount % 2) == 0) {
byte[] filterSimpleExpression = new byte[i - filterExpressionStartOffset + 1];
Bytes.putBytes(filterSimpleExpression, 0, filterStringAsByteArray,
filterExpressionStartOffset, i - filterExpressionStartOffset + 1);
return filterSimpleExpression;
}
}
throw new IllegalArgumentException("Incorrect Filter String");
} | 3.68 |
pulsar_ProducerImpl_scheduleBatchFlushTask | // must acquire semaphore before calling
private void scheduleBatchFlushTask(long batchingDelayMicros) {
ClientCnx cnx = cnx();
if (cnx != null && isBatchMessagingEnabled()) {
this.batchFlushTask = cnx.ctx().executor().schedule(catchingAndLoggingThrowables(this::batchFlushTask),
batchingDelayMicros, TimeUnit.MICROSECONDS);
}
} | 3.68 |
hadoop_FederationStateStoreUtils_decodeWritable | /**
* Decode Base64 string to Writable object.
*
* @param w Writable Key.
* @param idStr base64 string.
* @throws IOException raised on errors performing I/O.
*/
public static void decodeWritable(Writable w, String idStr) throws IOException {
DataInputStream in = new DataInputStream(
new ByteArrayInputStream(Base64.getUrlDecoder().decode(idStr)));
w.readFields(in);
} | 3.68 |
dubbo_ScriptStateRouter_getRoutedInvokers | /**
* get routed invokers from result of script rule evaluation
*/
@SuppressWarnings("unchecked")
protected BitList<Invoker<T>> getRoutedInvokers(BitList<Invoker<T>> invokers, Object obj) {
BitList<Invoker<T>> result = invokers.clone();
if (obj instanceof Invoker[]) {
result.retainAll(Arrays.asList((Invoker<T>[]) obj));
} else if (obj instanceof Object[]) {
result.retainAll(
Arrays.stream((Object[]) obj).map(item -> (Invoker<T>) item).collect(Collectors.toList()));
} else {
result.retainAll((List<Invoker<T>>) obj);
}
return result;
} | 3.68 |
hudi_ConflictDetectionUtils_getDefaultEarlyConflictDetectionStrategy | /**
* @param markerType Marker type.
* @return The class name of the default strategy for early conflict detection.
*/
public static String getDefaultEarlyConflictDetectionStrategy(MarkerType markerType) {
switch (markerType) {
case DIRECT:
return SimpleDirectMarkerBasedDetectionStrategy.class.getName();
case TIMELINE_SERVER_BASED:
default:
return AsyncTimelineServerBasedDetectionStrategy.class.getName();
}
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_setJobRunning | /**
* Set job running status.
*
* @param jobName job name
* @param isRunning job running status
*/
public void setJobRunning(final String jobName, final boolean isRunning) {
jobRunningMap.put(jobName, isRunning);
} | 3.68 |
flink_BulkIterationBase_setNextPartialSolution | /** @param result The operator producing the next partial solution. */
public void setNextPartialSolution(Operator<T> result) {
if (result == null) {
throw new NullPointerException(
"Operator producing the next partial solution must not be null.");
}
this.iterationResult = result;
} | 3.68 |
framework_AbstractLegacyComponent_setImmediate | /**
* Sets the component's immediate mode to the specified status.
* <p>
* Since Vaadin 8, the default mode is immediate.
*
* @param immediate
* the boolean value specifying if the component should be in the
* immediate mode after the call.
*/
public void setImmediate(boolean immediate) {
explicitImmediateValue = immediate;
getState().immediate = immediate;
} | 3.68 |
flink_PbCodegenUtils_convertFlinkArrayElementToPbWithDefaultValueCode | /**
* This method is called from the serializer of the Flink array/map type, because Flink
* represents both array and map types in array format. A protobuf map/array cannot contain
* null values, so we must convert any null values present in the Flink map/array type.
*
* @param flinkArrDataVar code phrase represent arrayData of arr type or keyData/valueData in
* map type.
* @param iVar the index in arrDataVar
* @param resultPbVar the returned pb variable name in codegen.
* @param elementPbFd {@link FieldDescriptor} of element type in proto object
* @param elementDataType {@link LogicalType} of element type in flink object
* @return The java code segment which represents field value retrieval.
*/
public static String convertFlinkArrayElementToPbWithDefaultValueCode(
String flinkArrDataVar,
String iVar,
String resultPbVar,
FieldDescriptor elementPbFd,
LogicalType elementDataType,
PbFormatContext pbFormatContext,
int indent)
throws PbCodegenException {
PbCodegenVarId varUid = PbCodegenVarId.getInstance();
int uid = varUid.getAndIncrement();
String flinkElementVar = "elementVar" + uid;
PbCodegenAppender appender = new PbCodegenAppender(indent);
String protoTypeStr = PbCodegenUtils.getTypeStrFromProto(elementPbFd, false);
String dataTypeStr = PbCodegenUtils.getTypeStrFromLogicType(elementDataType);
appender.appendLine(protoTypeStr + " " + resultPbVar);
appender.begin("if(" + flinkArrDataVar + ".isNullAt(" + iVar + ")){");
appender.appendLine(
resultPbVar
+ "="
+ PbCodegenUtils.pbDefaultValueCode(elementPbFd, pbFormatContext));
appender.end("}else{");
appender.begin();
appender.appendLine(dataTypeStr + " " + flinkElementVar);
String flinkContainerElementCode =
PbCodegenUtils.flinkContainerElementCode(flinkArrDataVar, iVar, elementDataType);
appender.appendLine(flinkElementVar + " = " + flinkContainerElementCode);
PbCodegenSerializer codegenSer =
PbCodegenSerializeFactory.getPbCodegenSer(
elementPbFd, elementDataType, pbFormatContext);
String code = codegenSer.codegen(resultPbVar, flinkElementVar, appender.currentIndent());
appender.appendSegment(code);
appender.end("}");
return appender.code();
} | 3.68 |
hbase_NamespaceDescriptor_setConfiguration | /**
* Setter for storing a configuration setting in {@link #configuration} map.
* @param key Config key. Same as XML config key e.g. hbase.something.or.other.
* @param value String value. If null, removes the setting.
*/
public void setConfiguration(String key, String value) {
if (value == null) {
removeConfiguration(key);
} else {
configuration.put(key, value);
}
} | 3.68 |
hadoop_ByteArrayEncodingState_convertToByteBufferState | /**
* Convert to a ByteBufferEncodingState when it's backed by on-heap arrays.
*/
ByteBufferEncodingState convertToByteBufferState() {
ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
for (int i = 0; i < inputs.length; i++) {
newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(inputs[i],
inputOffsets[i], encodeLength);
}
for (int i = 0; i < outputs.length; i++) {
newOutputs[i] = ByteBuffer.allocateDirect(encodeLength);
}
ByteBufferEncodingState bbeState = new ByteBufferEncodingState(encoder,
encodeLength, newInputs, newOutputs);
return bbeState;
} | 3.68 |
framework_VFilterSelect_selectItem | /*
* Sets the selected item in the popup menu.
*/
private void selectItem(final MenuItem newSelectedItem) {
menu.selectItem(newSelectedItem);
// Set the icon.
FilterSelectSuggestion suggestion = (FilterSelectSuggestion) newSelectedItem
.getCommand();
setSelectedItemIcon(suggestion.getIconUri());
// Set the text.
setText(suggestion.getReplacementString());
} | 3.68 |
flink_RpcEndpoint_scheduleRunAsync | /**
* Execute the runnable in the main thread of the underlying RPC endpoint, after the given
* delay has elapsed.
*
* @param runnable Runnable to be executed
* @param delay The delay after which the runnable will be executed
* @param unit The time unit of the delay
*/
protected void scheduleRunAsync(Runnable runnable, long delay, TimeUnit unit) {
rpcServer.scheduleRunAsync(runnable, unit.toMillis(delay));
} | 3.68 |
hadoop_MoveStep_toString | /**
* Returns a string representation of the object.
*
* @return a string representation of the object.
*/
@Override
public String toString() {
return String.format("%s\t %s\t %s\t %s%n",
this.getSourceVolume().getPath(),
this.getDestinationVolume().getPath(),
getSizeString(this.getBytesToMove()),
this.getDestinationVolume().getStorageType());
} | 3.68 |
hbase_BucketAllocator_fragmentationBytes | /**
* Every time you allocate blocks in these buckets where the block size is less than the bucket
* size, fragmentation increases by that difference. You can reduce fragmentation by lowering
* the bucket size so that it is closer to the typical block size. This may have the consequence
* of bumping some blocks to the next larger bucket size, so experimentation may be needed.
*/
public long fragmentationBytes() {
return fragmentationBytes;
} | 3.68 |
flink_FlinkS3PrestoFileSystem_deleteObject | /**
* Deletes the object referenced by the passed {@code path}. This method is used to work around
* the fact that Presto doesn't allow us to differentiate between deleting a non-existing object
* and some other errors. Therefore, a final check for existence is necessary in case of an
* error or false return value.
*
* @param path The path referring to the object that shall be deleted.
* @throws IOException if an error occurred while deleting the file other than the {@code path}
* referring to a non-empty directory.
*/
private void deleteObject(Path path) throws IOException {
boolean success = true;
IOException actualException = null;
try {
// empty directories will cause this method to fail as well - checking for their
// existence afterwards is a workaround to cover this use-case
success = super.delete(path, false);
} catch (IOException e) {
actualException = e;
}
if (!success || actualException != null) {
if (exists(path)) {
throw Optional.ofNullable(actualException)
.orElse(
new IOException(
path.getPath()
+ " could not be deleted for unknown reasons."));
}
}
} | 3.68 |
flink_ExecutionEnvironment_registerTypeWithKryoSerializer | /**
* Registers the given Serializer via its class as a serializer for the given type at the
* KryoSerializer.
*
* @param type The class of the types serialized with the given serializer.
* @param serializerClass The class of the serializer to use.
*/
public void registerTypeWithKryoSerializer(
Class<?> type, Class<? extends Serializer<?>> serializerClass) {
config.registerTypeWithKryoSerializer(type, serializerClass);
} | 3.68 |
hmily_HmilyTccTransactionExecutor_globalConfirm | /**
* Calls the confirm method. If the initiator reaches this point, it calls either the remote
* or the original method; the context marks the call as confirm, so the remote service
* invokes its confirm method.
*
* @param currentTransaction {@linkplain HmilyTransaction}
* @throws HmilyRuntimeException ex
*/
public void globalConfirm(final HmilyTransaction currentTransaction) throws HmilyRuntimeException {
LogUtil.debug(LOGGER, () -> "hmily transaction confirm .......!start");
if (Objects.isNull(currentTransaction) || CollectionUtils.isEmpty(currentTransaction.getHmilyParticipants())) {
return;
}
currentTransaction.setStatus(HmilyActionEnum.CONFIRMING.getCode());
HmilyRepositoryStorage.updateHmilyTransactionStatus(currentTransaction);
final List<HmilyParticipant> hmilyParticipants = currentTransaction.getHmilyParticipants();
List<Boolean> successList = new ArrayList<>();
for (HmilyParticipant hmilyParticipant : hmilyParticipants) {
try {
if (hmilyParticipant.getRole() == HmilyRoleEnum.START.getCode()) {
HmilyReflector.executor(HmilyActionEnum.CONFIRMING, ExecutorTypeEnum.LOCAL, hmilyParticipant);
HmilyRepositoryStorage.removeHmilyParticipant(hmilyParticipant);
} else {
HmilyReflector.executor(HmilyActionEnum.CONFIRMING, ExecutorTypeEnum.RPC, hmilyParticipant);
}
successList.add(true);
} catch (Throwable e) {
successList.add(false);
LOGGER.error("HmilyParticipant confirm exception param:{} ", hmilyParticipant.toString(), e);
} finally {
HmilyContextHolder.remove();
}
}
if (successList.stream().allMatch(e -> e)) {
// remove global
HmilyRepositoryStorage.removeHmilyTransaction(currentTransaction);
}
} | 3.68 |
framework_Form_detachField | /**
* Called when a form field is detached from a Form. Typically when a new
* Item is assigned to Form via {@link #setItemDataSource(Item)}.
* <p>
* Override this method to control how the fields are removed from the
* layout.
* </p>
*
* @param field
* the field to be detached from the forms layout.
*/
protected void detachField(final Field field) {
Component p = field.getParent();
if (p instanceof ComponentContainer) {
((ComponentContainer) p).removeComponent(field);
}
} | 3.68 |
framework_VRadioButtonGroup_isReadonly | /**
* Returns whether this radio button group is read-only or not.
*
* @return {@code true} if this widget is read-only, {@code false} otherwise
*/
public boolean isReadonly() {
return readonly;
} | 3.68 |
hbase_HFileCorruptionChecker_checkRegionDir | /**
* Check all column families in a region dir.
* @param regionDir region directory
*/
protected void checkRegionDir(Path regionDir) throws IOException {
FileStatus[] statuses = null;
try {
statuses = fs.listStatus(regionDir);
} catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn("Region Directory " + regionDir
+ " does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(regionDir);
return;
}
List<FileStatus> cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (cfs.isEmpty() && !fs.exists(regionDir)) {
LOG.warn("Region Directory " + regionDir
+ " does not exist. Likely due to concurrent split/compaction. Skipping.");
missing.add(regionDir);
return;
}
LOG.info("Checking Region Directory {}. Number of entries = {}", regionDir, cfs.size());
for (FileStatus cfFs : cfs) {
Path cfDir = cfFs.getPath();
checkColFamDir(cfDir);
}
} | 3.68 |
hbase_BulkLoadHFilesTool_doBulkLoad | /**
* Perform a bulk load of the given directory into the given pre-existing table. This method is
* not threadsafe.
* @param tableName table to load the hfiles
* @param hfofDir the directory that was provided as the output path of a job using
* HFileOutputFormat
* @param silence true to ignore unmatched column families
* @param copyFile always copy hfiles if true
*/
private Map<LoadQueueItem, ByteBuffer> doBulkLoad(AsyncClusterConnection conn,
TableName tableName, Path hfofDir, boolean silence, boolean copyFile) throws IOException {
tableExists(conn, tableName);
/*
* Checking hfile format is a time-consuming operation, we should have an option to skip this
* step when bulkloading millions of HFiles. See HBASE-13985.
*/
boolean validateHFile = getConf().getBoolean(VALIDATE_HFILES, true);
if (!validateHFile) {
LOG.warn("You are skipping HFiles validation, it might cause some data loss if files "
+ "are not correct. If you fail to read data from your table after using this "
+ "option, consider removing the files and bulkload again without this option. "
+ "See HBASE-13985");
}
// LQI queue does not need to be threadsafe -- all operations on this queue
// happen in this thread
Deque<LoadQueueItem> queue = new ArrayDeque<>();
ExecutorService pool = null;
try {
prepareHFileQueue(getConf(), conn, tableName, hfofDir, queue, validateHFile, silence);
if (queue.isEmpty()) {
LOG.warn(
"Bulk load operation did not find any files to load in directory {}. "
+ "Does it contain files in subdirectories that correspond to column family names?",
(hfofDir != null ? hfofDir.toUri().toString() : ""));
return Collections.emptyMap();
}
pool = createExecutorService();
return performBulkLoad(conn, tableName, queue, pool, copyFile);
} finally {
cleanup(conn, tableName, queue, pool);
}
} | 3.68 |
hbase_HFileBlockIndex_writeInlineBlock | /**
* Write out the current inline index block. Inline blocks are non-root blocks, so the non-root
* index format is used.
*/
@Override
public void writeInlineBlock(DataOutput out) throws IOException {
if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
// Write the inline block index to the output stream in the non-root
// index block format.
indexBlockEncoder.encode(curInlineChunk, false, out);
// Save the first key of the inline block so that we can add it to the
// parent-level index.
firstKey = curInlineChunk.getBlockKey(0);
// Start a new inline index block
curInlineChunk.clear();
} | 3.68 |
hadoop_Validate_checkPathExistsAsDir | /**
* Validates that the given path exists and is a directory.
* @param path the path to check.
* @param argName the name of the argument being validated.
*/
public static void checkPathExistsAsDir(Path path, String argName) {
checkPathExists(path, argName);
checkArgument(
Files.isDirectory(path),
"Path %s (%s) must point to a directory.",
argName,
path);
} | 3.68 |
hadoop_Sets_differenceInTreeSets | /**
* Returns the difference of two sets as an unmodifiable set.
* The returned set contains all elements that are contained by {@code set1}
* and not contained by {@code set2}.
*
* <p>Results are undefined if {@code set1} and {@code set2} are sets based
* on different equivalence relations (as {@code HashSet}, {@code TreeSet},
* and the keySet of an {@code IdentityHashMap} all are).
*
* This method is used to find difference for TreeSets. For HashSets,
* recommended method is {@link #difference(Set, Set)}.
*
* @param <E> Generics Type E.
* @param set1 set1.
* @param set2 set2.
* @return an unmodifiable {@code Set} containing the elements of {@code set1} that are not in {@code set2}.
*/
public static <E> Set<E> differenceInTreeSets(
final Set<E> set1, final Set<E> set2) {
if (set1 == null) {
throw new NullPointerException("set1");
}
if (set2 == null) {
throw new NullPointerException("set2");
}
Set<E> newSet = new TreeSet<>(set1);
newSet.removeAll(set2);
return Collections.unmodifiableSet(newSet);
} | 3.68 |
framework_VTabsheet_canSelectTab | /**
* Returns whether the tab could be selected or not. In addition to 'usual'
* selection blockers like being disabled or hidden, if the tab sheet is
* already waiting for selection confirmation from the server, any further
* selections are blocked until the response has been received.
*
* @param tabIndex
* the index of the tab to check
*
* @return {@code true} if selectable, {@code false} otherwise
*/
private boolean canSelectTab(final int tabIndex) {
if (getApplicationConnection() == null || disabled
|| waitingForResponse) {
return false;
}
Tab tab = tb.getTab(tabIndex);
if (!tab.isEnabledOnServer() || tab.isHiddenOnServer()) {
return false;
}
// Note that we return true when tabIndex == activeTabIndex; the active
// tab could be selected, it's just a no-op.
return true;
} | 3.68 |
framework_AbstractSplitPanel_getMaxSplitPositionUnit | /**
* Returns the unit of the maximum position of the splitter.
*
* @return the unit of the maximum position of the splitter
*/
public Unit getMaxSplitPositionUnit() {
return posMaxUnit;
} | 3.68 |
hbase_AccessController_start | /* ---- MasterObserver implementation ---- */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
CompoundConfiguration conf = new CompoundConfiguration();
conf.add(env.getConfiguration());
authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
if (!authorizationEnabled) {
LOG.warn("AccessController has been loaded with authorization checks DISABLED!");
}
shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
if (!cellFeaturesEnabled) {
LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
+ " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY
+ " accordingly.");
}
if (env instanceof MasterCoprocessorEnvironment) {
// if running on HMaster
MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
if (mEnv instanceof HasMasterServices) {
MasterServices masterServices = ((HasMasterServices) mEnv).getMasterServices();
zkPermissionWatcher = masterServices.getZKPermissionWatcher();
accessChecker = masterServices.getAccessChecker();
}
} else if (env instanceof RegionServerCoprocessorEnvironment) {
RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
if (rsEnv instanceof HasRegionServerServices) {
RegionServerServices rsServices =
((HasRegionServerServices) rsEnv).getRegionServerServices();
zkPermissionWatcher = rsServices.getZKPermissionWatcher();
accessChecker = rsServices.getAccessChecker();
}
} else if (env instanceof RegionCoprocessorEnvironment) {
// if running at region
regionEnv = (RegionCoprocessorEnvironment) env;
conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
if (regionEnv instanceof HasRegionServerServices) {
RegionServerServices rsServices =
((HasRegionServerServices) regionEnv).getRegionServerServices();
zkPermissionWatcher = rsServices.getZKPermissionWatcher();
accessChecker = rsServices.getAccessChecker();
}
}
Preconditions.checkState(zkPermissionWatcher != null, "ZKPermissionWatcher is null");
Preconditions.checkState(accessChecker != null, "AccessChecker is null");
// set the user-provider.
this.userProvider = UserProvider.instantiate(env.getConfiguration());
tableAcls = new MapMaker().weakValues().makeMap();
} | 3.68 |
hadoop_PublishedConfiguration_asProperties | /**
* Convert values to properties
* @return a property file
*/
public Properties asProperties() {
Properties props = new Properties();
props.putAll(entries);
return props;
} | 3.68 |
hadoop_NodePlan_getVolumeSetPlans | /**
* Returns the list of volume set plans.
*
* @return List of steps.
*/
public List<Step> getVolumeSetPlans() {
return volumeSetPlans;
} | 3.68 |
hbase_SnapshotInfo_getLogsCount | /** Returns the number of available log files */
public int getLogsCount() {
return logsCount.get();
} | 3.68 |
hbase_TableState_isDisabledOrDisabling | /** Returns true if {@link State#DISABLED} or {@link State#DISABLING} */
public boolean isDisabledOrDisabling() {
return isInStates(State.DISABLED, State.DISABLING);
} | 3.68 |
flink_ContinuousEventTimeTrigger_of | /**
* Creates a trigger that continuously fires based on the given interval.
*
* @param interval The time interval at which to fire.
* @param <W> The type of {@link Window Windows} on which this trigger can operate.
*/
public static <W extends Window> ContinuousEventTimeTrigger<W> of(Time interval) {
return new ContinuousEventTimeTrigger<>(interval.toMilliseconds());
} | 3.68 |
hbase_RegionNormalizerManager_getMergePlanCount | /**
* Return the number of times a {@link MergeNormalizationPlan} has been submitted.
*/
public long getMergePlanCount() {
return worker == null ? 0 : worker.getMergePlanCount();
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_tryToDeleteNodesPhysically | /** Try to delete some nodes that has been logically removed. */
private void tryToDeleteNodesPhysically() {
if (highestRequiredSnapshotVersionPlusOne != 0) {
return;
}
int threshold = (int) (totalSize * logicalRemovedKeysRatio);
int size = logicallyRemovedNodes.size();
if (size > threshold) {
deleteLogicallyRemovedNodes(size - threshold);
}
} | 3.68 |
hadoop_RouterMetricsService_getRouterClientMetrics | /**
* Get the metrics system for the Router Client.
*
* @return Router Client metrics.
*/
public RouterClientMetrics getRouterClientMetrics() {
return this.routerClientMetrics;
} | 3.68 |
hadoop_BaseTableRW_getResult | /**
*
* @param hbaseConf used to read settings that override defaults
* @param conn used to create table from
* @param get that specifies what single row you want to get from this table
* @return result of get operation
* @throws IOException if any exception occurs while getting the result.
*/
public Result getResult(Configuration hbaseConf, Connection conn, Get get)
throws IOException {
Table table = conn.getTable(getTableName(hbaseConf));
return table.get(get);
} | 3.68 |
hadoop_TaskPool_throwOne | /**
* Throw one exception, adding the others as suppressed
* exceptions attached to the one thrown.
* This method never completes normally.
* @param exceptions collection of exceptions
* @param <E> class of exceptions
* @throws E an extracted exception.
*/
private static <E extends Exception> void throwOne(
Collection<Exception> exceptions)
throws E {
Iterator<Exception> iter = exceptions.iterator();
Exception e = iter.next();
Class<? extends Exception> exceptionClass = e.getClass();
while (iter.hasNext()) {
Exception other = iter.next();
if (!exceptionClass.isInstance(other)) {
e.addSuppressed(other);
}
}
TaskPool.<E>castAndThrow(e);
} | 3.68 |
flink_TieredStorageProducerClient_writeAccumulatedBuffers | /**
* Write the accumulated buffers of this subpartitionId to the appropriate tiers.
*
* @param subpartitionId the subpartition identifier
* @param accumulatedBuffers the accumulated buffers of this subpartition
*/
private void writeAccumulatedBuffers(
TieredStorageSubpartitionId subpartitionId, List<Buffer> accumulatedBuffers) {
Iterator<Buffer> bufferIterator = accumulatedBuffers.iterator();
int numWriteBytes = 0;
int numWriteBuffers = 0;
while (bufferIterator.hasNext()) {
Buffer buffer = bufferIterator.next();
numWriteBuffers++;
numWriteBytes += buffer.readableBytes();
try {
writeAccumulatedBuffer(subpartitionId, buffer);
} catch (IOException ioe) {
buffer.recycleBuffer();
while (bufferIterator.hasNext()) {
bufferIterator.next().recycleBuffer();
}
ExceptionUtils.rethrow(ioe);
}
}
updateMetricStatistics(numWriteBuffers, numWriteBytes);
} | 3.68 |
hbase_MiniHBaseCluster_getRegionServer | /**
* Grab a numbered region server of your choice.
* @param serverNumber the index of the region server to return
* @return region server
*/
public HRegionServer getRegionServer(int serverNumber) {
return hbaseCluster.getRegionServer(serverNumber);
} | 3.68 |
hbase_Scan_withStopRow | /**
* Set the stop row of the scan.
* <p>
* The scan will include rows that are lexicographically less than (or equal to if
* {@code inclusive} is {@code true}) the provided stopRow.
* <p>
* <b>Note:</b> <strong>Do NOT use this in combination with {@link #setRowPrefixFilter(byte[])} or
* {@link #setStartStopRowForPrefixScan(byte[])}.</strong> Doing so will make the scan result
* unexpected or even undefined.
* </p>
* @param stopRow row to end at
* @param inclusive whether we should include the stop row when scan
* @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow, boolean inclusive) {
if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
throw new IllegalArgumentException("stopRow's length must be less than or equal to "
+ HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
}
this.stopRow = stopRow;
this.includeStopRow = inclusive;
return this;
} | 3.68 |
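A hedged sketch of a bounded scan using the method above; `withStartRow` and the example row keys are assumptions beyond the snippet itself.

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical key range: scan from "row-0001" up to and including "row-0099".
Scan scan = new Scan()
    .withStartRow(Bytes.toBytes("row-0001"))
    .withStopRow(Bytes.toBytes("row-0099"), true);
```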
hudi_HoodieLogFileReader_hasPrev | /**
* hasPrev is not idempotent.
*/
@Override
public boolean hasPrev() {
try {
if (!this.reverseReader) {
throw new HoodieNotSupportedException("Reverse log reader has not been enabled");
}
reverseLogFilePosition = lastReverseLogFilePosition;
reverseLogFilePosition -= Long.BYTES;
lastReverseLogFilePosition = reverseLogFilePosition;
inputStream.seek(reverseLogFilePosition);
} catch (Exception e) {
// Either reached EOF while reading backwards or an exception
return false;
}
return true;
} | 3.68 |
flink_HybridShuffleConfiguration_getSpillingStrategyType | /** Get {@link SpillingStrategyType} for hybrid shuffle mode. */
public SpillingStrategyType getSpillingStrategyType() {
return spillingStrategyType;
} | 3.68 |
pulsar_ProcessRuntime_start | /**
* The core logic that initialize the process container and executes the function.
*/
@Override
public void start() {
java.lang.Runtime.getRuntime().addShutdownHook(new Thread(() -> process.destroy()));
// Note: we create the expected log folder before the function process logger attempts to create it
// This is because if multiple instances are launched, they can race to create the directory.
log.info("Creating function log directory {}", funcLogDir);
try {
Files.createDirectories(Paths.get(funcLogDir));
} catch (IOException e) {
log.info("Exception when creating log folder : {}", funcLogDir, e);
throw new RuntimeException("Log folder creation error");
}
log.info("Created or found function log directory {}", funcLogDir);
startProcess();
if (channel == null && stub == null) {
channel = ManagedChannelBuilder.forAddress("127.0.0.1", instancePort)
.usePlaintext()
.build();
stub = InstanceControlGrpc.newFutureStub(channel);
timer = InstanceCache.getInstanceCache().getScheduledExecutorService()
.scheduleAtFixedRate(catchingAndLoggingThrowables(() -> {
CompletableFuture<InstanceCommunication.HealthCheckResult> result = healthCheck();
try {
result.get();
} catch (Exception e) {
log.error("Health check failed for {}-{}",
instanceConfig.getFunctionDetails().getName(),
instanceConfig.getInstanceId(), e);
}
}), expectedHealthCheckInterval, expectedHealthCheckInterval, TimeUnit.SECONDS);
}
} | 3.68 |
flink_TypeStrategies_first | /** Type strategy that returns the first type that could be inferred. */
public static TypeStrategy first(TypeStrategy... strategies) {
return new FirstTypeStrategy(Arrays.asList(strategies));
} | 3.68 |
AreaShop_PlayerLoginLogoutListener_updateLastActive | /**
* Update the last active time for all regions the player is owner of.
* @param player The player to update the active times for
*/
private void updateLastActive(Player player) {
for(GeneralRegion region : plugin.getFileManager().getRegions()) {
if(region.isOwner(player)) {
region.updateLastActiveTime();
}
}
} | 3.68 |
morf_DatabaseDataSetConsumer_close | /**
* @see org.alfasoftware.morf.dataset.DataSetConsumer#close(org.alfasoftware.morf.dataset.DataSetConsumer.CloseState)
*/
@Override
public void close(CloseState closeState) {
try {
try {
if (CloseState.COMPLETE.equals(closeState)) {
log.debug("Closing and committing");
connection.commit();
} else {
log.debug("Rolling back");
connection.rollback();
}
} finally {
connection.setAutoCommit(wasAutoCommit);
connection.close();
}
} catch (SQLException e) {
throw new RuntimeSqlException("Error committing and closing", e);
}
} | 3.68 |
hibernate-validator_ExecutableParameterNameProvider_equals | /**
* Equality is based on identity of the delegate.
*/
@Override
public boolean equals(Object obj) {
if ( this == obj ) {
return true;
}
if ( obj == null ) {
return false;
}
if ( getClass() != obj.getClass() ) {
return false;
}
ExecutableParameterNameProvider other = (ExecutableParameterNameProvider) obj;
return delegate == other.delegate;
} | 3.68 |
morf_HumanReadableStatementHelper_generateFieldSymbolString | /**
* Generates a string describing a field. This is a symbolic name, if available, otherwise any literal values.
*
* @param field the field to describe.
* @return a string containing the name of the field.
*/
private static String generateFieldSymbolString(final AliasedField field) {
if (StringUtils.isEmpty(field.getImpliedName())) {
if (field instanceof Cast) {
return generateFieldSymbolString(((Cast)field).getExpression());
} else {
return generateFieldValueString(field);
}
} else {
return field.getImpliedName();
}
} | 3.68 |
framework_VAcceptCriteria_get | /**
* Returns the accept criterion that matches the given identifier.
*
* @param name
* the identifier
* @return the accept criterion
*/
public static VAcceptCriterion get(String name) {
return impl.get(name);
} | 3.68 |
framework_ColorPickerPreviewElement_getColorTextField | /**
* @return <code>WebElement</code> representing TextField in
* ColorPickerPreviewComponent
*
* @since 8.4
*/
public WebElement getColorTextField() {
return findElement(By.className("v-colorpicker-preview-textfield"));
} | 3.68 |
AreaShop_AreaShop_notifyUpdate | /**
* Notify a player about an update if he wants notifications about it and an update is available.
* @param sender CommandSender to notify
*/
public void notifyUpdate(CommandSender sender) {
if(githubUpdateCheck != null && githubUpdateCheck.hasUpdate() && sender.hasPermission("areashop.notifyupdate")) {
AreaShop.getInstance().message(sender, "update-playerNotify", cleanVersion(githubUpdateCheck.getCurrentVersion()), cleanVersion(githubUpdateCheck.getLatestVersion()));
}
} | 3.68 |
hadoop_BlockBlobInputStream_write | /**
* Writes a range of bytes to the stream.
* @param b a byte array.
* @param off the start offset in <code>b</code> from which the data
* is read.
* @param length the number of bytes to be written.
* @throws IOException IO failure
*/
public synchronized void write(byte[] b, int off, int length)
throws IOException {
if (b == null) {
throw new NullPointerException("Null buffer argument");
}
if (off < 0 || length < 0 || length > b.length - off) {
throw new IndexOutOfBoundsException("array write offset");
}
System.arraycopy(b, off, buffer, writePosition, length);
writePosition += length;
} | 3.68 |
hadoop_ConnectionPool_getMaxSize | /**
* Get the maximum number of connections allowed in this pool.
*
* @return Maximum number of connections.
*/
protected int getMaxSize() {
return this.maxSize;
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_jobShardingStrategyType | /**
* Set job sharding strategy type.
*
* <p>
* Default for {@code AverageAllocationJobShardingStrategy}.
* </p>
*
* @param jobShardingStrategyType job sharding strategy type
* @return ElasticJob configuration builder
*/
public Builder jobShardingStrategyType(final String jobShardingStrategyType) {
if (null != jobShardingStrategyType) {
this.jobShardingStrategyType = jobShardingStrategyType;
}
return this;
} | 3.68 |
framework_HierarchyMapper_getParentOfItem | /**
* Find parent for the given item among open folders.
*
* @param item
* the item
* @return parent item or {@code null} for root items or if the parent is
* closed
*/
protected T getParentOfItem(T item) {
Objects.requireNonNull(item, "Can not find the parent of null");
return parentIdMap.get(getDataProvider().getId(item));
} | 3.68 |
flink_RocksDBHeapTimersFullRestoreOperation_restoreKVStateData | /**
* Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state
* handle.
*/
private void restoreKVStateData(
ThrowingIterator<KeyGroup> keyGroups,
Map<Integer, ColumnFamilyHandle> columnFamilies,
Map<Integer, HeapPriorityQueueSnapshotRestoreWrapper<?>> restoredPQStates)
throws IOException, RocksDBException, StateMigrationException {
// for all key-groups in the current state handle...
try (RocksDBWriteBatchWrapper writeBatchWrapper =
new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize)) {
HeapPriorityQueueSnapshotRestoreWrapper<HeapPriorityQueueElement> restoredPQ = null;
ColumnFamilyHandle handle = null;
while (keyGroups.hasNext()) {
KeyGroup keyGroup = keyGroups.next();
try (ThrowingIterator<KeyGroupEntry> groupEntries = keyGroup.getKeyGroupEntries()) {
int oldKvStateId = -1;
while (groupEntries.hasNext()) {
KeyGroupEntry groupEntry = groupEntries.next();
int kvStateId = groupEntry.getKvStateId();
if (kvStateId != oldKvStateId) {
oldKvStateId = kvStateId;
handle = columnFamilies.get(kvStateId);
restoredPQ = getRestoredPQ(restoredPQStates, kvStateId);
}
if (restoredPQ != null) {
restoreQueueElement(restoredPQ, groupEntry);
} else if (handle != null) {
writeBatchWrapper.put(
handle, groupEntry.getKey(), groupEntry.getValue());
} else {
throw new IllegalStateException("Unknown state id: " + kvStateId);
}
}
}
}
}
} | 3.68 |
flink_ResourceSpec_merge | /**
* Used by system internally to merge the other resources of chained operators when generating
* the job graph.
*
* @param other Reference to resource to merge in.
* @return The new resource with merged values.
*/
public ResourceSpec merge(final ResourceSpec other) {
checkNotNull(other, "Cannot merge with null resources");
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
Map<String, ExternalResource> resultExtendedResource = new HashMap<>(extendedResources);
other.extendedResources.forEach(
(String name, ExternalResource resource) -> {
resultExtendedResource.compute(
name,
(ignored, oldResource) ->
oldResource == null ? resource : oldResource.merge(resource));
});
return new ResourceSpec(
this.cpuCores.merge(other.cpuCores),
this.taskHeapMemory.add(other.taskHeapMemory),
this.taskOffHeapMemory.add(other.taskOffHeapMemory),
this.managedMemory.add(other.managedMemory),
resultExtendedResource);
} | 3.68 |
flink_BulkIterationBase_getNextPartialSolution | /** @return The operator representing the next partial solution. */
public Operator<T> getNextPartialSolution() {
return this.iterationResult;
} | 3.68 |
framework_TransactionalPropertyWrapper_detachFromProperty | /**
* Removes the ValueChangeListener from wrapped Property that was added by
* TransactionalPropertyWrapper.
*
* @since 7.1.15
*/
public void detachFromProperty() {
if (wrappedProperty instanceof ValueChangeNotifier) {
((ValueChangeNotifier) wrappedProperty)
.removeValueChangeListener(listener);
}
} | 3.68 |
hadoop_FederationStateStoreFacade_getTokenByRouterStoreToken | /**
* The Router supports getting a token, represented by an {@link RMDelegationTokenIdentifier}, from the Router state store.
*
* @param identifier delegation tokens from the RM
* @return RouterStoreToken
* @throws YarnException if the call to the state store is unsuccessful
* @throws IOException An IO Error occurred
*/
public RouterRMTokenResponse getTokenByRouterStoreToken(RMDelegationTokenIdentifier identifier)
throws YarnException, IOException {
LOG.info("get RouterStoreToken token with sequence number: {}.",
identifier.getSequenceNumber());
RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, 0L);
RouterRMTokenRequest request = RouterRMTokenRequest.newInstance(storeToken);
return stateStore.getTokenByRouterStoreToken(request);
} | 3.68 |
flink_MetricDumpSerialization_serialize | /**
* Serializes the given metrics and returns the resulting byte array.
*
* <p>Should a {@link Metric} accessed in this method throw an exception it will be omitted
* from the returned {@link MetricSerializationResult}.
*
* <p>If the serialization of any primitive or String fails then the returned {@link
* MetricSerializationResult} is partially corrupted. Such a result can be deserialized
* safely by {@link MetricDumpDeserializer#deserialize(MetricSerializationResult)}; however
* only metrics that were fully serialized before the failure will be returned.
*
* @param counters counters to serialize
* @param gauges gauges to serialize
* @param histograms histograms to serialize
* @param meters meters to serialize
* @return MetricSerializationResult containing the serialized metrics and the count of each
* metric type
*/
public MetricSerializationResult serialize(
Map<Counter, Tuple2<QueryScopeInfo, String>> counters,
Map<Gauge<?>, Tuple2<QueryScopeInfo, String>> gauges,
Map<Histogram, Tuple2<QueryScopeInfo, String>> histograms,
Map<Meter, Tuple2<QueryScopeInfo, String>> meters) {
countersBuffer.clear();
int numCounters = 0;
for (Map.Entry<Counter, Tuple2<QueryScopeInfo, String>> entry : counters.entrySet()) {
try {
serializeCounter(
countersBuffer,
entry.getValue().f0,
entry.getValue().f1,
entry.getKey());
numCounters++;
} catch (Exception e) {
LOG.debug("Failed to serialize counter.", e);
}
}
gaugesBuffer.clear();
int numGauges = 0;
for (Map.Entry<Gauge<?>, Tuple2<QueryScopeInfo, String>> entry : gauges.entrySet()) {
try {
serializeGauge(
gaugesBuffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
numGauges++;
} catch (Exception e) {
LOG.debug("Failed to serialize gauge.", e);
}
}
histogramsBuffer.clear();
int numHistograms = 0;
for (Map.Entry<Histogram, Tuple2<QueryScopeInfo, String>> entry :
histograms.entrySet()) {
try {
serializeHistogram(
histogramsBuffer,
entry.getValue().f0,
entry.getValue().f1,
entry.getKey());
numHistograms++;
} catch (Exception e) {
LOG.debug("Failed to serialize histogram.", e);
}
}
metersBuffer.clear();
int numMeters = 0;
for (Map.Entry<Meter, Tuple2<QueryScopeInfo, String>> entry : meters.entrySet()) {
try {
serializeMeter(
metersBuffer, entry.getValue().f0, entry.getValue().f1, entry.getKey());
numMeters++;
} catch (Exception e) {
LOG.debug("Failed to serialize meter.", e);
}
}
return new MetricSerializationResult(
countersBuffer.getCopyOfBuffer(),
gaugesBuffer.getCopyOfBuffer(),
metersBuffer.getCopyOfBuffer(),
histogramsBuffer.getCopyOfBuffer(),
numCounters,
numGauges,
numMeters,
numHistograms);
} | 3.68 |
flink_FlinkConvertletTable_convertTryCast | // Slightly modified version of StandardConvertletTable::convertCast
private RexNode convertTryCast(SqlRexContext cx, final SqlCall call) {
RelDataTypeFactory typeFactory = cx.getTypeFactory();
final SqlNode leftNode = call.operand(0);
final SqlNode rightNode = call.operand(1);
final RexNode valueRex = cx.convertExpression(leftNode);
RelDataType type;
if (rightNode instanceof SqlIntervalQualifier) {
type = typeFactory.createSqlIntervalType((SqlIntervalQualifier) rightNode);
} else if (rightNode instanceof SqlDataTypeSpec) {
SqlDataTypeSpec dataType = ((SqlDataTypeSpec) rightNode);
type = dataType.deriveType(cx.getValidator());
if (type == null) {
type = cx.getValidator().getValidatedNodeType(dataType.getTypeName());
}
} else {
throw new IllegalStateException(
"Invalid right argument type for TRY_CAST: " + rightNode);
}
type = typeFactory.createTypeWithNullability(type, true);
if (SqlUtil.isNullLiteral(leftNode, false)) {
final SqlValidatorImpl validator = (SqlValidatorImpl) cx.getValidator();
validator.setValidatedNodeType(leftNode, type);
return cx.convertExpression(leftNode);
}
return cx.getRexBuilder()
.makeCall(
type, FlinkSqlOperatorTable.TRY_CAST, Collections.singletonList(valueRex));
} | 3.68 |
flink_RocksDBCachingPriorityQueueSet_size | /**
* This implementation comes at a relatively high cost per invocation. It should not be called
* repeatedly when it is clear that the value did not change. Currently this is only truly used
* to realize certain higher-level tests.
*/
@Override
public int size() {
if (allElementsInCache) {
return orderedCache.size();
} else {
int count = 0;
try (final RocksBytesIterator iterator = orderedBytesIterator()) {
while (iterator.hasNext()) {
iterator.next();
++count;
}
}
return count;
}
} | 3.68 |
AreaShop_RegionGroup_getMemberRegions | /**
* Get all members of the group as GeneralRegions.
* @return A Set with all group members
*/
public Set<GeneralRegion> getMemberRegions() {
Set<GeneralRegion> result = new HashSet<>();
for(String playerName : getMembers()) {
result.add(plugin.getFileManager().getRegion(playerName));
}
return result;
} | 3.68 |
hbase_Procedure_isWaiting | /** Returns true if the procedure is waiting for a child to finish or for an external event. */
public synchronized boolean isWaiting() {
switch (state) {
case WAITING:
case WAITING_TIMEOUT:
return true;
default:
break;
}
return false;
} | 3.68 |
morf_GraphBasedUpgradeNode_addUpgradeStatements | /**
* Add upgrade statement to be executed by this upgrade node
*
* @param statement to be executed
*/
public void addUpgradeStatements(String statement) {
upgradeStatements.add(statement);
} | 3.68 |
flink_AbstractFileSource_monitorContinuously | /**
* Sets this source to streaming ("continuous monitoring") mode.
*
* <p>This makes the source a "continuous streaming" source that keeps running, monitoring
* for new files, and reads these files when they appear and are discovered by the
* monitoring.
*
* <p>The interval in which the source checks for new files is the {@code
* discoveryInterval}. Shorter intervals mean that files are discovered more quickly, but
* also imply more frequent listing or directory traversal of the file system / object
* store.
*/
public SELF monitorContinuously(Duration discoveryInterval) {
checkNotNull(discoveryInterval, "discoveryInterval");
checkArgument(
!(discoveryInterval.isNegative() || discoveryInterval.isZero()),
"discoveryInterval must be > 0");
this.continuousSourceSettings = new ContinuousEnumerationSettings(discoveryInterval);
return self();
} | 3.68 |
hbase_CoprocessorWhitelistMasterObserver_validatePath | /**
* Validates a single whitelist path against the coprocessor path
* @param coprocPath the path to the coprocessor including scheme
* @param wlPath can be: 1) a "*" to wildcard all coprocessor paths 2) a specific filesystem
* (e.g. hdfs://my-cluster/) 3) a wildcard path to be evaluated by
* {@link FilenameUtils#wildcardMatch(String, String)} path can specify scheme
* or not (e.g. "file:///usr/hbase/coprocessors" or for all filesystems
* "/usr/hbase/coprocessors")
* @return if the path was found under the wlPath
*/
private static boolean validatePath(Path coprocPath, Path wlPath) {
// verify if all are allowed
if (wlPath.toString().equals("*")) {
return (true);
}
// verify we are on the same filesystem if wlPath has a scheme
if (!wlPath.isAbsoluteAndSchemeAuthorityNull()) {
String wlPathScheme = wlPath.toUri().getScheme();
String coprocPathScheme = coprocPath.toUri().getScheme();
String wlPathHost = wlPath.toUri().getHost();
String coprocPathHost = coprocPath.toUri().getHost();
if (wlPathScheme != null) {
wlPathScheme = wlPathScheme.toString().toLowerCase();
} else {
wlPathScheme = "";
}
if (wlPathHost != null) {
wlPathHost = wlPathHost.toString().toLowerCase();
} else {
wlPathHost = "";
}
if (coprocPathScheme != null) {
coprocPathScheme = coprocPathScheme.toString().toLowerCase();
} else {
coprocPathScheme = "";
}
if (coprocPathHost != null) {
coprocPathHost = coprocPathHost.toString().toLowerCase();
} else {
coprocPathHost = "";
}
if (!wlPathScheme.equals(coprocPathScheme) || !wlPathHost.equals(coprocPathHost)) {
return (false);
}
}
// allow any on this file-system (file systems were verified to be the same above)
if (wlPath.isRoot()) {
return (true);
}
// allow "loose" matches stripping scheme
if (
FilenameUtils.wildcardMatch(Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(),
Path.getPathWithoutSchemeAndAuthority(wlPath).toString())
) {
return (true);
}
return (false);
} | 3.68 |
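
The whitelist check above ultimately relies on commons-io's FilenameUtils.wildcardMatch doing plain string wildcarding over the scheme-stripped paths. A small hypothetical demo of that behaviour (the paths are made up):

import org.apache.commons.io.FilenameUtils;

public class WhitelistMatchDemo {
    public static void main(String[] args) {
        String coproc = "/usr/hbase/coprocessors/my-coproc.jar";
        // '*' matches any run of characters, including '/', so a directory prefix plus '*' works
        System.out.println(FilenameUtils.wildcardMatch(coproc, "/usr/hbase/*"));   // true
        System.out.println(FilenameUtils.wildcardMatch(coproc, "/opt/other/*"));   // false
        // '?' matches exactly one character
        System.out.println(FilenameUtils.wildcardMatch("/a/b.jar", "/a/?.jar"));   // true
    }
}
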
hbase_AbstractRpcClient_getConnection | /**
* Get a connection from the pool, or create a new one and add it to the pool. Connections to a
* given host/port are reused.
*/
private T getConnection(ConnectionId remoteId) throws IOException {
if (failedServers.isFailedServer(remoteId.getAddress())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Not trying to connect to " + remoteId.getAddress()
+ " this server is in the failed servers list");
}
throw new FailedServerException(
"This server is in the failed servers list: " + remoteId.getAddress());
}
T conn;
synchronized (connections) {
if (!running) {
throw new StoppedRpcClientException();
}
conn = connections.getOrCreate(remoteId, () -> createConnection(remoteId));
conn.setLastTouched(EnvironmentEdgeManager.currentTime());
}
return conn;
} | 3.68 |
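
For illustration only, a generic sketch of the same pool-or-create pattern (this is not the HBase implementation; the ConnectionPool and Factory names are invented):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Reuse one connection per remote id, creating it lazily under the pool's lock.
class ConnectionPool<K, C> {

    interface Factory<T> {
        T create() throws IOException;
    }

    private final Map<K, C> connections = new HashMap<>();

    synchronized C getOrCreate(K remoteId, Factory<C> factory) throws IOException {
        C conn = connections.get(remoteId);
        if (conn == null) {
            conn = factory.create();          // create on first use
            connections.put(remoteId, conn);  // cache for subsequent callers
        }
        return conn;
    }
}
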
querydsl_JTSGeometryExpression_symDifference | /**
     * Returns a geometric object that represents the point-set symmetric difference of this
     * geometric object with the given geometry.
     *
     * @param geometry the other geometry
     * @return the symmetric difference
*/
public JTSGeometryExpression<Geometry> symDifference(Expression<? extends Geometry> geometry) {
return JTSGeometryExpressions.geometryOperation(SpatialOps.SYMDIFFERENCE, mixin, geometry);
} | 3.68 |
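
For intuition about the operation itself, a plain JTS example of the point-set symmetric difference (this uses the JTS Geometry API directly rather than the Querydsl expression shown above; the WKT polygons are illustrative):

import org.locationtech.jts.geom.Geometry;
import org.locationtech.jts.io.ParseException;
import org.locationtech.jts.io.WKTReader;

public class SymDifferenceDemo {
    public static void main(String[] args) throws ParseException {
        WKTReader reader = new WKTReader();
        Geometry a = reader.read("POLYGON ((0 0, 4 0, 4 4, 0 4, 0 0))");
        Geometry b = reader.read("POLYGON ((2 2, 6 2, 6 6, 2 6, 2 2))");
        // points that lie in exactly one of the two polygons
        Geometry sym = a.symDifference(b);
        System.out.println(sym);
    }
}
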
flink_FromClasspathEntryClassInformationProvider_createWithJobClassAssumingOnSystemClasspath | /**
* Creates a {@code FromClasspathEntryClassInformationProvider} assuming that the passed job
* class is available on the system classpath.
*
* @param jobClassName The job class name working as the entry point.
* @return The {@code FromClasspathEntryClassInformationProvider} providing the job class found.
*/
public static FromClasspathEntryClassInformationProvider
createWithJobClassAssumingOnSystemClasspath(String jobClassName) {
return new FromClasspathEntryClassInformationProvider(jobClassName);
} | 3.68 |
flink_NettyShuffleEnvironment_start | /**
* Starts the internal related components for network connection and communication.
*
* @return a port to connect to the task executor for shuffle data exchange, -1 if only local connection is possible.
*/
@Override
public int start() throws IOException {
synchronized (lock) {
Preconditions.checkState(
!isClosed, "The NettyShuffleEnvironment has already been shut down.");
LOG.info("Starting the network environment and its components.");
try {
LOG.debug("Starting network connection manager");
return connectionManager.start();
} catch (IOException t) {
throw new IOException("Failed to instantiate network connection manager.", t);
}
}
} | 3.68 |
hadoop_RetriableCommand_setRetryPolicy | /**
* Fluent-interface to change the RetryHandler.
* @param retryHandler The new RetryHandler instance to be used.
* @return Self.
*/
public RetriableCommand setRetryPolicy(RetryPolicy retryHandler) {
this.retryPolicy = retryHandler;
return this;
} | 3.68 |
hadoop_PlacementConstraint_toIndentedString | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.68 |
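
A tiny, hypothetical illustration of what the helper produces:

public class IndentDemo {
    public static void main(String[] args) {
        // "a\nb\nc" becomes "a\n    b\n    c": every line after the first gains four spaces.
        String value = "a\nb\nc";
        System.out.println(value.replace("\n", "\n    "));
    }
}
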
hbase_MemStoreFlusher_requeue | /**
   * @param when Delay, in milliseconds, before this entry expires and comes back out of the queue;
   *             this method adds EnvironmentEdgeManager.currentTime() to the value passed.
* @return This.
*/
public FlushRegionEntry requeue(final long when) {
this.whenToExpire = EnvironmentEdgeManager.currentTime() + when;
this.requeueCount++;
return this;
} | 3.68 |
pulsar_ReaderConfiguration_setReaderListener | /**
     * Sets a {@link ReaderListener} for the reader.
     * <p>
     * When a {@link ReaderListener} is set, the application will receive messages through it. Calls to
     * {@link Reader#readNext()} will not be allowed.
     *
     * @param readerListener
     *            the listener object
     * @return the {@code ReaderConfiguration} itself, for call chaining
*/
public ReaderConfiguration setReaderListener(ReaderListener<byte[]> readerListener) {
Objects.requireNonNull(readerListener);
this.readerListener = readerListener;
conf.setReaderListener(new org.apache.pulsar.shade.client.api.v2.ReaderListener<byte[]>() {
@Override
public void received(org.apache.pulsar.shade.client.api.v2.Reader<byte[]> v2Reader, Message<byte[]> msg) {
readerListener.received(new ReaderV1Impl(v2Reader), msg);
}
@Override
public void reachedEndOfTopic(org.apache.pulsar.shade.client.api.v2.Reader<byte[]> reader) {
readerListener.reachedEndOfTopic(new ReaderV1Impl(reader));
}
});
return this;
} | 3.68 |
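
A hedged usage sketch of the listener-based reading mode described above. It assumes the v1-compatibility client classes live in org.apache.pulsar.client.api (the pulsar-client-1x artifact); adjust the imports if your artifact differs.

import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.Reader;
import org.apache.pulsar.client.api.ReaderConfiguration;
import org.apache.pulsar.client.api.ReaderListener;

public class ReaderListenerExample {
    public static ReaderConfiguration listeningConfiguration() {
        ReaderConfiguration conf = new ReaderConfiguration();
        // With a listener set, messages are pushed to the callback; Reader#readNext() is disallowed.
        conf.setReaderListener(new ReaderListener<byte[]>() {
            @Override
            public void received(Reader reader, Message<byte[]> msg) {
                System.out.println("Received message " + msg.getMessageId());
            }

            @Override
            public void reachedEndOfTopic(Reader reader) {
                System.out.println("Reached end of topic");
            }
        });
        return conf;
    }
}
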
hbase_ServerManager_regionServerStartup | /**
* Let the server manager know a new regionserver has come online
* @param request the startup request
* @param versionNumber the version number of the new regionserver
* @param version the version of the new regionserver, could contain strings like "SNAPSHOT"
* @param ia the InetAddress from which request is received
* @return The ServerName we know this server as.
*/
ServerName regionServerStartup(RegionServerStartupRequest request, int versionNumber,
String version, InetAddress ia) throws IOException {
// Test for case where we get a region startup message from a regionserver
// that has been quickly restarted but whose znode expiration handler has
    // not yet run, or from a server whose failure we are currently processing.
// Test its host+port combo is present in serverAddressToServerInfo. If it
// is, reject the server and trigger its expiration. The next time it comes
// in, it should have been removed from serverAddressToServerInfo and queued
// for processing by ProcessServerShutdown.
// if use-ip is enabled, we will use ip to expose Master/RS service for client,
// see HBASE-27304 for details.
boolean useIp = master.getConfiguration().getBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY,
HConstants.HBASE_SERVER_USEIP_ENABLED_DEFAULT);
String isaHostName = useIp ? ia.getHostAddress() : ia.getHostName();
final String hostname =
request.hasUseThisHostnameInstead() ? request.getUseThisHostnameInstead() : isaHostName;
ServerName sn = ServerName.valueOf(hostname, request.getPort(), request.getServerStartCode());
checkClockSkew(sn, request.getServerCurrentTime());
checkIsDead(sn, "STARTUP");
if (!checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn, versionNumber, version))) {
LOG.warn(
"THIS SHOULD NOT HAPPEN, RegionServerStartup" + " could not record the server: " + sn);
}
storage.started(sn);
return sn;
} | 3.68 |
zxing_UPCEANReader_checkStandardUPCEANChecksum | /**
* Computes the UPC/EAN checksum on a string of digits, and reports
* whether the checksum is correct or not.
*
* @param s string of digits to check
* @return true iff string of digits passes the UPC/EAN checksum algorithm
* @throws FormatException if the string does not contain only digits
*/
static boolean checkStandardUPCEANChecksum(CharSequence s) throws FormatException {
int length = s.length();
if (length == 0) {
return false;
}
int check = Character.digit(s.charAt(length - 1), 10);
return getStandardUPCEANChecksum(s.subSequence(0, length - 1)) == check;
} | 3.68 |
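
For reference, a self-contained sketch of the standard UPC/EAN checksum rule that the method above checks: weighting digits 1,3,1,3,... from the rightmost digit (including the check digit), the total must be a multiple of 10. This mirrors the algorithm, not the ZXing sources; the sample value 4006381333931 is a well-known valid EAN-13.

public class UpcEanChecksumDemo {

    static boolean isValid(String digits) {
        int sum = 0;
        // Walk from the rightmost digit, alternating weights 1 and 3.
        for (int i = digits.length() - 1, weight = 1; i >= 0; i--, weight = 4 - weight) {
            int d = Character.digit(digits.charAt(i), 10);
            if (d < 0) {
                throw new IllegalArgumentException("not a digit: " + digits.charAt(i));
            }
            sum += d * weight;
        }
        return sum % 10 == 0;
    }

    public static void main(String[] args) {
        System.out.println(isValid("4006381333931")); // true
        System.out.println(isValid("4006381333932")); // false
    }
}
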
hbase_ZKSplitLogManagerCoordination_handleUnassignedTask | /**
   * It is possible for a task to stay in the UNASSIGNED state indefinitely. For example, the
   * SplitLogManager wants to resubmit a task: it forces the task into the UNASSIGNED state but dies
   * before it can create the RESCAN task node that signals the SplitLogWorkers to pick up the task.
   * To prevent this scenario, the SplitLogManager resubmits all orphan and UNASSIGNED tasks at
   * startup.
*/
private void handleUnassignedTask(String path) {
if (ZKSplitLog.isRescanNode(watcher, path)) {
return;
}
Task task = findOrCreateOrphanTask(path);
if (task.isOrphan() && (task.incarnation.get() == 0)) {
LOG.info("Resubmitting unassigned orphan task " + path);
// ignore failure to resubmit. The timeout-monitor will handle it later
// albeit in a more crude fashion
resubmitTask(path, task, FORCE);
}
} | 3.68 |
hudi_LSMTimeline_isFileInRange | /**
   * Returns whether the given file falls within the given time range filter, based on the min and
   * max instant times encoded in its file name.
*/
public static boolean isFileInRange(HoodieArchivedTimeline.TimeRangeFilter filter, String fileName) {
String minInstant = getMinInstantTime(fileName);
String maxInstant = getMaxInstantTime(fileName);
return filter.isInRange(minInstant) || filter.isInRange(maxInstant);
} | 3.68 |