name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hbase_UserProvider_instantiate | /**
* Instantiate the {@link UserProvider} specified in the configuration and set the passed
* configuration via {@link UserProvider#setConf(Configuration)}
* @param conf to read and set on the created {@link UserProvider}
* @return a {@link UserProvider} ready for use.
*/
public static UserProvider instantiate(Configuration conf) {
Class<? extends UserProvider> clazz =
conf.getClass(USER_PROVIDER_CONF_KEY, UserProvider.class, UserProvider.class);
return ReflectionUtils.newInstance(clazz, conf);
} | 3.68 |
flink_WindowAggregateQueryOperation_getSlide | /**
* Slide of {@link WindowType#SLIDE} window. Empty for other windows.
*
* @return slide of a slide window
*/
public Optional<ValueLiteralExpression> getSlide() {
return Optional.ofNullable(slide);
} | 3.68 |
zilla_HpackContext_evict | // Evicts older entries from the dynamic table
private void evict(int noEntries)
{
for (int i = 0; i < noEntries; i++)
{
HeaderField header = table.get(i);
tableSize -= header.size;
if (encoding)
{
Long id = noEvictions + i;
if (id.equals(name2Index.get(header.name)))
{
name2Index.remove(header.name, id);
}
NameValue nameValue = new NameValue(header.name, header.value);
if (id.equals(namevalue2Index.get(nameValue)))
{
namevalue2Index.remove(nameValue, id);
}
}
}
table.subList(0, noEntries).clear();
noEvictions += noEntries;
} | 3.68 |
hadoop_HdfsDtFetcher_addDelegationTokens | /**
* Fetches a delegation token via FileSystem and adds it to the given credentials.
* @param conf - a Configuration object used with FileSystem.get()
* @param creds - a Credentials object to which token(s) will be added
* @param renewer - the renewer to send with the token request
* @param url - the URL to which the request is sent
* @return the fetched Token; an IOException is thrown if the fetch fails.
*/
public Token<?> addDelegationTokens(Configuration conf, Credentials creds,
String renewer, String url) throws Exception {
if (!url.startsWith(getServiceName().toString())) {
url = getServiceName().toString() + "://" + url;
}
FileSystem fs = FileSystem.get(URI.create(url), conf);
Token<?> token = fs.getDelegationToken(renewer);
if (token == null) {
LOG.error(FETCH_FAILED);
throw new IOException(FETCH_FAILED);
}
creds.addToken(token.getService(), token);
return token;
} | 3.68 |
hbase_CatalogFamilyFormat_getMetaKeyForRegion | /** Returns the row key to use for this regionInfo */
public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) {
return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
} | 3.68 |
flink_DiskCacheManager_append | /**
* Append buffer to {@link DiskCacheManager}.
*
* @param buffer to be managed by this class.
* @param subpartitionId the subpartition of this record.
*/
void append(Buffer buffer, int subpartitionId) {
subpartitionCacheManagers[subpartitionId].append(buffer);
increaseNumCachedBytesAndCheckFlush(buffer.readableBytes());
} | 3.68 |
hadoop_ActiveAuditManagerS3A_getSpanId | /**
* The Span ID in the audit manager is the ID of the auditor,
* which can be used in the filesystem toString() method
* to assist in correlating client logs with S3 logs.
* It is returned here as part of the implementation of
* {@link AWSAuditEventCallbacks}.
* @return the unique ID of the auditor, or a placeholder if the auditor has not yet been created.
*/
@Override
public String getSpanId() {
return auditor != null
? auditor.getAuditorId()
: "(auditor not yet created)";
} | 3.68 |
hbase_MergeTableRegionsProcedure_rollbackState | /**
* To rollback {@link MergeTableRegionsProcedure}, two AssignProcedures are asynchronously
* submitted for each region to be merged (rollback doesn't wait on the completion of the
AssignProcedures). This can be improved by changing rollback() to support sub-procedures. See
* HBASE-19851 for details.
*/
@Override
protected void rollbackState(final MasterProcedureEnv env, final MergeTableRegionsState state)
throws IOException {
LOG.trace("{} rollback state={}", this, state);
try {
switch (state) {
case MERGE_TABLE_REGIONS_POST_OPERATION:
case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
case MERGE_TABLE_REGIONS_UPDATE_META:
String msg = this + " We are in the " + state + " state."
+ " It is complicated to rollback the merge operation that region server is working on."
+ " Rollback is not supported and we should let the merge operation to complete";
LOG.warn(msg);
// PONR
throw new UnsupportedOperationException(this + " unhandled state=" + state);
case MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION:
break;
case MERGE_TABLE_REGIONS_CREATE_MERGED_REGION:
case MERGE_TABLE_REGIONS_WRITE_MAX_SEQUENCE_ID_FILE:
cleanupMergedRegion(env);
break;
case MERGE_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
break;
case MERGE_TABLE_REGIONS_CLOSE_REGIONS:
rollbackCloseRegionsForMerge(env);
break;
case MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION:
postRollBackMergeRegions(env);
break;
case MERGE_TABLE_REGIONS_PREPARE:
break;
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (Exception e) {
// This will be retried. Unless there is a bug in the code,
// this should be just a "temporary error" (e.g. network down)
LOG.warn("Failed rollback attempt step " + state + " for merging the regions "
+ RegionInfo.getShortNameToLog(regionsToMerge) + " in table " + getTableName(), e);
throw e;
}
} | 3.68 |
hadoop_PeriodicRLESparseResourceAllocation_removeInterval | /**
* Removes a resource for the specified interval.
*
* @param interval the {@link ReservationInterval} for which the resource is
* to be removed.
* @param resource the {@link Resource} to be removed.
* @return true if removal is successful, false otherwise
*/
public boolean removeInterval(ReservationInterval interval,
Resource resource) {
long startTime = interval.getStartTime();
long endTime = interval.getEndTime();
// If the resource to be subtracted is less than the minimum resource in
// the range, abort removal to avoid negative capacity.
// TODO revisit decrementing endTime
if (!Resources.fitsIn(resource, getMinimumCapacityInInterval(
new ReservationInterval(startTime, endTime - 1)))) {
LOG.info("Request to remove more resources than what is available");
return false;
}
if (startTime >= 0 && endTime > startTime && endTime <= timePeriod) {
return super.removeInterval(interval, resource);
} else {
LOG.info("Interval extends beyond the end time " + timePeriod);
return false;
}
} | 3.68 |
hbase_MiniHBaseCluster_getMasterThread | /**
* Returns the current active master thread, if available.
* @return the active MasterThread, null if none is active.
*/
public MasterThread getMasterThread() {
for (MasterThread mt : hbaseCluster.getLiveMasters()) {
if (mt.getMaster().isActiveMaster()) {
return mt;
}
}
return null;
} | 3.68 |
flink_TestcontainersSettings_builder | /**
* A new builder for {@code TestcontainersSettings}.
*
* @return The builder.
*/
public static Builder builder() {
return new Builder();
} | 3.68 |
rocketmq-connect_ExpressionBuilder_appendIdentifierQuoted | /**
* Append to this builder's expression the specified identifier, surrounded by the leading and
* trailing quotes.
*
* @param name the name to be appended
* @return this builder to enable methods to be chained; never null
*/
public ExpressionBuilder appendIdentifierQuoted(String name) {
appendLeadingQuote();
sb.append(name);
appendTrailingQuote();
return this;
} | 3.68 |
hadoop_SaslOutputStream_flush | /**
* Flushes this output stream
*
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void flush() throws IOException {
outStream.flush();
} | 3.68 |
hbase_CompoundConfiguration_get | /**
* Get the value of the <code>name</code>. If the key is deprecated, it returns the value of the
* first key which replaces the deprecated key and is not null. If no such property exists, then
* <code>defaultValue</code> is returned. The CompoundConfiguration does not do property
* substitution. To do so we need Configuration.getProps to be protected or package visible.
* Though in hadoop2 it is protected, in hadoop1 the method is private and not accessible. All of
* the get* methods call this overridden get method.
* @param name property name.
* @param defaultValue default value.
* @return property value, or <code>defaultValue</code> if the property doesn't exist.
**/
@Override
public String get(String name, String defaultValue) {
String ret = get(name);
return ret == null ? defaultValue : ret;
} | 3.68 |
pulsar_GrowablePriorityLongPairQueue_removeIf | /**
* Removes all of the elements of this collection that satisfy the given predicate.
*
* @param filter
* a predicate which returns {@code true} for elements to be removed
*
* @return number of removed values
*/
public synchronized int removeIf(LongPairPredicate filter) {
int removedValues = 0;
int index = 0;
long[] deletedItems = new long[size * 2];
int deleteItemsIndex = 0;
// collect eligible items for deletion
for (int i = 0; i < this.size; i++) {
if (filter.test(data[index], data[index + 1])) {
deletedItems[deleteItemsIndex++] = data[index];
deletedItems[deleteItemsIndex++] = data[index + 1];
removedValues++;
}
index = index + 2;
}
// delete collected items
deleteItemsIndex = 0;
for (int deleteItem = 0; deleteItem < removedValues; deleteItem++) {
// delete item from the heap
index = 0;
for (int i = 0; i < this.size; i++) {
if (data[index] == deletedItems[deleteItemsIndex]
&& data[index + 1] == deletedItems[deleteItemsIndex + 1]) {
removeAtWithoutLock(index);
}
index = index + 2;
}
deleteItemsIndex = deleteItemsIndex + 2;
}
return removedValues;
} | 3.68 |
hibernate-validator_AnnotationApiHelper_filterByType | /**
* Returns a list containing those annotation mirrors from the input list,
* which are of type {@code annotationType}. The input collection
* remains untouched.
*
* @param annotationMirrors A list of annotation mirrors.
* @param annotationType The type to be compared against.
*
* @return A list with those annotation mirrors from the input list, which
* are of type {@code annotationType}. May be empty but never
* null.
*/
public List<AnnotationMirror> filterByType(List<? extends AnnotationMirror> annotationMirrors, TypeMirror annotationType) {
List<AnnotationMirror> theValue = CollectionHelper.newArrayList();
if ( annotationMirrors == null || annotationType == null ) {
return theValue;
}
for ( AnnotationMirror oneAnnotationMirror : annotationMirrors ) {
if ( typeUtils.isSameType( oneAnnotationMirror.getAnnotationType(), annotationType ) ) {
theValue.add( oneAnnotationMirror );
}
}
return theValue;
} | 3.68 |
flink_JoinOperationFactory_create | /**
* Creates a valid {@link JoinQueryOperation} operation.
*
* <p>It performs validations such as:
*
* <ul>
* <li>condition returns boolean
* <li>the condition is either always true or contains equi join
* <li>left and right side of the join do not contain ambiguous column names
* <li>that correlated join is an INNER join
* </ul>
*
* @param left left side of the relational operation
* @param right right side of the relational operation
* @param joinType what sort of join to create
* @param condition join condition to apply
* @param correlated if the join should be a correlated join
* @return valid join operation
*/
QueryOperation create(
QueryOperation left,
QueryOperation right,
JoinType joinType,
ResolvedExpression condition,
boolean correlated) {
verifyConditionType(condition);
validateNamesAmbiguity(left, right);
validateCondition(right, joinType, condition, correlated);
return new JoinQueryOperation(left, right, joinType, condition, correlated);
} | 3.68 |
morf_SchemaHomology_checkIndexes | /**
* Compare the indexes of two tables.
*
* @param tableName Name of the table on which we are comparing indexes.
* @param indexes1 The first set of indexes
* @param indexes2 The second set of indexes
*/
private void checkIndexes(String tableName, List<Index> indexes1, List<Index> indexes2) {
Map<String, Index> sourceIndexes2 = new HashMap<>();
for (Index index : indexes2) {
sourceIndexes2.put(index.getName().toUpperCase(), index);
}
// Comparison of indexes is not order dependent
for (Index index1 : indexes1) {
// Find the match
Index index2 = sourceIndexes2.remove(index1.getName().toUpperCase());
if (index2 == null) {
difference("Index [" + index1.getName() + "] on table [" + tableName + "] not found in " + schema2Name);
} else {
checkIndex(tableName, index1, index2);
}
}
for(Index index : sourceIndexes2.values()) {
difference("Index [" + index.getName() + "] on table [" + tableName + "] not found in " + schema1Name);
}
} | 3.68 |
flink_RestClusterClient_requestJobResult | /**
* Requests the {@link JobResult} for the given {@link JobID}. The method retries multiple times
* to poll the {@link JobResult} before giving up.
*
* @param jobId specifying the job for which to retrieve the {@link JobResult}
* @return Future which is completed with the {@link JobResult} once the job has completed or
* with a failure if the {@link JobResult} could not be retrieved.
*/
@Override
public CompletableFuture<JobResult> requestJobResult(@Nonnull JobID jobId) {
final CheckedSupplier<CompletableFuture<JobResult>> operation =
() -> requestJobResultInternal(jobId);
return retry(operation, unknownJobStateRetryable);
} | 3.68 |
hbase_SnapshotManager_setSnapshotHandlerForTesting | /**
* Set the handler for the current snapshot
* <p>
* Exposed for TESTING
* @param tableName the table whose snapshot handler is being set
* @param handler the handler the master should use. TODO: get rid of this if possible, repackaging,
* modify tests.
*/
public synchronized void setSnapshotHandlerForTesting(final TableName tableName,
final SnapshotSentinel handler) {
if (handler != null) {
this.snapshotHandlers.put(tableName, handler);
} else {
this.snapshotHandlers.remove(tableName);
}
} | 3.68 |
hbase_AccessControlUtil_grant | /**
* A utility used to grant a user namespace permissions.
* <p>
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param userShortName the short name of the user to grant permissions to
* @param namespace the namespace on which the permissions are granted
* @param mergeExistingPermissions whether to merge with the user's existing permissions
* @param actions the permissions to be granted
* @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
AccessControlService.BlockingInterface protocol, String userShortName, String namespace,
boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException {
List<AccessControlProtos.Permission.Action> permActions =
Lists.newArrayListWithCapacity(actions.length);
for (Permission.Action a : actions) {
permActions.add(toPermissionAction(a));
}
AccessControlProtos.GrantRequest request =
buildGrantRequest(userShortName, namespace, mergeExistingPermissions,
permActions.toArray(new AccessControlProtos.Permission.Action[actions.length]));
protocol.grant(controller, request);
} | 3.68 |
flink_InputTypeStrategies_and | /**
* Strategy for a conjunction of multiple {@link ArgumentTypeStrategy}s into one like {@code
* f(NUMERIC && LITERAL)}.
*
* <p>Some {@link ArgumentTypeStrategy}s cannot contribute an inferred type that is different
* from the input type (e.g. {@link #LITERAL}). Therefore, the order {@code f(X && Y)} or {@code
* f(Y && X)} matters as it defines the precedence in case the result must be cast to a more
* specific type.
*
* <p>This strategy aims to infer the first more specific, cast type or (if this is not
* possible) a type that has been inferred from all {@link ArgumentTypeStrategy}s.
*/
public static AndArgumentTypeStrategy and(ArgumentTypeStrategy... strategies) {
return new AndArgumentTypeStrategy(Arrays.asList(strategies));
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getWorkItem | /**
* Gets the current status of work for these volumes.
*
* @return - Work Item
*/
public DiskBalancerWorkItem getWorkItem() {
return workItem;
} | 3.68 |
hbase_ByteBuffAllocator_createOnHeap | /**
* Initialize a {@link ByteBuffAllocator} which only allocates ByteBuffers on-heap; it's
* designed for testing purposes or the disabled-reservoir case.
* @return allocator to allocate on-heap ByteBuffer.
*/
private static ByteBuffAllocator createOnHeap() {
return new ByteBuffAllocator(false, 0, DEFAULT_BUFFER_SIZE, Integer.MAX_VALUE);
} | 3.68 |
hadoop_WordList_getWords | /**
* Gets the words.
*
* Note that this API is only for Jackson JSON serialization.
*/
public Map<String, Integer> getWords() {
return list;
} | 3.68 |
hbase_HRegion_openHRegionFromTableDir | /**
* Open a Region.
* @param conf The Configuration object to use.
* @param fs Filesystem to use
* @param tableDir the table directory from which the region is opened
* @param info Info for region to be opened.
* @param htd the table descriptor
* @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long)
* passing the result of the call to HRegion#getMinSequenceId() to ensure the
* wal id is properly kept up. HRegionStore does this every time it opens a new
* region.
* @param rsServices An interface we can request flushes against.
* @param reporter An interface we can report progress against.
* @return new HRegion
* @throws NullPointerException if {@code info} is {@code null}
*/
public static HRegion openHRegionFromTableDir(final Configuration conf, final FileSystem fs,
final Path tableDir, final RegionInfo info, final TableDescriptor htd, final WAL wal,
final RegionServerServices rsServices, final CancelableProgressable reporter)
throws IOException {
Objects.requireNonNull(info, "RegionInfo cannot be null");
LOG.debug("Opening region: {}", info);
HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices);
return r.openHRegion(reporter);
} | 3.68 |
streampipes_AssetLinkBuilder_withLinkLabel | /**
* Sets the link label for the AssetLink being built.
*
* @param linkLabel The link label to set.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withLinkLabel(String linkLabel) {
this.assetLink.setLinkLabel(linkLabel);
return this;
} | 3.68 |
framework_Label_setValue | /**
* Set the value of the label. Value of the label is the XML contents of the
* label. Since Vaadin 7.2, changing the value of a Label instance with this
* method will fire a ValueChangeEvent.
*
* @param newStringValue
* the New value of the label.
*/
@Override
public void setValue(String newStringValue) {
if (getPropertyDataSource() == null) {
LabelState state = getState(false);
String oldTextValue = state.text;
if (!SharedUtil.equals(oldTextValue, newStringValue)) {
getState().text = newStringValue;
fireValueChange();
}
} else {
throw new IllegalStateException(
"Label is only a Property.Viewer and cannot update its data source");
}
} | 3.68 |
hmily_HmilyParen_match | /**
* Judge whether the left paren matches the right paren or not.
*
* @param leftToken left token
* @param rightToken right token
* @return match or not
*/
public static boolean match(final char leftToken, final char rightToken) {
for (HmilyParen each : HmilyParen.values()) {
if (each.leftParen == leftToken && each.rightParen == rightToken) {
return true;
}
}
return false;
} | 3.68 |
hbase_OrderedBytesBase_isSkippable | // almost all OrderedBytes implementations are skippable.
@Override
public boolean isSkippable() {
return true;
} | 3.68 |
hbase_ServerName_getServerName | /**
* For internal use only.
* @param hostName the name of the host to use
* @param port the port on the host to use
* @param startCode the startcode to use for formatting
* @return Server name made of the concatenation of hostname, port and startcode formatted as
* <code><hostname> ',' <port> ',' <startcode></code>
*/
private static String getServerName(String hostName, int port, long startCode) {
return hostName.toLowerCase(Locale.ROOT) + SERVERNAME_SEPARATOR + port + SERVERNAME_SEPARATOR
+ startCode;
} | 3.68 |
hbase_CompactSplit_isUnderCompaction | /**
* Check if this store is under compaction
*/
public boolean isUnderCompaction(final HStore s) {
return underCompactionStores.contains(getStoreNameForUnderCompaction(s));
} | 3.68 |
graphhopper_ResponsePath_getRouteWeight | /**
* This method returns a double value which is better suited than the time for comparing routes,
* but only if you know what you are doing, e.g. only to compare routes obtained with the same
* query parameters like vehicle.
*/
public double getRouteWeight() {
check("getRouteWeight");
return routeWeight;
} | 3.68 |
flink_StreamExecutionEnvironment_getStateBackend | /**
* Gets the state backend that defines how to store and checkpoint state.
*
* @see #setStateBackend(StateBackend)
*/
@PublicEvolving
public StateBackend getStateBackend() {
return defaultStateBackend;
} | 3.68 |
hudi_HoodieHeartbeatClient_stop | /**
* Stops the heartbeat and deletes the heartbeat file for the specified instant.
*
* @param instantTime The instant time for the heartbeat.
* @throws HoodieException
*/
public void stop(String instantTime) throws HoodieException {
Heartbeat heartbeat = instantToHeartbeatMap.get(instantTime);
if (isHeartbeatStarted(heartbeat)) {
stopHeartbeatTimer(heartbeat);
HeartbeatUtils.deleteHeartbeatFile(fs, basePath, instantTime);
LOG.info("Deleted heartbeat file for instant " + instantTime);
}
} | 3.68 |
flink_OptimizedPlan_accept | /**
* Applies the given visitor top down to all nodes, starting at the sinks.
*
* @param visitor The visitor to apply to the nodes in this plan.
* @see org.apache.flink.util.Visitable#accept(org.apache.flink.util.Visitor)
*/
@Override
public void accept(Visitor<PlanNode> visitor) {
for (SinkPlanNode node : this.dataSinks) {
node.accept(visitor);
}
} | 3.68 |
hadoop_IOStatisticsSnapshot_snapshot | /**
* Take a snapshot.
*
* This completely overwrites the map data with the statistics
* from the source.
* @param source statistics source.
*/
public synchronized void snapshot(IOStatistics source) {
checkNotNull(source);
counters = snapshotMap(source.counters());
gauges = snapshotMap(source.gauges());
minimums = snapshotMap(source.minimums());
maximums = snapshotMap(source.maximums());
meanStatistics = snapshotMap(source.meanStatistics(),
MeanStatistic::copy);
} | 3.68 |
flink_AbstractServerBase_createQueryExecutor | /**
* Creates a thread pool for the query execution.
*
* @return Thread pool for query execution
*/
private ExecutorService createQueryExecutor() {
ThreadFactory threadFactory =
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Flink " + getServerName() + " Thread %d")
.build();
return Executors.newFixedThreadPool(numQueryThreads, threadFactory);
} | 3.68 |
framework_VTabsheet_setTabulatorIndex | /**
* Sets the index that represents the tab's position in the browser's
* focus cycle. Negative index means that this tab element is not
* reachable via tabulator navigation.
* <p>
* By default only the selected tab has a non-negative tabulator index,
* and represents the entire tab sheet. If there are any other navigable
* tabs in the same tab sheet those can be navigated into with
* next/previous buttons, which does not update the selection until
* confirmed with a selection key press.
*
* @param tabIndex
* the tabulator index
*
* @see VTabsheet#getNextTabKey()
* @see VTabsheet#getPreviousTabKey()
* @see VTabsheet#getSelectTabKey()
*/
public void setTabulatorIndex(int tabIndex) {
getElement().setTabIndex(tabIndex);
} | 3.68 |
hbase_SaslClientAuthenticationProviders_reset | /**
* Removes the cached singleton instance of {@link SaslClientAuthenticationProviders}.
*/
public static synchronized void reset() {
providersRef.set(null);
} | 3.68 |
hbase_VersionInfoUtil_buildVersionNumber | /**
* Pack the full version number into an int: the major and minor components get 8 bits each and
* the dot release (patch) gets the low 12 bits. Examples: 1.3.4 is 0x0103004, 2.1.0 is 0x0201000.
* @param major version major number
* @param minor version minor number
* @param patch version patch number
* @return the version number as int. (e.g. 0x0103004 is 1.3.4)
*/
private static int buildVersionNumber(int major, int minor, int patch) {
return (major << 20) | (minor << 12) | patch;
} | 3.68 |
flink_ResourceProfile_getTaskOffHeapMemory | /**
* Get the task off-heap memory needed.
*
* @return The task off-heap memory
*/
public MemorySize getTaskOffHeapMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return taskOffHeapMemory;
} | 3.68 |
hadoop_HadoopExecutors_newSingleThreadScheduledExecutor | //Executors.newSingleThreadScheduledExecutor has special semantics - for the
// moment we'll delegate to it rather than implement the semantics here
public static ScheduledExecutorService newSingleThreadScheduledExecutor(
ThreadFactory threadFactory) {
return Executors.newSingleThreadScheduledExecutor(threadFactory);
} | 3.68 |
flink_WritableSavepoint_write | /**
* Write out a new or updated savepoint.
*
* @param path The path to where the savepoint should be written.
*/
public final void write(String path) {
final Path savepointPath = new Path(path);
List<BootstrapTransformationWithID<?>> newOperatorTransformations =
metadata.getNewOperators();
DataSet<OperatorState> newOperatorStates =
writeOperatorStates(newOperatorTransformations, configuration, savepointPath);
List<OperatorState> existingOperators = metadata.getExistingOperators();
DataSet<OperatorState> finalOperatorStates;
if (existingOperators.isEmpty()) {
finalOperatorStates = newOperatorStates;
} else {
DataSet<OperatorState> existingOperatorStates =
newOperatorStates
.getExecutionEnvironment()
.fromCollection(existingOperators)
.name("existingOperatorStates");
existingOperatorStates
.flatMap(new StatePathExtractor())
.setParallelism(1)
.output(new FileCopyFunction(path));
finalOperatorStates = newOperatorStates.union(existingOperatorStates);
}
finalOperatorStates
.reduceGroup(new MergeOperatorStates(metadata.getMasterStates()))
.name("reduce(OperatorState)")
.output(new SavepointOutputFormat(savepointPath))
.name(path);
} | 3.68 |
querydsl_GuavaGroupBy_sortedTable | /**
* Create a new aggregating table expression using a backing TreeBasedTable
*
* @param row row for the table entries
* @param column column for the table entries
* @param value value for the table entries
* @param rowComparator comparator used to sort the rows
* @param columnComparator comparator used to sort the columns
* @return wrapper expression
*/
public static <R, C, V, T, U, W> AbstractGroupExpression<Pair<Pair<R, C>, V>, TreeBasedTable<T, U, W>> sortedTable(GroupExpression<R, T> row,
GroupExpression<C, U> column,
GroupExpression<V, W> value,
Comparator<? super T> rowComparator,
Comparator<? super U> columnComparator) {
return new GTable.Mixin<R, C, V, T, U, W, TreeBasedTable<T, U, W>>(
row, column, value, GTable.createSorted(QPair.create(QPair.create(row, column), value), rowComparator, columnComparator));
} | 3.68 |
dubbo_Stack_push | /**
* push.
*
* @param ele the element to push.
*/
public void push(E ele) {
if (mElements.size() > mSize) {
mElements.set(mSize, ele);
} else {
mElements.add(ele);
}
mSize++;
} | 3.68 |
hbase_ColumnRangeFilter_isMaxColumnInclusive | /** Returns if max column range is inclusive. */
public boolean isMaxColumnInclusive() {
return maxColumnInclusive;
} | 3.68 |
hadoop_BalanceJob_getDetailMessage | /**
* Get the detailed description of this job.
*/
public String getDetailMessage() {
StringBuilder builder = new StringBuilder();
builder.append("id=").append(id);
if (firstProcedure != null) {
builder.append(",firstProcedure=").append(firstProcedure);
}
if (curProcedure != null) {
builder.append(",currentProcedure=").append(curProcedure);
}
builder.append(",jobDone=").append(jobDone);
if (error != null) {
builder.append(",error=").append(error.getMessage());
}
return builder.toString();
} | 3.68 |
rocketmq-connect_MySqlDatabaseDialect_getSqlType | /**
* Get the MySQL column type for the given sink record field.
*
* @param field the sink record field
* @return the SQL type name
*/
@Override
protected String getSqlType(SinkRecordField field) {
switch (field.schemaType()) {
case INT8:
return "TINYINT";
case INT32:
return "INT";
case INT64:
return "BIGINT";
case FLOAT32:
return "FLOAT";
case FLOAT64:
return "DOUBLE";
case BOOLEAN:
return "TINYINT";
case STRING:
return "TEXT";
case BYTES:
return "VARBINARY(1024)";
default:
return super.getSqlType(field);
}
} | 3.68 |
hudi_Pipelines_hoodieStreamWrite | /**
* The streaming write pipeline.
*
* <p>The input dataset shuffles by the primary key first then
* shuffles by the file group ID before passing around to the write function.
* The whole pipeline looks like the following:
*
* <pre>
* | input1 | ===\ /=== | bucket assigner | ===\ /=== | task1 |
* shuffle(by PK) shuffle(by bucket ID)
* | input2 | ===/ \=== | bucket assigner | ===/ \=== | task2 |
*
* Note: a file group must be handled by one write task to avoid write conflict.
* </pre>
*
* <p>The bucket assigner assigns the inputs to suitable file groups, the write task caches
* and flushes the data set to disk.
*
* @param conf The configuration
* @param dataStream The input data stream
* @return the stream write data stream pipeline
*/
public static DataStream<Object> hoodieStreamWrite(Configuration conf, DataStream<HoodieRecord> dataStream) {
if (OptionsResolver.isBucketIndexType(conf)) {
HoodieIndex.BucketIndexEngineType bucketIndexEngineType = OptionsResolver.getBucketEngineType(conf);
switch (bucketIndexEngineType) {
case SIMPLE:
int bucketNum = conf.getInteger(FlinkOptions.BUCKET_INDEX_NUM_BUCKETS);
String indexKeyFields = OptionsResolver.getIndexKeyField(conf);
BucketIndexPartitioner<HoodieKey> partitioner = new BucketIndexPartitioner<>(bucketNum, indexKeyFields);
return dataStream.partitionCustom(partitioner, HoodieRecord::getKey)
.transform(
opName("bucket_write", conf),
TypeInformation.of(Object.class),
BucketStreamWriteOperator.getFactory(conf))
.uid(opUID("bucket_write", conf))
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
case CONSISTENT_HASHING:
if (OptionsResolver.isInsertOverwrite(conf)) {
// TODO support insert overwrite for consistent bucket index
throw new HoodieException("Consistent hashing bucket index does not work with insert overwrite using FLINK engine. Use simple bucket index or Spark engine.");
}
return dataStream
.transform(
opName("consistent_bucket_assigner", conf),
TypeInformation.of(HoodieRecord.class),
new ProcessOperator<>(new ConsistentBucketAssignFunction(conf)))
.uid(opUID("consistent_bucket_assigner", conf))
.setParallelism(conf.getInteger(FlinkOptions.BUCKET_ASSIGN_TASKS))
.keyBy(record -> record.getCurrentLocation().getFileId())
.transform(
opName("consistent_bucket_write", conf),
TypeInformation.of(Object.class),
BucketStreamWriteOperator.getFactory(conf))
.uid(opUID("consistent_bucket_write", conf))
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
default:
throw new HoodieNotSupportedException("Unknown bucket index engine type: " + bucketIndexEngineType);
}
} else {
WriteOperatorFactory<HoodieRecord> operatorFactory = StreamWriteOperator.getFactory(conf);
return dataStream
// Key-by record key, to avoid multiple subtasks write to a bucket at the same time
.keyBy(HoodieRecord::getRecordKey)
.transform(
"bucket_assigner",
TypeInformation.of(HoodieRecord.class),
new KeyedProcessOperator<>(new BucketAssignFunction<>(conf)))
.uid(opUID("bucket_assigner", conf))
.setParallelism(conf.getInteger(FlinkOptions.BUCKET_ASSIGN_TASKS))
// shuffle by fileId(bucket id)
.keyBy(record -> record.getCurrentLocation().getFileId())
.transform(opName("stream_write", conf), TypeInformation.of(Object.class), operatorFactory)
.uid(opUID("stream_write", conf))
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
}
} | 3.68 |
flink_PrioritizedDeque_contains | /**
* Returns whether the given element is contained in this list. Test is performed by identity.
*/
public boolean contains(T element) {
if (deque.isEmpty()) {
return false;
}
final Iterator<T> iterator = deque.iterator();
while (iterator.hasNext()) {
if (iterator.next() == element) {
return true;
}
}
return false;
} | 3.68 |
flink_JoinHintsResolver_resolve | /**
* Resolves and validates join hints in the given {@link RelNode} list; a {@link
* ValidationException} will be raised for invalid hints.
*
* <p>After resolving join hints, the options of the join hints (declared table name or query
* block name) will be replaced with {@link JoinStrategy#LEFT_INPUT} or {@link
* JoinStrategy#RIGHT_INPUT}.
*
* <p>If the declared table name or query name in a join hint could not match the left side or
* right side of this join, that means this join hint is invalid and a {@link
* ValidationException} will be thrown.
*/
final List<RelNode> resolve(List<RelNode> roots) {
List<RelNode> resolvedRoots =
roots.stream().map(node -> node.accept(this)).collect(Collectors.toList());
validateHints();
return resolvedRoots;
} | 3.68 |
hudi_AbstractTableFileSystemView_getAllPartitionPaths | /**
* @return A list of relative partition paths of all partitions.
* @throws IOException upon error.
*/
protected List<String> getAllPartitionPaths() throws IOException {
throw new HoodieException("Getting all partition paths with file system listing sequentially "
+ "can be very slow. This should not be invoked.");
} | 3.68 |
streampipes_TextBlock_addLabel | /**
* Adds an arbitrary String label to this {@link TextBlock}.
*
* @param label The label
* @see DefaultLabels
*/
public void addLabel(final String label) {
if (labels == null) {
labels = new HashSet<String>(2);
}
labels.add(label);
} | 3.68 |
hadoop_ExternalCall_waitForCompletion | // wait for response to be triggered to support postponed calls
private void waitForCompletion() throws InterruptedException {
synchronized(done) {
while (!done.get()) {
try {
done.wait();
} catch (InterruptedException ie) {
if (Thread.interrupted()) {
throw ie;
}
}
}
}
} | 3.68 |
flink_Order_isOrdered | /**
* Checks, if this enum constant represents in fact an order. That is, whether this property is
* not equal to <tt>Order.NONE</tt>.
*
* @return True, if this enum constant is unequal to <tt>Order.NONE</tt>, false otherwise.
*/
public boolean isOrdered() {
return this != Order.NONE;
} | 3.68 |
hadoop_JobBase_getDoubleValue | /**
*
* @param name
* the counter name
* @return the value of the given counter.
*/
protected Double getDoubleValue(Object name) {
return this.doubleCounters.get(name);
} | 3.68 |
pulsar_ManagedLedgerConfig_getMaxEntriesPerLedger | /**
* @return the maxEntriesPerLedger
*/
public int getMaxEntriesPerLedger() {
return maxEntriesPerLedger;
} | 3.68 |
flink_BatchTask_invoke | /** The main work method. */
@Override
public void invoke() throws Exception {
// --------------------------------------------------------------------
// Initialize
// --------------------------------------------------------------------
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Start registering input and output."));
}
// obtain task configuration (including stub parameters)
Configuration taskConf = getTaskConfiguration();
this.config = new TaskConfig(taskConf);
// now get the operator class which drives the operation
final Class<? extends Driver<S, OT>> driverClass = this.config.getDriver();
this.driver = InstantiationUtil.instantiate(driverClass, Driver.class);
String headName = getEnvironment().getTaskInfo().getTaskName().split("->")[0].trim();
this.metrics =
getEnvironment()
.getMetricGroup()
.getOrAddOperator(
headName.startsWith("CHAIN") ? headName.substring(6) : headName);
this.metrics.getIOMetricGroup().reuseInputMetricsForTask();
if (config.getNumberOfChainedStubs() == 0) {
this.metrics.getIOMetricGroup().reuseOutputMetricsForTask();
}
// initialize the readers.
// this does not yet trigger any stream consuming or processing.
initInputReaders();
initBroadcastInputReaders();
// initialize the writers.
initOutputs();
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Finished registering input and output."));
}
// --------------------------------------------------------------------
// Invoke
// --------------------------------------------------------------------
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Start task code."));
}
this.runtimeUdfContext = createRuntimeContext(metrics);
// whatever happens in this scope, make sure that the local strategies are cleaned up!
// note that the initialization of the local strategies is in the try-finally block as well,
// so that the thread that creates them catches its own errors that may happen in that
// process.
// this is especially important, since there may be asynchronous closes (such as through
// canceling).
try {
// initialize the remaining data structures on the input and trigger the local
// processing
// the local processing includes building the dams / caches
try {
int numInputs = driver.getNumberOfInputs();
int numComparators = driver.getNumberOfDriverComparators();
int numBroadcastInputs = this.config.getNumBroadcastInputs();
initInputsSerializersAndComparators(numInputs, numComparators);
initBroadcastInputsSerializers(numBroadcastInputs);
// set the iterative status for inputs and broadcast inputs
{
List<Integer> iterativeInputs = new ArrayList<>();
for (int i = 0; i < numInputs; i++) {
final int numberOfEventsUntilInterrupt =
getTaskConfig().getNumberOfEventsUntilInterruptInIterativeGate(i);
if (numberOfEventsUntilInterrupt < 0) {
throw new IllegalArgumentException();
} else if (numberOfEventsUntilInterrupt > 0) {
this.inputReaders[i].setIterativeReader();
iterativeInputs.add(i);
if (LOG.isDebugEnabled()) {
LOG.debug(
formatLogString(
"Input ["
+ i
+ "] reads in supersteps with ["
+ numberOfEventsUntilInterrupt
+ "] event(s) till next superstep."));
}
}
}
this.iterativeInputs = asArray(iterativeInputs);
}
{
List<Integer> iterativeBcInputs = new ArrayList<>();
for (int i = 0; i < numBroadcastInputs; i++) {
final int numberOfEventsUntilInterrupt =
getTaskConfig()
.getNumberOfEventsUntilInterruptInIterativeBroadcastGate(i);
if (numberOfEventsUntilInterrupt < 0) {
throw new IllegalArgumentException();
} else if (numberOfEventsUntilInterrupt > 0) {
this.broadcastInputReaders[i].setIterativeReader();
iterativeBcInputs.add(i);
if (LOG.isDebugEnabled()) {
LOG.debug(
formatLogString(
"Broadcast input ["
+ i
+ "] reads in supersteps with ["
+ numberOfEventsUntilInterrupt
+ "] event(s) till next superstep."));
}
}
}
this.iterativeBroadcastInputs = asArray(iterativeBcInputs);
}
initLocalStrategies(numInputs);
} catch (Exception e) {
throw new RuntimeException(
"Initializing the input processing failed"
+ (e.getMessage() == null ? "." : ": " + e.getMessage()),
e);
}
if (!this.running) {
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Task cancelled before task code was started."));
}
return;
}
// pre main-function initialization
initialize();
// read the broadcast variables. they will be released in the finally clause
for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
final String name = this.config.getBroadcastInputName(i);
readAndSetBroadcastInput(
i, name, this.runtimeUdfContext, 1 /* superstep one for the start */);
}
// the work goes here
run();
} finally {
// clean up in any case!
closeLocalStrategiesAndCaches();
clearReaders(inputReaders);
clearWriters(eventualOutputs);
}
if (this.running) {
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Finished task code."));
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Task code cancelled."));
}
}
} | 3.68 |
hadoop_ManifestSuccessData_getSuccess | /**
* Get the success flag.
* @return did the job succeed?
*/
public boolean getSuccess() {
return success;
} | 3.68 |
flink_AscendingTimestampExtractor_withViolationHandler | /**
* Sets the handler for violations to the ascending timestamp order.
*
* @param handler The violation handler to use.
* @return This extractor.
*/
public AscendingTimestampExtractor<T> withViolationHandler(MonotonyViolationHandler handler) {
this.violationHandler = requireNonNull(handler);
return this;
} | 3.68 |
hbase_OrderedBytes_encodeInt8 | /**
* Encode an {@code int8} value using the fixed-length encoding.
* @return the number of bytes written.
* @see #encodeInt64(PositionedByteRange, long, Order)
* @see #decodeInt8(PositionedByteRange)
*/
public static int encodeInt8(PositionedByteRange dst, byte val, Order ord) {
final int offset = dst.getOffset(), start = dst.getPosition();
dst.put(FIXED_INT8).put((byte) (val ^ 0x80));
ord.apply(dst.getBytes(), offset + start, 2);
return 2;
} | 3.68 |
hbase_HFileCorruptionChecker_checkMobRegionDir | /**
* Checks all the mob files of a table.
* @param regionDir The mob region directory
*/
private void checkMobRegionDir(Path regionDir) throws IOException {
if (!fs.exists(regionDir)) {
return;
}
FileStatus[] hfs = null;
try {
hfs = fs.listStatus(regionDir, new FamilyDirFilter(fs));
} catch (FileNotFoundException fnfe) {
// Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
LOG.warn(
"Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(regionDir);
return;
}
// Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
if (hfs.length == 0 && !fs.exists(regionDir)) {
LOG.warn(
"Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping.");
missedMobFiles.add(regionDir);
return;
}
LOG.info("Checking MOB Region Directory {}. Number of entries = {}", regionDir, hfs.length);
for (FileStatus hfFs : hfs) {
Path hf = hfFs.getPath();
checkMobColFamDir(hf);
}
} | 3.68 |
framework_Form_getType | /**
* Gets the field type.
*
* @see AbstractField#getType()
*/
@Override
public Class<?> getType() {
if (getPropertyDataSource() != null) {
return getPropertyDataSource().getType();
}
return Object.class;
} | 3.68 |
pulsar_BKCluster_startBookie | /**
* Helper method to startup a bookie server using a configuration object.
* Also, starts the auto recovery process if isAutoRecoveryEnabled is true.
*
* @param conf
* Server Configuration Object
*
*/
protected LifecycleComponentStack startBookie(ServerConfiguration conf)
throws Exception {
LifecycleComponentStack server =
org.apache.bookkeeper.server.Main.buildBookieServer(new BookieConfiguration(conf));
BookieId address = BookieImpl.getBookieId(conf);
ComponentStarter.startComponent(server);
// Wait for up to 30 seconds for the bookie to start
for (int i = 0; i < 3000; i++) {
if (server.lifecycleState() == Lifecycle.State.STARTED) {
break;
}
Thread.sleep(10);
}
if (server.lifecycleState() != Lifecycle.State.STARTED) {
throw new RuntimeException("Bookie failed to start within timeout period");
}
log.info("New bookie '{}' has been created.", address);
return server;
} | 3.68 |
flink_InPlaceMutableHashTable_updateTableEntryWithReduce | /**
* Looks up the table entry that has the same key as the given record, and updates it by
* performing a reduce step.
*
* @param record The record to update.
* @throws Exception
*/
public void updateTableEntryWithReduce(T record) throws Exception {
T match = prober.getMatchFor(record, reuse);
if (match == null) {
prober.insertAfterNoMatch(record);
} else {
// do the reduce step
T res = reducer.reduce(match, record);
// We have given reuse to the reducer UDF, so create new one if object reuse is
// disabled
if (!objectReuseEnabled) {
reuse = buildSideSerializer.createInstance();
}
prober.updateMatch(res);
}
} | 3.68 |
flink_SkipListUtils_getValuePointer | /**
* Returns the value pointer.
*
* @param memorySegment memory segment for key space.
* @param offset offset of key space in the memory segment.
*/
public static long getValuePointer(MemorySegment memorySegment, int offset) {
return memorySegment.getLong(offset + VALUE_POINTER_OFFSET);
} | 3.68 |
rocketmq-connect_KafkaSourceAdaptorConnector_start | /**
* Start the component
*
* @param config the connector configuration
*/
@Override
public void start(KeyValue config) {
super.start(config);
sourceConnector.validate(taskConfig);
sourceConnector.initialize(new KafkaConnectorContext(connectorContext));
sourceConnector.start(taskConfig);
} | 3.68 |
hudi_HoodieTimeline_isInRange | /**
* Return true if specified timestamp is in range (startTs, endTs].
*/
static boolean isInRange(String timestamp, String startTs, String endTs) {
return HoodieTimeline.compareTimestamps(timestamp, GREATER_THAN, startTs)
&& HoodieTimeline.compareTimestamps(timestamp, LESSER_THAN_OR_EQUALS, endTs);
} | 3.68 |
hadoop_OBSBlockOutputStream_createBlockIfNeeded | /**
* Demand create a destination block.
*
* @return the active block; null if there isn't one.
* @throws IOException on any failure to create
*/
private synchronized OBSDataBlocks.DataBlock createBlockIfNeeded()
throws IOException {
if (activeBlock == null) {
blockCount++;
if (blockCount >= OBSConstants.MAX_MULTIPART_COUNT) {
LOG.warn(
"Number of partitions in stream exceeds limit for OBS: "
+ OBSConstants.MAX_MULTIPART_COUNT
+ " write may fail.");
}
activeBlock = blockFactory.create(blockCount, this.blockSize);
}
return activeBlock;
} | 3.68 |
hadoop_QueueStateManager_stopQueue | /**
* Stop the queue.
* @param queueName the queue name
* @throws YarnException if the queue does not exist
*/
@SuppressWarnings("unchecked")
public synchronized void stopQueue(String queueName) throws YarnException {
SchedulerQueue<T> queue = queueManager.getQueue(queueName);
if (queue == null) {
throw new YarnException("The specified queue:" + queueName
+ " does not exist!");
}
queue.stopQueue();
} | 3.68 |
hbase_ResponseConverter_buildRunCleanerChoreResponse | /**
* Creates a response for the cleaner chore request
* @return A RunCleanerChoreResponse
*/
public static RunCleanerChoreResponse buildRunCleanerChoreResponse(boolean ran) {
return RunCleanerChoreResponse.newBuilder().setCleanerChoreRan(ran).build();
} | 3.68 |
hbase_MemStoreSnapshot_isTagsPresent | /** Returns true if tags are present in this snapshot */
public boolean isTagsPresent() {
return this.tagsPresent;
} | 3.68 |
hudi_HoodieIndexID_asBase64EncodedString | /**
* Get the Base64 encoded version of the ID.
*/
public String asBase64EncodedString() {
throw new HoodieNotSupportedException("Unsupported hash for " + getType());
} | 3.68 |
hbase_MasterRpcServices_getCompletedSnapshots | /**
* List the currently available/stored snapshots. Any in-progress snapshots are ignored
*/
@Override
public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
GetCompletedSnapshotsRequest request) throws ServiceException {
try {
server.checkInitialized();
GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots();
// convert to protobuf
for (SnapshotDescription snapshot : snapshots) {
builder.addSnapshots(snapshot);
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
framework_AbstractComponent_setId | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Component#setId(java.lang.String)
*/
@Override
public void setId(String id) {
getState().id = id;
} | 3.68 |
framework_Tree_addActionHandler | /**
* Adds an action handler.
*
* @see com.vaadin.event.Action.Container#addActionHandler(Action.Handler)
*/
@Override
public void addActionHandler(Action.Handler actionHandler) {
if (actionHandler != null) {
if (actionHandlers == null) {
actionHandlers = new LinkedList<Action.Handler>();
actionMapper = new KeyMapper<Action>();
}
if (!actionHandlers.contains(actionHandler)) {
actionHandlers.add(actionHandler);
markAsDirty();
}
}
} | 3.68 |
hbase_HRegionServer_blockAndCheckIfStopped | /**
* Utility method to wait indefinitely on a znode's availability while checking if the region server
* is shut down
* @param tracker znode tracker to use
* @throws IOException any IO exception, plus if the RS is stopped
* @throws InterruptedException if the waiting thread is interrupted
*/
private void blockAndCheckIfStopped(ZKNodeTracker tracker)
throws IOException, InterruptedException {
while (tracker.blockUntilAvailable(this.msgInterval, false) == null) {
if (this.stopped) {
throw new IOException("Received the shutdown message while waiting.");
}
}
} | 3.68 |
dubbo_DynamicConfiguration_removeListener | /**
* {@link #removeListener(String, String, ConfigurationListener)}
*
* @param key the key to represent a configuration
* @param listener configuration listener
*/
default void removeListener(String key, ConfigurationListener listener) {
removeListener(key, getDefaultGroup(), listener);
} | 3.68 |
morf_GraphBasedUpgradeNode_getChildren | /**
* @return upgrade nodes which depend on this upgrade node
*/
public Set<GraphBasedUpgradeNode> getChildren() {
return children;
} | 3.68 |
hbase_ZKUtil_watchAndCheckExists | /**
* Watch the specified znode for delete/create/change events. The watcher is set whether or not
* the node exists. If the node already exists, the method returns true. If the node does not
* exist, the method returns false.
* @param zkw zk reference
* @param znode path of node to watch
* @return true if znode exists, false if does not exist or error
* @throws KeeperException if unexpected zookeeper exception
*/
public static boolean watchAndCheckExists(ZKWatcher zkw, String znode) throws KeeperException {
try {
Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw);
boolean exists = s != null;
if (exists) {
LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode));
} else {
LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode));
}
return exists;
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e);
zkw.keeperException(e);
return false;
} catch (InterruptedException e) {
LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e);
zkw.interruptedException(e);
return false;
}
} | 3.68 |
flink_RestfulGateway_requestJobStatus | /**
* Request the {@link JobStatus} of the given job.
*
* @param jobId identifying the job for which to retrieve the JobStatus
* @param timeout for the asynchronous operation
* @return A future to the {@link JobStatus} of the given job
*/
default CompletableFuture<JobStatus> requestJobStatus(JobID jobId, @RpcTimeout Time timeout) {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_HBaseTestingUtility_loadRegion | /**
* Load region with rows from 'aaa' to 'zzz'.
* @param r Region
* @param f Family
* @param flush flush the cache if true
* @return Count of rows loaded.
*/
public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
byte[] k = new byte[3];
int rowCount = 0;
for (byte b1 = 'a'; b1 <= 'z'; b1++) {
for (byte b2 = 'a'; b2 <= 'z'; b2++) {
for (byte b3 = 'a'; b3 <= 'z'; b3++) {
k[0] = b1;
k[1] = b2;
k[2] = b3;
Put put = new Put(k);
put.setDurability(Durability.SKIP_WAL);
put.addColumn(f, null, k);
if (r.getWAL() == null) {
put.setDurability(Durability.SKIP_WAL);
}
int preRowCount = rowCount;
int pause = 10;
int maxPause = 1000;
while (rowCount == preRowCount) {
try {
r.put(put);
rowCount++;
} catch (RegionTooBusyException e) {
pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
Threads.sleep(pause);
}
}
}
}
if (flush) {
r.flush(true);
}
}
return rowCount;
} | 3.68 |
hbase_RequestConverter_buildGetLastFlushedSequenceIdRequest | /**
* Creates a request for querying the master the last flushed sequence Id for a region
* @return A {@link GetLastFlushedSequenceIdRequest}
*/
public static GetLastFlushedSequenceIdRequest
buildGetLastFlushedSequenceIdRequest(byte[] regionName) {
return GetLastFlushedSequenceIdRequest.newBuilder()
.setRegionName(UnsafeByteOperations.unsafeWrap(regionName)).build();
} | 3.68 |
framework_StringToBooleanConverter_getFalseString | /**
* Gets the locale-dependent string representation for false. Default is the
* locale-independent value provided by {@link #getFalseString()}.
*
* @since 7.5.4
* @param locale
* to be used
* @return the string representation for false
*/
protected String getFalseString(Locale locale) {
return getFalseString();
} | 3.68 |
hadoop_ClientMethod_getTypes | /**
* Get the calling types for this method.
*
* @return An array of calling types.
*/
public Class<?>[] getTypes() {
return Arrays.copyOf(this.types, this.types.length);
} | 3.68 |
dubbo_HashedWheelTimer_clearTimeouts | /**
* Clear this bucket and add all not expired / cancelled {@link Timeout}s to the given set.
*/
void clearTimeouts(Set<Timeout> set) {
for (; ; ) {
HashedWheelTimeout timeout = pollTimeout();
if (timeout == null) {
return;
}
if (timeout.isExpired() || timeout.isCancelled()) {
continue;
}
set.add(timeout);
}
} | 3.68 |
morf_SchemaUtils_autonumber | /**
* Build an auto-increment primary key column. Type defaults to a universally
* safe big integer. No further modification is possible.
*
* @param name The column name.
* @param startFrom The auto-increment start value.
* @return A new {@link Column}.
*/
public static Column autonumber(String name, int startFrom) {
return new ColumnBean(name, DataType.BIG_INTEGER, 0, 0, false, null, true, true, startFrom);
} | 3.68 |
graphhopper_ArrayUtil_zero | /**
* Creates an IntArrayList filled with zeros
*/
public static IntArrayList zero(int size) {
IntArrayList result = new IntArrayList(size);
result.elementsCount = size;
return result;
} | 3.68 |
flink_MessageParameter_getKey | /**
* Returns the key of this parameter, e.g. "jobid".
*
* @return key of this parameter
*/
public final String getKey() {
return key;
} | 3.68 |
hadoop_NamenodeStatusReport_setDatanodeInfo | /**
* Set the datanode information.
*
* @param numLive Number of live nodes.
* @param numDead Number of dead nodes.
* @param numStale Number of stale nodes.
* @param numDecom Number of decommissioning nodes.
* @param numLiveDecom Number of decommissioned live nodes.
* @param numDeadDecom Number of decommissioned dead nodes.
* @param numInMaintenanceLive Number of in maintenance live nodes.
* @param numInMaintenanceDead Number of in maintenance dead nodes.
* @param numEnteringMaintenance Number of entering maintenance nodes.
* @param numScheduledReplicationBlocks Number of scheduled rep. blocks.
*/
public void setDatanodeInfo(int numLive, int numDead, int numStale,
int numDecom, int numLiveDecom, int numDeadDecom,
int numInMaintenanceLive, int numInMaintenanceDead,
int numEnteringMaintenance, long numScheduledReplicationBlocks) {
this.liveDatanodes = numLive;
this.deadDatanodes = numDead;
this.staleDatanodes = numStale;
this.decomDatanodes = numDecom;
this.liveDecomDatanodes = numLiveDecom;
this.deadDecomDatanodes = numDeadDecom;
this.inMaintenanceLiveDataNodes = numInMaintenanceLive;
this.inMaintenanceDeadDataNodes = numInMaintenanceDead;
this.enteringMaintenanceDataNodes = numEnteringMaintenance;
this.statsValid = true;
this.scheduledReplicationBlocks = numScheduledReplicationBlocks;
} | 3.68 |
flink_ChannelStateWriteRequest_getReadyFuture | /**
* Indicates whether the request is ready. For example, some requests write channel state data
* supplied via a future, and that data future may not be ready yet.
*
* <p>The ready future is used by {@link ChannelStateWriteRequestExecutorImpl}; the executor
* processes ready requests first to avoid deadlock.
*/
public CompletableFuture<?> getReadyFuture() {
return AvailabilityProvider.AVAILABLE;
} | 3.68 |
hbase_HRegion_getCompactPriority | /** Returns The priority that this region should have in the compaction queue */
public int getCompactPriority() {
if (checkSplit().isPresent() && conf.getBoolean(SPLIT_IGNORE_BLOCKING_ENABLED_KEY, false)) {
// if a region should split, split it before compact
return Store.PRIORITY_USER;
}
return stores.values().stream().mapToInt(HStore::getCompactPriority).min()
.orElse(Store.NO_PRIORITY);
} | 3.68 |
hbase_HRegion_checkResources | /**
* Check if there are resources to support an update.
* <p/>
* We throw RegionTooBusyException if above memstore limit and expect client to retry using some
* kind of backoff
*/
private void checkResources() throws RegionTooBusyException {
// If catalog region, do not impose resource constraints or block updates.
if (this.getRegionInfo().isMetaRegion()) {
return;
}
MemStoreSize mss = this.memStoreSizing.getMemStoreSize();
if (mss.getHeapSize() + mss.getOffHeapSize() > this.blockingMemStoreSize) {
blockedRequestsCount.increment();
requestFlush();
// Don't print current limit because it will vary too much. The message is used as a key
// over in RetriesExhaustedWithDetailsException processing.
final String regionName =
this.getRegionInfo() == null ? "unknown" : this.getRegionInfo().getEncodedName();
final String serverName = this.getRegionServerServices() == null
? "unknown"
: (this.getRegionServerServices().getServerName() == null
? "unknown"
: this.getRegionServerServices().getServerName().toString());
RegionTooBusyException rtbe = new RegionTooBusyException("Over memstore limit="
+ org.apache.hadoop.hbase.procedure2.util.StringUtils.humanSize(this.blockingMemStoreSize)
+ ", regionName=" + regionName + ", server=" + serverName);
LOG.warn("Region is too busy due to exceeding memstore size limit.", rtbe);
throw rtbe;
}
} | 3.68 |
hmily_ConfigEnv_getConfig | /**
* Gets config.
*
* @param <T> the type parameter
* @param clazz the clazz
* @return the config
*/
@SuppressWarnings("unchecked")
public <T extends Config> T getConfig(final Class<T> clazz) {
return (T) CONFIGS.get(clazz);
} | 3.68 |
hbase_HMaster_putUpJettyServer | // return the actual infoPort, -1 means disable info server.
private int putUpJettyServer() throws IOException {
if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
return -1;
}
final int infoPort =
conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT);
// -1 is for disabling info server, so no redirecting
if (infoPort < 0 || infoServer == null) {
return -1;
}
if (infoPort == infoServer.getPort()) {
// server is already running
return infoPort;
}
final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
String msg = "Failed to start redirecting jetty server. Address " + addr
+ " does not belong to this host. Correct configuration parameter: "
+ "hbase.master.info.bindAddress";
LOG.error(msg);
throw new IOException(msg);
}
// TODO I'm pretty sure we could just add another binding to the InfoServer run by
// the RegionServer and have it run the RedirectServlet instead of standing up
// a second entire stack here.
masterJettyServer = new Server();
final ServerConnector connector = new ServerConnector(masterJettyServer);
connector.setHost(addr);
connector.setPort(infoPort);
masterJettyServer.addConnector(connector);
masterJettyServer.setStopAtShutdown(true);
masterJettyServer.setHandler(HttpServer.buildGzipHandler(masterJettyServer.getHandler()));
final String redirectHostname =
StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;
final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname);
final WebAppContext context =
new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
context.addServlet(new ServletHolder(redirect), "/*");
context.setServer(masterJettyServer);
try {
masterJettyServer.start();
} catch (Exception e) {
throw new IOException("Failed to start redirecting jetty server", e);
}
return connector.getLocalPort();
} | 3.68 |
hbase_BlockingRpcConnection_writeConnectionHeader | /**
* Write the connection header.
*/
private void writeConnectionHeader() throws IOException {
boolean isCryptoAesEnable = false;
// check if Crypto AES is enabled
if (saslRpcClient != null) {
boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop()
.equalsIgnoreCase(saslRpcClient.getSaslQOP());
isCryptoAesEnable = saslEncryptionEnabled
&& conf.getBoolean(CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT);
}
// if Crypto AES is enabled, set transformation and negotiate with server
if (isCryptoAesEnable) {
waitingConnectionHeaderResponse = true;
}
this.out.write(connectionHeaderWithLength);
this.out.flush();
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_isIncrementalCheckpointsEnabled | /** Gets whether incremental checkpoints are enabled for this state backend. */
public boolean isIncrementalCheckpointsEnabled() {
return enableIncrementalCheckpointing.getOrDefault(
CheckpointingOptions.INCREMENTAL_CHECKPOINTS.defaultValue());
} | 3.68 |
morf_RenameIndex_apply | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema apply(Schema schema) {
return applyChange(schema, fromIndexName, toIndexName);
} | 3.68 |
hudi_HoodieAvroUtils_getRootLevelFieldName | /**
* Obtain the root-level field name of a full field name, possibly a nested field.
* For example, given "a.b.c", the output is "a"; given "a", the output is "a".
*
* @param fieldName The field name.
* @return Root-level field name
*/
public static String getRootLevelFieldName(String fieldName) {
return fieldName.split("\\.")[0];
} | 3.68 |