name | code_snippet | score |
---|---|---|
flink_SinkTestSuiteBase_testScaleUp | /**
* Test connector sink restart from a completed savepoint with a higher parallelism.
*
 * <p>This test will create a sink in the external system, generate a collection of test data
 * and write half of them to this sink by the Flink job with parallelism 2 at first. Then it will
 * stop the job and restart the same job from the completed savepoint with a higher parallelism 4.
 * Once the job is running again, it will write the other half to the sink and compare the result.
 *
 * <p>In order to pass this test, the number of records produced by Flink needs to equal the
 * number of generated test records. The records in the sink are compared to the test data
 * according to the configured semantic. There's no requirement for record order.
*/
@TestTemplate
@DisplayName("Test sink restarting with a higher parallelism")
public void testScaleUp(
TestEnvironment testEnv,
DataStreamSinkExternalContext<T> externalContext,
CheckpointingMode semantic)
throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 2, 4);
} | 3.68 |
hmily_HashedWheelTimer_expireTimeouts | /**
* Expire all {@link HashedWheelTimeout}s for the given {@code deadline}.
*/
public void expireTimeouts(final long deadline) {
HashedWheelTimeout timeout = head;
// process all timeouts
while (timeout != null) {
boolean remove = false;
if (timeout.remainingRounds <= 0) {
if (timeout.deadline <= deadline) {
timeout.expire();
} else {
// The timeout was placed into a wrong slot. This should never happen.
throw new IllegalStateException(String.format(
"timeout.deadline (%d) > deadline (%d)", timeout.deadline, deadline));
}
remove = true;
} else if (timeout.isCancelled()) {
remove = true;
} else {
timeout.remainingRounds--;
}
// store reference to next as we may null out timeout.next in the remove block.
HashedWheelTimeout next = timeout.next;
if (remove) {
remove(timeout);
}
timeout = next;
}
} | 3.68 |
flink_PythonDependencyInfo_create | /**
* Creates PythonDependencyInfo from GlobalJobParameters and DistributedCache.
*
* @param config The config.
* @param distributedCache The DistributedCache object of current task.
 * @return The PythonDependencyInfo object that contains all information about the Python dependencies.
*/
public static PythonDependencyInfo create(
ReadableConfig config, DistributedCache distributedCache) {
Map<String, String> pythonFiles = new LinkedHashMap<>();
for (Map.Entry<String, String> entry :
config.getOptional(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)
.orElse(new HashMap<>())
.entrySet()) {
File pythonFile = distributedCache.getFile(entry.getKey());
String filePath = pythonFile.getAbsolutePath();
pythonFiles.put(filePath, entry.getValue());
}
String requirementsFilePath = null;
String requirementsCacheDir = null;
String requirementsFileName =
config.getOptional(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
.orElse(new HashMap<>())
.get(PythonDependencyUtils.FILE);
if (requirementsFileName != null) {
requirementsFilePath = distributedCache.getFile(requirementsFileName).getAbsolutePath();
String requirementsFileCacheDir =
config.getOptional(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
.orElse(new HashMap<>())
.get(PythonDependencyUtils.CACHE);
if (requirementsFileCacheDir != null) {
requirementsCacheDir =
distributedCache.getFile(requirementsFileCacheDir).getAbsolutePath();
}
}
Map<String, String> archives = new HashMap<>();
for (Map.Entry<String, String> entry :
config.getOptional(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO)
.orElse(new HashMap<>())
.entrySet()) {
String archiveFilePath = distributedCache.getFile(entry.getKey()).getAbsolutePath();
String targetPath = entry.getValue();
archives.put(archiveFilePath, targetPath);
}
String pythonExec = config.get(PYTHON_EXECUTABLE);
return new PythonDependencyInfo(
pythonFiles,
requirementsFilePath,
requirementsCacheDir,
archives,
pythonExec,
config.get(PYTHON_EXECUTION_MODE),
config.get(PYTHON_PATH));
} | 3.68 |
framework_ContainerOrderedWrapper_addItem | /**
* Registers a new Item by its ID to the underlying container and to the
* ordering.
*
* @param itemId
* the ID of the Item to be created.
* @return the added Item or <code>null</code> if the operation failed
* @throws UnsupportedOperationException
* if the addItem is not supported.
*/
@Override
public Item addItem(Object itemId) throws UnsupportedOperationException {
final Item item = container.addItem(itemId);
if (!ordered && item != null) {
addToOrderWrapper(itemId);
}
return item;
} | 3.68 |
flink_DataSource_getInputFormat | /**
* Gets the input format that is executed by this data source.
*
* @return The input format that is executed by this data source.
*/
@Internal
public InputFormat<OUT, ?> getInputFormat() {
return this.inputFormat;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithConcatenationUsingCase | /**
* Tests concatenation in a select with {@linkplain CaseStatement}.
*/
@Test
public void testSelectWithConcatenationUsingCase() {
WhenCondition whenCondition = new WhenCondition(eq(new FieldReference("taxVariationIndicator"), new FieldLiteral('Y')), new FieldReference("exposureCustomerNumber"));
SelectStatement stmt = new SelectStatement(new ConcatenatedField(new FieldReference("assetDescriptionLine1"),
new CaseStatement(new FieldReference("invoicingCustomerNumber"), whenCondition)).as("test")).from(new TableReference(
"schedule"));
String result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedConcatenationWithCase(), result);
} | 3.68 |
morf_GraphBasedUpgradeTraversalService_waitForReadyToExecuteNode | /**
 * The caller of this method blocks until at least one new node is available for execution or
 * all the nodes of the upgrade have been executed. When the upgrade reaches that point the
 * block is released and the method returns. Note that the fact that at least one new node was
 * available at the time the block was released doesn't guarantee that it will still be
 * available later. It may be acquired by another thread in the meantime.
 *
 * @throws InterruptedException - thrown by GraphBasedUpgradeService.waitForAllNodesToBeCompleted.
*/
public void waitForReadyToExecuteNode() throws InterruptedException {
lock.lock();
try {
while(readyToExecuteNodes.isEmpty() && !allNodesCompletedNoLock()) {
// The result of this await is (indirectly) checked by the wait loop
// so there is no need to check the result of the await (so NOSONAR).
newReadyToExecuteNode.await(500, TimeUnit.MILLISECONDS); // NOSONAR
}
} catch (InterruptedException e) {
LOG.error("InterruptedException in GraphBasedUpgradeService.waitForAllNodesToBeCompleted", e);
throw e;
} finally {
lock.unlock();
}
} | 3.68 |
hbase_BufferedMutator_setRpcTimeout | /**
* Set rpc timeout for this mutator instance
* @deprecated Since 3.0.0, will be removed in 4.0.0. Please set this through the
* {@link BufferedMutatorParams}.
*/
@Deprecated
default void setRpcTimeout(int timeout) {
throw new UnsupportedOperationException(
"The BufferedMutator::setRpcTimeout has not been implemented");
} | 3.68 |
framework_VCaption_mightChange | /**
* Checks whether anything in a given state change might cause the caption
* to change.
*
* @param event
* the state change event to check
* @return <code>true</code> if the caption might have changed; otherwise
* <code>false</code>
*/
public static boolean mightChange(StateChangeEvent event) {
if (event.hasPropertyChanged("caption")) {
return true;
}
if (event.hasPropertyChanged("resources")) {
return true;
}
if (event.hasPropertyChanged("errorMessage")) {
return true;
}
return false;
} | 3.68 |
hadoop_ManifestCommitter_commitTask | /**
* Commit the task.
 * This is where the listing of the task attempt directory tree takes place.
* @param context task context.
* @throws IOException IO Failure.
*/
@Override
public void commitTask(final TaskAttemptContext context)
throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(true,
context);
try {
StageConfig stageConfig = committerConfig.createStageConfig()
.withOperations(createManifestStoreOperations())
.build();
taskAttemptCommittedManifest = new CommitTaskStage(stageConfig)
.apply(null).getTaskManifest();
iostatistics.incrementCounter(COMMITTER_TASKS_COMPLETED_COUNT, 1);
} catch (IOException e) {
iostatistics.incrementCounter(COMMITTER_TASKS_FAILED_COUNT, 1);
throw e;
} finally {
logCommitterStatisticsAtDebug();
updateCommonContextOnCommitterExit();
}
} | 3.68 |
framework_BinderValidationStatus_getBeanValidationErrors | /**
* Gets the failed bean level validation results.
*
* @return a list of failed bean level validation results
*/
public List<ValidationResult> getBeanValidationErrors() {
return binderStatuses.stream().filter(ValidationResult::isError)
.collect(Collectors.toList());
} | 3.68 |
hbase_TableInfoModel_toString | /*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (TableRegionModel aRegion : regions) {
sb.append(aRegion.toString());
sb.append('\n');
}
return sb.toString();
} | 3.68 |
framework_VTabsheet_removeAssistiveDescription | /**
* Removes the {@code aria-describedby} attribute from this tab element.
* This should be called when this tab loses focus.
*/
public void removeAssistiveDescription() {
Roles.getTablistRole().removeAriaDescribedbyProperty(getElement());
} | 3.68 |
framework_AbsoluteLayout_setBottomValue | /**
* Sets the 'bottom' attribute value (distance from the bottom of the
* component to the bottom edge of the layout). Currently active units
* are maintained.
*
* @param bottomValue
* The value of the 'bottom' attribute
* @see #setBottomUnits(Unit)
*/
public void setBottomValue(Float bottomValue) {
this.bottomValue = bottomValue;
markAsDirty();
} | 3.68 |
hadoop_OBSCommonUtils_verifyBucketExists | /**
* Verify that the bucket exists. This does not check permissions, not even
* read access.
*
* @param owner the owner OBSFileSystem instance
* @throws FileNotFoundException the bucket is absent
* @throws IOException any other problem talking to OBS
*/
static void verifyBucketExists(final OBSFileSystem owner)
throws FileNotFoundException, IOException {
int retryTime = 1;
while (true) {
try {
if (!owner.getObsClient().headBucket(owner.getBucket())) {
throw new FileNotFoundException(
"Bucket " + owner.getBucket() + " does not exist");
}
return;
} catch (ObsException e) {
LOG.warn("Failed to head bucket for [{}], retry time [{}], "
+ "exception [{}]", owner.getBucket(), retryTime,
translateException("doesBucketExist", owner.getBucket(),
e));
if (MAX_RETRY_TIME == retryTime) {
throw translateException("doesBucketExist",
owner.getBucket(), e);
}
try {
Thread.sleep(DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
retryTime++;
}
} | 3.68 |
hadoop_RpcNoSuchMethodException_getRpcErrorCodeProto | /**
 * Get the detailed RPC error code corresponding to this exception.
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_NO_SUCH_METHOD;
} | 3.68 |
hadoop_WeightedPolicyInfo_setAMRMPolicyWeights | /**
 * Setter method for AMRMProxy policy weights.
*
* @param policyWeights the amrmproxy weights.
*/
public void setAMRMPolicyWeights(Map<SubClusterIdInfo, Float> policyWeights) {
this.amrmPolicyWeights = policyWeights;
} | 3.68 |
querydsl_AliasFactory_createProxy | /**
* Create a proxy instance for the given class and path
*
* @param <A>
* @param cl type of the proxy
* @param path underlying expression
* @return proxy instance
*/
@SuppressWarnings("unchecked")
protected <A> A createProxy(Class<A> cl, Expression<?> path) {
Enhancer enhancer = new Enhancer();
enhancer.setClassLoader(AliasFactory.class.getClassLoader());
if (cl.isInterface()) {
enhancer.setInterfaces(new Class<?>[] {cl, ManagedObject.class});
} else {
enhancer.setSuperclass(cl);
enhancer.setInterfaces(new Class<?>[] {ManagedObject.class});
}
// creates one handler per proxy
MethodInterceptor handler = new PropertyAccessInvocationHandler(path, this, pathFactory, typeSystem);
enhancer.setCallback(handler);
return (A) enhancer.create();
} | 3.68 |
hbase_ResponseConverter_getResult | // Start utilities for Client
public static SingleResponse getResult(final ClientProtos.MutateRequest request,
final ClientProtos.MutateResponse response, final CellScanner cells) throws IOException {
SingleResponse singleResponse = new SingleResponse();
SingleResponse.Entry entry = new SingleResponse.Entry();
entry.setResult(ProtobufUtil.toResult(response.getResult(), cells));
entry.setProcessed(response.getProcessed());
singleResponse.setEntry(entry);
return singleResponse;
} | 3.68 |
framework_Label_fireValueChange | /**
 * Emits the value change event.
*/
protected void fireValueChange() {
// Fire the value change event to all registered listeners
fireEvent(new Label.ValueChangeEvent(this));
} | 3.68 |
hbase_ScannerContext_getSkippingRow | /**
* In this mode, only block size progress is tracked, and limits are ignored. We set this mode
 * when skipping to the next row, in which case all cells returned are thrown away and so should
 * not count towards progress.
* @return true if we are in skipping row mode.
*/
public boolean getSkippingRow() {
return skippingRow;
} | 3.68 |
hbase_TaskGroup_addTask | /**
* Add a new task to the group, and before that might complete the last task in the group
* @param description the description of the new task
* @param withCompleteLast whether to complete the last task in the group
* @return the added new task
*/
public synchronized MonitoredTask addTask(String description, boolean withCompleteLast) {
if (withCompleteLast) {
MonitoredTask previousTask = this.tasks.peekLast();
if (
previousTask != null && previousTask.getState() != State.COMPLETE
&& previousTask.getState() != State.ABORTED
) {
previousTask.markComplete("Completed");
}
}
MonitoredTask task =
TaskMonitor.get().createStatus(description, ignoreSubTasksInTaskMonitor, true);
this.setStatus(description);
this.tasks.addLast(task);
delegate.setStatus(description);
return task;
} | 3.68 |
flink_FileInputFormat_decorateInputStream | /**
* This method allows to wrap/decorate the raw {@link FSDataInputStream} for a certain file
* split, e.g., for decoding. When overriding this method, also consider adapting {@link
* FileInputFormat#testForUnsplittable} if your stream decoration renders the input file
* unsplittable. Also consider calling existing superclass implementations.
*
 * @param inputStream is the input stream to be decorated
* @param fileSplit is the file split for which the input stream shall be decorated
* @return the decorated input stream
* @throws Throwable if the decoration fails
* @see org.apache.flink.api.common.io.InputStreamFSInputWrapper
*/
protected FSDataInputStream decorateInputStream(
FSDataInputStream inputStream, FileInputSplit fileSplit) throws Throwable {
// Wrap stream in an extracting (decompressing) stream if the file ends with a known
// compression file extension.
InflaterInputStreamFactory<?> inflaterInputStreamFactory =
getInflaterInputStreamFactory(fileSplit.getPath());
if (inflaterInputStreamFactory != null) {
return new InputStreamFSInputWrapper(inflaterInputStreamFactory.create(stream));
}
return inputStream;
} | 3.68 |
morf_UpgradeScriptAdditionsProvider_getUpgradeScriptAdditions | /**
* Returns all script additions with the filtering criteria applied.
* The filtering logic should be provided by calling {@link #setAllowedPredicate(Predicate)} first.
* @return set of upgrade script additions.
*/
default Set<UpgradeScriptAddition> getUpgradeScriptAdditions() {
return Collections.emptySet();
} | 3.68 |
hadoop_BytesWritable_get | /**
* Get the data from the BytesWritable.
* @deprecated Use {@link #getBytes()} instead.
* @return data from the BytesWritable.
*/
@Deprecated
public byte[] get() {
return getBytes();
} | 3.68 |
hbase_ReplicationSourceManager_deleteQueue | /**
* Delete a complete queue of wals associated with a replication source
* @param queueId the id of replication queue to delete
*/
private void deleteQueue(ReplicationQueueId queueId) {
abortWhenFail(() -> this.queueStorage.removeQueue(queueId));
} | 3.68 |
hudi_GcsEventsSource_processMessages | /**
* Convert Pubsub messages into a batch of GCS file MetadataMsg objects, skipping those that
* don't need to be processed.
*
* @param receivedMessages Pubsub messages
* @return A batch of GCS file metadata messages
*/
private MessageBatch processMessages(List<ReceivedMessage> receivedMessages) {
List<String> messages = new ArrayList<>();
for (ReceivedMessage received : receivedMessages) {
MetadataMessage message = new MetadataMessage(received.getMessage());
String msgStr = message.toStringUtf8();
logDetails(message, msgStr);
messagesToAck.add(received.getAckId());
MessageValidity messageValidity = message.shouldBeProcessed();
if (messageValidity.getDecision() == DO_SKIP) {
LOG.info("Skipping message: " + messageValidity.getDescription());
continue;
}
messages.add(msgStr);
}
return new MessageBatch(messages);
} | 3.68 |
flink_ExternalResourceUtils_getExternalResourceAmountMap | /** Get the map of resource name and amount of all of enabled external resources. */
@VisibleForTesting
static Map<String, Long> getExternalResourceAmountMap(Configuration config) {
final Set<String> resourceSet = getExternalResourceSet(config);
if (resourceSet.isEmpty()) {
return Collections.emptyMap();
}
final Map<String, Long> externalResourceAmountMap = new HashMap<>();
for (String resourceName : resourceSet) {
final ConfigOption<Long> amountOption =
key(ExternalResourceOptions.getAmountConfigOptionForResource(resourceName))
.longType()
.noDefaultValue();
final Optional<Long> amountOpt = config.getOptional(amountOption);
if (!amountOpt.isPresent()) {
LOG.warn(
"The amount of the {} should be configured. Will ignore that resource.",
resourceName);
} else if (amountOpt.get() <= 0) {
LOG.warn(
"The amount of the {} should be positive while finding {}. Will ignore that resource.",
amountOpt.get(),
resourceName);
} else {
externalResourceAmountMap.put(resourceName, amountOpt.get());
}
}
return externalResourceAmountMap;
} | 3.68 |
graphhopper_VectorTileEncoder_clipGeometry | /**
* Clip geometry according to buffer given at construct time. This method
* can be overridden to change clipping behavior. See also
* {@link #clipCovers(Geometry)}.
*
* @param geometry a {@link Geometry} to check for intersection with the current clip geometry
* @return a boolean true when current clip geometry intersects with the given geometry.
*/
protected Geometry clipGeometry(Geometry geometry) {
try {
if (clipEnvelope.contains(geometry.getEnvelopeInternal())) {
return geometry;
}
Geometry original = geometry;
geometry = clipGeometry.intersection(original);
// Sometimes an intersection is returned as an empty geometry;
// going via WKT fixes the problem.
if (geometry.isEmpty() && clipGeometryPrepared.intersects(original)) {
Geometry originalViaWkt = new WKTReader().read(original.toText());
geometry = clipGeometry.intersection(originalViaWkt);
}
return geometry;
} catch (TopologyException e) {
// could not intersect. original geometry will be used instead.
return geometry;
} catch (ParseException e1) {
// could not encode/decode WKT. original geometry will be used
// instead.
return geometry;
}
} | 3.68 |
flink_DagConnection_setInterestingProperties | /**
* Sets the interesting properties for this pact connection.
*
* @param props The interesting properties.
*/
public void setInterestingProperties(InterestingProperties props) {
if (this.interestingProps == null) {
this.interestingProps = props;
} else {
throw new IllegalStateException("Interesting Properties have already been set.");
}
} | 3.68 |
framework_ViewBeforeLeaveEvent_isNavigateRun | /**
* Checks if the navigate command has been executed.
*
* @return <code>true</code> if {@link #navigate()} has been called,
* <code>false</code> otherwise
*/
protected boolean isNavigateRun() {
return navigateRun;
} | 3.68 |
open-banking-gateway_FintechConsentSpecSecureStorage_fromInboxForAuth | /**
* Get data from FinTechs' inbox associated with the FinTech user.
* @param authSession Authorization session associated with this user
* @param password FinTech user password
* @return FinTechs' users' keys to access consent, spec. etc.
*/
@SneakyThrows
public FinTechUserInboxData fromInboxForAuth(AuthSession authSession, Supplier<char[]> password) {
try (InputStream is = datasafeServices.inboxService().read(
ReadRequest.forDefaultPrivate(
authSession.getFintechUser().getUserIdAuth(password),
new FintechUserAuthSessionTuple(authSession).toDatasafePathWithoutParent()))
) {
return mapper.readValue(is, FinTechUserInboxData.class);
}
} | 3.68 |
shardingsphere-elasticjob_JobFacade_isNeedSharding | /**
 * Judge whether the job needs resharding.
*
* @return need resharding or not
*/
public boolean isNeedSharding() {
return shardingService.isNeedSharding();
} | 3.68 |
morf_NamedParameterPreparedStatement_setInt | /**
* Sets the value of a named integer parameter.
*
* @param parameter the parameter metadata.
* @param value the parameter value.
* @return this, for method chaining
* @exception SQLException if an error occurs when setting the parameter
*/
public NamedParameterPreparedStatement setInt(SqlParameter parameter, final int value) throws SQLException {
forEachOccurrenceOfParameter(parameter, new Operation() {
@Override
public void apply(int parameterIndex) throws SQLException {
statement.setInt(parameterIndex, value);
}
});
return this;
} | 3.68 |
flink_FlinkAggregateJoinTransposeRule_registry | /**
* Creates a {@link org.apache.calcite.sql.SqlSplittableAggFunction.Registry} that is a view of
* a list.
*/
private static <E> SqlSplittableAggFunction.Registry<E> registry(final List<E> list) {
return new SqlSplittableAggFunction.Registry<E>() {
public int register(E e) {
int i = list.indexOf(e);
if (i < 0) {
i = list.size();
list.add(e);
}
return i;
}
};
} | 3.68 |
hbase_HFileBlock_getByteStream | /** Returns a byte stream reading the data(excluding header and checksum) of this block */
DataInputStream getByteStream() {
ByteBuff dup = this.bufWithoutChecksum.duplicate();
dup.position(this.headerSize());
return new DataInputStream(new ByteBuffInputStream(dup));
} | 3.68 |
morf_AbstractSqlDialectTest_testAddIntegerColumn | /**
* Test adding an integer column.
*/
@Test
public void testAddIntegerColumn() {
testAlterTableColumn(AlterationType.ADD, column("intField_new", DataType.INTEGER).nullable(), expectedAlterTableAddIntegerColumnStatement());
} | 3.68 |
hbase_SerialReplicationChecker_isLastRangeAndOpening | // We may write a open region marker to WAL before we write the open sequence number to meta, so
// if a region is in OPENING state and we are in the last range, it is not safe to say we can push
// even if the previous range is finished.
private boolean isLastRangeAndOpening(ReplicationBarrierResult barrierResult, int index) {
return index == barrierResult.getBarriers().length
&& barrierResult.getState() == RegionState.State.OPENING;
} | 3.68 |
hadoop_DiskBalancerDataNode_equals | /**
* Indicates whether some other object is "equal to" this one.
*/
@Override
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != getClass())) {
return false;
}
DiskBalancerDataNode that = (DiskBalancerDataNode) obj;
return dataNodeUUID.equals(that.getDataNodeUUID());
} | 3.68 |
hudi_HoodieExampleDataGenerator_generateInsertsStreamOnPartition | /**
* Generates new inserts, across a single partition path. It also updates the list of existing keys.
*/
public Stream<HoodieRecord<T>> generateInsertsStreamOnPartition(String commitTime, Integer n, String partitionPath) {
int currSize = getNumExistingKeys();
return IntStream.range(0, n).boxed().map(i -> {
HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath);
KeyPartition kp = new KeyPartition();
kp.key = key;
kp.partitionPath = partitionPath;
existingKeys.put(currSize + i, kp);
numExistingKeys++;
return new HoodieAvroRecord<>(key, generateRandomValue(key, commitTime));
});
} | 3.68 |
hadoop_SubClusterIdInfo_toId | /**
* Get the sub-cluster identifier as {@link SubClusterId}.
* @return the sub-cluster id.
*/
public SubClusterId toId() {
return SubClusterId.newInstance(id);
} | 3.68 |
flink_HsSelectiveSpillingStrategy_onMemoryUsageChanged | // When the amount of memory used exceeds the threshold, decide action based on global
// information. Otherwise, no need to take action.
@Override
public Optional<Decision> onMemoryUsageChanged(
int numTotalRequestedBuffers, int currentPoolSize) {
return numTotalRequestedBuffers < currentPoolSize * spillThreshold
? Optional.of(Decision.NO_ACTION)
: Optional.empty();
} | 3.68 |
flink_AbstractUdfOperator_getUserCodeWrapper | /**
* Gets the function that is held by this operator. The function is the actual implementation of
* the user code.
*
* <p>This throws an exception if the pact does not contain an object but a class for the user
* code.
*
* @return The object with the user function for this operator.
* @see org.apache.flink.api.common.operators.Operator#getUserCodeWrapper()
*/
@Override
public UserCodeWrapper<FT> getUserCodeWrapper() {
return userFunction;
} | 3.68 |
morf_SqlServerDialect_alterTableAddColumnStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableAddColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column)
*/
@Override
public Collection<String> alterTableAddColumnStatements(Table table, Column column) {
List<String> statements = new ArrayList<>();
// TODO looks like if we're adding to an existing PK we should drop the PK first here. SQL
// server is currently hard to test so need to investigate further.
StringBuilder statement = new StringBuilder()
.append("ALTER TABLE ")
.append(schemaNamePrefix())
.append(table.getName())
.append(" ADD ") // We don't say COLUMN here for some reason
.append(column.getName())
.append(' ')
.append(sqlRepresentationOfColumnType(table, column, true));
statements.add(statement.toString());
// Recreate the primary key if the column is in it
if (column.isPrimaryKey()) {
// Add the new column if this is a change and it wasn't part of the key
// before. Remove it if it now isn't part of the key and it was before
StringBuilder primaryKeyStatement = new StringBuilder()
.append("ALTER TABLE ")
.append(schemaNamePrefix())
.append(table.getName())
.append(" ADD ")
.append(buildPrimaryKeyConstraint(table.getName(), namesOfColumns(primaryKeysForTable(table))));
statements.add(primaryKeyStatement.toString());
}
return statements;
} | 3.68 |
flink_StructuredOptionsSplitter_splitEscaped | /**
* Splits the given string on the given delimiter. It supports quoting parts of the string with
* either single (') or double quotes ("). Quotes can be escaped by doubling the quotes.
*
* <p>Examples:
*
* <ul>
* <li>'A;B';C => [A;B], [C]
* <li>"AB'D";B;C => [AB'D], [B], [C]
* <li>"AB'""D;B";C => [AB'\"D;B], [C]
* </ul>
*
* <p>For more examples check the tests.
*
* @param string a string to split
* @param delimiter delimiter to split on
* @return a list of splits
*/
static List<String> splitEscaped(String string, char delimiter) {
List<Token> tokens = tokenize(checkNotNull(string), delimiter);
return processTokens(tokens);
} | 3.68 |
hbase_ChecksumType_nameToType | /**
 * Map a checksum name to a specific type, using our own names.
 * @return Type associated with the passed name.
*/
public static ChecksumType nameToType(final String name) {
for (ChecksumType t : ChecksumType.values()) {
if (t.getName().equals(name)) {
return t;
}
}
throw new RuntimeException("Unknown checksum type name " + name);
} | 3.68 |
hbase_TableRegionModel_setStartKey | /**
* @param startKey the start key
*/
public void setStartKey(byte[] startKey) {
this.startKey = startKey;
} | 3.68 |
hudi_AvroSchemaEvolutionUtils_reconcileSchemaRequirements | /**
 * Reconciles nullability and datatype requirements between the {@code source} and {@code target} schemas,
 * by adjusting those of the {@code source} schema to be in line with the ones of the
 * {@code target} schema. The source is considered to be the new incoming schema, while the target typically refers to the previous table schema.
 * For example:
 * if colA in source is non-nullable but is nullable in target, the output schema will have colA as nullable;
 * if "hoodie.datasource.write.new.columns.nullable" is set to true and colB is not present in source but
 * is present in target, the output schema will have colB as nullable;
 * if colC has a different data type in the source schema compared to the target schema and the type is promotable (say source is int
 * and target is long; since int can be promoted to long), colC will have the long data type in the output schema.
*
*
* @param sourceSchema source schema that needs reconciliation
* @param targetSchema target schema that source schema will be reconciled against
* @param opts config options
* @return schema (based off {@code source} one) that has nullability constraints and datatypes reconciled
*/
public static Schema reconcileSchemaRequirements(Schema sourceSchema, Schema targetSchema, Map<String, String> opts) {
if (sourceSchema.getType() == Schema.Type.NULL || sourceSchema.getFields().isEmpty() || targetSchema.getFields().isEmpty()) {
return sourceSchema;
}
InternalSchema sourceInternalSchema = convert(sourceSchema);
InternalSchema targetInternalSchema = convert(targetSchema);
List<String> colNamesSourceSchema = sourceInternalSchema.getAllColsFullName();
List<String> colNamesTargetSchema = targetInternalSchema.getAllColsFullName();
boolean makeNewColsNullable = "true".equals(opts.get(MAKE_NEW_COLUMNS_NULLABLE.key()));
List<String> nullableUpdateColsInSource = new ArrayList<>();
List<String> typeUpdateColsInSource = new ArrayList<>();
colNamesSourceSchema.forEach(field -> {
// handle columns that needs to be made nullable
if ((makeNewColsNullable && !colNamesTargetSchema.contains(field))
|| colNamesTargetSchema.contains(field) && sourceInternalSchema.findField(field).isOptional() != targetInternalSchema.findField(field).isOptional()) {
nullableUpdateColsInSource.add(field);
}
// handle columns that needs type to be updated
if (colNamesTargetSchema.contains(field) && SchemaChangeUtils.shouldPromoteType(sourceInternalSchema.findType(field), targetInternalSchema.findType(field))) {
typeUpdateColsInSource.add(field);
}
});
if (nullableUpdateColsInSource.isEmpty() && typeUpdateColsInSource.isEmpty()) {
//standardize order of unions
return convert(sourceInternalSchema, sourceSchema.getFullName());
}
TableChanges.ColumnUpdateChange schemaChange = TableChanges.ColumnUpdateChange.get(sourceInternalSchema);
// Reconcile nullability constraints (by executing phony schema change)
if (!nullableUpdateColsInSource.isEmpty()) {
schemaChange = reduce(nullableUpdateColsInSource, schemaChange,
(change, field) -> change.updateColumnNullability(field, true));
}
// Reconcile type promotions
if (!typeUpdateColsInSource.isEmpty()) {
schemaChange = reduce(typeUpdateColsInSource, schemaChange,
(change, field) -> change.updateColumnType(field, targetInternalSchema.findType(field)));
}
return convert(SchemaChangeUtils.applyTableChanges2Schema(sourceInternalSchema, schemaChange), sourceSchema.getFullName());
} | 3.68 |
hadoop_Container_getAllocationRequestId | /**
* Get the optional <em>ID</em> corresponding to the original {@code
* ResourceRequest{@link #getAllocationRequestId()}}s which is satisfied by
* this allocated {@code Container}.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates {@code Container}s.
* <b>Applications</b> can continue to completely ignore the returned ID in
* the response and use the allocation for any of their outstanding requests.
* <p>
*
* @return the <em>ID</em> corresponding to the original allocation request
* which is satisfied by this allocation.
*/
@Public
@Evolving
public long getAllocationRequestId() {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_TableDescriptorBuilder_setCoprocessorToMap | /**
* Add coprocessor to values Map
 * @param specStr The Coprocessor specification all in one String
* @return Returns <code>this</code>
*/
private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
if (specStr == null) {
return this;
}
// generate a coprocessor key
int maxCoprocessorNumber = 0;
Matcher keyMatcher;
for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
if (!keyMatcher.matches()) {
continue;
}
maxCoprocessorNumber =
Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
}
maxCoprocessorNumber++;
String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
} | 3.68 |
querydsl_ExpressionUtils_extract | /**
* Get the potentially wrapped expression
*
* @param expr expression to analyze
* @return inner expression
*/
@SuppressWarnings("unchecked")
public static <T> Expression<T> extract(Expression<T> expr) {
if (expr != null) {
final Class<?> clazz = expr.getClass();
if (clazz == PathImpl.class || clazz == PredicateOperation.class || clazz == ConstantImpl.class) {
return expr;
} else {
return (Expression<T>) expr.accept(ExtractorVisitor.DEFAULT, null);
}
} else {
return null;
}
} | 3.68 |
zilla_DispatchAgent_supplyCounterWriter | // required for testing
public LongConsumer supplyCounterWriter(
long bindingId,
long metricId)
{
return countersLayout.supplyWriter(bindingId, metricId);
} | 3.68 |
framework_AutomaticImmediate_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final TextField textField = new TextField() {
/*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#fireValueChange(boolean)
*/
@Override
protected void fireValueChange(boolean repaintIsNotNeeded) {
log("fireValueChange");
super.fireValueChange(repaintIsNotNeeded);
}
};
textField.setId(FIELD);
final ValueChangeListener listener = event -> log(
"Value changed: " + event.getProperty().getValue());
final CheckBox checkBox = new CheckBox("Toggle listener");
checkBox.addValueChangeListener(valueChange -> {
if (checkBox.getValue()) {
textField.addValueChangeListener(listener);
} else {
textField.removeValueChangeListener(listener);
}
});
checkBox.setId(LISTENER_TOGGLE);
Button b = new Button(
"setImmediate(false), sets explicitly false and causes server roundtrip",
event -> textField.setImmediate(false));
b.setId(EXPLICIT_FALSE);
Button b2 = new Button("Hit server, causes server roundtrip", event -> {
});
b2.setId(BUTTON);
addComponent(textField);
addComponent(checkBox);
addComponent(b);
addComponent(b2);
} | 3.68 |
hadoop_CachedDNSToSwitchMapping_getCachedHosts | /**
 * @param names a list of hostnames to look up (can be empty)
* @return the cached resolution of the list of hostnames/addresses.
* or null if any of the names are not currently in the cache
*/
private List<String> getCachedHosts(List<String> names) {
List<String> result = new ArrayList<String>(names.size());
// Construct the result
for (String name : names) {
String networkLocation = cache.get(name);
if (networkLocation != null) {
result.add(networkLocation);
} else {
return null;
}
}
return result;
} | 3.68 |
dubbo_RpcContext_isConsumerSide | /**
* is consumer side.
*
* @return consumer side.
*/
public boolean isConsumerSide() {
return newRpcContext.isConsumerSide();
} | 3.68 |
graphhopper_ReaderElement_hasTag | /**
* Check a number of tags in the given order if their value is equal to the specified value.
*/
public boolean hasTag(List<String> keyList, Object value) {
for (String key : keyList) {
if (value.equals(getTag(key, null)))
return true;
}
return false;
} | 3.68 |
flink_RoundRobinOperatorStateRepartitioner_collectStates | /** Collect the states from given parallelSubtaskStates with the specific {@code mode}. */
private Map<String, StateEntry> collectStates(
List<List<OperatorStateHandle>> parallelSubtaskStates, OperatorStateHandle.Mode mode) {
Map<String, StateEntry> states =
CollectionUtil.newHashMapWithExpectedSize(parallelSubtaskStates.size());
for (int i = 0; i < parallelSubtaskStates.size(); ++i) {
final int subtaskIndex = i;
List<OperatorStateHandle> subTaskState = parallelSubtaskStates.get(i);
for (OperatorStateHandle operatorStateHandle : subTaskState) {
if (operatorStateHandle == null) {
continue;
}
final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>>
partitionOffsetEntries =
operatorStateHandle.getStateNameToPartitionOffsets().entrySet();
partitionOffsetEntries.stream()
.filter(entry -> entry.getValue().getDistributionMode().equals(mode))
.forEach(
entry -> {
StateEntry stateEntry =
states.computeIfAbsent(
entry.getKey(),
k ->
new StateEntry(
parallelSubtaskStates.size()
* partitionOffsetEntries
.size(),
parallelSubtaskStates.size()));
stateEntry.addEntry(
subtaskIndex,
Tuple2.of(
operatorStateHandle.getDelegateStateHandle(),
entry.getValue()));
});
}
}
return states;
} | 3.68 |
hbase_HMaster_modifyNamespace | /**
* Modify an existing Namespace.
* @param nonceGroup Identifier for the source of the request, a client or process.
* @param nonce A unique identifier for this operation from the client or process identified
* by <code>nonceGroup</code> (the source must ensure each operation gets a
* unique id).
* @return procedure id
*/
long modifyNamespace(final NamespaceDescriptor newNsDescriptor, final long nonceGroup,
final long nonce) throws IOException {
checkInitialized();
TableName.isLegalNamespaceName(Bytes.toBytes(newNsDescriptor.getName()));
return MasterProcedureUtil
.submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
NamespaceDescriptor oldNsDescriptor = getNamespace(newNsDescriptor.getName());
getMaster().getMasterCoprocessorHost().preModifyNamespace(oldNsDescriptor,
newNsDescriptor);
// We need to wait for the procedure to potentially fail due to "prepare" sanity
// checks. This will block only the beginning of the procedure. See HBASE-19953.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
LOG.info(getClientIdAuditPrefix() + " modify " + newNsDescriptor);
// Execute the operation synchronously - wait for the operation to complete before
// continuing.
setProcId(getClusterSchema().modifyNamespace(newNsDescriptor, getNonceKey(), latch));
latch.await();
getMaster().getMasterCoprocessorHost().postModifyNamespace(oldNsDescriptor,
newNsDescriptor);
}
@Override
protected String getDescription() {
return "ModifyNamespaceProcedure";
}
});
} | 3.68 |
hbase_MetricRegistries_global | /**
* Return the global singleton instance for the MetricRegistries.
* @return MetricRegistries implementation.
*/
public static MetricRegistries global() {
return LazyHolder.GLOBAL;
} | 3.68 |
hudi_DiskMap_cleanup | /**
* Cleanup all resources, files and folders.
*/
private void cleanup(boolean isTriggeredFromShutdownHook) {
try {
FileIOUtils.deleteDirectory(diskMapPathFile);
} catch (IOException exception) {
LOG.warn("Error while deleting the disk map directory=" + diskMapPath, exception);
}
if (!isTriggeredFromShutdownHook && shutdownThread != null) {
Runtime.getRuntime().removeShutdownHook(shutdownThread);
}
} | 3.68 |
hbase_CanaryTool_generateMonitorTables | /** Returns List of tables to use in test. */
private String[] generateMonitorTables(String[] monitorTargets) throws IOException {
String[] returnTables = null;
if (this.useRegExp) {
Pattern pattern = null;
List<TableDescriptor> tds = null;
Set<String> tmpTables = new TreeSet<>();
try {
LOG.debug(String.format("reading list of tables"));
tds = this.admin.listTableDescriptors(pattern);
if (tds == null) {
tds = Collections.emptyList();
}
for (String monitorTarget : monitorTargets) {
pattern = Pattern.compile(monitorTarget);
for (TableDescriptor td : tds) {
if (pattern.matcher(td.getTableName().getNameAsString()).matches()) {
tmpTables.add(td.getTableName().getNameAsString());
}
}
}
} catch (IOException e) {
LOG.error("Communicate with admin failed", e);
throw e;
}
if (tmpTables.size() > 0) {
returnTables = tmpTables.toArray(new String[tmpTables.size()]);
} else {
String msg = "No HTable found, tablePattern:" + Arrays.toString(monitorTargets);
LOG.error(msg);
this.errorCode = INIT_ERROR_EXIT_CODE;
throw new TableNotFoundException(msg);
}
} else {
returnTables = monitorTargets;
}
return returnTables;
} | 3.68 |
morf_MySqlDialect_repairAutoNumberStartPosition | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#repairAutoNumberStartPosition(Table, SqlScriptExecutor, Connection)
*/
@Override
public void repairAutoNumberStartPosition(Table table, SqlScriptExecutor executor,Connection connection) {
Column autoIncrementColumn = getAutoIncrementColumnForTable(table);
if (autoIncrementColumn == null) {
executor.execute(updateStatisticsStatement(table), connection);
return;
}
long maxId = executor
.executeQuery(checkMaxIdAutonumberStatement(table,autoIncrementColumn))
.withConnection(connection)
.processWith(new ResultSetProcessor<Long>() {
@Override
public Long process(ResultSet resultSet) throws SQLException {
if (!resultSet.next()) {
throw new UnsupportedOperationException("Nothing returned by results set");
}
return resultSet.getLong(1);
}
});
// We reset the auto increment seed to our start value every time we bulk insert data. If the max value
// on the table is greater, mySQL will just use that instead
Collection<String> repairStatements = maxId < AUTONUMBER_LIMIT ?
ImmutableList.of(alterAutoincrementStatement(table,autoIncrementColumn),updateStatisticsStatement(table)) :
ImmutableList.of(updateStatisticsStatement(table));
executor.execute(repairStatements,connection);
} | 3.68 |
framework_Link_setTargetWidth | /**
* Sets the target window width.
*
* @param targetWidth
* the targetWidth to set.
*/
public void setTargetWidth(int targetWidth) {
getState().targetWidth = targetWidth;
} | 3.68 |
flink_PermanentBlobCache_registerJob | /**
* Registers use of job-related BLOBs.
*
* <p>Using any other method to access BLOBs, e.g. {@link #getFile}, is only valid within calls
* to <tt>registerJob(JobID)</tt> and {@link #releaseJob(JobID)}.
*
* @param jobId ID of the job this blob belongs to
* @see #releaseJob(JobID)
*/
@Override
public void registerJob(JobID jobId) {
checkNotNull(jobId);
synchronized (jobRefCounters) {
RefCount ref = jobRefCounters.get(jobId);
if (ref == null) {
ref = new RefCount();
jobRefCounters.put(jobId, ref);
} else {
// reset cleanup timeout
ref.keepUntil = -1;
}
++ref.references;
}
} | 3.68 |
flink_EnvironmentInformation_getTemporaryFileDirectory | /**
* Gets the directory for temporary files, as returned by the JVM system property
* "java.io.tmpdir".
*
* @return The directory for temporary files.
*/
public static String getTemporaryFileDirectory() {
return System.getProperty("java.io.tmpdir");
} | 3.68 |
dubbo_AbstractClusterInvoker_invokeWithContextAsync | /**
* When using a thread pool to fork a child thread, ThreadLocal cannot be passed.
* In this scenario, please use the invokeWithContextAsync method.
*
 * @return the invocation result
*/
protected Result invokeWithContextAsync(Invoker<T> invoker, Invocation invocation, URL consumerUrl) {
Invoker<T> originInvoker = setContext(invoker, consumerUrl);
Result result;
try {
result = invoker.invoke(invocation);
} finally {
clearContext(originInvoker);
}
return result;
} | 3.68 |
hbase_Mutation_get | /**
* Returns a list of all KeyValue objects with matching column family and qualifier.
* @param family column family
* @param qualifier column qualifier
* @return a list of KeyValue objects with the matching family and qualifier, returns an empty
* list if one doesn't exist for the given family.
*/
public List<Cell> get(byte[] family, byte[] qualifier) {
List<Cell> filteredList = new ArrayList<>();
for (Cell cell : getCellList(family)) {
if (CellUtil.matchingQualifier(cell, qualifier)) {
filteredList.add(cell);
}
}
return filteredList;
} | 3.68 |
hbase_HRegion_visitBatchOperations | /**
* Helper method for visiting pending/ all batch operations
*/
public void visitBatchOperations(boolean pendingOnly, int lastIndexExclusive, Visitor visitor)
throws IOException {
assert lastIndexExclusive <= this.size();
for (int i = nextIndexToProcess; i < lastIndexExclusive; i++) {
if (!pendingOnly || isOperationPending(i)) {
if (!visitor.visit(i)) {
break;
}
}
}
} | 3.68 |
flink_MetricConfig_getFloat | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key the hashtable key.
* @param defaultValue a default value.
* @return the value in this property list with the specified key value parsed as a float.
*/
public float getFloat(String key, float defaultValue) {
String argument = getProperty(key, null);
return argument == null ? defaultValue : Float.parseFloat(argument);
} | 3.68 |
flink_GSBlobIdentifier_getBlobId | /**
* Get a Google blob id for this identifier, with generation=null.
*
* @return The BlobId
*/
public BlobId getBlobId() {
return BlobId.of(bucketName, objectName);
} | 3.68 |
flink_Operator_getParallelism | /**
* Returns the parallelism of this operator.
*
* @return The parallelism of this operator.
*/
public int getParallelism() {
return this.parallelism;
} | 3.68 |
hadoop_MappingRuleResult_createDefaultPlacementResult | /**
* Generator method for default placement results. It is a specialized
* placement result which will only use the "%default" as a queue name.
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createDefaultPlacementResult() {
return RESULT_DEFAULT_PLACEMENT;
} | 3.68 |
zxing_SearchBookContentsActivity_parseResult | // Available fields: page_id, page_number, snippet_text
private SearchBookContentsResult parseResult(JSONObject json) {
String pageId;
String pageNumber;
String snippet;
try {
pageId = json.getString("page_id");
pageNumber = json.optString("page_number");
snippet = json.optString("snippet_text");
} catch (JSONException e) {
Log.w(TAG, e);
// Never seen in the wild, just being complete.
return new SearchBookContentsResult(getString(R.string.msg_sbc_no_page_returned), "", "", false);
}
if (pageNumber == null || pageNumber.isEmpty()) {
// This can happen for text on the jacket, and possibly other reasons.
pageNumber = "";
} else {
pageNumber = getString(R.string.msg_sbc_page) + ' ' + pageNumber;
}
boolean valid = snippet != null && !snippet.isEmpty();
if (valid) {
// Remove all HTML tags and encoded characters.
snippet = TAG_PATTERN.matcher(snippet).replaceAll("");
snippet = LT_ENTITY_PATTERN.matcher(snippet).replaceAll("<");
snippet = GT_ENTITY_PATTERN.matcher(snippet).replaceAll(">");
snippet = QUOTE_ENTITY_PATTERN.matcher(snippet).replaceAll("'");
snippet = QUOT_ENTITY_PATTERN.matcher(snippet).replaceAll("\"");
} else {
snippet = '(' + getString(R.string.msg_sbc_snippet_unavailable) + ')';
}
return new SearchBookContentsResult(pageId, pageNumber, snippet, valid);
} | 3.68 |
querydsl_MapPath_getKeyType | /**
* Get the key type
*
* @return key type
*/
public Class<K> getKeyType() {
return keyType;
} | 3.68 |
hbase_ByteBufferUtils_copyFromBufferToArray | /**
* Copies specified number of bytes from given offset of 'in' ByteBuffer to the array. This
* doesn't affect the position of buffer.
* @param out output array to copy input bytebuffer to
* @param in input bytebuffer to copy from
* @param sourceOffset offset of source bytebuffer
* @param destinationOffset offset of destination array
* @param length the number of bytes to copy
*/
public static void copyFromBufferToArray(byte[] out, ByteBuffer in, int sourceOffset,
int destinationOffset, int length) {
if (in.hasArray()) {
System.arraycopy(in.array(), sourceOffset + in.arrayOffset(), out, destinationOffset, length);
} else if (UNSAFE_AVAIL) {
UnsafeAccess.copy(in, sourceOffset, out, destinationOffset, length);
} else {
ByteBuffer inDup = in.duplicate();
inDup.position(sourceOffset);
inDup.get(out, destinationOffset, length);
}
} | 3.68 |
dubbo_RpcStatus_getActive | /**
* get active.
*
* @return active
*/
public int getActive() {
return active.get();
} | 3.68 |
hbase_StoreFileInfo_setRegionCoprocessorHost | /**
* Sets the region coprocessor env.
*/
public void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) {
this.coprocessorHost = coprocessorHost;
} | 3.68 |
flink_KeyMap_traverseMaps | /**
 * Performs a traversal of the logical multi-map that results from the union of the given
 * maps. This method does not actually build a union of the maps, but traverses the hash maps
 * together.
 *
 * @param maps The array of maps whose union should be traversed.
* @param visitor The visitor that is called for each key and all values.
* @param touchedTag A tag that is used to mark elements that have been touched in this specific
* traversal. Each successive traversal should supply a larger value for this tag than the
* previous one.
* @param <K> The type of the map's key.
* @param <V> The type of the map's value.
*/
public static <K, V> void traverseMaps(
final KeyMap<K, V>[] maps,
final TraversalEvaluator<K, V> visitor,
final long touchedTag)
throws Exception {
// we need to work on the maps in descending size
Arrays.sort(maps, CapacityDescendingComparator.INSTANCE);
final int[] shifts = new int[maps.length];
final int[] lowBitsMask = new int[maps.length];
final int numSlots = maps[0].table.length;
final int numTables = maps.length;
// figure out how much each hash table collapses the entries
for (int i = 0; i < numTables; i++) {
shifts[i] = maps[0].log2size - maps[i].log2size;
lowBitsMask[i] = (1 << shifts[i]) - 1;
}
// go over all slots (based on the largest hash table)
for (int pos = 0; pos < numSlots; pos++) {
// for each slot, go over all tables, until the table does not have that slot any more
// for tables where multiple slots collapse into one, we visit that one when we process the
// latest of all slots that collapse to that one
int mask;
for (int rootTable = 0;
rootTable < numTables && ((mask = lowBitsMask[rootTable]) & pos) == mask;
rootTable++) {
// use that table to gather keys and start collecting keys from the following tables
// go over all entries of that slot in the table
Entry<K, V> entry = maps[rootTable].table[pos >> shifts[rootTable]];
while (entry != null) {
// take only entries that have not been collected as part of other tables
if (entry.touchedTag < touchedTag) {
entry.touchedTag = touchedTag;
final K key = entry.key;
final int hashCode = entry.hashCode;
visitor.startNewKey(key);
visitor.nextValue(entry.value);
addEntriesFromChain(entry.next, visitor, key, touchedTag, hashCode);
// go over the other hash tables and collect their entries for the key
for (int followupTable = rootTable + 1;
followupTable < numTables;
followupTable++) {
Entry<K, V> followupEntry =
maps[followupTable].table[pos >> shifts[followupTable]];
if (followupEntry != null) {
addEntriesFromChain(
followupEntry, visitor, key, touchedTag, hashCode);
}
}
visitor.keyDone();
}
entry = entry.next;
}
}
}
} | 3.68 |
pulsar_FunctionRuntimeManager_restartFunctionUsingPulsarAdmin | /**
* Restart the entire function or restart a single instance of the function.
*/
@VisibleForTesting
void restartFunctionUsingPulsarAdmin(Assignment assignment, String tenant, String namespace,
String functionName, boolean restartEntireFunction)
throws PulsarAdminException {
ComponentType componentType = assignment.getInstance().getFunctionMetaData()
.getFunctionDetails().getComponentType();
if (restartEntireFunction) {
if (ComponentType.SOURCE == componentType) {
this.functionAdmin.sources().restartSource(tenant, namespace, functionName);
} else if (ComponentType.SINK == componentType) {
this.functionAdmin.sinks().restartSink(tenant, namespace, functionName);
} else {
this.functionAdmin.functions().restartFunction(tenant, namespace, functionName);
}
} else {
// only restart single instance
if (ComponentType.SOURCE == componentType) {
this.functionAdmin.sources().restartSource(tenant, namespace, functionName,
assignment.getInstance().getInstanceId());
} else if (ComponentType.SINK == componentType) {
this.functionAdmin.sinks().restartSink(tenant, namespace, functionName,
assignment.getInstance().getInstanceId());
} else {
this.functionAdmin.functions().restartFunction(tenant, namespace, functionName,
assignment.getInstance().getInstanceId());
}
}
} | 3.68 |
druid_DataSourceSelectorFactory_getSelector | /**
* Get a new instance of the given selector name.
*
 * @return null if the given name does not represent a DataSourceSelector
*/
public static DataSourceSelector getSelector(String name, HighAvailableDataSource highAvailableDataSource) {
for (DataSourceSelectorEnum e : DataSourceSelectorEnum.values()) {
if (e.getName().equalsIgnoreCase(name)) {
return e.newInstance(highAvailableDataSource);
}
}
return null;
} | 3.68 |
hbase_VisibilityLabelsCache_getLabelOrdinal | /**
* @param label Not null label string
 * @return The ordinal for the label. The ordinal starts from 1. Returns 0 when passed a
 * non-existing label.
*/
@Override
public int getLabelOrdinal(String label) {
Integer ordinal = null;
this.lock.readLock().lock();
try {
ordinal = labels.get(label);
} finally {
this.lock.readLock().unlock();
}
if (ordinal != null) {
return ordinal.intValue();
}
// 0 denotes not available
return VisibilityConstants.NON_EXIST_LABEL_ORDINAL;
} | 3.68 |
hadoop_SnappyCodec_getConf | /**
* Return the configuration used by this object.
*
 * @return the configuration object used by this object.
*/
@Override
public Configuration getConf() {
return conf;
} | 3.68 |
hbase_CommonFSUtils_create | /**
* Create the specified file on the filesystem. By default, this will:
* <ol>
* <li>apply the umask in the configuration (if it is enabled)</li>
* <li>use the fs configured buffer size (or 4096 if not set)</li>
* <li>use the default replication</li>
* <li>use the default block size</li>
* <li>not track progress</li>
* </ol>
* @param fs {@link FileSystem} on which to write the file
* @param path {@link Path} to the file to write
 * @param perm initial permissions
* @param overwrite Whether or not the created file should be overwritten.
* @return output stream to the created file
* @throws IOException if the file cannot be created
*/
public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
boolean overwrite) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Creating file={} with permission={}, overwrite={}", path, perm, overwrite);
}
return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
} | 3.68 |
hbase_MasterObserver_postEnableReplicationPeer | /**
 * Called after enabling a replication peer.
* @param peerId a short name that identifies the peer
*/
default void postEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
} | 3.68 |
flink_AllocatedSlot_getSlotId | /** Gets the Slot's unique ID defined by its TaskManager. */
public SlotID getSlotId() {
return new SlotID(getTaskManagerId(), physicalSlotNumber);
} | 3.68 |
flink_SharedObjects_add | /**
* Adds a new object to this {@code SharedObjects}. Although not necessary, it is recommended to
* only access the object through the returned {@link SharedReference}.
*/
public <T> SharedReference<T> add(T object) {
SharedReference<T> tag = new DefaultTag<>(id, objects.size());
objects.put(tag, object);
return tag;
} | 3.68 |
hbase_ZKUtil_createNodeIfNotExistsNoWatch | /**
 * Creates the specified znode with the specified data but does not watch it. Returns the znode
 * of the newly created node. If there is another problem, a KeeperException will be thrown.
* @param zkw zk reference
* @param znode path of node
* @param data data of node
* @param createMode specifying whether the node to be created is ephemeral and/or sequential
 * @return the name of the newly created znode, or null
* @throws KeeperException if unexpected zookeeper exception
*/
public static String createNodeIfNotExistsNoWatch(ZKWatcher zkw, String znode, byte[] data,
CreateMode createMode) throws KeeperException {
try {
return zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), createMode);
} catch (KeeperException.NodeExistsException nee) {
return znode;
} catch (InterruptedException e) {
zkw.interruptedException(e);
return null;
}
} | 3.68 |
hbase_RestoreSnapshotHelper_hasRegionsToRemove | /** Returns true if there're regions to remove */
public boolean hasRegionsToRemove() {
return this.regionsToRemove != null && this.regionsToRemove.size() > 0;
} | 3.68 |
hbase_QuotaSettingsFactory_throttleUser | /**
* Throttle the specified user on the specified namespace.
* @param userName the user to throttle
* @param namespace the namespace to throttle
* @param type the type of throttling
* @param limit the allowed number of request/data per timeUnit
* @param timeUnit the limit time unit
* @param scope the scope of throttling
* @return the quota settings
*/
public static QuotaSettings throttleUser(final String userName, final String namespace,
final ThrottleType type, final long limit, final TimeUnit timeUnit, QuotaScope scope) {
return throttle(userName, null, namespace, null, type, limit, timeUnit, scope);
} | 3.68 |
flink_FutureUtils_retryOperation | /**
* Helper method which retries the provided operation in case of a failure.
*
* @param resultFuture to complete
* @param operation to retry
* @param retries until giving up
* @param retryPredicate Predicate to test whether an exception is retryable
* @param executor to run the futures
* @param <T> type of the future's result
*/
private static <T> void retryOperation(
final CompletableFuture<T> resultFuture,
final Supplier<CompletableFuture<T>> operation,
final int retries,
final Predicate<Throwable> retryPredicate,
final Executor executor) {
if (!resultFuture.isDone()) {
final CompletableFuture<T> operationFuture = operation.get();
operationFuture.whenCompleteAsync(
(t, throwable) -> {
if (throwable != null) {
if (throwable instanceof CancellationException) {
resultFuture.completeExceptionally(
new RetryException(
"Operation future was cancelled.", throwable));
} else {
throwable = ExceptionUtils.stripExecutionException(throwable);
if (!retryPredicate.test(throwable)) {
resultFuture.completeExceptionally(
new RetryException(
"Stopped retrying the operation because the error is not "
+ "retryable.",
throwable));
} else {
if (retries > 0) {
retryOperation(
resultFuture,
operation,
retries - 1,
retryPredicate,
executor);
} else {
resultFuture.completeExceptionally(
new RetryException(
"Could not complete the operation. Number of retries "
+ "has been exhausted.",
throwable));
}
}
}
} else {
resultFuture.complete(t);
}
},
executor);
resultFuture.whenComplete((t, throwable) -> operationFuture.cancel(false));
}
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_stateCancelled | /**
* Called when the state is cancelled.
* @param state the state that was cancelled.
*/
void stateCancelled(FlowState state) { } | 3.68 |
hbase_HBaseTestingUtility_assignRegion | /**
 * Uses the assignment manager directly to assign the region, and waits until the specified
 * region has completed assignment.
* @return true if the region is assigned false otherwise.
*/
public boolean assignRegion(final RegionInfo regionInfo)
throws IOException, InterruptedException {
final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
am.assign(regionInfo);
return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
} | 3.68 |
framework_ComboBoxElement_getValue | /**
* Return value of the combo box element.
*
* @return value of the combo box element
*/
public String getValue() {
return getInputField().getAttribute("value");
} | 3.68 |
hudi_SqlQueryBuilder_select | /**
* Creates a SELECT query.
*
* @param columns The column names to select.
* @return The new {@link SqlQueryBuilder} instance.
*/
public static SqlQueryBuilder select(String... columns) {
if (columns == null || columns.length == 0) {
throw new IllegalArgumentException("No columns provided with SELECT statement. Please mention column names or '*' to select all columns.");
}
StringBuilder sqlBuilder = new StringBuilder();
sqlBuilder.append("select ");
sqlBuilder.append(String.join(", ", columns));
return new SqlQueryBuilder(sqlBuilder);
} | 3.68 |
hadoop_NamenodeStatusReport_getNumDeadDatanodes | /**
* Get the number of dead nodes.
*
* @return The number of dead nodes.
*/
public int getNumDeadDatanodes() {
return this.deadDatanodes;
} | 3.68 |
hmily_ResourceIdUtils_getResourceId | /**
* Gets resource id.
*
* @param jdbcUrl the jdbc url
* @return the resource id
*/
public String getResourceId(final String jdbcUrl) {
return resourceIds.computeIfAbsent(jdbcUrl, u -> u.contains("?") ? u.substring(0, u.indexOf('?')) : u);
} | 3.68 |
hadoop_EditLogOutputStream_setCurrentLogVersion | /**
* @param logVersion The version of the current edit log
*/
public void setCurrentLogVersion(int logVersion) {
this.currentLogVersion = logVersion;
} | 3.68 |
flink_ZooKeeperUtils_getZooKeeperEnsemble | /**
* Returns the configured ZooKeeper quorum (and removes whitespace, because ZooKeeper does not
* tolerate it).
*/
public static String getZooKeeperEnsemble(Configuration flinkConf)
throws IllegalConfigurationException {
String zkQuorum = flinkConf.getValue(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM);
if (zkQuorum == null || StringUtils.isBlank(zkQuorum)) {
throw new IllegalConfigurationException("No ZooKeeper quorum specified in config.");
}
// Remove all whitespace
zkQuorum = zkQuorum.replaceAll("\\s+", "");
return zkQuorum;
} | 3.68 |
pulsar_AuthorizationService_revokePermissionAsync | /**
* Revoke authorization-action permission on a topic to the given client.
*
* @param topicName
* @param role
*/
public CompletableFuture<Void> revokePermissionAsync(TopicName topicName, String role) {
return provider.revokePermissionAsync(topicName, role);
} | 3.68 |
pulsar_ManagedLedgerInterceptor_processPayloadBeforeLedgerWrite | /**
* Intercept before payload gets written to ledger.
* @param ledgerWriteOp OpAddEntry used to trigger ledger write.
* @param dataToBeStoredInLedger data to be stored in ledger
* @return handle to the processor
*/
default PayloadProcessorHandle processPayloadBeforeLedgerWrite(OpAddEntry ledgerWriteOp,
ByteBuf dataToBeStoredInLedger){
return null;
} | 3.68 |