name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
AreaShop_CommandAreaShop_confirm | /**
* Confirm a command.
 * @param sender The sender to confirm the command for, or to send the confirmation message to
 * @param args Command arguments
* @param message Message to send when confirmation is required
* @return true if confirmed, false if confirmation is required
*/
public boolean confirm(CommandSender sender, String[] args, Message message) {
String command = "/" + getCommandStart() + " " + StringUtils.join(args, " ", 1, args.length);
long now = System.currentTimeMillis();
CommandTime last = lastUsed.get(sender.getName());
if(last != null && last.command.equalsIgnoreCase(command) && last.time > (now - 1000 * 60)) {
return true;
}
message.prefix().append(Message.fromKey("confirm-yes").replacements(command)).send(sender);
lastUsed.put(sender.getName(), new CommandTime(command, now));
return false;
} | 3.68 |
starts_Attribute_isCodeAttribute | /**
* Returns <code>true</code> if this type of attribute is a code attribute.
*
* @return <code>true</code> if this type of attribute is a code attribute.
*/
public boolean isCodeAttribute() {
return false;
} | 3.68 |
hudi_HoodieBackedTableMetadata_isFullScanAllowedForPartition | // NOTE: We're allowing eager full-scan of the log-files only for "files" partition.
// Other partitions (like "column_stats", "bloom_filters") will have to be fetched
// through point-lookups
private boolean isFullScanAllowedForPartition(String partitionName) {
switch (partitionName) {
case PARTITION_NAME_FILES:
return DEFAULT_METADATA_ENABLE_FULL_SCAN_LOG_FILES;
case PARTITION_NAME_COLUMN_STATS:
case PARTITION_NAME_BLOOM_FILTERS:
default:
return false;
}
} | 3.68 |
hadoop_FullCredentialsTokenBinding_deployUnbonded | /**
* Serve up the credentials retrieved from configuration/environment in
* {@link #loadAWSCredentials()}.
* @return a credential provider for the unbonded instance.
* @throws IOException failure to load
*/
@Override
public AWSCredentialProviderList deployUnbonded() throws IOException {
requireServiceStarted();
loadAWSCredentials();
return new AWSCredentialProviderList(
"Full Credentials Token Binding",
new MarshalledCredentialProvider(
FULL_TOKEN,
getStoreContext().getFsURI(),
getConfig(),
awsCredentials,
MarshalledCredentials.CredentialTypeRequired.AnyNonEmpty));
} | 3.68 |
flink_LocalFileSystem_getSharedInstance | /**
* Gets the shared instance of this file system.
*
* @return The shared instance of this file system.
*/
public static LocalFileSystem getSharedInstance() {
return INSTANCE;
} | 3.68 |
hbase_BalancerClusterState_updateForLocation | /**
* Common method for per host and per Location region index updates when a region is moved.
* @param serverIndexToLocation serverIndexToHostIndex or serverIndexToLocationIndex
* @param regionsPerLocation regionsPerHost or regionsPerLocation
* @param colocatedReplicaCountsPerLocation colocatedReplicaCountsPerHost or
* colocatedReplicaCountsPerRack
*/
private void updateForLocation(int[] serverIndexToLocation, int[][] regionsPerLocation,
Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int oldServer, int newServer,
int primary, int region) {
int oldLocation = oldServer >= 0 ? serverIndexToLocation[oldServer] : -1;
int newLocation = serverIndexToLocation[newServer];
if (newLocation != oldLocation) {
regionsPerLocation[newLocation] = addRegion(regionsPerLocation[newLocation], region);
colocatedReplicaCountsPerLocation[newLocation].getAndIncrement(primary);
if (oldLocation >= 0) {
regionsPerLocation[oldLocation] = removeRegion(regionsPerLocation[oldLocation], region);
colocatedReplicaCountsPerLocation[oldLocation].getAndDecrement(primary);
}
}
} | 3.68 |
flink_ModuleManager_listFunctions | /**
* Get names of all functions from used modules. It excludes hidden functions.
*
* @return a set of function names of used modules
*/
public Set<String> listFunctions() {
return usedModules.stream()
.map(name -> loadedModules.get(name).listFunctions(false))
.flatMap(Collection::stream)
.collect(Collectors.toSet());
} | 3.68 |
flink_WorksetIterationPlanNode_mergeBranchPlanMaps | /**
 * Merging can only take place after the solutionSetDelta and nextWorkset PlanNodes have been
 * set, because they can also contain some of the branching nodes.
*/
@Override
protected void mergeBranchPlanMaps(
Map<OptimizerNode, PlanNode> branchPlan1, Map<OptimizerNode, PlanNode> branchPlan2) {} | 3.68 |
shardingsphere-elasticjob_TransactionOperation_opDelete | /**
 * Create a delete operation.
 *
 * @param key key to delete
 * @return TransactionOperation of type DELETE
*/
public static TransactionOperation opDelete(final String key) {
return new TransactionOperation(Type.DELETE, key, null);
} | 3.68 |
flink_ManagedTableFactory_discoverManagedTableFactory | /** Discovers the unique implementation of {@link ManagedTableFactory} without identifier. */
static ManagedTableFactory discoverManagedTableFactory(ClassLoader classLoader) {
return FactoryUtil.discoverManagedTableFactory(classLoader, ManagedTableFactory.class);
} | 3.68 |
hadoop_ReadBufferManager_getNextBlockToRead | /**
* ReadBufferWorker thread calls this to get the next buffer that it should work on.
*
* @return {@link ReadBuffer}
* @throws InterruptedException if thread is interrupted
*/
ReadBuffer getNextBlockToRead() throws InterruptedException {
ReadBuffer buffer = null;
synchronized (this) {
//buffer = readAheadQueue.take(); // blocking method
while (readAheadQueue.size() == 0) {
wait();
}
buffer = readAheadQueue.remove();
notifyAll();
if (buffer == null) {
return null; // should never happen
}
buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS);
inProgressList.add(buffer);
}
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("ReadBufferWorker picked file {} for offset {}",
buffer.getStream().getPath(), buffer.getOffset());
}
return buffer;
} | 3.68 |
hbase_StoreFileInfo_getReference | /**
 * @return the Reference object associated with this StoreFileInfo. null if the StoreFile is not a
* reference.
*/
public Reference getReference() {
return this.reference;
} | 3.68 |
morf_Function_greatest | /**
* Helper method to create an instance of the "greatest" SQL function.
*
* @param fields the fields to evaluate.
* @return an instance of the "greatest" function.
*/
public static Function greatest(Iterable<? extends AliasedField> fields) {
return new Function(FunctionType.GREATEST, fields);
} | 3.68 |
hadoop_HdfsFileStatus_getLocalName | /**
* Get the string representation of the local name.
* @return the local name in string
*/
default String getLocalName() {
return DFSUtilClient.bytes2String(getLocalNameInBytes());
} | 3.68 |
flink_SlidingWindowAssigner_of | /**
* Creates a new {@code SlidingEventTimeWindows} {@link
* org.apache.flink.streaming.api.windowing.assigners.WindowAssigner} that assigns elements to
* sliding time windows based on the element timestamp.
*
* @param size The size of the generated windows.
* @param slide The slide interval of the generated windows.
* @return The time policy.
*/
public static SlidingWindowAssigner of(Duration size, Duration slide) {
return new SlidingWindowAssigner(size.toMillis(), slide.toMillis(), 0, true);
} | 3.68 |
hbase_HbckRegionInfo_loadHdfsRegioninfo | /**
* Read the .regioninfo file from the file system. If there is no .regioninfo, add it to the
* orphan hdfs region list.
*/
public void loadHdfsRegioninfo(Configuration conf) throws IOException {
Path regionDir = getHdfsRegionDir();
if (regionDir == null) {
if (getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
// Log warning only for default/ primary replica with no region dir
LOG.warn("No HDFS region dir found: " + this + " meta=" + metaEntry);
}
return;
}
if (hdfsEntry.hri != null) {
// already loaded data
return;
}
FileSystem fs = FileSystem.get(conf);
RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
LOG.debug("RegionInfo read: " + hri.toString());
hdfsEntry.hri = hri;
} | 3.68 |
flink_ExceptionUtils_rethrow | /**
* Throws the given {@code Throwable} in scenarios where the signatures do not allow you to
* throw an arbitrary Throwable. Errors and RuntimeExceptions are thrown directly, other
* exceptions are packed into a parent RuntimeException.
*
* @param t The throwable to be thrown.
* @param parentMessage The message for the parent RuntimeException, if one is needed.
*/
public static void rethrow(Throwable t, String parentMessage) {
if (t instanceof Error) {
throw (Error) t;
} else if (t instanceof RuntimeException) {
throw (RuntimeException) t;
} else {
throw new RuntimeException(parentMessage, t);
}
} | 3.68 |
flink_FileBasedOneShotLatch_await | /**
* Waits until the latch file is created.
*
* <p>When this method returns, subsequent invocations will not block even after the latch file
* is deleted. Note that this method may not return if the latch file is deleted before this
* method returns.
*
* @throws InterruptedException if interrupted while waiting
*/
public void await() throws InterruptedException {
if (isReleasedOrReleasable()) {
return;
}
awaitLatchFile(watchService);
} | 3.68 |
hadoop_BytesWritable_getBytes | /**
* Get the data backing the BytesWritable. Please use {@link #copyBytes()}
* if you need the returned array to be precisely the length of the data.
* @return The data is only valid between 0 and getLength() - 1.
*/
@Override
public byte[] getBytes() {
return bytes;
} | 3.68 |
pulsar_ClientConfiguration_getStatsIntervalSeconds | /**
* Stats will be activated with positive statsIntervalSeconds.
*
* @return the interval between each stat info <i>(default: 60 seconds)</i>
*/
public long getStatsIntervalSeconds() {
return confData.getStatsIntervalSeconds();
} | 3.68 |
flink_Description_linebreak | /** Creates a line break in the description. */
public DescriptionBuilder linebreak() {
blocks.add(LineBreakElement.linebreak());
return this;
} | 3.68 |
hbase_MetaTableAccessor_getMetaHTable | /**
* Callers should call close on the returned {@link Table} instance.
* @param connection connection we're using to access Meta
* @return An {@link Table} for <code>hbase:meta</code>
* @throws NullPointerException if {@code connection} is {@code null}
*/
public static Table getMetaHTable(final Connection connection) throws IOException {
// We used to pass whole CatalogTracker in here, now we just pass in Connection
Objects.requireNonNull(connection, "Connection cannot be null");
if (connection.isClosed()) {
throw new IOException("connection is closed");
}
return connection.getTable(TableName.META_TABLE_NAME);
} | 3.68 |
rocketmq-connect_DatabaseDialectLoader_getDatabaseDialect | /**
 * Get the database dialect matching the JDBC connection URL in the given configuration.
 *
 * @param config the connector configuration containing the JDBC connection URL
 * @return the matching database dialect
*/
public static DatabaseDialect getDatabaseDialect(AbstractConfig config) {
String url = config.getConnectionDbUrl();
assert url != null;
JdbcUrlInfo jdbcUrlInfo = extractJdbcUrlInfo(url);
final List<DatabaseDialectFactory> matchingFactories =
DATABASE_DIALECT_FACTORY.stream().filter(f -> f.subProtocols().contains(jdbcUrlInfo.subprotocol())).collect(Collectors.toList());
if (matchingFactories.isEmpty()) {
throw new ConnectException(String.format("Cannot get database dialect by url [%s]", url));
}
return matchingFactories.get(0).create(config);
} | 3.68 |
flink_SinkUtils_tryAcquire | /**
* Acquire permits on the given semaphore within a given allowed timeout and deal with errors.
*
 * @param permits the number of permits to acquire.
* @param maxConcurrentRequests the maximum number of permits the semaphore was initialized
* with.
* @param maxConcurrentRequestsTimeout the timeout to acquire the permits.
* @param semaphore the semaphore to acquire permits to.
* @throws InterruptedException if the current thread was interrupted.
* @throws TimeoutException if the waiting time elapsed before all permits were acquired.
*/
public static void tryAcquire(
int permits,
int maxConcurrentRequests,
Duration maxConcurrentRequestsTimeout,
Semaphore semaphore)
throws InterruptedException, TimeoutException {
if (!semaphore.tryAcquire(
permits, maxConcurrentRequestsTimeout.toMillis(), TimeUnit.MILLISECONDS)) {
throw new TimeoutException(
String.format(
"Failed to acquire %d out of %d permits to send value in %s.",
permits, maxConcurrentRequests, maxConcurrentRequestsTimeout));
}
} | 3.68 |
flink_KeyedStream_sum | /**
* Applies an aggregation that gives the current sum of the data stream at the given field by
* the given key. An independent aggregate is kept per key.
*
* @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public)
* field on which to perform the aggregation. Additionally, a dot can be used to drill down
* into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in
* case of a basic type (which is considered as having only one field).
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> sum(String field) {
return aggregate(new SumAggregator<>(field, getType(), getExecutionConfig()));
} | 3.68 |
pulsar_AuthorizationProvider_allowTopicPolicyOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
default Boolean allowTopicPolicyOperation(TopicName topicName,
String role,
PolicyName policy,
PolicyOperation operation,
AuthenticationDataSource authData) {
try {
return allowTopicPolicyOperationAsync(topicName, role, policy, operation, authData).get();
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
framework_VComboBox_updateSuggestionPopupMinWidth | /**
* Update minimum width for combo box textarea based on input prompt and
* suggestions.
* <p>
* For internal use only. May be removed or replaced in the future.
*/
public void updateSuggestionPopupMinWidth() {
debug("VComboBox: updateSuggestionPopupMinWidth()");
// used only to calculate minimum width
String captions = WidgetUtil.escapeHTML(inputPrompt);
for (ComboBoxSuggestion suggestion : currentSuggestions) {
// Collect captions so we can calculate minimum width for
// textarea
if (!captions.isEmpty()) {
captions += "|";
}
captions += WidgetUtil
.escapeHTML(suggestion.getReplacementString());
}
// Calculate minimum textarea width
suggestionPopupMinWidth = minWidth(captions);
} | 3.68 |
graphhopper_GraphHopperWeb_setOptimize | /**
 * @param optimize "false" (the default) if the order of the locations should be left
 * unchanged, or "true" if the order of the locations should be optimized
 * according to the overall best route and returned that way, i.e. the
 * traveling salesman problem is solved under the hood.
* Note that in this case the request takes longer and costs more credits.
* For more details see:
* https://github.com/graphhopper/directions-api/blob/master/FAQ.md#what-is-one-credit
*/
public GraphHopperWeb setOptimize(String optimize) {
this.optimize = optimize;
return this;
} | 3.68 |
flink_DefaultCheckpointPlanCalculator_calculateAfterTasksFinished | /**
 * Calculates the checkpoint plan after some tasks have finished. We iterate over the job graph
 * to find the tasks that are still running but have no running predecessor tasks.
*
* @return The plan of this checkpoint.
*/
private CheckpointPlan calculateAfterTasksFinished() {
// First collect the task running status into BitSet so that we could
// do JobVertex level judgement for some vertices and avoid time-consuming
// access to volatile isFinished flag of Execution.
Map<JobVertexID, BitSet> taskRunningStatusByVertex = collectTaskRunningStatus();
List<Execution> tasksToTrigger = new ArrayList<>();
List<Execution> tasksToWaitFor = new ArrayList<>();
List<ExecutionVertex> tasksToCommitTo = new ArrayList<>();
List<Execution> finishedTasks = new ArrayList<>();
List<ExecutionJobVertex> fullyFinishedJobVertex = new ArrayList<>();
for (ExecutionJobVertex jobVertex : jobVerticesInTopologyOrder) {
BitSet taskRunningStatus = taskRunningStatusByVertex.get(jobVertex.getJobVertexId());
if (taskRunningStatus.cardinality() == 0) {
fullyFinishedJobVertex.add(jobVertex);
for (ExecutionVertex task : jobVertex.getTaskVertices()) {
finishedTasks.add(task.getCurrentExecutionAttempt());
}
continue;
}
List<JobEdge> prevJobEdges = jobVertex.getJobVertex().getInputs();
// this is an optimization: we determine at the JobVertex level if some tasks can even
// be eligible for being in the "triggerTo" set.
boolean someTasksMustBeTriggered =
someTasksMustBeTriggered(taskRunningStatusByVertex, prevJobEdges);
for (int i = 0; i < jobVertex.getTaskVertices().length; ++i) {
ExecutionVertex task = jobVertex.getTaskVertices()[i];
if (taskRunningStatus.get(task.getParallelSubtaskIndex())) {
tasksToWaitFor.add(task.getCurrentExecutionAttempt());
tasksToCommitTo.add(task);
if (someTasksMustBeTriggered) {
boolean hasRunningPrecedentTasks =
hasRunningPrecedentTasks(
task, prevJobEdges, taskRunningStatusByVertex);
if (!hasRunningPrecedentTasks) {
tasksToTrigger.add(task.getCurrentExecutionAttempt());
}
}
} else {
finishedTasks.add(task.getCurrentExecutionAttempt());
}
}
}
return new DefaultCheckpointPlan(
Collections.unmodifiableList(tasksToTrigger),
Collections.unmodifiableList(tasksToWaitFor),
Collections.unmodifiableList(tasksToCommitTo),
Collections.unmodifiableList(finishedTasks),
Collections.unmodifiableList(fullyFinishedJobVertex),
allowCheckpointsAfterTasksFinished);
} | 3.68 |
hadoop_FederationUtil_newActiveNamenodeResolver | /**
* Creates an instance of an ActiveNamenodeResolver from the configuration.
*
* @param conf Configuration that defines the namenode resolver class.
* @param stateStore State store passed to class constructor.
* @return New active namenode resolver.
*/
public static ActiveNamenodeResolver newActiveNamenodeResolver(
Configuration conf, StateStoreService stateStore) {
Class<? extends ActiveNamenodeResolver> clazz = conf.getClass(
RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS,
RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT,
ActiveNamenodeResolver.class);
return newInstance(conf, stateStore, StateStoreService.class, clazz);
} | 3.68 |
pulsar_OneStageAuthenticationState_isComplete | /**
* @deprecated rely on result from {@link #authenticateAsync(AuthData)}. For more information, see the Javadoc
* for {@link AuthenticationState#isComplete()}.
*/
@Deprecated(since = "3.0.0")
@Override
public boolean isComplete() {
return authRole != null;
} | 3.68 |
hadoop_AbstractDTService_getOwner | /**
* Get the owner of this Service.
* @return owner; non-null after binding to an FS.
*/
public UserGroupInformation getOwner() {
return owner;
} | 3.68 |
hadoop_QuotaUsage_getSpaceConsumed | /**
* Return (disk) space consumed.
*
* @return space consumed.
*/
public long getSpaceConsumed() {
return spaceConsumed;
} | 3.68 |
hbase_KeyValue_equals | /**
 * Needed for doing 'contains' on a List. Only compares the key portion, not the value.
*/
@Override
public boolean equals(Object other) {
if (!(other instanceof Cell)) {
return false;
}
return CellUtil.equals(this, (Cell) other);
} | 3.68 |
framework_VScrollTable_getColKey | /**
* Returns the column key of the column.
*
* @return The column key
*/
public String getColKey() {
return cid;
} | 3.68 |
framework_ErrorIndicator_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 10046;
} | 3.68 |
hbase_RequestConverter_buildNoDataRegionActions | /**
* Create a protocol buffer multirequest with NO data for a list of actions (data is carried
* otherwise than via protobuf). This means it just notes attributes, whether to write the WAL,
 * etc., and the presence in protobuf serves as a placeholder for the data which is coming along
* otherwise. Note that Get is different. It does not contain 'data' and is always carried by
* protobuf. We return references to the data by adding them to the passed in <code>data</code>
* param.
* <p>
* Propagates Actions original index.
* <p>
* The passed in multiRequestBuilder will be populated with region actions.
* @param regionName The region name of the actions.
* @param actions The actions that are grouped by the same region name.
* @param cells Place to stuff references to actual data.
* @param multiRequestBuilder The multiRequestBuilder to be populated with region actions.
* @param regionActionBuilder regionActionBuilder to be used to build region action.
* @param actionBuilder actionBuilder to be used to build action.
* @param mutationBuilder mutationBuilder to be used to build mutation.
* @param nonceGroup nonceGroup to be applied.
* @param indexMap Map of created RegionAction to the original index for a
* RowMutations/CheckAndMutate within the original list of actions
*/
public static void buildNoDataRegionActions(final byte[] regionName,
final Iterable<Action> actions, final List<CellScannable> cells,
final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder,
final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder,
long nonceGroup, final Map<Integer, Integer> indexMap) throws IOException {
regionActionBuilder.clear();
RegionAction.Builder builder =
getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
boolean hasNonce = false;
List<Action> rowMutationsList = new ArrayList<>();
List<Action> checkAndMutates = new ArrayList<>();
for (Action action : actions) {
Row row = action.getAction();
actionBuilder.clear();
actionBuilder.setIndex(action.getOriginalIndex());
mutationBuilder.clear();
if (row instanceof Get) {
Get g = (Get) row;
builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
} else if (row instanceof Put) {
buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder);
} else if (row instanceof Delete) {
buildNoDataRegionAction((Delete) row, cells, builder, actionBuilder, mutationBuilder);
} else if (row instanceof Append) {
buildNoDataRegionAction((Append) row, cells, action.getNonce(), builder, actionBuilder,
mutationBuilder);
hasNonce = true;
} else if (row instanceof Increment) {
buildNoDataRegionAction((Increment) row, cells, action.getNonce(), builder, actionBuilder,
mutationBuilder);
hasNonce = true;
} else if (row instanceof RegionCoprocessorServiceExec) {
RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
// DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
org.apache.hbase.thirdparty.com.google.protobuf.ByteString value =
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations
.unsafeWrap(exec.getRequest().toByteArray());
if (cpBuilder == null) {
cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
} else {
cpBuilder.clear();
}
builder.addAction(actionBuilder
.setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
.setServiceName(exec.getMethod().getService().getFullName())
.setMethodName(exec.getMethod().getName()).setRequest(value)));
} else if (row instanceof RowMutations) {
rowMutationsList.add(action);
} else if (row instanceof CheckAndMutate) {
checkAndMutates.add(action);
} else {
throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
}
}
if (builder.getActionCount() > 0) {
multiRequestBuilder.addRegionAction(builder.build());
}
// Process RowMutations here. We can not process it in the big loop above because
// it will corrupt the sequence order maintained in cells.
// RowMutations is a set of Puts and/or Deletes all to be applied atomically
// on the one row. We do separate RegionAction for each RowMutations.
// We maintain a map to keep track of this RegionAction and the original Action index.
for (Action action : rowMutationsList) {
builder.clear();
getRegionActionBuilderWithRegion(builder, regionName);
boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) action.getAction(),
cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
if (hasIncrementOrAppend) {
hasNonce = true;
}
builder.setAtomic(true);
multiRequestBuilder.addRegionAction(builder.build());
// This rowMutations region action is at (multiRequestBuilder.getRegionActionCount() - 1)
// in the overall multiRequest.
indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
}
// Process CheckAndMutate here. Similar to RowMutations, we do separate RegionAction for each
// CheckAndMutate and maintain a map to keep track of this RegionAction and the original
// Action index.
for (Action action : checkAndMutates) {
builder.clear();
getRegionActionBuilderWithRegion(builder, regionName);
CheckAndMutate cam = (CheckAndMutate) action.getAction();
builder
.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(),
cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange()));
if (cam.getAction() instanceof Put) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction((Put) cam.getAction(), cells, builder, actionBuilder,
mutationBuilder);
} else if (cam.getAction() instanceof Delete) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction((Delete) cam.getAction(), cells, builder, actionBuilder,
mutationBuilder);
} else if (cam.getAction() instanceof Increment) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction((Increment) cam.getAction(), cells, action.getNonce(), builder,
actionBuilder, mutationBuilder);
hasNonce = true;
} else if (cam.getAction() instanceof Append) {
actionBuilder.clear();
mutationBuilder.clear();
buildNoDataRegionAction((Append) cam.getAction(), cells, action.getNonce(), builder,
actionBuilder, mutationBuilder);
hasNonce = true;
} else if (cam.getAction() instanceof RowMutations) {
boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) cam.getAction(),
cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
if (hasIncrementOrAppend) {
hasNonce = true;
}
builder.setAtomic(true);
} else {
throw new DoNotRetryIOException(
"CheckAndMutate doesn't support " + cam.getAction().getClass().getName());
}
multiRequestBuilder.addRegionAction(builder.build());
// This CheckAndMutate region action is at (multiRequestBuilder.getRegionActionCount() - 1)
// in the overall multiRequest.
indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
}
if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
multiRequestBuilder.setNonceGroup(nonceGroup);
}
} | 3.68 |
framework_VTree_onKeyPress | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.KeyPressHandler#onKeyPress(com.google
* .gwt.event.dom.client.KeyPressEvent)
*/
@Override
public void onKeyPress(KeyPressEvent event) {
NativeEvent nativeEvent = event.getNativeEvent();
int keyCode = nativeEvent.getKeyCode();
if (keyCode == 0 && nativeEvent.getCharCode() == ' ') {
// Provide a keyCode for space to be compatible with FireFox
// keypress event
keyCode = CHARCODE_SPACE;
}
if (handleKeyNavigation(keyCode,
event.isControlKeyDown() || event.isMetaKeyDown(),
event.isShiftKeyDown())) {
event.preventDefault();
event.stopPropagation();
}
} | 3.68 |
hbase_IdReadWriteLockWithObjectPool_getLock | /**
* Get the ReentrantReadWriteLock corresponding to the given id
* @param id an arbitrary number to identify the lock
*/
@Override
public ReentrantReadWriteLock getLock(T id) {
lockPool.purge();
ReentrantReadWriteLock readWriteLock = lockPool.get(id);
return readWriteLock;
} | 3.68 |
dubbo_AbstractConfigManager_isRequired | /**
 * Configuration that does not affect the main process is not required.
 *
 * @param clazz the config class to check
 * @param <T> the config type
 * @return true if the given config type is required, false otherwise
*/
protected <T extends AbstractConfig> boolean isRequired(Class<T> clazz) {
if (clazz == RegistryConfig.class
|| clazz == MetadataReportConfig.class
|| clazz == MonitorConfig.class
|| clazz == MetricsConfig.class
|| clazz == TracingConfig.class) {
return false;
}
return true;
} | 3.68 |
cron-utils_StringUtils_isNumeric | /**
* <p>
* Checks if the CharSequence contains only Unicode digits. A decimal point is
* not a Unicode digit and returns false.
* </p>
*
* <p>
* {@code null} will return {@code false}. An empty CharSequence (length()=0)
* will return {@code false}.
* </p>
*
* <p>
* Note that the method does not allow for a leading sign, either positive or
* negative. Also, if a String passes the numeric test, it may still generate a
* NumberFormatException when parsed by Integer.parseInt or Long.parseLong, e.g.
* if the value is outside the range for int or long respectively.
* </p>
*
* <pre>
* StringUtils.isNumeric(null) = false
* StringUtils.isNumeric("") = false
* StringUtils.isNumeric(" ") = false
* StringUtils.isNumeric("123") = true
* StringUtils.isNumeric("\u0967\u0968\u0969") = true
* StringUtils.isNumeric("12 3") = false
* StringUtils.isNumeric("ab2c") = false
* StringUtils.isNumeric("12-3") = false
* StringUtils.isNumeric("12.3") = false
* StringUtils.isNumeric("-123") = false
* StringUtils.isNumeric("+123") = false
* </pre>
*
* @param cs the CharSequence to check, may be null
* @return {@code true} if only contains digits, and is non-null
* @since 3.0 Changed signature from isNumeric(String) to
* isNumeric(CharSequence)
* @since 3.0 Changed "" to return false and not true
*/
public static boolean isNumeric(final CharSequence cs) {
if (isEmpty(cs)) {
return false;
}
final int sz = cs.length();
for (int i = 0; i < sz; i++) {
if (!Character.isDigit(cs.charAt(i))) {
return false;
}
}
return true;
} | 3.68 |
framework_CustomField_getContent | /**
* Returns the content (UI) of the custom component.
*
* @return Component
*/
protected Component getContent() {
if (null == root) {
root = initContent();
root.setParent(this);
}
return root;
} | 3.68 |
hadoop_FlowRunRowKey_encode | /*
* (non-Javadoc)
*
* Encodes FlowRunRowKey object into a byte array with each component/field
 * in FlowRunRowKey separated by Separator#QUALIFIERS. This leads to a flow
 * run row key of the form clusterId!userId!flowName!flowRunId. If flowRunId
 * in the passed FlowRunRowKey object is null (and the fields preceding it, i.e.
 * clusterId, userId and flowName, are not null), this returns a row key
 * prefix of the form clusterId!userName!flowName!. The flowRunId is inverted
 * while encoding as it helps maintain a descending order for flow keys in
 * the flow run table.
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common
* .KeyConverter#encode(java.lang.Object)
*/
@Override
public byte[] encode(FlowRunRowKey rowKey) {
byte[] first =
Separator.QUALIFIERS.join(Separator.encode(rowKey.getClusterId(),
Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Separator
.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
Separator.QUALIFIERS), Separator.encode(rowKey.getFlowName(),
Separator.SPACE, Separator.TAB, Separator.QUALIFIERS));
if (rowKey.getFlowRunId() == null) {
return Separator.QUALIFIERS.join(first, Separator.EMPTY_BYTES);
} else {
// Note that flowRunId is a long, so we can't encode them all at the
// same
// time.
byte[] second =
Bytes.toBytes(LongConverter.invertLong(rowKey.getFlowRunId()));
return Separator.QUALIFIERS.join(first, second);
}
} | 3.68 |
framework_VaadinSession_access | /**
* Provides exclusive access to this session from outside a request handling
* thread.
* <p>
* The given runnable is executed while holding the session lock to ensure
* exclusive access to this session. If this session is not locked, the lock
* will be acquired and the runnable is run right away. If this session is
* currently locked, the runnable will be run before that lock is released.
* </p>
* <p>
* RPC handlers for components inside this session do not need to use this
* method as the session is automatically locked by the framework during RPC
* handling.
* </p>
* <p>
* Please note that the runnable might be invoked on a different thread or
* later on the current thread, which means that custom thread locals might
* not have the expected values when the command is executed.
* {@link VaadinSession#getCurrent()} and {@link VaadinService#getCurrent()}
* are set according to this session before executing the command. Other
* standard CurrentInstance values such as
* {@link VaadinService#getCurrentRequest()} and
* {@link VaadinService#getCurrentResponse()} will not be defined.
* </p>
* <p>
* The returned future can be used to check for task completion and to
* cancel the task. To help avoiding deadlocks, {@link Future#get()} throws
* an exception if it is detected that the current thread holds the lock for
* some other session.
* </p>
*
* @see #lock()
* @see #getCurrent()
* @see #accessSynchronously(Runnable)
* @see UI#access(Runnable)
*
* @since 7.1
*
* @param runnable
* the runnable which accesses the session
* @return a future that can be used to check for task completion and to
* cancel the task
*/
public Future<Void> access(Runnable runnable) {
return getService().accessSession(this, runnable);
} | 3.68 |
framework_Range_combineWith | /**
* Combines two ranges to create a range containing all values in both
* ranges, provided there are no gaps between the ranges.
*
* @param other
* the range to combine with this range
*
* @return the combined range
*
* @throws IllegalArgumentException
* if the two ranges aren't connected
*/
public Range combineWith(Range other) throws IllegalArgumentException {
if (getStart() > other.getEnd() || other.getStart() > getEnd()) {
throw new IllegalArgumentException(
"There is a gap between " + this + " and " + other);
}
return Range.between(Math.min(getStart(), other.getStart()),
Math.max(getEnd(), other.getEnd()));
} | 3.68 |
hudi_StringUtils_split | /**
 * Splits the input string on {@code delimiter} into a list of trimmed, non-empty strings
 * (skipping any empty strings produced during splitting).
*/
public static List<String> split(@Nullable String input, String delimiter) {
if (isNullOrEmpty(input)) {
return Collections.emptyList();
}
return Stream.of(input.split(delimiter)).map(String::trim).filter(s -> !s.isEmpty()).collect(Collectors.toList());
} | 3.68 |
hbase_RSMobFileCleanerChore_archiveMobFiles | /**
* Archives the mob files.
* @param conf The current configuration.
* @param tableName The table name.
* @param family The name of the column family.
* @param storeFiles The files to be archived.
* @throws IOException exception
*/
public void archiveMobFiles(Configuration conf, TableName tableName, byte[] family,
List<Path> storeFiles) throws IOException {
if (storeFiles.size() == 0) {
// nothing to remove
LOG.debug("Skipping archiving old MOB files - no files found for table={} cf={}", tableName,
Bytes.toString(family));
return;
}
Path mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableName);
FileSystem fs = storeFiles.get(0).getFileSystem(conf);
for (Path p : storeFiles) {
LOG.debug("MOB Cleaner is archiving: {}", p);
HFileArchiver.archiveStoreFile(conf, fs, MobUtils.getMobRegionInfo(tableName), mobTableDir,
family, p);
}
} | 3.68 |
framework_DefaultFieldGroupFieldFactory_createDefaultField | /**
* Fallback when no specific field has been created. Typically returns a
* TextField.
*
* @param <T>
* The type of field to create
* @param type
* The type of data that should be edited
* @param fieldType
* The type of field to create
* @return A field capable of editing the data or null if no field could be
* created
*/
@SuppressWarnings("rawtypes")
protected <T extends Field> T createDefaultField(Class<?> type,
Class<T> fieldType) {
if (fieldType.isAssignableFrom(TextField.class)) {
return fieldType.cast(createAbstractTextField(TextField.class));
}
return null;
} | 3.68 |
framework_VFilterSelect_setNextButtonActive | /**
 * Sets whether the next page button should be visible to the user.
 *
 * @param active true to make the next page button active, false otherwise
*/
private void setNextButtonActive(boolean active) {
if (enableDebug) {
debug("VFS.SP: setNextButtonActive(" + active + ")");
}
if (active) {
DOM.sinkEvents(down, Event.ONCLICK);
down.setClassName(
VFilterSelect.this.getStylePrimaryName() + "-nextpage");
} else {
DOM.sinkEvents(down, 0);
down.setClassName(VFilterSelect.this.getStylePrimaryName()
+ "-nextpage-off");
}
} | 3.68 |
flink_FutureCompletingBlockingQueue_remainingCapacity | /**
* Checks the remaining capacity in the queue. That is the difference between the maximum
* capacity and the current number of elements in the queue.
*/
public int remainingCapacity() {
lock.lock();
try {
return capacity - queue.size();
} finally {
lock.unlock();
}
} | 3.68 |
hadoop_HdfsFileStatus_blocksize | /**
* Set the blocksize of this entity (default = 0).
* @param blocksize Target, default blocksize
* @return This Builder instance
*/
public Builder blocksize(long blocksize) {
this.blocksize = blocksize;
return this;
} | 3.68 |
flink_MurmurHashUtils_hashUnsafeBytesByWords | /**
* Hash unsafe bytes, length must be aligned to 4 bytes.
*
* @param base base unsafe object
* @param offset offset for unsafe object
* @param lengthInBytes length in bytes
* @return hash code
*/
public static int hashUnsafeBytesByWords(Object base, long offset, int lengthInBytes) {
return hashUnsafeBytesByWords(base, offset, lengthInBytes, DEFAULT_SEED);
} | 3.68 |
hbase_DoubleArrayCost_getMaxSkew | /**
 * Return the max deviation of the distribution. Compute the max as if all region servers had 0
 * and one had the sum of all costs. This must be a zero-sum cost for this to make sense.
*/
public static double getMaxSkew(double total, double numServers) {
if (numServers == 0) {
return 0;
}
double mean = total / numServers;
return Math.sqrt((total - mean) * (total - mean) + (numServers - 1) * mean * mean);
} | 3.68 |
pulsar_ProducerConfiguration_addEncryptionKey | /**
* Add public encryption key, used by producer to encrypt the data key.
*
* At the time of producer creation, Pulsar client checks if there are keys added to encryptionKeys. If keys are
* found, a callback getKey(String keyName) is invoked against each key to load the values of the key. Application
* should implement this callback to return the key in pkcs8 format. If compression is enabled, message is encrypted
* after compression. If batch messaging is enabled, the batched message is encrypted.
*
*/
public void addEncryptionKey(String key) {
conf.getEncryptionKeys().add(key);
} | 3.68 |
framework_Upload_setAcceptMimeTypes | /**
* Sets the component's list of accepted content-types. According to RFC
* 1867, if the attribute is present, the browser might constrain the file
* patterns prompted for to match those with the corresponding appropriate
* file extensions for the platform. Good examples are: {@code image/*} or
* {@code image/png,text/plain}
*
* @param acceptMimeTypes
* comma-separated list of desired mime types to be uploaded
* @see #getAcceptMimeTypes
* @since 8.5
*/
public void setAcceptMimeTypes(String acceptMimeTypes) {
getState().acceptMimeTypes = acceptMimeTypes;
} | 3.68 |
pulsar_ProducerBuilderImpl_schema | /**
 * Allows overriding the schema in the builder implementation.
 * @return this builder
*/
public ProducerBuilder<T> schema(Schema<T> schema) {
this.schema = schema;
return this;
} | 3.68 |
rocketmq-connect_AvroData_fixedValueSizeMatch | /**
* Returns true if the fixed value size of the value matches the expected size
*/
private static boolean fixedValueSizeMatch(Schema fieldSchema,
Object value,
int size,
boolean enhancedSchemaSupport) {
if (value instanceof byte[]) {
return ((byte[]) value).length == size;
} else if (value instanceof ByteBuffer) {
return ((ByteBuffer) value).remaining() == size;
} else if (value instanceof GenericFixed) {
return unionMemberFieldName(((GenericFixed) value).getSchema(), enhancedSchemaSupport)
.equals(fieldSchema.getName());
} else {
throw new ConnectException("Invalid class for fixed, expecting GenericFixed, byte[]"
+ " or ByteBuffer but found " + value.getClass());
}
} | 3.68 |
hudi_AvroSchemaUtils_getAvroRecordQualifiedName | /**
 * Generates a fully-qualified name for the Avro schema based on the table's name.
*
* NOTE: PLEASE READ CAREFULLY BEFORE CHANGING
* This method should not change for compatibility reasons as older versions
* of Avro might be comparing fully-qualified names rather than just the record
* names
*/
public static String getAvroRecordQualifiedName(String tableName) {
String sanitizedTableName = HoodieAvroUtils.sanitizeName(tableName);
return "hoodie." + sanitizedTableName + "." + sanitizedTableName + "_record";
} | 3.68 |
hbase_QuotaObserverChore_updateNamespaceQuota | /**
* Updates the hbase:quota table with the target quota policy for this <code>namespace</code> if
* necessary.
* @param namespace The namespace being checked
* @param currentSnapshot The state of the quota on this namespace from the previous invocation
* @param targetSnapshot The state the quota should be in for this namespace
* @param tablesByNamespace A mapping of tables in namespaces.
*/
void updateNamespaceQuota(String namespace, SpaceQuotaSnapshot currentSnapshot,
SpaceQuotaSnapshot targetSnapshot, final Multimap<String, TableName> tablesByNamespace)
throws IOException {
final SpaceQuotaStatus targetStatus = targetSnapshot.getQuotaStatus();
// When the policies differ, we need to move into or out of violation
if (!currentSnapshot.equals(targetSnapshot)) {
// We want to have a policy of "NONE", moving out of violation
if (!targetStatus.isInViolation()) {
for (TableName tableInNS : tablesByNamespace.get(namespace)) {
// If there is a quota on this table in violation
if (tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation()) {
// Table-level quota violation policy is being applied here.
if (LOG.isTraceEnabled()) {
LOG.trace("Not activating Namespace violation policy because a Table violation"
+ " policy is already in effect for " + tableInNS);
}
} else {
LOG.info(tableInNS + " moving into observance of namespace space quota");
this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
}
}
// We want to move into violation at the NS level
} else {
// Moving tables in the namespace into violation or to a different violation policy
for (TableName tableInNS : tablesByNamespace.get(namespace)) {
final SpaceQuotaSnapshot tableQuotaSnapshot =
tableSnapshotStore.getCurrentState(tableInNS);
final boolean hasTableQuota =
!Objects.equals(QuotaSnapshotStore.NO_QUOTA, tableQuotaSnapshot);
if (hasTableQuota && tableQuotaSnapshot.getQuotaStatus().isInViolation()) {
// Table-level quota violation policy is being applied here.
if (LOG.isTraceEnabled()) {
LOG.trace("Not activating Namespace violation policy because a Table violation"
+ " policy is already in effect for " + tableInNS);
}
} else {
// No table quota present or a table quota present that is not in violation
LOG.info(tableInNS + " moving into violation of namespace space quota with policy "
+ targetStatus.getPolicy());
this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
}
}
}
// Update the new state in memory for this namespace
namespaceSnapshotStore.setCurrentState(namespace, targetSnapshot);
} else {
// Policies are the same
if (!targetStatus.isInViolation()) {
// Both are NONE, so we remain in observance
if (LOG.isTraceEnabled()) {
LOG.trace(namespace + " remains in observance of quota.");
}
} else {
// Namespace quota is still in violation, need to enact if the table quota is not
// taking priority.
for (TableName tableInNS : tablesByNamespace.get(namespace)) {
// Does a table policy exist
if (tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation()) {
// Table-level quota violation policy is being applied here.
if (LOG.isTraceEnabled()) {
LOG.trace("Not activating Namespace violation policy because Table violation"
+ " policy is already in effect for " + tableInNS);
}
} else {
// No table policy, so enact namespace policy
LOG.info(tableInNS + " moving into violation of namespace space quota");
this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
}
}
}
}
} | 3.68 |
framework_VCheckBox_getLabelElement | /**
* Gives access to the label element.
*
* @return Element of the Label itself
* @since 8.7
*/
public Element getLabelElement() {
// public to allow CheckBoxState to access it.
// FIXME: Would love to use a better way to access the label element
return getInputElement().getNextSiblingElement();
} | 3.68 |
AreaShop_FriendsFeature_getFriendNames | /**
 * Get the names of the friends added to this region.
 * @return Names of the friends added to this region
*/
public Set<String> getFriendNames() {
HashSet<String> result = new HashSet<>();
for(UUID friend : getFriends()) {
OfflinePlayer player = Bukkit.getOfflinePlayer(friend);
if(player != null && player.getName() != null) {
result.add(player.getName());
}
}
return result;
} | 3.68 |
hadoop_S3LogParser_eNoTrailing | /**
* An entry in the regexp.
* @param name name of the group
* @param pattern pattern to use in the regexp
* @return the pattern for the regexp
*/
private static String eNoTrailing(String name, String pattern) {
return String.format("(?<%s>%s)", name, pattern);
} | 3.68 |
hudi_WriteMetadataEvent_emptyBootstrap | /**
* Creates empty bootstrap event for task {@code taskId}.
*
 * <p>The event indicates that the new instant can start directly;
 * there are no old instant write statuses to recover.
*/
public static WriteMetadataEvent emptyBootstrap(int taskId) {
return WriteMetadataEvent.builder()
.taskID(taskId)
.instantTime(BOOTSTRAP_INSTANT)
.writeStatus(Collections.emptyList())
.bootstrap(true)
.build();
} | 3.68 |
flink_ResultPartitionMetrics_refreshAndGetMin | /**
* Iterates over all sub-partitions and collects the minimum number of queued buffers in a
* sub-partition in a best-effort way.
*
 * @return minimum number of queued buffers per sub-partition (<tt>0</tt> if no sub-partitions
 *     exist)
*/
int refreshAndGetMin() {
int min = Integer.MAX_VALUE;
int numSubpartitions = partition.getNumberOfSubpartitions();
if (numSubpartitions == 0) {
// meaningful value when no channels exist:
return 0;
}
for (int targetSubpartition = 0;
targetSubpartition < numSubpartitions;
++targetSubpartition) {
int size = partition.getNumberOfQueuedBuffers(targetSubpartition);
min = Math.min(min, size);
}
return min;
} | 3.68 |
flink_FileLock_inTempFolder | /**
* Initialize a FileLock using a file located inside temp folder.
*
* @param fileName The name of the locking file
* @return The initialized FileLock
*/
public static FileLock inTempFolder(String fileName) {
return new FileLock(TEMP_DIR, fileName);
} | 3.68 |
AreaShop_AreaShop_setChatprefix | /**
* Set the chatprefix to use in the chat (loaded from config normally).
* @param chatprefix The string to use in front of chat messages (supports formatting codes)
*/
public void setChatprefix(List<String> chatprefix) {
this.chatprefix = chatprefix;
} | 3.68 |
hadoop_RouterFedBalance_setDiffThreshold | /**
* Specify the threshold of diff entries.
* @param value the threshold of a fast distcp.
*/
public Builder setDiffThreshold(int value) {
this.diffThreshold = value;
return this;
} | 3.68 |
flink_StreamTask_finalize | /**
* The finalize method shuts down the timer. This is a fail-safe shutdown, in case the original
* shutdown method was never called.
*
* <p>This should not be relied upon! It will cause shutdown to happen much later than if manual
* shutdown is attempted, and cause threads to linger for longer than needed.
*/
@Override
protected void finalize() throws Throwable {
super.finalize();
if (!timerService.isTerminated()) {
LOG.info("Timer service is shutting down.");
timerService.shutdownService();
}
if (!systemTimerService.isTerminated()) {
LOG.info("System timer service is shutting down.");
systemTimerService.shutdownService();
}
cancelables.close();
} | 3.68 |
hbase_Chunk_init | /**
* Actually claim the memory for this chunk. This should only be called from the thread that
* constructed the chunk. It is thread-safe against other threads calling alloc(), who will block
* until the allocation is complete.
*/
public void init() {
assert nextFreeOffset.get() == UNINITIALIZED;
try {
allocateDataBuffer();
} catch (OutOfMemoryError e) {
boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
assert failInit; // should be true.
throw e;
}
// Mark that it's ready for use
// Move 4 bytes since the first 4 bytes are having the chunkid in it
boolean initted = nextFreeOffset.compareAndSet(UNINITIALIZED, Bytes.SIZEOF_INT);
// We should always succeed the above CAS since only one thread
// calls init()!
Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
} | 3.68 |
framework_Slider_setUpdateValueOnClick | /**
* Sets the slider to update its value when the user clicks on it. By
* default, the slider value is updated by dragging the slider's handle or
* clicking arrows.
*
* @param updateValueOnClick
* {@code true} to update the value of the slider on click,
* {@code false} otherwise.
* @since 8.8
*/
public void setUpdateValueOnClick(boolean updateValueOnClick) {
getState().updateValueOnClick = updateValueOnClick;
} | 3.68 |
hbase_MasterObserver_preGetUserPermissions | /**
* Called before getting user permissions.
* @param ctx the coprocessor instance's environment
* @param userName the user name, null if get all user permissions
* @param namespace the namespace, null if don't get namespace permission
* @param tableName the table name, null if don't get table permission
* @param family the table column family, null if don't get table family permission
* @param qualifier the table column qualifier, null if don't get table qualifier permission
* @throws IOException if something went wrong
*/
default void preGetUserPermissions(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName, String namespace, TableName tableName, byte[] family, byte[] qualifier)
throws IOException {
} | 3.68 |
hbase_Procedure_incChildrenLatch | /**
* Called by the ProcedureExecutor on procedure-load to restore the latch state
*/
protected synchronized void incChildrenLatch() {
// TODO: can this be inferred from the stack? I think so...
this.childrenLatch++;
if (LOG.isTraceEnabled()) {
LOG.trace("CHILD LATCH INCREMENT " + this.childrenLatch, new Throwable(this.toString()));
}
} | 3.68 |
framework_AbstractOrderedLayout_replaceComponent | /* Documented in superclass */
@Override
public void replaceComponent(Component oldComponent,
Component newComponent) {
// Gets the locations
int oldLocation = -1;
int newLocation = -1;
int location = 0;
for (final Component component : components) {
if (component == oldComponent) {
oldLocation = location;
}
if (component == newComponent) {
newLocation = location;
}
location++;
}
if (oldLocation == -1) {
addComponent(newComponent);
} else if (newLocation == -1) {
Alignment alignment = getComponentAlignment(oldComponent);
float expandRatio = getExpandRatio(oldComponent);
removeComponent(oldComponent);
addComponent(newComponent, oldLocation);
applyLayoutSettings(newComponent, alignment, expandRatio);
} else {
// Both old and new are in the layout
if (oldLocation > newLocation) {
components.remove(oldComponent);
components.add(newLocation, oldComponent);
components.remove(newComponent);
components.add(oldLocation, newComponent);
} else {
components.remove(newComponent);
components.add(oldLocation, newComponent);
components.remove(oldComponent);
components.add(newLocation, oldComponent);
}
markAsDirty();
}
} | 3.68 |
hmily_PropertyKeySource_getSource | /**
* Return original data.
*
* @return source
*/
public T getSource() {
return source;
} | 3.68 |
hadoop_IOStatisticsLogging_ioStatisticsSourceToString | /**
 * Extract the statistics from a source object, or ""
* if it is not an instance of {@link IOStatistics},
* {@link IOStatisticsSource} or the retrieved
* statistics are null.
* <p>
* Exceptions are caught and downgraded to debug logging.
* @param source source of statistics.
* @return a string for logging.
*/
public static String ioStatisticsSourceToString(@Nullable Object source) {
try {
return ioStatisticsToString(retrieveIOStatistics(source));
} catch (RuntimeException e) {
LOG.debug("Ignoring", e);
return "";
}
} | 3.68 |
pulsar_PulsarFieldValueProviders_timeValueProvider | /**
 * FieldValueProvider for time types (Date, Timestamp, etc.) that can indicate null, used instead of longValueProvider.
*/
public static FieldValueProvider timeValueProvider(long millis, boolean isNull) {
return new FieldValueProvider() {
@Override
public long getLong() {
return millis * Timestamps.MICROSECONDS_PER_MILLISECOND;
}
@Override
public boolean isNull() {
return isNull;
}
};
} | 3.68 |
framework_DateCellDayEvent_calculateDateCellOffsetPx | /**
 * @param dateOffset the offset of the date cell
 * @return the number of pixels the given date is from the left side
*/
private int calculateDateCellOffsetPx(int dateOffset) {
int dateCellOffset = 0;
int[] dateWidths = weekGrid.getDateCellWidths();
if (dateWidths != null) {
for (int i = 0; i < dateOffset; i++) {
dateCellOffset += dateWidths[i] + 1;
}
} else {
dateCellOffset = dateOffset * weekGrid.getDateCellWidth();
}
return dateCellOffset;
} | 3.68 |
hudi_HoodieSparkQuickstart_runQuickstart | /**
* Visible for testing
*/
public static void runQuickstart(JavaSparkContext jsc, SparkSession spark, String tableName, String tablePath) {
final HoodieExampleDataGenerator<HoodieAvroPayload> dataGen = new HoodieExampleDataGenerator<>();
String snapshotQuery = "SELECT begin_lat, begin_lon, driver, end_lat, end_lon, fare, partitionpath, rider, ts, uuid FROM hudi_ro_table";
Dataset<Row> insertDf = insertData(spark, jsc, tablePath, tableName, dataGen);
queryData(spark, jsc, tablePath, tableName, dataGen);
assert insertDf.except(spark.sql(snapshotQuery)).count() == 0;
Dataset<Row> snapshotBeforeUpdate = spark.sql(snapshotQuery);
Dataset<Row> updateDf = updateData(spark, jsc, tablePath, tableName, dataGen);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> snapshotAfterUpdate = spark.sql(snapshotQuery);
assert snapshotAfterUpdate.intersect(updateDf).count() == updateDf.count();
assert snapshotAfterUpdate.except(updateDf).except(snapshotBeforeUpdate).count() == 0;
incrementalQuery(spark, tablePath, tableName);
pointInTimeQuery(spark, tablePath, tableName);
Dataset<Row> snapshotBeforeDelete = snapshotAfterUpdate;
Dataset<Row> deleteDf = delete(spark, tablePath, tableName);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> snapshotAfterDelete = spark.sql(snapshotQuery);
assert snapshotAfterDelete.intersect(deleteDf).count() == 0;
assert snapshotBeforeDelete.except(deleteDf).except(snapshotAfterDelete).count() == 0;
Dataset<Row> snapshotBeforeOverwrite = snapshotAfterDelete;
Dataset<Row> overwriteDf = insertOverwriteData(spark, jsc, tablePath, tableName, dataGen);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> withoutThirdPartitionDf = snapshotBeforeOverwrite.filter("partitionpath != '" + HoodieExampleDataGenerator.DEFAULT_THIRD_PARTITION_PATH + "'");
Dataset<Row> expectedDf = withoutThirdPartitionDf.union(overwriteDf);
Dataset<Row> snapshotAfterOverwrite = spark.sql(snapshotQuery);
assert snapshotAfterOverwrite.except(expectedDf).count() == 0;
Dataset<Row> snapshotBeforeDeleteByPartition = snapshotAfterOverwrite;
deleteByPartition(spark, tablePath, tableName);
queryData(spark, jsc, tablePath, tableName, dataGen);
Dataset<Row> snapshotAfterDeleteByPartition = spark.sql(snapshotQuery);
assert snapshotAfterDeleteByPartition.intersect(snapshotBeforeDeleteByPartition.filter("partitionpath == '" + HoodieExampleDataGenerator.DEFAULT_FIRST_PARTITION_PATH + "'")).count() == 0;
assert snapshotAfterDeleteByPartition.count() == snapshotBeforeDeleteByPartition.filter("partitionpath != '" + HoodieExampleDataGenerator.DEFAULT_FIRST_PARTITION_PATH + "'").count();
} | 3.68 |
streampipes_StreamRequirementsBuilder_requiredPropertyWithNaryMapping | /**
* Sets a new property requirement and, in addition, adds a
* {@link org.apache.streampipes.model.staticproperty.MappingPropertyNary} static property to the pipeline element
* definition.
*
* @param propertyRequirement The property requirement.
* Use {@link org.apache.streampipes.sdk.helpers.EpRequirements} to
* create a new requirement.
* @param label The {@link org.apache.streampipes.sdk.helpers.Label} that defines the mapping property.
* @param propertyScope The {@link org.apache.streampipes.model.schema.PropertyScope} of the requirement.
* @return this
*/
public StreamRequirementsBuilder requiredPropertyWithNaryMapping(EventProperty propertyRequirement, Label label,
PropertyScope propertyScope) {
propertyRequirement.setRuntimeName(label.getInternalId());
this.eventProperties.add(propertyRequirement);
MappingPropertyNary mp = new MappingPropertyNary(label.getInternalId(), label
.getInternalId(), label.getLabel(), label.getDescription());
mp.setPropertyScope(propertyScope.name());
this.mappingProperties.add(mp);
return this;
} | 3.68 |
flink_SingleInputPlanNode_setDriverKeyInfo | /**
* Sets the key field information for the specified driver comparator.
*
* @param keys The key field indexes for the specified driver comparator.
* @param sortOrder The key sort order for the specified driver comparator.
* @param id The ID of the driver comparator.
*/
public void setDriverKeyInfo(FieldList keys, boolean[] sortOrder, int id) {
if (id < 0 || id >= driverKeys.length) {
throw new CompilerException(
"Invalid id for driver key information. DriverStrategy requires only "
+ super.getDriverStrategy().getNumRequiredComparators()
+ " comparators.");
}
this.driverKeys[id] = keys;
this.driverSortOrders[id] = sortOrder;
} | 3.68 |
hudi_HoodieTableMetadata_getDataTableBasePathFromMetadataTable | /**
   * Returns the base path of the dataset, given the base path of that dataset's metadata table.
*/
static String getDataTableBasePathFromMetadataTable(String metadataTableBasePath) {
checkArgument(isMetadataTable(metadataTableBasePath));
return metadataTableBasePath.substring(0, metadataTableBasePath.lastIndexOf(HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH) - 1);
} | 3.68 |
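A small illustration of the path arithmetic, assuming `HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH` resolves to `.hoodie/metadata` (the concrete paths are made up for the example):

    // "/tmp/hudi/trips/.hoodie/metadata" is the metadata table of "/tmp/hudi/trips",
    // so stripping the metadata folder suffix (and its leading slash) recovers the data table base path.
    String metadataBasePath = "/tmp/hudi/trips/.hoodie/metadata";
    String dataBasePath = HoodieTableMetadata.getDataTableBasePathFromMetadataTable(metadataBasePath);
    // dataBasePath == "/tmp/hudi/trips"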
hbase_CompositeImmutableSegment_incMemStoreSize | /**
   * Updates the heap size counter of the segment by the given delta. This composite segment does
   * not support the operation and always throws an IllegalStateException.
*/
@Override
public long incMemStoreSize(long delta, long heapOverhead, long offHeapOverhead, int cellsCount) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.68 |
shardingsphere-elasticjob_ZookeeperElectionService_stop | /**
* Stop election.
*/
public void stop() {
log.info("Elastic job: stop leadership election");
leaderLatch.countDown();
try {
leaderSelector.close();
// CHECKSTYLE:OFF
} catch (final Exception ignore) {
}
// CHECKSTYLE:ON
} | 3.68 |
pulsar_PulsarAdminImpl_functions | /**
     * Get the functions management object.
     *
     * @return the functions management object
*/
public Functions functions() {
return functions;
} | 3.68 |
zxing_AbstractRSSReader_count | /**
* @param array values to sum
* @return sum of values
* @deprecated call {@link MathUtils#sum(int[])}
*/
@Deprecated
protected static int count(int[] array) {
return MathUtils.sum(array);
} | 3.68 |
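Since the method simply delegates, new code can call the replacement directly; a trivial example:

    int[] counters = {1, 2, 3, 4};
    int total = MathUtils.sum(counters); // 10, the same result the deprecated count() would return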
rocketmq-connect_WorkerDirectTask_resume | /**
     * Resume consumption of messages from previously paused partitions.
     *
     * @param partitions the partitions to be resumed.
*/
@Override
public void resume(List<RecordPartition> partitions) {
// no-op
} | 3.68 |
pulsar_OffloadersCache_getOrLoadOffloaders | /**
* Method to load an Offloaders directory or to get an already loaded Offloaders directory.
*
* @param offloadersPath - the directory to search the offloaders nar files
* @param narExtractionDirectory - the directory to use for extraction
* @return the loaded offloaders class
* @throws IOException when fail to retrieve the pulsar offloader class
*/
public Offloaders getOrLoadOffloaders(String offloadersPath, String narExtractionDirectory) {
return loadedOffloaders.computeIfAbsent(offloadersPath,
(directory) -> {
try {
return OffloaderUtils.searchForOffloaders(directory, narExtractionDirectory);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
} | 3.68 |
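A hedged usage sketch; `offloadersCache` is assumed to be an existing instance held by the broker, and both directory locations are hypothetical:

    // Repeated calls with the same offloaders path reuse the already-loaded Offloaders instance.
    Offloaders offloaders = offloadersCache.getOrLoadOffloaders(
        "/pulsar/offloaders",           // hypothetical nar directory
        "/tmp/pulsar-nar-extraction");  // hypothetical extraction directory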
hbase_Bytes_searchDelimiterIndex | /**
   * Find the index of the passed delimiter.
   * @return Index of the first occurrence of the delimiter, scanning rightward from
   *         <code>offset</code>, or -1 if the delimiter is not found.
*/
public static int searchDelimiterIndex(final byte[] b, int offset, final int length,
final int delimiter) {
if (b == null) {
throw new IllegalArgumentException("Passed buffer is null");
}
int result = -1;
for (int i = offset; i < length + offset; i++) {
if (b[i] == delimiter) {
result = i;
break;
}
}
return result;
} | 3.68 |
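For example, locating the first comma in a delimited key (the values here are purely illustrative):

    byte[] b = Bytes.toBytes("region,info,12345");
    int idx = Bytes.searchDelimiterIndex(b, 0, b.length, ',');
    // idx == 6, the position of the first ','; a result of -1 would mean no delimiter was found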
rocketmq-connect_PluginUtils_shouldLoadInIsolation | /**
* Return whether the class with the given name should be loaded in isolation using a plugin
* classloader.
*
* @param name the fully qualified name of the class.
* @return true if this class should be loaded in isolation, false otherwise.
*/
public static boolean shouldLoadInIsolation(String name) {
return !(BLACKLIST.matcher(name).matches() && !INCLUDE.matcher(name).matches());
} | 3.68 |
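The double negation reads more easily as a rule: a class is loaded in isolation unless it matches the blacklist without also matching the include pattern. A standalone sketch with made-up patterns (the real BLACKLIST/INCLUDE regexes in the plugin are different) demonstrates the boolean logic:

    import java.util.regex.Pattern;

    public class IsolationDemo {
        // Hypothetical patterns, only to illustrate the logic.
        private static final Pattern BLACKLIST = Pattern.compile("^(?:java|javax|org\\.slf4j)\\..*");
        private static final Pattern INCLUDE = Pattern.compile("^org\\.slf4j\\.impl\\..*");

        static boolean shouldLoadInIsolation(String name) {
            return !(BLACKLIST.matcher(name).matches() && !INCLUDE.matcher(name).matches());
        }

        public static void main(String[] args) {
            System.out.println(shouldLoadInIsolation("java.lang.String"));        // false: blacklisted, not re-included
            System.out.println(shouldLoadInIsolation("org.slf4j.impl.MyBinder")); // true: blacklisted but re-included
            System.out.println(shouldLoadInIsolation("com.example.MyConnector")); // true: not blacklisted at all
        }
    }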
hmily_CuratorZookeeperClient_pull | /**
* Pull input stream.
*
* @param path the path
* @return the input stream
*/
public InputStream pull(final String path) {
String content = get(path);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("zookeeper content {}", content);
}
if (StringUtils.isBlank(content)) {
return null;
}
return new ByteArrayInputStream(content.getBytes());
} | 3.68 |
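A usage sketch with a hypothetical configuration path; the client construction is elided, since the method only needs an existing CuratorZookeeperClient instance:

    // Hypothetical path; pull() returns null when the node has no content.
    try (InputStream in = curatorZookeeperClient.pull("/hmily/config/hmily.yml")) {
        if (in != null) {
            Properties props = new Properties();
            props.load(in); // or hand the stream to a YAML parser, depending on the stored format
        }
    }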
flink_PythonOperatorChainingOptimizer_of | /** No chaining happens. */
public static ChainInfo of(Transformation<?> newTransformation) {
return new ChainInfo(newTransformation, Collections.emptyList());
} | 3.68 |
hadoop_TimelineEntity_setEntityId | /**
   * Set the entity Id.
*
* @param entityId
* the entity Id
*/
public void setEntityId(String entityId) {
this.entityId = entityId;
} | 3.68 |
hbase_SnapshotQuotaObserverChore_persistSnapshotSizesForNamespaces | /**
* Writes the size used by snapshots for each namespace to the quota table.
*/
void persistSnapshotSizesForNamespaces(Map<String, Long> snapshotSizesByNamespace)
throws IOException {
try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
quotaTable.put(snapshotSizesByNamespace.entrySet().stream()
.map(e -> QuotaTableUtil.createPutForNamespaceSnapshotSize(e.getKey(), e.getValue()))
.collect(Collectors.toList()));
}
} | 3.68 |
zxing_CaptureActivity_handleDecode | /**
* A valid barcode has been found, so give an indication of success and show the results.
*
* @param rawResult The contents of the barcode.
* @param scaleFactor amount by which thumbnail was scaled
* @param barcode A greyscale bitmap of the camera data which was decoded.
*/
public void handleDecode(Result rawResult, Bitmap barcode, float scaleFactor) {
inactivityTimer.onActivity();
lastResult = rawResult;
ResultHandler resultHandler = ResultHandlerFactory.makeResultHandler(this, rawResult);
boolean fromLiveScan = barcode != null;
if (fromLiveScan) {
historyManager.addHistoryItem(rawResult, resultHandler);
// Then not from history, so beep/vibrate and we have an image to draw on
beepManager.playBeepSoundAndVibrate();
drawResultPoints(barcode, scaleFactor, rawResult);
}
switch (source) {
case NATIVE_APP_INTENT:
case PRODUCT_SEARCH_LINK:
handleDecodeExternally(rawResult, resultHandler, barcode);
break;
case ZXING_LINK:
if (scanFromWebPageManager == null || !scanFromWebPageManager.isScanFromWebPage()) {
handleDecodeInternally(rawResult, resultHandler, barcode);
} else {
handleDecodeExternally(rawResult, resultHandler, barcode);
}
break;
case NONE:
SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this);
if (fromLiveScan && prefs.getBoolean(PreferencesActivity.KEY_BULK_MODE, false)) {
Toast.makeText(getApplicationContext(),
getResources().getString(R.string.msg_bulk_mode_scanned) + " (" + rawResult.getText() + ')',
Toast.LENGTH_SHORT).show();
maybeSetClipboard(resultHandler);
// Wait a moment or else it will scan the same barcode continuously about 3 times
restartPreviewAfterDelay(BULK_MODE_SCAN_DELAY_MS);
} else {
handleDecodeInternally(rawResult, resultHandler, barcode);
}
break;
}
} | 3.68 |
flink_AfterMatchSkipStrategy_skipToFirst | /**
     * Discards every partial match that started before the first event of the emitted match mapped
     * to *PatternName*.
*
* @param patternName the pattern name to skip to
* @return the created AfterMatchSkipStrategy
*/
public static SkipToFirstStrategy skipToFirst(String patternName) {
return new SkipToFirstStrategy(patternName, false);
} | 3.68 |
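A sketch of how the strategy is typically attached to a CEP pattern; `Event` and the pattern names here are placeholders, not taken from the Flink sources:

    AfterMatchSkipStrategy skipStrategy = AfterMatchSkipStrategy.skipToFirst("middle");

    // Partial matches that started before the first "middle" event of an emitted
    // match are discarded once that match is emitted.
    Pattern<Event, ?> pattern = Pattern.<Event>begin("start", skipStrategy)
        .next("middle")
        .followedBy("end");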
morf_AbstractSqlDialectTest_provideDatabaseType | /**
   * This method can be overridden in specific dialects to test DialectSpecificHint in each dialect.
   * @return a mock database type identifier value or an overridden, dialect-specific, database type identifier
*/
protected String provideDatabaseType() {
return "SOME_DATABASE_IDENTIFIER";
} | 3.68 |
framework_VMenuBar_itemOver | /**
* When the user hovers the mouse over the item.
*
* @param item
*/
public void itemOver(CustomMenuItem item) {
if ((openRootOnHover || subMenu || menuVisible)
&& !item.isSeparator()) {
setSelected(item);
if (!subMenu && openRootOnHover && !menuVisible) {
menuVisible = true; // start opening menus
LazyCloser.prepare(this);
}
}
if (menuVisible && visibleChildMenu != item.getSubMenu()
&& popup != null) {
// #15255 - disable animation-in/out when hide in this case
popup.hide(false, false, false);
}
if (menuVisible && item.getSubMenu() != null
&& visibleChildMenu != item.getSubMenu()) {
showChildMenu(item);
}
} | 3.68 |
hadoop_AbstractReservationSystem_getPlanFollowerTimeStep | /**
* @return the planStepSize
*/
@Override
public long getPlanFollowerTimeStep() {
readLock.lock();
try {
return planStepSize;
} finally {
readLock.unlock();
}
} | 3.68 |
hadoop_AzureBlobFileSystemStore_populateAbfsClientContext | /**
* Populate a new AbfsClientContext instance with the desired properties.
*
* @return an instance of AbfsClientContext.
*/
private AbfsClientContext populateAbfsClientContext() {
return new AbfsClientContextBuilder()
.withExponentialRetryPolicy(
new ExponentialRetryPolicy(abfsConfiguration))
.withAbfsCounters(abfsCounters)
.withAbfsPerfTracker(abfsPerfTracker)
.build();
} | 3.68 |
framework_ExtremelyLongPushTime_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
super.setup(request);
duration.setConvertedValue(DURATION_MS);
interval.setConvertedValue(INTERVAL_MS);
dataSize.setConvertedValue(PAYLOAD_SIZE);
} | 3.68 |