name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_ResourceMappings_getAssignedResources | /**
* Get all resources assigned for the given resource type.
* @param resourceType the resource type
* @return the list of assigned resources
*/
public List<Serializable> getAssignedResources(String resourceType) {
AssignedResources ar = assignedResourcesMap.get(resourceType);
if (null == ar) {
return Collections.emptyList();
}
return ar.getAssignedResources();
} | 3.68 |
hudi_BulkInsertWriteFunction_endInput | /**
* End input action for batch source.
*/
public void endInput() {
initWriterHelperIfNeeded();
final List<WriteStatus> writeStatus = this.writerHelper.getWriteStatuses(this.taskID);
final WriteMetadataEvent event = WriteMetadataEvent.builder()
.taskID(taskID)
.instantTime(this.writerHelper.getInstantTime())
.writeStatus(writeStatus)
.lastBatch(true)
.endInput(true)
.build();
this.eventGateway.sendEventToCoordinator(event);
} | 3.68 |
hbase_MasterObserver_preModifyTableStoreFileTracker | /**
* Called prior to modifying a table's store file tracker. Called as part of modify table store
* file tracker RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
* @param dstSFT the store file tracker
* @return the store file tracker
*/
default String preModifyTableStoreFileTracker(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName,
String dstSFT) throws IOException {
return dstSFT;
} | 3.68 |
flink_WebMonitorUtils_loadWebSubmissionExtension | /**
* Loads the {@link WebMonitorExtension} which enables web submission.
*
* @param leaderRetriever to retrieve the leader
* @param timeout for asynchronous requests
* @param responseHeaders for the web submission handlers
* @param localAddressFuture of the underlying REST server endpoint
* @param uploadDir where the web submission handler stores uploaded jars
* @param executor to run asynchronous operations
* @param configuration used to instantiate the web submission extension
* @return Web submission extension
* @throws FlinkException if the web submission extension could not be loaded
*/
public static WebMonitorExtension loadWebSubmissionExtension(
GatewayRetriever<? extends DispatcherGateway> leaderRetriever,
Time timeout,
Map<String, String> responseHeaders,
CompletableFuture<String> localAddressFuture,
java.nio.file.Path uploadDir,
Executor executor,
Configuration configuration)
throws FlinkException {
if (isFlinkRuntimeWebInClassPath()) {
try {
final Constructor<?> webSubmissionExtensionConstructor =
Class.forName("org.apache.flink.runtime.webmonitor.WebSubmissionExtension")
.getConstructor(
Configuration.class,
GatewayRetriever.class,
Map.class,
CompletableFuture.class,
java.nio.file.Path.class,
Executor.class,
Time.class);
return (WebMonitorExtension)
webSubmissionExtensionConstructor.newInstance(
configuration,
leaderRetriever,
responseHeaders,
localAddressFuture,
uploadDir,
executor,
timeout);
} catch (ClassNotFoundException
| NoSuchMethodException
| InstantiationException
| InvocationTargetException
| IllegalAccessException e) {
throw new FlinkException("Could not load web submission extension.", e);
}
} else {
throw new FlinkException(
"The module flink-runtime-web could not be found in the class path. Please add "
+ "this jar in order to enable web based job submission.");
}
} | 3.68 |
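The snippet above is an instance of the optional-dependency pattern: the extension class is instantiated reflectively only if its module (flink-runtime-web) is on the classpath. A minimal standalone sketch of that pattern, where the class name `com.example.OptionalFeature` is hypothetical and not part of Flink:

```java
import java.lang.reflect.Constructor;

// Sketch of optional-dependency loading: instantiate a class only if it is on the classpath.
public final class OptionalLoader {

    public static Object loadIfPresent(String className) {
        try {
            Constructor<?> ctor = Class.forName(className).getConstructor();
            return ctor.newInstance();          // feature is available
        } catch (ReflectiveOperationException e) {
            return null;                        // class or constructor missing: feature disabled
        }
    }

    public static void main(String[] args) {
        Object feature = loadIfPresent("com.example.OptionalFeature"); // hypothetical class name
        System.out.println(feature == null ? "feature not on classpath" : "feature loaded");
    }
}
```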
flink_FileInputFormat_createInputSplits | /**
* Computes the input splits for the file. By default, one file block is one split. If more
* splits are requested than blocks are available, then a split may be a fraction of a block and
* splits may cross block boundaries.
*
* @param minNumSplits The minimum desired number of file splits.
* @return The computed file splits.
* @see org.apache.flink.api.common.io.InputFormat#createInputSplits(int)
*/
@Override
public FileInputSplit[] createInputSplits(int minNumSplits) throws IOException {
if (minNumSplits < 1) {
throw new IllegalArgumentException("Number of input splits has to be at least 1.");
}
// take the desired number of splits into account
minNumSplits = Math.max(minNumSplits, this.numSplits);
final List<FileInputSplit> inputSplits = new ArrayList<FileInputSplit>(minNumSplits);
// get all the files that are involved in the splits
List<FileStatus> files = new ArrayList<>();
long totalLength = 0;
for (Path path : getFilePaths()) {
final FileSystem fs = path.getFileSystem();
final FileStatus pathFile = fs.getFileStatus(path);
if (pathFile.isDir()) {
totalLength += addFilesInDir(path, files, true);
} else {
testForUnsplittable(pathFile);
files.add(pathFile);
totalLength += pathFile.getLen();
}
}
// returns if unsplittable
if (unsplittable) {
int splitNum = 0;
for (final FileStatus file : files) {
final FileSystem fs = file.getPath().getFileSystem();
final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, file.getLen());
Set<String> hosts = new HashSet<String>();
for (BlockLocation block : blocks) {
hosts.addAll(Arrays.asList(block.getHosts()));
}
long len = file.getLen();
if (testForUnsplittable(file)) {
len = READ_WHOLE_SPLIT_FLAG;
}
FileInputSplit fis =
new FileInputSplit(
splitNum++,
file.getPath(),
0,
len,
hosts.toArray(new String[hosts.size()]));
inputSplits.add(fis);
}
return inputSplits.toArray(new FileInputSplit[inputSplits.size()]);
}
final long maxSplitSize =
totalLength / minNumSplits + (totalLength % minNumSplits == 0 ? 0 : 1);
// now that we have the files, generate the splits
int splitNum = 0;
for (final FileStatus file : files) {
final FileSystem fs = file.getPath().getFileSystem();
final long len = file.getLen();
final long blockSize = file.getBlockSize();
final long minSplitSize;
if (this.minSplitSize <= blockSize) {
minSplitSize = this.minSplitSize;
} else {
if (LOG.isWarnEnabled()) {
LOG.warn(
"Minimal split size of "
+ this.minSplitSize
+ " is larger than the block size of "
+ blockSize
+ ". Decreasing minimal split size to block size.");
}
minSplitSize = blockSize;
}
final long splitSize = Math.max(minSplitSize, Math.min(maxSplitSize, blockSize));
final long halfSplit = splitSize >>> 1;
final long maxBytesForLastSplit = (long) (splitSize * MAX_SPLIT_SIZE_DISCREPANCY);
if (len > 0) {
// get the block locations and make sure they are in order with respect to their
// offset
final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, len);
Arrays.sort(blocks);
long bytesUnassigned = len;
long position = 0;
int blockIndex = 0;
while (bytesUnassigned > maxBytesForLastSplit) {
// get the block containing the majority of the data
blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
// create a new split
FileInputSplit fis =
new FileInputSplit(
splitNum++,
file.getPath(),
position,
splitSize,
blocks[blockIndex].getHosts());
inputSplits.add(fis);
// adjust the positions
position += splitSize;
bytesUnassigned -= splitSize;
}
// assign the last split
if (bytesUnassigned > 0) {
blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
final FileInputSplit fis =
new FileInputSplit(
splitNum++,
file.getPath(),
position,
bytesUnassigned,
blocks[blockIndex].getHosts());
inputSplits.add(fis);
}
} else {
// special case with a file of zero bytes size
final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, 0);
String[] hosts;
if (blocks.length > 0) {
hosts = blocks[0].getHosts();
} else {
hosts = new String[0];
}
final FileInputSplit fis =
new FileInputSplit(splitNum++, file.getPath(), 0, 0, hosts);
inputSplits.add(fis);
}
}
return inputSplits.toArray(new FileInputSplit[inputSplits.size()]);
} | 3.68 |
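The split sizing above reduces to a small piece of arithmetic: maxSplitSize is the total length divided (rounded up) by the requested split count, the effective splitSize is clamped between minSplitSize and the block size, and chunks of splitSize are cut off until the remainder fits into a slightly oversized last split. A self-contained sketch with made-up numbers; the real MAX_SPLIT_SIZE_DISCREPANCY constant is assumed to be 1.1 here:

```java
import java.util.ArrayList;
import java.util.List;

// Standalone walk-through of the split-size arithmetic with hypothetical inputs.
public final class SplitMath {
    public static void main(String[] args) {
        long totalLength = 1_000_000_000L;   // hypothetical total bytes across all files
        int minNumSplits = 7;
        long blockSize = 128 * 1024 * 1024L; // hypothetical block size
        long minSplitSize = 1;

        long maxSplitSize = totalLength / minNumSplits + (totalLength % minNumSplits == 0 ? 0 : 1);
        long splitSize = Math.max(minSplitSize, Math.min(maxSplitSize, blockSize));
        long maxBytesForLastSplit = (long) (splitSize * 1.1); // assumed discrepancy factor

        List<long[]> splits = new ArrayList<>(); // each entry is {offset, length}
        long position = 0;
        long bytesUnassigned = totalLength;
        while (bytesUnassigned > maxBytesForLastSplit) {
            splits.add(new long[] {position, splitSize});
            position += splitSize;
            bytesUnassigned -= splitSize;
        }
        if (bytesUnassigned > 0) {
            splits.add(new long[] {position, bytesUnassigned}); // slightly oversized last split
        }
        System.out.println("split size = " + splitSize + ", number of splits = " + splits.size());
    }
}
```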
hadoop_SuccessData_getCommitter | /**
* @return committer name.
*/
public String getCommitter() {
return committer;
} | 3.68 |
flink_RestServerEndpointConfiguration_getSslHandlerFactory | /**
* Returns the {@link SSLHandlerFactory} that the REST server endpoint should use.
*
* @return SSLHandlerFactory that the REST server endpoint should use, or null if SSL was disabled
*/
@Nullable
public SSLHandlerFactory getSslHandlerFactory() {
return sslHandlerFactory;
} | 3.68 |
querydsl_Expressions_simplePath | /**
* Create a new Path expression
*
* @param type type of expression
* @param metadata path metadata
* @param <T> type of expression
* @return path expression
*/
public static <T> SimplePath<T> simplePath(Class<? extends T> type, PathMetadata metadata) {
return new SimplePath<T>(type, metadata);
} | 3.68 |
hadoop_AbstractDelegationTokenBinding_createSecretMananger | /**
* Create a secret manager.
* @return a secret manager.
* @throws IOException on failure
*/
protected SecretManager<AbstractS3ATokenIdentifier> createSecretMananger()
throws IOException {
return new TokenSecretManager();
} | 3.68 |
flink_DataSetUtils_countElementsPerPartition | /**
* Method that goes over all the elements in each partition in order to retrieve the total
* number of elements.
*
* @param input the DataSet received as input
* @return a data set containing tuples of (subtask index, number of elements) mappings.
*/
public static <T> DataSet<Tuple2<Integer, Long>> countElementsPerPartition(DataSet<T> input) {
return input.mapPartition(
new RichMapPartitionFunction<T, Tuple2<Integer, Long>>() {
@Override
public void mapPartition(
Iterable<T> values, Collector<Tuple2<Integer, Long>> out)
throws Exception {
long counter = 0;
for (T value : values) {
counter++;
}
out.collect(
new Tuple2<>(getRuntimeContext().getIndexOfThisSubtask(), counter));
}
});
} | 3.68 |
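A hedged usage sketch for the method above, assuming the legacy Flink batch (DataSet) API and its usual package layout:

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.DataSetUtils;

// Counts how many elements land in each parallel subtask of a simple sequence source.
public final class CountPerPartitionExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Long> numbers = env.generateSequence(1, 1000);
        // Each tuple is (subtask index, number of elements seen by that subtask).
        DataSet<Tuple2<Integer, Long>> counts = DataSetUtils.countElementsPerPartition(numbers);
        counts.print();
    }
}
```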
hbase_HRegionLocation_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null) {
return false;
}
if (!(o instanceof HRegionLocation)) {
return false;
}
return this.compareTo((HRegionLocation) o) == 0;
} | 3.68 |
rocketmq-connect_DeadLetterQueueConfig_dlqTopicWriteQueueNums | /**
* Get the number of write queues for the DLQ topic.
*
* @return the configured number of write queues for the DLQ topic
*/
public Integer dlqTopicWriteQueueNums() {
return config.getInt(DLQ_TOPIC_WRITE_QUEUE_NUMS, DLQ_TOPIC_WRITE_QUEUE_NUMS_DEFAULT);
} | 3.68 |
flink_MetricStore_getTaskManagerMetricStore | /**
* Returns the {@link TaskManagerMetricStore} for the given taskmanager ID.
*
* @param tmID taskmanager ID
* @return TaskManagerMetricStore for the given ID, or null if no store for the given argument
* exists
*/
public synchronized TaskManagerMetricStore getTaskManagerMetricStore(String tmID) {
return tmID == null ? null : TaskManagerMetricStore.unmodifiable(taskManagers.get(tmID));
} | 3.68 |
framework_VFilterSelect_isScrollActive | /**
* Returns true if the scroll is active on the menu element or if the
* menu currently displays the last page with fewer items than the
* maximum visibility (in which case the scroll is not active, but the
* scroll is active for any other page in general).
*
* @since 7.2.6
*/
@Override
public boolean isScrollActive() {
String height = getElement().getStyle().getHeight();
String preferredHeight = getPreferredHeight(pageLength);
return !(height == null || height.isEmpty()
|| height.equals(preferredHeight));
} | 3.68 |
framework_VDebugWindow_setFontSize | /**
* Sets the font size in use.
*
* @param size the font size to set
*/
private void setFontSize(int size) {
removeStyleDependentName("size" + fontSize);
fontSize = size;
addStyleDependentName("size" + size);
} | 3.68 |
hbase_AuthenticationFilterInitializer_initFilter | /**
* Initializes hadoop-auth AuthenticationFilter.
* <p>
* Propagates to hadoop-auth AuthenticationFilter configuration all Hadoop configuration
* properties prefixed with "hadoop.http.authentication."
* @param container The filter container
* @param conf Configuration for run-time parameters
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
container.addFilter("authentication", AuthenticationFilter.class.getName(), filterConfig);
} | 3.68 |
Activiti_AstFunction_invoke | /**
* Invoke method.
* @param bindings
* @param context
* @param base
* @param method
* @return method result
* @throws InvocationTargetException
* @throws IllegalAccessException
*/
protected Object invoke(
Bindings bindings,
ELContext context,
Object base,
Method method
) throws InvocationTargetException, IllegalAccessException {
Class<?>[] types = method.getParameterTypes();
Object[] params = null;
if (types.length > 0) {
params = new Object[types.length];
if (varargs && method.isVarArgs()) {
for (int i = 0; i < params.length - 1; i++) {
Object param = getParam(i).eval(bindings, context);
if (param != null || types[i].isPrimitive()) {
params[i] = bindings.convert(param, types[i]);
}
}
int varargIndex = types.length - 1;
Class<?> varargType = types[varargIndex].getComponentType();
int length = getParamCount() - varargIndex;
Object array = null;
if (length == 1) { // special: eventually use argument as is
Object param = getParam(varargIndex)
.eval(bindings, context);
if (param != null && param.getClass().isArray()) {
if (types[varargIndex].isInstance(param)) {
array = param;
} else { // coerce array elements
length = Array.getLength(param);
array = Array.newInstance(varargType, length);
for (int i = 0; i < length; i++) {
Object elem = Array.get(param, i);
if (elem != null || varargType.isPrimitive()) {
Array.set(
array,
i,
bindings.convert(elem, varargType)
);
}
}
}
} else { // single element array
array = Array.newInstance(varargType, 1);
if (param != null || varargType.isPrimitive()) {
Array.set(
array,
0,
bindings.convert(param, varargType)
);
}
}
} else {
array = Array.newInstance(varargType, length);
for (int i = 0; i < length; i++) {
Object param = getParam(varargIndex + i)
.eval(bindings, context);
if (param != null || varargType.isPrimitive()) {
Array.set(
array,
i,
bindings.convert(param, varargType)
);
}
}
}
params[varargIndex] = array;
} else {
for (int i = 0; i < params.length; i++) {
Object param = getParam(i).eval(bindings, context);
if (param != null || types[i].isPrimitive()) {
params[i] = bindings.convert(param, types[i]);
}
}
}
}
return method.invoke(base, params);
} | 3.68 |
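The core of the varargs branch above is packing the trailing arguments into an array of the vararg component type before calling Method.invoke. A simplified, self-contained sketch of just that packing step; the EL-style type coercion via Bindings is omitted:

```java
import java.lang.reflect.Array;
import java.lang.reflect.Method;

// Packs trailing arguments into a varargs array and invokes the target reflectively.
public final class VarargsInvoke {

    static String join(String sep, String... parts) {
        return String.join(sep, parts);
    }

    public static void main(String[] args) throws Exception {
        Method m = VarargsInvoke.class.getDeclaredMethod("join", String.class, String[].class);
        Object[] rawArgs = {", ", "a", "b", "c"};   // as they might arrive from an expression

        Class<?>[] types = m.getParameterTypes();
        int varargIndex = types.length - 1;
        Class<?> componentType = types[varargIndex].getComponentType();

        Object[] params = new Object[types.length];
        System.arraycopy(rawArgs, 0, params, 0, varargIndex);     // copy the fixed parameters
        int length = rawArgs.length - varargIndex;
        Object array = Array.newInstance(componentType, length);  // pack the rest into an array
        for (int i = 0; i < length; i++) {
            Array.set(array, i, rawArgs[varargIndex + i]);
        }
        params[varargIndex] = array;

        System.out.println(m.invoke(null, params));               // prints "a, b, c"
    }
}
```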
flink_KvStateLocation_unregisterKvState | /**
* Unregisters a KvState instance for the given key group range.
*
* @param keyGroupRange Key group range to unregister.
* @throws IndexOutOfBoundsException If key group range start < 0 or key group range end >=
* Number of key groups
* @throws IllegalArgumentException If no location information registered for a key group index
* in the range.
*/
void unregisterKvState(KeyGroupRange keyGroupRange) {
if (keyGroupRange.getStartKeyGroup() < 0
|| keyGroupRange.getEndKeyGroup() >= numKeyGroups) {
throw new IndexOutOfBoundsException("Key group index");
}
for (int kgIdx = keyGroupRange.getStartKeyGroup();
kgIdx <= keyGroupRange.getEndKeyGroup();
++kgIdx) {
if (kvStateIds[kgIdx] == null || kvStateAddresses[kgIdx] == null) {
throw new IllegalArgumentException(
"Not registered. Probably registration/unregistration race.");
}
numRegisteredKeyGroups--;
kvStateIds[kgIdx] = null;
kvStateAddresses[kgIdx] = null;
}
} | 3.68 |
hbase_BaseSourceImpl_incCounters | /**
* Increment a named counter by some value.
* @param key the name of the counter
* @param delta the amount to increment
*/
@Override
public void incCounters(String key, long delta) {
MutableFastCounter counter = metricsRegistry.getCounter(key, 0L);
counter.incr(delta);
} | 3.68 |
framework_DataCommunicator_addActiveData | /**
* Adds given objects as currently active objects.
*
* @param dataObjects
* collection of new active data objects
*/
public void addActiveData(Stream<T> dataObjects) {
dataObjects.map(getKeyMapper()::key)
.filter(key -> !activeData.contains(key))
.forEach(activeData::add);
} | 3.68 |
flink_StateTable_transform | /**
* Applies the given {@link StateTransformationFunction} to the state (1st input argument),
* using the given value as second input argument. The result of {@link
* StateTransformationFunction#apply(Object, Object)} is then stored as the new state. This
* function is basically an optimization for get-update-put pattern.
*
* @param namespace the namespace. Not null.
* @param value the value to use in transforming the state. Can be null.
* @param transformation the transformation function.
* @throws Exception if some exception happens in the transformation function.
*/
public <T> void transform(
N namespace, T value, StateTransformationFunction<S, T> transformation)
throws Exception {
K key = keyContext.getCurrentKey();
checkKeyNamespacePreconditions(key, namespace);
int keyGroup = keyContext.getCurrentKeyGroupIndex();
getMapForKeyGroup(keyGroup).transform(key, namespace, value, transformation);
} | 3.68 |
hadoop_MawoConfiguration_getZKAcl | /**
* Get ZooKeeper Acls.
* @return value of ZooKeeper.acl
*/
public String getZKAcl() {
return configsMap.get(ZK_ACL);
} | 3.68 |
hadoop_ApplicationMaster_init | /**
* Parse command line options.
*
* @param args Command line args
* @return Whether init successful and run should be invoked
* @throws ParseException on error while parsing options
*/
public boolean init(String[] args) throws ParseException {
Options opts = new Options();
AMOptions.setOptions(opts);
CommandLine cliParser = new GnuParser().parse(opts, args);
if (args.length == 0) {
printUsage(opts);
throw new IllegalArgumentException(
"No args specified for application master to initialize");
}
if (cliParser.hasOption("help")) {
printUsage(opts);
return false;
}
Map<String, String> envs = System.getenv();
remoteStoragePath = new Path(
envs.get(DynoConstants.REMOTE_STORAGE_PATH_ENV));
applicationAcls = new HashMap<>();
applicationAcls.put(ApplicationAccessType.VIEW_APP,
envs.get(DynoConstants.JOB_ACL_VIEW_ENV));
launchingUser = envs.get(Environment.USER.name());
if (envs.containsKey(DynoConstants.REMOTE_NN_RPC_ADDR_ENV)) {
launchNameNode = false;
namenodeServiceRpcAddress = envs
.get(DynoConstants.REMOTE_NN_RPC_ADDR_ENV);
} else {
launchNameNode = true;
// namenodeServiceRpcAddress will be set in run() once properties are
// available
}
ContainerId containerId =
ContainerId.fromString(envs.get(Environment.CONTAINER_ID.name()));
ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();
LOG.info("Application master for app: appId={}, clusterTimestamp={}, "
+ "attemptId={}", appAttemptID.getApplicationId().getId(),
appAttemptID.getApplicationId().getClusterTimestamp(),
appAttemptID.getAttemptId());
amOptions = AMOptions.initFromParser(cliParser);
return true;
} | 3.68 |
hbase_SchemaLocking_getMetaLock | /**
* @deprecated only used for {@link RecoverMetaProcedure}. Should be removed along with
* {@link RecoverMetaProcedure}.
*/
@Deprecated
LockAndQueue getMetaLock() {
return metaLock;
} | 3.68 |
hbase_HttpServer_defineFilter | /**
* Define a filter for a context and set up default url mappings.
*/
public static void defineFilter(ServletContextHandler handler, String name, String classname,
Map<String, String> parameters, String[] urls) {
FilterHolder holder = new FilterHolder();
holder.setName(name);
holder.setClassName(classname);
if (parameters != null) {
holder.setInitParameters(parameters);
}
FilterMapping fmap = new FilterMapping();
fmap.setPathSpecs(urls);
fmap.setDispatches(FilterMapping.ALL);
fmap.setFilterName(name);
handler.getServletHandler().addFilter(holder, fmap);
} | 3.68 |
hbase_ServerManager_checkIsDead | /**
* Called when RegionServer first reports in for duty and thereafter each time it heartbeats to
* make sure it has not been marked as dead. If this server is on the dead list, reject it
* with a YouAreDeadException. If it was dead but came back with a new start code, remove the old
* entry from the dead list.
* @param what START or REPORT
*/
private void checkIsDead(final ServerName serverName, final String what)
throws YouAreDeadException {
if (this.deadservers.isDeadServer(serverName)) {
// Exact match: host name, port and start code all match with existing one of the
// dead servers. So, this server must be dead. Tell it to kill itself.
String message =
"Server " + what + " rejected; currently processing " + serverName + " as dead server";
LOG.debug(message);
throw new YouAreDeadException(message);
}
// Remove dead server with same hostname and port of newly checking in rs after master
// initialization. See HBASE-5916 for more information.
if (
(this.master == null || this.master.isInitialized())
&& this.deadservers.cleanPreviousInstance(serverName)
) {
// This server has now become alive after we marked it as dead.
// We removed its previous entry from the dead list to reflect it.
LOG.debug("{} {} came back up, removed it from the dead servers list", what, serverName);
}
} | 3.68 |
hadoop_LocatedFileStatusFetcher_registerInvalidInputError | /**
* Collect misconfigured Input errors. Errors while actually reading file info
* are reported immediately.
*/
private void registerInvalidInputError(List<IOException> errors) {
synchronized (this) {
this.invalidInputErrors.addAll(errors);
}
} | 3.68 |
flink_PythonDependencyUtils_configurePythonDependencies | /**
* Adds python dependencies to registered cache file list according to given configuration and
* returns a new configuration which contains the metadata of the registered python
* dependencies.
*
* @param cachedFiles The list used to store registered cached files.
* @param config The configuration which contains python dependency configuration.
* @return A new configuration which contains the metadata of the registered python
* dependencies.
*/
public static Configuration configurePythonDependencies(
List<Tuple2<String, DistributedCache.DistributedCacheEntry>> cachedFiles,
ReadableConfig config) {
final PythonDependencyManager pythonDependencyManager =
new PythonDependencyManager(cachedFiles, config);
final Configuration pythonDependencyConfig = new Configuration();
pythonDependencyManager.applyToConfiguration(pythonDependencyConfig);
return pythonDependencyConfig;
} | 3.68 |
hbase_AuthManager_updateTableCache | /**
* Updates the internal table permissions cache for specified table.
* @param table updated table name
* @param tablePerms new table permissions
*/
private void updateTableCache(TableName table, ListMultimap<String, Permission> tablePerms) {
PermissionCache<TablePermission> cacheToUpdate =
tableCache.getOrDefault(table, new PermissionCache<>());
clearCache(cacheToUpdate);
updateCache(tablePerms, cacheToUpdate);
tableCache.put(table, cacheToUpdate);
mtime.incrementAndGet();
} | 3.68 |
hbase_RpcExecutor_getQueueLength | /** Returns the length of the pending queue */
public int getQueueLength() {
int length = 0;
for (final BlockingQueue<CallRunner> queue : queues) {
length += queue.size();
}
return length;
} | 3.68 |
flink_ArrayColumnReader_fetchNextValue | /**
* Reads a single value from the Parquet page and puts it into lastValue. Returns a boolean
* indicating whether there are more values to read (true).
*
* @param type the element type of array
* @return boolean
*/
private boolean fetchNextValue(LogicalType type) {
int left = readPageIfNeed();
if (left > 0) {
// get the values of repetition and definitionLevel
readRepetitionAndDefinitionLevels();
// read the data if it isn't null
if (definitionLevel == maxDefLevel) {
if (isCurrentPageDictionaryEncoded) {
lastValue = dataColumn.readValueDictionaryId();
} else {
lastValue = readPrimitiveTypedRow(type);
}
} else {
lastValue = null;
}
return true;
} else {
eof = true;
return false;
}
} | 3.68 |
hbase_WALEdit_createRegionEventWALEdit | /**
* @return A meta Marker WALEdit that has a single Cell whose value is the passed in
* <code>regionEventDesc</code> serialized and whose row is this region, columnfamily is
* {@link #METAFAMILY} and qualifier is {@link #REGION_EVENT_PREFIX} +
* {@link RegionEventDescriptor#getEventType()}; for example
* HBASE::REGION_EVENT::REGION_CLOSE.
*/
public static WALEdit createRegionEventWALEdit(RegionInfo hri,
RegionEventDescriptor regionEventDesc) {
return createRegionEventWALEdit(getRowForRegion(hri), regionEventDesc);
} | 3.68 |
graphhopper_LineIntIndex_findEdgeIdsInNeighborhood | /**
* This method collects edge ids from the neighborhood of a point and puts them into foundEntries.
* <p>
* If it is called with iteration = 0, it just looks in the tile the query point is in.
* If it is called with iteration = 1, 2, ..., it will look in additional tiles further and further
* from the start tile. (In a square that grows by one pixel in all four directions per iteration).
* <p>
* See discussion at issue #221.
* <p>
*/
public void findEdgeIdsInNeighborhood(double queryLat, double queryLon, int iteration, IntConsumer foundEntries) {
int x = keyAlgo.x(queryLon);
int y = keyAlgo.y(queryLat);
for (int yreg = -iteration; yreg <= iteration; yreg++) {
int subqueryY = y + yreg;
int subqueryXA = x - iteration;
int subqueryXB = x + iteration;
if (subqueryXA >= 0 && subqueryY >= 0 && subqueryXA < indexStructureInfo.getParts() && subqueryY < indexStructureInfo.getParts()) {
long keyPart = keyAlgo.encode(subqueryXA, subqueryY) << (64 - keyAlgo.getBits());
fillIDs(keyPart, foundEntries);
}
if (iteration > 0 && subqueryXB >= 0 && subqueryY >= 0 && subqueryXB < indexStructureInfo.getParts() && subqueryY < indexStructureInfo.getParts()) {
long keyPart = keyAlgo.encode(subqueryXB, subqueryY) << (64 - keyAlgo.getBits());
fillIDs(keyPart, foundEntries);
}
}
for (int xreg = -iteration + 1; xreg <= iteration - 1; xreg++) {
int subqueryX = x + xreg;
int subqueryYA = y - iteration;
int subqueryYB = y + iteration;
if (subqueryX >= 0 && subqueryYA >= 0 && subqueryX < indexStructureInfo.getParts() && subqueryYA < indexStructureInfo.getParts()) {
long keyPart = keyAlgo.encode(subqueryX, subqueryYA) << (64 - keyAlgo.getBits());
fillIDs(keyPart, foundEntries);
}
if (subqueryX >= 0 && subqueryYB >= 0 && subqueryX < indexStructureInfo.getParts() && subqueryYB < indexStructureInfo.getParts()) {
long keyPart = keyAlgo.encode(subqueryX, subqueryYB) << (64 - keyAlgo.getBits());
fillIDs(keyPart, foundEntries);
}
}
} | 3.68 |
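The two loops above enumerate exactly the square ring of tiles at distance iteration around the start tile: the first loop walks the left and right edges, the second the remaining top and bottom edges. A standalone sketch of the same iteration without the spatial-key and bounds machinery:

```java
import java.util.ArrayList;
import java.util.List;

// Enumerates the tiles on the square ring of "radius" iteration around (x, y).
public final class RingTiles {

    static List<int[]> ring(int x, int y, int iteration) {
        List<int[]> tiles = new ArrayList<>();
        for (int yreg = -iteration; yreg <= iteration; yreg++) {
            tiles.add(new int[] {x - iteration, y + yreg});                    // left edge
            if (iteration > 0) tiles.add(new int[] {x + iteration, y + yreg}); // right edge
        }
        for (int xreg = -iteration + 1; xreg <= iteration - 1; xreg++) {
            tiles.add(new int[] {x + xreg, y - iteration});                    // top edge
            tiles.add(new int[] {x + xreg, y + iteration});                    // bottom edge
        }
        return tiles;
    }

    public static void main(String[] args) {
        System.out.println(ring(5, 5, 0).size()); // 1 tile: just the start tile
        System.out.println(ring(5, 5, 1).size()); // 8 tiles: the surrounding ring
        System.out.println(ring(5, 5, 2).size()); // 16 tiles: the next ring out
    }
}
```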
flink_BulkIterationBase_getTerminationCriterion | /** @return The operator representing the termination criterion. */
public Operator<?> getTerminationCriterion() {
return this.terminationCriterion;
} | 3.68 |
hadoop_CommonCallableSupplier_waitForCompletionIgnoringExceptions | /**
* Wait for a single future to complete, ignoring any exceptions raised.
* @param future future to wait for.
* @param <T> Generics Type T.
*/
public static <T> void waitForCompletionIgnoringExceptions(
@Nullable final CompletableFuture<T> future) {
if (future != null) {
try (DurationInfo ignore = new DurationInfo(LOG, false,
"Waiting for task completion")) {
future.join();
} catch (Exception e) {
LOG.debug("Ignoring exception raised in task completion: ");
}
}
} | 3.68 |
morf_MySqlDialect_dropStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#dropStatements(org.alfasoftware.morf.metadata.View)
*/
@Override
public Collection<String> dropStatements(View view) {
return Arrays.asList("DROP VIEW IF EXISTS `" + view.getName() + "`");
} | 3.68 |
framework_Slider_setOrientation | /**
* Sets the orientation of the slider.
*
* @param orientation
* the new orientation, either
* {@link SliderOrientation#HORIZONTAL} or
* {@link SliderOrientation#VERTICAL}
*/
public void setOrientation(SliderOrientation orientation) {
getState().orientation = orientation;
} | 3.68 |
hudi_HoodieBaseFileGroupRecordBuffer_extractRecordPositions | /**
* Extract the record positions from a log block header.
*
* @param logBlock the log block to read record positions from
* @return the list of record positions stored in the block header
* @throws IOException
*/
protected static List<Long> extractRecordPositions(HoodieLogBlock logBlock) throws IOException {
List<Long> blockPositions = new ArrayList<>();
Roaring64NavigableMap positions = logBlock.getRecordPositions();
if (positions == null || positions.isEmpty()) {
throw new HoodieValidationException("No record position info is found when attempt to do position based merge.");
}
Iterator<Long> iterator = positions.iterator();
while (iterator.hasNext()) {
blockPositions.add(iterator.next());
}
if (blockPositions.isEmpty()) {
throw new HoodieCorruptedDataException("No positions are extracted.");
}
return blockPositions;
} | 3.68 |
hbase_WALPrettyPrinter_disableValues | /**
* turns value output off
*/
public void disableValues() {
outputValues = false;
} | 3.68 |
hadoop_YarnRegistryViewForProviders_putComponent | /**
* Add a component.
* @param serviceClass service class to use under ~user
* @param serviceName service name
* @param componentName component name
* @param record record to put
* @throws IOException
*/
public void putComponent(String serviceClass,
String serviceName,
String componentName,
ServiceRecord record) throws IOException {
String path = RegistryUtils.componentPath(
user, serviceClass, serviceName, componentName);
String parentPath = RegistryPathUtils.parentOf(path);
if (!registryOperations.exists(parentPath)) {
registryOperations.mknode(parentPath, true);
}
registryOperations.bind(path, record, BindFlags.OVERWRITE);
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_getMetricsRegistryInfo | /**
* Get the metrics registry information.
* @return The metrics registry information.
*/
public MetricsInfo getMetricsRegistryInfo() {
return registry.info();
} | 3.68 |
incubator-hugegraph-toolchain_ResultSet_parseResultClass | /**
* TODO: Still need to constantly add and optimize
*/
private Class<?> parseResultClass(Object object) {
if (object.getClass().equals(LinkedHashMap.class)) {
@SuppressWarnings("unchecked")
Map<String, Object> map = (Map<String, Object>) object;
String type = (String) map.get("type");
if (type != null) {
if ("vertex".equals(type)) {
return Vertex.class;
} else if ("edge".equals(type)) {
return Edge.class;
}
} else {
if (map.get("labels") != null) {
return Path.class;
}
}
}
return object.getClass();
} | 3.68 |
flink_DataStream_getType | /**
* Gets the type of the stream.
*
* @return The type of the datastream.
*/
public TypeInformation<T> getType() {
return transformation.getOutputType();
} | 3.68 |
morf_SqlDialect_useInsertBatching | /**
* Whether insert statement batching should be used for this dialect to
* improve performance.
*
* @return <var>true</var> if code should use statement batches to reduce
* overhead when bulk inserting data.
*/
public boolean useInsertBatching() {
return true;
} | 3.68 |
flink_ScalaProductFieldAccessorFactory_load | /**
* Loads the implementation, if it is accessible.
*
* @param log Logger to be used in case the loading fails
* @return Loaded implementation, if it is accessible.
*/
static ScalaProductFieldAccessorFactory load(Logger log) {
try {
final Object factory =
Class.forName(
"org.apache.flink.streaming.util.typeutils.DefaultScalaProductFieldAccessorFactory")
.getDeclaredConstructor()
.newInstance();
return (ScalaProductFieldAccessorFactory) factory;
} catch (Exception e) {
log.debug("Unable to load Scala API extension.", e);
return null;
}
} | 3.68 |
streampipes_PrimitivePropertyBuilder_create | /**
* A builder class helping to define advanced primitive properties. For simple property definitions, you can also
* use {@link org.apache.streampipes.sdk.helpers.EpProperties}.
*
* @param datatype The primitive {@link org.apache.streampipes.sdk.utils.Datatypes} definition of the new property.
* @param runtimeName The name of the property at runtime (e.g., the field name of the JSON primitive).
* @return this
*/
public static PrimitivePropertyBuilder create(Datatypes datatype, String runtimeName) {
return new PrimitivePropertyBuilder(datatype, runtimeName);
} | 3.68 |
streampipes_TextDocumentStatistics_avgNumWords | /**
* Returns the average number of words at block-level (= overall number of words divided by the
* number of blocks).
*
* @return Average
*/
public float avgNumWords() {
return numWords / (float) numBlocks;
} | 3.68 |
flink_DependencyTree_getKey | /**
* We don't use the {@link Dependency} as a key because we don't want lookups to be dependent on
* scope or the optional flag.
*
* @param dependency
* @return
*/
@VisibleForTesting
static String getKey(Dependency dependency) {
return dependency.getGroupId()
+ ":"
+ dependency.getArtifactId()
+ ":"
+ dependency.getVersion()
+ ":"
+ dependency.getClassifier().orElse("(no-classifier)");
} | 3.68 |
framework_VErrorMessage_updateErrorLevel | /**
* Sets the correct error level style name for the error message and removes
* all previous style names.
*
* @param errorLevel
* error level
* @since 8.2
*/
public void updateErrorLevel(ErrorLevel errorLevel) {
ErrorUtil.setErrorLevelStyle(getStyleElement(), CLASSNAME, errorLevel);
} | 3.68 |
hadoop_TypedBytesOutput_write | /**
* Writes a Java object as a typed bytes sequence.
*
* @param obj the object to be written
* @throws IOException
*/
public void write(Object obj) throws IOException {
if (obj instanceof Buffer) {
writeBytes((Buffer) obj);
} else if (obj instanceof Byte) {
writeByte((Byte) obj);
} else if (obj instanceof Boolean) {
writeBool((Boolean) obj);
} else if (obj instanceof Integer) {
writeInt((Integer) obj);
} else if (obj instanceof Long) {
writeLong((Long) obj);
} else if (obj instanceof Float) {
writeFloat((Float) obj);
} else if (obj instanceof Double) {
writeDouble((Double) obj);
} else if (obj instanceof String) {
writeString((String) obj);
} else if (obj instanceof ArrayList) {
writeVector((ArrayList) obj);
} else if (obj instanceof List) {
writeList((List) obj);
} else if (obj instanceof Map) {
writeMap((Map) obj);
} else {
throw new RuntimeException("cannot write objects of this type");
}
} | 3.68 |
hadoop_OBSCommonUtils_keyToPath | /**
* Convert a path back to a key.
*
* @param key input key
* @return the path from this key
*/
static Path keyToPath(final String key) {
return new Path("/" + key);
} | 3.68 |
hbase_RegionMover_stripMaster | /**
* Exclude master from list of RSs to move regions to
*/
private void stripMaster(List<ServerName> regionServers) throws IOException {
ServerName master = admin.getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
stripServer(regionServers, master.getHostname(), master.getPort());
} | 3.68 |
flink_JobMasterId_generate | /** Generates a new random JobMasterId. */
public static JobMasterId generate() {
return new JobMasterId();
} | 3.68 |
flink_DeclarativeSlotManager_internalTryAllocateSlots | /**
* Tries to allocate slots for the given requirement. If there are not enough slots available,
* the resource manager is informed to allocate more resources.
*
* @param jobId job to allocate slots for
* @param targetAddress address of the jobmaster
* @param resourceRequirement required slots
* @return the number of missing slots
*/
private int internalTryAllocateSlots(
JobID jobId, String targetAddress, ResourceRequirement resourceRequirement) {
final ResourceProfile requiredResource = resourceRequirement.getResourceProfile();
// Use LinkedHashMap to retain the original order
final Map<SlotID, TaskManagerSlotInformation> availableSlots = new LinkedHashMap<>();
for (TaskManagerSlotInformation freeSlot : slotTracker.getFreeSlots()) {
if (!isBlockedTaskManager(freeSlot.getTaskManagerConnection().getResourceID())) {
availableSlots.put(freeSlot.getSlotId(), freeSlot);
}
}
int numUnfulfilled = 0;
for (int x = 0; x < resourceRequirement.getNumberOfRequiredSlots(); x++) {
final Optional<TaskManagerSlotInformation> reservedSlot =
slotMatchingStrategy.findMatchingSlot(
requiredResource,
availableSlots.values(),
this::getNumberRegisteredSlotsOf);
if (reservedSlot.isPresent()) {
allocateSlot(reservedSlot.get(), jobId, targetAddress, requiredResource);
availableSlots.remove(reservedSlot.get().getSlotId());
} else {
// exit loop early; we won't find a matching slot for this requirement
int numRemaining = resourceRequirement.getNumberOfRequiredSlots() - x;
numUnfulfilled += numRemaining;
break;
}
}
return numUnfulfilled;
} | 3.68 |
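Stripped of slot matching and resource profiles, the loop above is a greedy allocation that returns how many required slots could not be fulfilled. A greatly simplified sketch of that control flow; the slot names are placeholders and the matching strategy is omitted:

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

// Greedy allocation: take free slots one by one, count the shortfall once none are left.
public final class GreedyAllocation {

    static int tryAllocate(int requiredSlots, Deque<String> freeSlots) {
        int unfulfilled = 0;
        for (int i = 0; i < requiredSlots; i++) {
            String slot = freeSlots.poll();
            if (slot == null) {
                unfulfilled = requiredSlots - i; // no match: everything remaining stays unfulfilled
                break;
            }
            System.out.println("allocated " + slot);
        }
        return unfulfilled;
    }

    public static void main(String[] args) {
        Deque<String> freeSlots = new ArrayDeque<>(List.of("slot-1", "slot-2"));
        System.out.println("missing: " + tryAllocate(3, freeSlots)); // prints "missing: 1"
    }
}
```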
framework_CalendarTest_resetTime | /*
* Resets the calendar time (hour, minute second and millisecond) either to
* zero or maximum value.
*/
private void resetTime(boolean max) {
if (max) {
calendar.set(GregorianCalendar.HOUR_OF_DAY,
calendar.getMaximum(GregorianCalendar.HOUR_OF_DAY));
calendar.set(GregorianCalendar.MINUTE,
calendar.getMaximum(GregorianCalendar.MINUTE));
calendar.set(GregorianCalendar.SECOND,
calendar.getMaximum(GregorianCalendar.SECOND));
calendar.set(GregorianCalendar.MILLISECOND,
calendar.getMaximum(GregorianCalendar.MILLISECOND));
} else {
calendar.set(GregorianCalendar.HOUR_OF_DAY, 0);
calendar.set(GregorianCalendar.MINUTE, 0);
calendar.set(GregorianCalendar.SECOND, 0);
calendar.set(GregorianCalendar.MILLISECOND, 0);
}
} | 3.68 |
hbase_HBaseRpcControllerImpl_cellScanner | /** Returns a one-shot cell scanner (you cannot back it up and restart). */
@Override
public CellScanner cellScanner() {
return cellScanner;
} | 3.68 |
hbase_HFileReaderImpl_updateCurrentBlock | /**
* Updates the current block to be the given {@link HFileBlock}. Seeks to the first
* key/value pair.
* @param newBlock the block to make current, and read by {@link HFileReaderImpl#readBlock},
* it's a totally new block with a newly allocated {@link ByteBuff}, so if there is no
* further reference to this block, we should release it carefully.
*/
@Override
protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
try {
// sanity checks
if (newBlock.getBlockType() != BlockType.ENCODED_DATA) {
throw new IllegalStateException("EncodedScanner works only on encoded data blocks");
}
short dataBlockEncoderId = newBlock.getDataBlockEncodingId();
if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
String encoderCls = dataBlockEncoder.getClass().getName();
throw new CorruptHFileException(
"Encoder " + encoderCls + " doesn't support data block encoding "
+ DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath());
}
updateCurrBlockRef(newBlock);
ByteBuff encodedBuffer = getEncodedBuffer(newBlock);
seeker.setCurrentBuffer(encodedBuffer);
} finally {
releaseIfNotCurBlock(newBlock);
}
// Reset the next indexed key
this.nextIndexedKey = null;
} | 3.68 |
hbase_CoprocessorBlockingRpcCallback_get | /**
* Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
* passed. When used asynchronously, this method will block until the {@link #run(Object)} method
* has been called.
* @return the response object or {@code null} if no response was passed
*/
public synchronized R get() throws IOException {
while (!resultSet) {
try {
this.wait();
} catch (InterruptedException ie) {
InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
exception.initCause(ie);
throw exception;
}
}
return result;
} | 3.68 |
hadoop_LongValueSum_getReport | /**
* @return the string representation of the aggregated value
*/
public String getReport() {
return ""+sum;
} | 3.68 |
hbase_FSWALEntry_stampRegionSequenceId | /**
* Here is where a WAL edit gets its sequenceid. SIDE-EFFECT is our stamping the sequenceid into
* every Cell AND setting the sequenceid into the MVCC WriteEntry!!!!
* @return The sequenceid we stamped on this edit.
*/
long stampRegionSequenceId(MultiVersionConcurrencyControl.WriteEntry we) throws IOException {
long regionSequenceId = we.getWriteNumber();
if (!this.getEdit().isReplay() && inMemstore) {
for (Cell c : getEdit().getCells()) {
PrivateCellUtil.setSequenceId(c, regionSequenceId);
}
}
getKey().setWriteEntry(we);
return regionSequenceId;
} | 3.68 |
hbase_CoprocessorHost_handleCoprocessorThrowable | /**
* This is used by coprocessor hooks which are declared to throw IOException (or its subtypes).
* For such hooks, we should handle throwable objects depending on the Throwable's type. Those
* which are instances of IOException should be passed on to the client. This is in conformance
* with the HBase idiom regarding IOException: that it represents a circumstance that should be
* passed along to the client for its own handling. For example, a coprocessor that implements
* access controls would throw a subclass of IOException, such as AccessDeniedException, in its
* preGet() method to prevent an unauthorized client's performing a Get on a particular table.
* @param env Coprocessor Environment
* @param e Throwable object thrown by coprocessor.
* @exception IOException Exception
*/
// Note to devs: Class comments of all observers ({@link MasterObserver}, {@link WALObserver},
// etc) mention this nuance of our exception handling so that coprocessor can throw appropriate
// exceptions depending on situation. If any changes are made to this logic, make sure to
// update all classes' comments.
protected void handleCoprocessorThrowable(final E env, final Throwable e) throws IOException {
if (e instanceof IOException) {
throw (IOException) e;
}
// If we got here, e is not an IOException. A loaded coprocessor has a
// fatal bug, and the server (master or regionserver) should remove the
// faulty coprocessor from its set of active coprocessors. Setting
// 'hbase.coprocessor.abortonerror' to true will cause abortServer(),
// which may be useful in development and testing environments where
// 'failing fast' for error analysis is desired.
if (env.getConfiguration().getBoolean(ABORT_ON_ERROR_KEY, DEFAULT_ABORT_ON_ERROR)) {
// server is configured to abort.
abortServer(env, e);
} else {
// If available, pull a table name out of the environment
if (env instanceof RegionCoprocessorEnvironment) {
String tableName =
((RegionCoprocessorEnvironment) env).getRegionInfo().getTable().getNameAsString();
LOG.error("Removing coprocessor '" + env.toString() + "' from table '" + tableName + "'",
e);
} else {
LOG.error("Removing coprocessor '" + env.toString() + "' from " + "environment", e);
}
coprocEnvironments.remove(env);
try {
shutdown(env);
} catch (Exception x) {
LOG.error("Uncaught exception when shutting down coprocessor '" + env.toString() + "'", x);
}
throw new DoNotRetryIOException("Coprocessor: '" + env.toString() + "' threw: '" + e
+ "' and has been removed from the active " + "coprocessor set.", e);
}
} | 3.68 |
hbase_JobUtil_getStagingDir | /**
* Initializes the staging directory and returns the path.
* @param conf system configuration
* @return staging directory path
* @throws IOException if the ownership on the staging directory is not as expected
* @throws InterruptedException if the thread getting the staging directory is interrupted
*/
public static Path getStagingDir(Configuration conf) throws IOException, InterruptedException {
return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
} | 3.68 |
hbase_TableDescriptorBuilder_isCompactionEnabled | /**
* Check if the compaction enable flag of the table is true. If the flag is false then no
* minor/major compactions will actually be performed.
* @return true if table compaction enabled
*/
@Override
public boolean isCompactionEnabled() {
return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
} | 3.68 |
hbase_CatalogJanitorReport_getOverlaps | /**
* @return Overlap pairs found as we scanned hbase:meta; ordered by hbase:meta table sort. Pairs
* of overlaps may overlap with subsequent pairs.
* @see MetaFixer#calculateMerges(int, List) where we aggregate overlaps for a single 'merge'
* call.
*/
public List<Pair<RegionInfo, RegionInfo>> getOverlaps() {
return this.overlaps;
} | 3.68 |
pulsar_BrokerService_unloadNamespaceBundlesGracefully | /**
* It unloads all owned namespace bundles gracefully.
* <ul>
* <li>First it makes the current broker unavailable and isolates it from the cluster so it will not serve any new
* requests.</li>
* <li>Second it starts unloading namespace bundles one by one without closing the connection in order to avoid
* disruption for other namespace bundles which are sharing the same connection from the same client.</li>
* </ul>
*/
public void unloadNamespaceBundlesGracefully() {
unloadNamespaceBundlesGracefully(0, true);
} | 3.68 |
AreaShop_FileManager_saveRequiredFiles | /**
* Save all region related files spread over time (low load).
*/
public void saveRequiredFiles() {
if(isSaveGroupsRequired()) {
saveGroupsNow();
}
this.saveWorldGuardRegions();
Do.forAll(
plugin.getConfig().getInt("saving.regionsPerTick"),
getRegions(),
region -> {
if(region.isSaveRequired()) {
region.saveNow();
}
}
);
} | 3.68 |
framework_AbstractListing_refresh | /**
* A helper method for refreshing the client-side representation of a
* single data item.
*
* @param item
* the item to refresh
*/
protected void refresh(T item) {
getParent().getDataCommunicator().refresh(item);
} | 3.68 |
druid_MySqlStatementParser_parseAssign | /**
* parse assign statement
*/
public SQLSetStatement parseAssign() {
accept(Token.SET);
SQLSetStatement stmt = new SQLSetStatement(getDbType());
parseAssignItems(stmt.getItems(), stmt);
return stmt;
} | 3.68 |
shardingsphere-elasticjob_DataSourceRegistry_registerDataSource | /**
* Register data source.
*
* @param dataSourceConfig data source configuration
* @param dataSource data source
*/
public void registerDataSource(final RDBTracingStorageConfiguration dataSourceConfig, final DataSource dataSource) {
dataSources.putIfAbsent(dataSourceConfig, dataSource);
} | 3.68 |
hbase_RestoreSnapshotHelper_getRegionsToRestore | /**
* Returns the list of 'restored regions' during the on-disk restore. The caller is responsible
* for adding the regions to hbase:meta if not present.
* @return the list of regions restored
*/
public List<RegionInfo> getRegionsToRestore() {
return this.regionsToRestore;
} | 3.68 |
zxing_MatrixUtil_isEmpty | // Check if "value" is empty.
private static boolean isEmpty(int value) {
return value == -1;
} | 3.68 |
hbase_ConfigurationManager_registerObserver | /**
* Register an observer class
* @param observer observer to be registered.
*/
public void registerObserver(ConfigurationObserver observer) {
synchronized (configurationObservers) {
configurationObservers.add(observer);
if (observer instanceof PropagatingConfigurationObserver) {
((PropagatingConfigurationObserver) observer).registerChildren(this);
}
}
} | 3.68 |
flink_KeyGroupRange_getIntersection | /**
* Create a range that represent the intersection between this range and the given range.
*
* @param other A KeyGroupRange to intersect.
* @return Key-group range that is the intersection between this and the given key-group range.
*/
public KeyGroupRange getIntersection(KeyGroupRange other) {
int start = Math.max(startKeyGroup, other.startKeyGroup);
int end = Math.min(endKeyGroup, other.endKeyGroup);
return start <= end ? new KeyGroupRange(start, end) : EMPTY_KEY_GROUP_RANGE;
} | 3.68 |
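The intersection above is plain interval arithmetic: take the larger start and the smaller end, and treat start > end as an empty result. A tiny worked example with plain ints instead of KeyGroupRange:

```java
// Interval intersection: the same max/min clamp used by KeyGroupRange.getIntersection.
public final class RangeIntersection {

    /** Returns {start, end} of the intersection, or null if the ranges do not overlap. */
    static int[] intersect(int startA, int endA, int startB, int endB) {
        int start = Math.max(startA, startB);
        int end = Math.min(endA, endB);
        return start <= end ? new int[] {start, end} : null;
    }

    public static void main(String[] args) {
        int[] r = intersect(0, 9, 5, 15);
        System.out.println(r[0] + ".." + r[1]);              // 5..9
        System.out.println(intersect(0, 4, 10, 12) == null); // true: disjoint ranges
    }
}
```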
framework_FocusableScrollPanel_getHorizontalScrollPosition | /**
* Gets the horizontal scroll position.
*
* @return the horizontal scroll position, in pixels
*/
public int getHorizontalScrollPosition() {
return getElement().getScrollLeft();
} | 3.68 |
flink_HsSubpartitionMemoryDataManager_spillSubpartitionBuffers | /**
* Spill this subpartition's buffers as part of one spilling decision.
*
* @param toSpill all buffers belonging to this subpartition that need to be spilled in this decision.
* @param spillDoneFuture completed when spill is finished.
* @return {@link BufferWithIdentity}s about these spill buffers.
*/
@SuppressWarnings("FieldAccessNotGuarded")
// Note that: callWithLock ensure that code block guarded by resultPartitionReadLock and
// subpartitionLock.
public List<BufferWithIdentity> spillSubpartitionBuffers(
List<BufferIndexAndChannel> toSpill, CompletableFuture<Void> spillDoneFuture) {
return callWithLock(
() ->
toSpill.stream()
.map(
indexAndChannel -> {
int bufferIndex = indexAndChannel.getBufferIndex();
return startSpillingBuffer(bufferIndex, spillDoneFuture)
.map(
(context) ->
new BufferWithIdentity(
context.getBuffer(),
bufferIndex,
targetChannel));
})
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toList()));
} | 3.68 |
hadoop_HttpFSServerWebApp_getMetrics | /**
* gets the HttpFSServerMetrics instance.
* @return the HttpFSServerMetrics singleton.
*/
public static HttpFSServerMetrics getMetrics() {
return metrics;
} | 3.68 |
hmily_XaResourcePool_getAllResource | /**
* Gets all resource.
*
* @param globalId the global id
* @return the all resource
*/
public List<XaResourceWrapped> getAllResource(final String globalId) {
Set<Xid> xids = this.xids.get(globalId);
if (xids != null) {
return xids.stream().map(this::getResource).collect(Collectors.toList());
}
return Collections.emptyList();
} | 3.68 |
hmily_GsonUtils_toJson | /**
* To json string.
*
* @param object the object
* @return the string
*/
public String toJson(final Object object) {
return GSON.toJson(object);
} | 3.68 |
hudi_HoodieSyncClient_getAllPartitionPathsOnStorage | /**
* Gets all relative partitions paths in the Hudi table on storage.
*
* @return All relative partitions paths.
*/
public List<String> getAllPartitionPathsOnStorage() {
HoodieLocalEngineContext engineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
return FSUtils.getAllPartitionPaths(engineContext,
config.getString(META_SYNC_BASE_PATH),
config.getBoolean(META_SYNC_USE_FILE_LISTING_FROM_METADATA));
} | 3.68 |
hadoop_GangliaConf_setTmax | /**
* @param tmax the tmax to set
*/
void setTmax(int tmax) {
this.tmax = tmax;
} | 3.68 |
hudi_InternalSchemaBuilder_buildIdToName | /**
* Build a mapping from id to full field name for an internal Type.
* If a field y belongs to a struct field x, then the full name of y is x.y.
*
* @param type hoodie internal type
* @return a mapping from id to full field name
*/
public Map<Integer, String> buildIdToName(Type type) {
Map<Integer, String> result = new HashMap<>();
buildNameToId(type).forEach((k, v) -> result.put(v, k));
return result;
} | 3.68 |
flink_BackgroundTask_finishedBackgroundTask | /**
* Creates a finished background task which can be used as the start of a background task chain.
*
* @param <V> type of the background task
* @return A finished background task
*/
static <V> BackgroundTask<V> finishedBackgroundTask() {
return new BackgroundTask<>();
} | 3.68 |
shardingsphere-elasticjob_ExecutionService_getAllRunningItems | /**
* Get all running items with instance.
*
* @return running items with instance.
*/
public Map<Integer, JobInstance> getAllRunningItems() {
int shardingTotalCount = configService.load(true).getShardingTotalCount();
Map<Integer, JobInstance> result = new LinkedHashMap<>(shardingTotalCount, 1);
for (int i = 0; i < shardingTotalCount; i++) {
String data = jobNodeStorage.getJobNodeData(ShardingNode.getRunningNode(i));
if (!Strings.isNullOrEmpty(data)) {
result.put(i, new JobInstance(data));
}
}
return result;
} | 3.68 |
pulsar_ReaderConfiguration_getReaderName | /**
* @return the reader name
*/
public String getReaderName() {
return conf.getReaderName();
} | 3.68 |
hbase_WALProvider_getPeerActionListener | // sync replication related
default PeerActionListener getPeerActionListener() {
return PeerActionListener.DUMMY;
} | 3.68 |
morf_ConnectionResourcesBean_getFetchSizeForBulkSelects | /**
* @see ConnectionResources#getFetchSizeForBulkSelects()
*/
@Override
public Integer getFetchSizeForBulkSelects() {
return fetchSizeForBulkSelects;
} | 3.68 |
framework_LayoutManager_getMarginHeight | /**
* Gets the combined top & bottom margin of the given element, provided that
* they have been measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured margin for
* @return the measured top+bottom margin of the element in pixels.
*/
public int getMarginHeight(Element element) {
return getMarginTop(element) + getMarginBottom(element);
} | 3.68 |
graphhopper_PathDetailsFromEdges_calcDetails | /**
* Calculates the PathDetails for a Path. This method will return fast if there are no calculators.
*
* @param pathBuilderFactory Generates the relevant PathBuilders
* @return List of PathDetails for this Path
*/
public static Map<String, List<PathDetail>> calcDetails(Path path, EncodedValueLookup evLookup, Weighting weighting,
List<String> requestedPathDetails, PathDetailsBuilderFactory pathBuilderFactory,
int previousIndex, Graph graph) {
if (!path.isFound() || requestedPathDetails.isEmpty())
return Collections.emptyMap();
HashSet<String> uniquePD = new HashSet<>(requestedPathDetails.size());
Collection<String> res = requestedPathDetails.stream().filter(pd -> !uniquePD.add(pd)).collect(Collectors.toList());
if (!res.isEmpty()) throw new IllegalArgumentException("Do not use duplicate path details: " + res);
List<PathDetailsBuilder> pathBuilders = pathBuilderFactory.createPathDetailsBuilders(requestedPathDetails, path, evLookup, weighting, graph);
if (pathBuilders.isEmpty())
return Collections.emptyMap();
path.forEveryEdge(new PathDetailsFromEdges(pathBuilders, previousIndex));
Map<String, List<PathDetail>> pathDetails = new HashMap<>(pathBuilders.size());
for (PathDetailsBuilder builder : pathBuilders) {
Map.Entry<String, List<PathDetail>> entry = builder.build();
List<PathDetail> existing = pathDetails.put(entry.getKey(), entry.getValue());
if (existing != null)
throw new IllegalStateException("Some PathDetailsBuilders use duplicate key: " + entry.getKey());
}
return pathDetails;
} | 3.68 |
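The duplicate check near the top of the method relies on Set.add returning false for an element already seen, so filtering on the negated result collects exactly the duplicates. An isolated sketch of that idiom, using hypothetical path-detail names:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Collects duplicates from a list by filtering on Set.add's return value.
public final class FindDuplicates {
    public static void main(String[] args) {
        List<String> requested = Arrays.asList("time", "distance", "time", "max_speed");
        Set<String> seen = new HashSet<>(requested.size());
        List<String> duplicates = requested.stream()
                .filter(pd -> !seen.add(pd))   // add returns false for an already-seen element
                .collect(Collectors.toList());
        System.out.println(duplicates);        // [time]
        if (!duplicates.isEmpty()) {
            System.out.println("Do not use duplicate path details: " + duplicates);
        }
    }
}
```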
framework_ColumnConnector_getTooltipContentMode | /**
* Gets the content mode for tooltips in this column.
*
* @return the content mode.
*
* @since 8.2
*/
public ContentMode getTooltipContentMode() {
return tooltipContentMode;
} | 3.68 |
pulsar_TopicsBase_findOwnerBrokerForTopic | // Look up topic owner for given topic. Return if asyncResponse has been completed
// which indicating redirect or exception.
private boolean findOwnerBrokerForTopic(boolean authoritative, AsyncResponse asyncResponse) {
PartitionedTopicMetadata metadata = internalGetPartitionedMetadata(authoritative, false);
List<String> redirectAddresses = Collections.synchronizedList(new ArrayList<>());
CompletableFuture<Boolean> future = new CompletableFuture<>();
List<CompletableFuture<Void>> lookupFutures = new ArrayList<>();
if (!topicName.isPartitioned() && metadata.partitions > 1) {
// Partitioned topic with multiple partitions, need to do look up for each partition.
for (int index = 0; index < metadata.partitions; index++) {
lookupFutures.add(lookUpBrokerForTopic(topicName.getPartition(index),
authoritative, redirectAddresses));
}
} else {
// Non-partitioned topic or specific topic partition.
lookupFutures.add(lookUpBrokerForTopic(topicName, authoritative, redirectAddresses));
}
FutureUtil.waitForAll(lookupFutures)
.thenRun(() -> {
processLookUpResult(redirectAddresses, asyncResponse, future);
}).exceptionally(e -> {
processLookUpResult(redirectAddresses, asyncResponse, future);
return null;
});
try {
return future.get();
} catch (Exception e) {
if (log.isDebugEnabled()) {
log.debug("Fail to lookup topic for rest produce message request for topic {}.", topicName.toString());
}
if (!asyncResponse.isDone()) {
asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR, "Internal error: "
+ e.getMessage()));
}
return true;
}
} | 3.68 |
morf_Function_lowerCase | /**
* Helper method to create an instance of the <code>LOWER</code> SQL function.
* Converts all of the characters in this String to lower case using the rules
* of the default locale.
*
* @param expression the expression to evaluate.
* @return an instance of the lower function.
*/
public static Function lowerCase(AliasedField expression) {
return new Function(FunctionType.LOWER, expression);
} | 3.68 |
zxing_MinimalECIInput_getECIValue | /**
* Returns the {@code int} ECI value at the specified index. An index ranges from zero
* to {@code length() - 1}. The first {@code byte} value of the sequence is at
* index zero, the next at index one, and so on, as for array
* indexing.
*
* @param index the index of the {@code int} value to be returned
*
* @return the specified {@code int} ECI value.
* The ECI specifies the encoding of all bytes with a higher index until the
* next ECI or until the end of the input if no other ECI follows.
*
* @throws IndexOutOfBoundsException
* if the {@code index} argument is negative or not less than
* {@code length()}
* @throws IllegalArgumentException
* if the value at the {@code index} argument is not an ECI (@see #isECI)
*/
public int getECIValue(int index) {
if (index < 0 || index >= length()) {
throw new IndexOutOfBoundsException("" + index);
}
if (!isECI(index)) {
throw new IllegalArgumentException("value at " + index + " is not an ECI but a character");
}
return bytes[index] - 256;
} | 3.68 |
rocketmq-connect_AbstractConfigManagementService_triggerListener | /**
* trigger listener
*/
@Override
public void triggerListener() {
if (null == this.connectorConfigUpdateListener) {
return;
}
for (ConnectorConfigUpdateListener listener : this.connectorConfigUpdateListener) {
listener.onConfigUpdate();
}
} | 3.68 |
hbase_HFileBlockIndex_getRootBlockKey | /**
* Returns the root block key at index {@code i}, where {@code i} ranges from 0 to {@link #getRootBlockCount()} - 1.
*/
public Cell getRootBlockKey(int i) {
return seeker.getRootBlockKey(i);
} | 3.68 |
framework_MultiSelectionModelConnector_isAllSelected | /**
* Returns whether all items are selected or not.
*
* @return {@code true} if all items are selected, {@code false} if not
*/
protected boolean isAllSelected() {
return getState().selectAllCheckBoxVisible && getState().allSelected;
} | 3.68 |
hbase_HFileBlock_getHeaderAndDataForTest | /**
* Returns the header or the compressed data (or uncompressed data when not using compression)
* as a byte array. Can be called in the "writing" state or in the "block ready" state. If
* called in the "writing" state, transitions the writer to the "block ready" state. This
* returns the header + data + checksums stored on disk.
* @return header and data as they would be stored on disk in a byte array
*/
byte[] getHeaderAndDataForTest() throws IOException {
ensureBlockReady();
// This is not very optimal, because we are doing an extra copy.
// But this method is used only by unit tests.
byte[] output = new byte[onDiskBlockBytesWithHeader.size() + onDiskChecksum.length];
System.arraycopy(onDiskBlockBytesWithHeader.getBuffer(), 0, output, 0,
onDiskBlockBytesWithHeader.size());
System.arraycopy(onDiskChecksum, 0, output, onDiskBlockBytesWithHeader.size(),
onDiskChecksum.length);
return output;
} | 3.68 |
hbase_ReplicationPeers_createPeer | /**
* Helper method to connect to a peer
* @param peerId peer's identifier
* @return object representing the peer
*/
private ReplicationPeerImpl createPeer(String peerId) throws ReplicationException {
ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId);
boolean enabled = peerStorage.isPeerEnabled(peerId);
SyncReplicationState syncReplicationState = peerStorage.getPeerSyncReplicationState(peerId);
SyncReplicationState newSyncReplicationState =
peerStorage.getPeerNewSyncReplicationState(peerId);
return new ReplicationPeerImpl(ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf),
peerId, peerConfig, enabled, syncReplicationState, newSyncReplicationState);
} | 3.68 |
hbase_FilterList_addFilter | /**
* Add a filter.
* @param filter another filter
*/
public void addFilter(Filter filter) {
addFilter(Collections.singletonList(filter));
} | 3.68 |
flink_StreamOperatorFactory_setInputType | /**
* Is called by the {@link StreamGraph#addOperator} method when the {@link StreamGraph} is
* generated.
*
* @param type The data type of the input.
* @param executionConfig The execution config for this parallel execution.
*/
default void setInputType(TypeInformation<?> type, ExecutionConfig executionConfig) {} | 3.68 |
flink_LocalInputPreferredSlotSharingStrategy_getExecutionVertices | /**
* The vertices are topologically sorted since {@link DefaultExecutionTopology#getVertices}
* are topologically sorted.
*/
private LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> getExecutionVertices() {
final LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> vertices =
new LinkedHashMap<>();
for (SchedulingExecutionVertex executionVertex : topology.getVertices()) {
final List<SchedulingExecutionVertex> executionVertexGroup =
vertices.computeIfAbsent(
executionVertex.getId().getJobVertexId(), k -> new ArrayList<>());
executionVertexGroup.add(executionVertex);
}
return vertices;
} | 3.68 |