name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68)
---|---|---|
hadoop_ZoneReencryptionStatus_setZoneName | /**
* Set the zone name. The zone name is resolved from inode id and set during
* a listReencryptionStatus call, for the crypto admin to consume.
*/
public void setZoneName(final String name) {
Preconditions.checkNotNull(name, "zone name cannot be null");
zoneName = name;
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableIsWriteStopped | /** Enables the "is-write-stopped" property, which reports 1 if writes have been stopped. */
public void enableIsWriteStopped() {
this.properties.add(RocksDBProperty.IsWriteStopped.getRocksDBProperty());
} | 3.68 |
hadoop_HadoopExecutors_shutdown | /**
* Helper routine to shutdown a {@link ExecutorService}. Will wait up to a
* certain timeout for the ExecutorService to gracefully shutdown. If the
* ExecutorService did not shutdown and there are still tasks unfinished after
* the timeout period, the ExecutorService will be notified to forcibly shut
* down. Another timeout period will be waited before giving up. So, at most,
* a shutdown will be allowed to wait up to twice the timeout value before
* giving up.
*
* @param executorService ExecutorService to shutdown
* @param logger Logger
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
*/
public static void shutdown(ExecutorService executorService, Logger logger,
long timeout, TimeUnit unit) {
if (executorService == null) {
return;
}
try {
executorService.shutdown();
logger.debug("Gracefully shutting down executor service {}. Waiting max {} {}",
executorService, timeout, unit);
if (!executorService.awaitTermination(timeout, unit)) {
logger.debug(
"Executor service has not shutdown yet. Forcing. "
+ "Will wait up to an additional {} {} for shutdown",
timeout, unit);
executorService.shutdownNow();
}
if (executorService.awaitTermination(timeout, unit)) {
logger.debug("Succesfully shutdown executor service");
} else {
logger.error("Unable to shutdown executor service after timeout {} {}",
(2 * timeout), unit);
}
} catch (InterruptedException e) {
logger.error("Interrupted while attempting to shutdown", e);
executorService.shutdownNow();
} catch (Exception e) {
logger.warn("Exception closing executor service {}", e.getMessage());
logger.debug("Exception closing executor service", e);
throw e;
}
} | 3.68 |
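
A minimal usage sketch for the shutdown helper above, assuming an SLF4J logger and that the helper is importable as shown (the package name is an assumption, not confirmed by the snippet). Because of the graceful-then-forced two-phase wait, the call can block for up to twice the given timeout.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.util.concurrent.HadoopExecutors; // assumed package for the helper above
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ShutdownExample {
  private static final Logger LOG = LoggerFactory.getLogger(ShutdownExample.class);

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    pool.submit(() -> LOG.info("doing some work"));
    // Waits up to 5s for graceful termination, then up to another 5s after shutdownNow().
    HadoopExecutors.shutdown(pool, LOG, 5, TimeUnit.SECONDS);
  }
}
```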
framework_CalendarMonthDropHandler_emphasis | /**
* Add CSS style name for the currently emphasized day
*/
private void emphasis() {
if (currentTargetElement != null && currentTargetDay != null) {
currentTargetDay.addEmphasisStyle();
}
} | 3.68 |
dubbo_DynamicDirectory_getOriginalConsumerUrl | /**
* The original consumer url
*
* @return URL
*/
public URL getOriginalConsumerUrl() {
return this.consumerUrl;
} | 3.68 |
flink_StreamConfig_setVertexNonChainedOutputs | /**
   * Sets the job vertex level non-chained outputs. The given output list must be in the same order
   * as {@link JobVertex#getProducedDataSets()}.
*/
public void setVertexNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
toBeSerializedConfigObjects.put(VERTEX_NONCHAINED_OUTPUTS, nonChainedOutputs);
} | 3.68 |
hbase_ProcedureManagerHost_loadUserProcedures | /**
 * Load user procedures. Read the class names from configuration. Called by constructor.
*/
protected void loadUserProcedures(Configuration conf, String confKey) {
Class<?> implClass = null;
// load default procedures from configure file
String[] defaultProcClasses = conf.getStrings(confKey);
if (defaultProcClasses == null || defaultProcClasses.length == 0) return;
List<E> configured = new ArrayList<>();
for (String className : defaultProcClasses) {
className = className.trim();
ClassLoader cl = this.getClass().getClassLoader();
Thread.currentThread().setContextClassLoader(cl);
try {
implClass = cl.loadClass(className);
configured.add(loadInstance(implClass));
LOG.info("User procedure " + className + " was loaded successfully.");
} catch (ClassNotFoundException e) {
LOG.warn("Class " + className + " cannot be found. " + e.getMessage());
} catch (IOException e) {
LOG.warn("Load procedure " + className + " failed. " + e.getMessage());
}
}
// add entire set to the collection
procedures.addAll(configured);
} | 3.68 |
graphhopper_SpatialKeyAlgo_getBits | /**
* @return the number of involved bits
*/
public int getBits() {
return allBits;
} | 3.68 |
pulsar_AbstractDispatcherMultipleConsumers_getFirstConsumerIndexOfPriority | /**
     * Finds the index of the first consumer in the list which has the same priority as the given targetPriority.
     *
     * @param targetPriority priority level to search for
     * @return index of the first consumer with the given priority, or -1 if none is found
*/
private int getFirstConsumerIndexOfPriority(int targetPriority) {
for (int i = 0; i < consumerList.size(); i++) {
if (consumerList.get(i).getPriorityLevel() == targetPriority) {
return i;
}
}
return -1;
} | 3.68 |
flink_MathUtils_roundDownToPowerOf2 | /**
* Decrements the given number down to the closest power of two. If the argument is a power of
* two, it remains unchanged.
*
* @param value The value to round down.
* @return The closest value that is a power of two and less or equal than the given value.
*/
public static int roundDownToPowerOf2(int value) {
return Integer.highestOneBit(value);
} | 3.68 |
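
A quick standalone check of the rounding described above (not Flink code, just the same one-liner): Integer.highestOneBit keeps only the most significant set bit, which is exactly the largest power of two not exceeding the input.

```java
public class RoundDownDemo {
  static int roundDownToPowerOf2(int value) {
    return Integer.highestOneBit(value); // 17 -> 16, 64 -> 64, 1 -> 1, 0 -> 0
  }

  public static void main(String[] args) {
    System.out.println(roundDownToPowerOf2(17)); // 16
    System.out.println(roundDownToPowerOf2(64)); // 64
    System.out.println(roundDownToPowerOf2(1));  // 1
  }
}
```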
hadoop_Quota_isMountEntry | /**
* Is the path a mount entry.
*
* @param path the path to be checked.
* @return {@code true} if path is a mount entry; {@code false} otherwise.
*/
private boolean isMountEntry(String path) {
return router.getQuotaManager().isMountEntry(path);
} | 3.68 |
hbase_RequestConverter_buildRollWALWriterRequest | /**
* Create a new RollWALWriterRequest
   * @return a RollWALWriterRequest
*/
public static RollWALWriterRequest buildRollWALWriterRequest() {
return RollWALWriterRequest.getDefaultInstance();
} | 3.68 |
hadoop_SubApplicationEntity_isSubApplicationEntity | /**
   * Checks if the input TimelineEntity object is a SubApplicationEntity.
   *
   * @param te TimelineEntity object.
   * @return true if input is a SubApplicationEntity, false otherwise
*/
public static boolean isSubApplicationEntity(TimelineEntity te) {
return (te != null && te instanceof SubApplicationEntity);
} | 3.68 |
hbase_RequestConverter_buildMutateRequest | /**
* Create a protocol buffer MutateRequest for a delete
* @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete)
throws IOException {
MutateRequest.Builder builder = MutateRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setMutation(
ProtobufUtil.toMutation(MutationType.DELETE, delete, MutationProto.newBuilder()));
return builder.build();
} | 3.68 |
Activiti_ExecutionGraphUtil_orderFromRootToLeaf | /**
   * Takes in a collection of executions belonging to the same process instance. Orders the executions in a list: the first elements are the root elements, the last elements are the leaves.
*/
public static List<ExecutionEntity> orderFromRootToLeaf(Collection<ExecutionEntity> executions) {
List<ExecutionEntity> orderedList = new ArrayList<ExecutionEntity>(executions.size());
// Root elements
HashSet<String> previousIds = new HashSet<String>();
for (ExecutionEntity execution : executions) {
if (execution.getParentId() == null) {
orderedList.add(execution);
previousIds.add(execution.getId());
}
}
// Non-root elements
while (orderedList.size() < executions.size()) {
for (ExecutionEntity execution : executions) {
if (!previousIds.contains(execution.getId()) && previousIds.contains(execution.getParentId())) {
orderedList.add(execution);
previousIds.add(execution.getId());
}
}
}
return orderedList;
} | 3.68 |
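
The same ordering idea on a plain parent-pointer structure, as a self-contained sketch (the Node record here is hypothetical, not Activiti API): roots go in first, then any node whose parent is already placed, until everything is ordered. Like the original, it assumes every non-root eventually chains up to a root in the input.

```java
import java.util.*;

public class RootToLeafOrdering {
  record Node(String id, String parentId) {}

  static List<Node> orderFromRootToLeaf(Collection<Node> nodes) {
    List<Node> ordered = new ArrayList<>(nodes.size());
    Set<String> placed = new HashSet<>();
    for (Node n : nodes) {                      // roots first
      if (n.parentId() == null) { ordered.add(n); placed.add(n.id()); }
    }
    while (ordered.size() < nodes.size()) {     // then children of already-placed nodes
      for (Node n : nodes) {
        if (!placed.contains(n.id()) && placed.contains(n.parentId())) {
          ordered.add(n); placed.add(n.id());
        }
      }
    }
    return ordered;
  }

  public static void main(String[] args) {
    List<Node> nodes = List.of(new Node("c", "b"), new Node("a", null), new Node("b", "a"));
    System.out.println(orderFromRootToLeaf(nodes)); // ordered a, b, c
  }
}
```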
hbase_FavoredStochasticBalancer_generateFavoredNodesForMergedRegion | /**
* Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep
* it simple.
*/
@Override
public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents)
throws IOException {
updateFavoredNodesForRegion(merged, fnm.getFavoredNodes(mergeParents[0]));
} | 3.68 |
flink_BatchShuffleReadBufferPool_requestBuffers | /**
* Requests a collection of buffers (determined by {@link #numBuffersPerRequest}) from this
* buffer pool.
*/
public List<MemorySegment> requestBuffers() throws Exception {
List<MemorySegment> allocated = new ArrayList<>(numBuffersPerRequest);
synchronized (buffers) {
checkState(!destroyed, "Buffer pool is already destroyed.");
if (!initialized) {
initialize();
}
Deadline deadline = Deadline.fromNow(WAITING_TIME);
while (buffers.size() < numBuffersPerRequest) {
checkState(!destroyed, "Buffer pool is already destroyed.");
buffers.wait(WAITING_TIME.toMillis());
if (!deadline.hasTimeLeft()) {
return allocated; // return the empty list
}
}
while (allocated.size() < numBuffersPerRequest) {
allocated.add(buffers.poll());
}
lastBufferOperationTimestamp = System.currentTimeMillis();
}
return allocated;
} | 3.68 |
flink_FlinkRelMdCollation_project | /** Helper method to determine a {@link Project}'s collation. */
public static List<RelCollation> project(
RelMetadataQuery mq, RelNode input, List<? extends RexNode> projects) {
final SortedSet<RelCollation> collations = new TreeSet<>();
final List<RelCollation> inputCollations = mq.collations(input);
if (inputCollations == null || inputCollations.isEmpty()) {
return com.google.common.collect.ImmutableList.of();
}
final com.google.common.collect.Multimap<Integer, Integer> targets =
com.google.common.collect.LinkedListMultimap.create();
final Map<Integer, SqlMonotonicity> targetsWithMonotonicity = new HashMap<>();
for (Ord<RexNode> project : Ord.<RexNode>zip(projects)) {
if (project.e instanceof RexInputRef) {
targets.put(((RexInputRef) project.e).getIndex(), project.i);
} else if (project.e instanceof RexCall) {
final RexCall call = (RexCall) project.e;
final RexCallBinding binding =
RexCallBinding.create(
input.getCluster().getTypeFactory(), call, inputCollations);
targetsWithMonotonicity.put(project.i, call.getOperator().getMonotonicity(binding));
}
}
final List<RelFieldCollation> fieldCollations = new ArrayList<>();
loop:
for (RelCollation ic : inputCollations) {
if (ic.getFieldCollations().isEmpty()) {
continue;
}
fieldCollations.clear();
for (RelFieldCollation ifc : ic.getFieldCollations()) {
final Collection<Integer> integers = targets.get(ifc.getFieldIndex());
if (integers.isEmpty()) {
continue loop; // cannot do this collation
}
fieldCollations.add(ifc.withFieldIndex(integers.iterator().next()));
}
assert !fieldCollations.isEmpty();
collations.add(RelCollations.of(fieldCollations));
}
final List<RelFieldCollation> fieldCollationsForRexCalls = new ArrayList<>();
for (Map.Entry<Integer, SqlMonotonicity> entry : targetsWithMonotonicity.entrySet()) {
final SqlMonotonicity value = entry.getValue();
switch (value) {
case NOT_MONOTONIC:
case CONSTANT:
break;
default:
fieldCollationsForRexCalls.add(
new RelFieldCollation(
entry.getKey(), RelFieldCollation.Direction.of(value)));
break;
}
}
if (!fieldCollationsForRexCalls.isEmpty()) {
collations.add(RelCollations.of(fieldCollationsForRexCalls));
}
return com.google.common.collect.ImmutableList.copyOf(collations);
} | 3.68 |
framework_VUpload_enableUpload | /** For internal use only. May be removed or replaced in the future. */
public void enableUpload() {
fu.getElement().setPropertyBoolean("disabled", false);
enabled = true;
updateEnabledForSubmitButton();
if (submitted) {
/*
* An old request is still in progress (most likely cancelled),
* ditching that target frame to make it possible to send a new
             * file. A new target frame is created later.
*/
cleanTargetFrame();
rebuildPanel();
submitted = false;
}
ensureUploadButton();
} | 3.68 |
framework_AbstractInMemoryContainer_removeFilter | /**
* Remove a specific container filter and re-filter the view (if necessary).
*
* This can be used to implement
* {@link Filterable#removeContainerFilter(Container.Filter)} .
*/
protected void removeFilter(Filter filter) {
for (Iterator<Filter> iterator = getFilters().iterator(); iterator
.hasNext();) {
Filter f = iterator.next();
if (f.equals(filter)) {
iterator.remove();
filterAll();
return;
}
}
} | 3.68 |
dubbo_ParamParserManager_providerParamParse | /**
 * Provider design description:
 * <p>
 * Object[] args = new Object[0];
 * List<Object> argsList = new ArrayList<>();
 * <p>
 * setValueByIndex(int index, Object value);
 * <p>
 * args = argsList.toArray(new Object[0]);
*/
public static Object[] providerParamParse(ProviderParseContext parseContext) {
List<ArgInfo> args = parseContext.getArgInfos();
for (int i = 0; i < args.size(); i++) {
for (ParamParser paramParser : providerParamParsers) {
paramParser.parse(parseContext, args.get(i));
}
}
// TODO add param require or default & body arg size pre judge
return parseContext.getArgs().toArray(new Object[0]);
} | 3.68 |
hmily_AggregateBinder_binder | /**
     * Resolves the appropriate AggregateBinder for the given bind data.
     *
     * @param target the bind data target
     * @param env the env
     * @return aggregate binder, or null if the target type is not an aggregate
*/
static AggregateBinder<?> binder(final BindData<?> target, final Binder.Env env) {
DataType type = target.getType();
        // if the target is a map collection.
if (type.isMap()) {
return new MapBinder(env);
} else if (type.isCollection()) {
return new CollectionBinder(env);
} else if (type.isArray()) {
return new ArrayBinder(env);
}
return null;
} | 3.68 |
hadoop_BalanceProcedureScheduler_init | /**
* Init the scheduler.
*
* @param recoverJobs whether to recover all the jobs from journal or not.
*/
public synchronized void init(boolean recoverJobs) throws IOException {
this.runningQueue = new LinkedBlockingQueue<>();
this.delayQueue = new DelayQueue<>();
this.recoverQueue = new LinkedBlockingQueue<>();
this.jobSet = new ConcurrentHashMap<>();
// start threads.
this.roosterThread = new Rooster();
this.roosterThread.setDaemon(true);
roosterThread.start();
this.recoverThread = new Recover();
this.recoverThread.setDaemon(true);
recoverThread.start();
int workerNum = conf.getInt(WORK_THREAD_NUM, WORK_THREAD_NUM_DEFAULT);
workersPool = new ThreadPoolExecutor(workerNum, workerNum * 2, 1,
TimeUnit.MILLISECONDS, new LinkedBlockingDeque<>());
this.readerThread = new Reader();
this.readerThread.start();
// init journal.
journal = new BalanceJournalInfoHDFS();
journal.setConf(conf);
if (recoverJobs) {
recoverAllJobs();
}
} | 3.68 |
AreaShop_StackCommand_countToName | /**
* Build a name from a count, with the right length.
* @param template Template to put the name in (# to put the count there, otherwise count is appended)
* @param count Number to use
* @return name with prepended 0's
*/
private String countToName(String template, int count) {
StringBuilder counterName = new StringBuilder().append(count);
int minimumLength = plugin.getConfig().getInt("stackRegionNumberLength");
while(counterName.length() < minimumLength) {
counterName.insert(0, "0");
}
if(template.contains("#")) {
return template.replace("#", counterName);
} else {
return template + counterName;
}
} | 3.68 |
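
A self-contained illustration of the zero-padding behaviour above, with the minimum length passed in directly instead of read from the plugin config (that parameter is an assumption for the sketch).

```java
public class CountToNameDemo {
  static String countToName(String template, int count, int minimumLength) {
    StringBuilder counter = new StringBuilder().append(count);
    while (counter.length() < minimumLength) {
      counter.insert(0, "0");                 // pad with leading zeros
    }
    return template.contains("#") ? template.replace("#", counter) : template + counter;
  }

  public static void main(String[] args) {
    System.out.println(countToName("region-#", 7, 3));  // region-007
    System.out.println(countToName("plot", 42, 4));     // plot0042
  }
}
```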
hudi_HoodieAvroUtils_convertValueForSpecificDataTypes | /**
* This method converts values for fields with certain Avro/Parquet data types that require special handling.
*
* @param fieldSchema avro field schema
* @param fieldValue avro field value
* @return field value either converted (for certain data types) or as it is.
*/
public static Object convertValueForSpecificDataTypes(Schema fieldSchema,
Object fieldValue,
boolean consistentLogicalTimestampEnabled) {
if (fieldSchema == null) {
return fieldValue;
} else if (fieldValue == null) {
checkState(isNullable(fieldSchema));
return null;
}
return convertValueForAvroLogicalTypes(resolveNullableSchema(fieldSchema), fieldValue, consistentLogicalTimestampEnabled);
} | 3.68 |
zxing_BitArray_set | /**
* Sets bit i.
*
* @param i bit to set
*/
public void set(int i) {
bits[i / 32] |= 1 << (i & 0x1F);
} | 3.68 |
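
The bit arithmetic above in isolation, as a hedged standalone demo (not the zxing class): i / 32 picks the backing int word, and 1 << (i & 0x1F) picks the bit within that word, since 0x1F masks the index to the range 0-31.

```java
public class BitSetDemo {
  static int[] bits = new int[2];   // room for 64 bits

  static void set(int i) {
    bits[i / 32] |= 1 << (i & 0x1F);
  }

  static boolean get(int i) {
    return (bits[i / 32] & (1 << (i & 0x1F))) != 0;
  }

  public static void main(String[] args) {
    set(3);
    set(37);
    System.out.println(get(3) + " " + get(4) + " " + get(37)); // true false true
  }
}
```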
hadoop_OBSCommonUtils_intOption | /**
     * Get an integer option not smaller than the minimum allowed value.
*
* @param conf configuration
* @param key key to look up
* @param defVal default value
* @param min minimum value
* @return the value
* @throws IllegalArgumentException if the value is below the minimum
*/
static int intOption(final Configuration conf, final String key,
final int defVal,
final int min) {
int v = conf.getInt(key, defVal);
Preconditions.checkArgument(
v >= min,
String.format("Value of %s: %d is below the minimum value %d", key,
v, min));
LOG.debug("Value of {} is {}", key, v);
return v;
} | 3.68 |
querydsl_GeometryExpressions_setSRID | /**
* Sets the SRID on a geometry to a particular integer value.
*
* @param expr geometry
* @param srid SRID
     * @param <T> the geometry type
* @return converted geometry
*/
public static <T extends Geometry> GeometryExpression<T> setSRID(Expression<T> expr, int srid) {
return geometryOperation(expr.getType(), SpatialOps.SET_SRID, expr, ConstantImpl.create(srid));
} | 3.68 |
morf_MergeStatement_getTableUniqueKey | /**
* Gets a list of the fields used as the key upon which to match.
*
* @return the table unique key.
*/
public List<AliasedField> getTableUniqueKey() {
return tableUniqueKey;
} | 3.68 |
MagicPlugin_CustomProjectileAction_adjustStartLocation | // This is used by EntityProjectile when first spawning the entity
protected Location adjustStartLocation(Location location) {
if (startDistance != 0) {
Vector velocity = location.getDirection().clone().normalize();
location.add(velocity.clone().multiply(startDistance));
}
return location;
} | 3.68 |
hadoop_ProtocolProxy_getProxy | /*
* Get the proxy
*/
public T getProxy() {
return proxy;
} | 3.68 |
flink_JoinOperatorSetsBase_where | /**
* Continues a Join transformation and defines a {@link KeySelector} function for the first join
* {@link DataSet}.
*
* <p>The KeySelector function is called for each element of the first DataSet and extracts a
* single key value on which the DataSet is joined.
*
* @param keySelector The KeySelector function which extracts the key values from the DataSet on
* which it is joined.
* @return An incomplete Join transformation. Call {@link
* org.apache.flink.api.java.operators.join.JoinOperatorSetsBase.JoinOperatorSetsPredicateBase#equalTo(int...)}
* or {@link
* org.apache.flink.api.java.operators.join.JoinOperatorSetsBase.JoinOperatorSetsPredicateBase#equalTo(KeySelector)}
* to continue the Join.
* @see KeySelector
* @see DataSet
*/
public <K> JoinOperatorSetsPredicateBase where(KeySelector<I1, K> keySelector) {
TypeInformation<K> keyType =
TypeExtractor.getKeySelectorTypes(keySelector, input1.getType());
return new JoinOperatorSetsPredicateBase(
new Keys.SelectorFunctionKeys<>(keySelector, input1.getType(), keyType));
} | 3.68 |
hbase_StripeStoreFileManager_nonOpenRowCompare | /**
* Compare two keys. Keys must not be open (isOpen(row) == false).
*/
private final int nonOpenRowCompare(byte[] k1, byte[] k2) {
assert !isOpen(k1) && !isOpen(k2);
return Bytes.compareTo(k1, k2);
} | 3.68 |
hudi_WriteMarkers_createIfNotExists | /**
* Creates a marker if the marker does not exist.
* This can invoke marker-based early conflict detection when enabled for multi-writers.
*
* @param partitionPath partition path in the table
* @param fileName file name
* @param type write IO type
* @param writeConfig Hudi write configs.
* @param fileId File ID.
* @param activeTimeline Active timeline for the write operation.
* @return the marker path.
*/
public Option<Path> createIfNotExists(String partitionPath, String fileName, IOType type, HoodieWriteConfig writeConfig,
String fileId, HoodieActiveTimeline activeTimeline) {
if (writeConfig.isEarlyConflictDetectionEnable()
&& writeConfig.getWriteConcurrencyMode().isOptimisticConcurrencyControl()) {
HoodieTimeline pendingCompactionTimeline = activeTimeline.filterPendingCompactionTimeline();
HoodieTimeline pendingReplaceTimeline = activeTimeline.filterPendingReplaceTimeline();
// TODO If current is compact or clustering then create marker directly without early conflict detection.
// Need to support early conflict detection between table service and common writers.
if (pendingCompactionTimeline.containsInstant(instantTime) || pendingReplaceTimeline.containsInstant(instantTime)) {
return create(partitionPath, fileName, type, true);
}
return createWithEarlyConflictDetection(partitionPath, fileName, type, false, writeConfig, fileId, activeTimeline);
}
return create(partitionPath, fileName, type, true);
} | 3.68 |
hbase_LocalHBaseCluster_main | /**
* Test things basically work.
*/
public static void main(String[] args) throws IOException {
Configuration conf = HBaseConfiguration.create();
LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
cluster.startup();
try (Connection connection = ConnectionFactory.createConnection(conf);
Admin admin = connection.getAdmin()) {
TableDescriptor htd =
TableDescriptorBuilder.newBuilder(TableName.valueOf(cluster.getClass().getName())).build();
admin.createTable(htd);
} finally {
cluster.shutdown();
}
} | 3.68 |
hadoop_DiskBalancerDataNode_setDataNodeName | /**
* Sets node's DNS name.
*
* @param name - Data node name
*/
public void setDataNodeName(String name) {
this.dataNodeName = name;
} | 3.68 |
rocketmq-connect_WorkerDirectTask_resetOffset | /**
     * Reset the offsets for the given partitions.
     *
     * @param offsets the map of offsets keyed by target partition.
*/
@Override
public void resetOffset(Map<RecordPartition, RecordOffset> offsets) {
// no-op
} | 3.68 |
hadoop_StorageLocationChecker_check | /**
* Initiate a check on the supplied storage volumes and return
* a list of healthy volumes.
*
* StorageLocations are returned in the same order as the input
* for compatibility with existing unit tests.
*
* @param conf HDFS configuration.
* @param dataDirs list of volumes to check.
* @return returns a list of healthy volumes. Returns an empty list if
* there are no healthy volumes.
*
* @throws InterruptedException if the check was interrupted.
* @throws IOException if the number of failed volumes exceeds the
* maximum allowed or if there are no good
* volumes.
*/
public List<StorageLocation> check(
final Configuration conf,
final Collection<StorageLocation> dataDirs)
throws InterruptedException, IOException {
final HashMap<StorageLocation, Boolean> goodLocations =
new LinkedHashMap<>();
final Set<StorageLocation> failedLocations = new HashSet<>();
final Map<StorageLocation, ListenableFuture<VolumeCheckResult>> futures =
Maps.newHashMap();
final LocalFileSystem localFS = FileSystem.getLocal(conf);
final CheckContext context = new CheckContext(localFS, expectedPermission);
// Start parallel disk check operations on all StorageLocations.
for (StorageLocation location : dataDirs) {
goodLocations.put(location, true);
Optional<ListenableFuture<VolumeCheckResult>> olf =
delegateChecker.schedule(location, context);
if (olf.isPresent()) {
futures.put(location, olf.get());
}
}
if (maxVolumeFailuresTolerated >= dataDirs.size()) {
throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + ". Value configured is >= "
+ "to the number of configured volumes (" + dataDirs.size() + ").");
}
final long checkStartTimeMs = timer.monotonicNow();
// Retrieve the results of the disk checks.
for (Map.Entry<StorageLocation,
ListenableFuture<VolumeCheckResult>> entry : futures.entrySet()) {
// Determine how much time we can allow for this check to complete.
// The cumulative wait time cannot exceed maxAllowedTimeForCheck.
final long waitSoFarMs = (timer.monotonicNow() - checkStartTimeMs);
final long timeLeftMs = Math.max(0,
maxAllowedTimeForCheckMs - waitSoFarMs);
final StorageLocation location = entry.getKey();
try {
final VolumeCheckResult result =
entry.getValue().get(timeLeftMs, TimeUnit.MILLISECONDS);
switch (result) {
case HEALTHY:
break;
case DEGRADED:
LOG.warn("StorageLocation {} appears to be degraded.", location);
break;
case FAILED:
LOG.warn("StorageLocation {} detected as failed.", location);
failedLocations.add(location);
goodLocations.remove(location);
break;
default:
LOG.error("Unexpected health check result {} for StorageLocation {}",
result, location);
}
} catch (ExecutionException|TimeoutException e) {
LOG.warn("Exception checking StorageLocation " + location,
e.getCause());
failedLocations.add(location);
goodLocations.remove(location);
}
}
if (maxVolumeFailuresTolerated == DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
if (dataDirs.size() == failedLocations.size()) {
throw new DiskErrorException("Too many failed volumes - "
+ "current valid volumes: " + goodLocations.size()
+ ", volumes configured: " + dataDirs.size()
+ ", volumes failed: " + failedLocations.size()
+ ", volume failures tolerated: " + maxVolumeFailuresTolerated);
}
} else {
if (failedLocations.size() > maxVolumeFailuresTolerated) {
throw new DiskErrorException("Too many failed volumes - "
+ "current valid volumes: " + goodLocations.size()
+ ", volumes configured: " + dataDirs.size()
+ ", volumes failed: " + failedLocations.size()
+ ", volume failures tolerated: " + maxVolumeFailuresTolerated);
}
}
if (goodLocations.size() == 0) {
throw new DiskErrorException("All directories in "
+ DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
+ failedLocations);
}
return new ArrayList<>(goodLocations.keySet());
} | 3.68 |
morf_SqlServerDialect_connectionTestStatement | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#connectionTestStatement()
*/
@Override
public String connectionTestStatement() {
return "select 1";
} | 3.68 |
hadoop_RouterPermissionChecker_checkPermission | /**
* Whether a mount table entry can be accessed by the current context.
*
* @param mountTable
* MountTable being accessed
* @param access
* type of action being performed on the mount table entry
* @throws AccessControlException
* if mount table cannot be accessed
*/
public void checkPermission(MountTable mountTable, FsAction access)
throws AccessControlException {
if (isSuperUser()) {
return;
}
FsPermission mode = mountTable.getMode();
if (getUser().equals(mountTable.getOwnerName())
&& mode.getUserAction().implies(access)) {
return;
}
if (isMemberOfGroup(mountTable.getGroupName())
&& mode.getGroupAction().implies(access)) {
return;
}
if (!getUser().equals(mountTable.getOwnerName())
&& !isMemberOfGroup(mountTable.getGroupName())
&& mode.getOtherAction().implies(access)) {
return;
}
throw new AccessControlException(
"Permission denied while accessing mount table "
+ mountTable.getSourcePath()
+ ": user " + getUser() + " does not have " + access.toString()
+ " permissions.");
} | 3.68 |
flink_CopyOnWriteStateMapSnapshot_moveChainsToBackOfArray | /**
* Move the chains in snapshotData to the back of the array, and return the index of the
* first chain from the front.
*/
int moveChainsToBackOfArray() {
int index = snapshotData.length - 1;
// find the first null chain from the back
while (index >= 0) {
if (snapshotData[index] == null) {
break;
}
index--;
}
int lastNullIndex = index;
index--;
// move the chains to the back
while (index >= 0) {
CopyOnWriteStateMap.StateMapEntry<K, N, S> entry = snapshotData[index];
if (entry != null) {
snapshotData[lastNullIndex] = entry;
snapshotData[index] = null;
lastNullIndex--;
}
index--;
}
// return the index of the first chain from the front
return lastNullIndex + 1;
} | 3.68 |
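
A hedged sketch of the same compaction idea on a plain Object[] (not the Flink snapshot class): non-null entries are shifted to the back of the array and the index of the first surviving entry is returned.

```java
public class CompactToBackDemo {
  /** Moves non-null entries to the back of the array and returns the index of the first one. */
  static int moveToBack(Object[] data) {
    int index = data.length - 1;
    while (index >= 0 && data[index] != null) {   // find the first null slot from the back
      index--;
    }
    int lastNullIndex = index;
    index--;
    while (index >= 0) {                          // shift remaining entries towards the back
      if (data[index] != null) {
        data[lastNullIndex--] = data[index];
        data[index] = null;
      }
      index--;
    }
    return lastNullIndex + 1;
  }

  public static void main(String[] args) {
    Object[] data = {"a", null, "b", null, "c"};
    int first = moveToBack(data);
    System.out.println(first + " " + java.util.Arrays.toString(data)); // 2 [null, null, a, b, c]
  }
}
```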
hadoop_Server_loadServices | /**
* Loads services defined in <code>services</code> and
* <code>services.ext</code> and de-dups them.
*
* @return List of final services to initialize.
*
* @throws ServerException throw if the services could not be loaded.
*/
protected List<Service> loadServices() throws ServerException {
try {
Map<Class, Service> map = new LinkedHashMap<Class, Service>();
Class[] classes = getConfig().getClasses(getPrefixedName(CONF_SERVICES));
Class[] classesExt = getConfig().getClasses(getPrefixedName(CONF_SERVICES_EXT));
List<Service> list = new ArrayList<Service>();
loadServices(classes, list);
loadServices(classesExt, list);
//removing duplicate services, strategy: last one wins
for (Service service : list) {
if (map.containsKey(service.getInterface())) {
log.debug("Replacing service [{}] implementation [{}]", service.getInterface(),
service.getClass());
}
map.put(service.getInterface(), service);
}
list = new ArrayList<Service>();
for (Map.Entry<Class, Service> entry : map.entrySet()) {
list.add(entry.getValue());
}
return list;
} catch (RuntimeException ex) {
throw new ServerException(ServerException.ERROR.S08, ex.getMessage(), ex);
}
} | 3.68 |
flink_SocketClientSink_invoke | /**
* Called when new data arrives to the sink, and forwards it to Socket.
*
* @param value The value to write to the socket.
*/
@Override
public void invoke(IN value) throws Exception {
byte[] msg = schema.serialize(value);
try {
outputStream.write(msg);
if (autoFlush) {
outputStream.flush();
}
} catch (IOException e) {
            // if no re-tries are enabled, fail immediately
if (maxNumRetries == 0) {
throw new IOException(
"Failed to send message '"
+ value
+ "' to socket server at "
+ hostName
+ ":"
+ port
+ ". Connection re-tries are not enabled.",
e);
}
LOG.error(
"Failed to send message '"
+ value
+ "' to socket server at "
+ hostName
+ ":"
+ port
+ ". Trying to reconnect...",
e);
// do the retries in locked scope, to guard against concurrent close() calls
// note that the first re-try comes immediately, without a wait!
synchronized (lock) {
IOException lastException = null;
retries = 0;
while (isRunning && (maxNumRetries < 0 || retries < maxNumRetries)) {
// first, clean up the old resources
try {
if (outputStream != null) {
outputStream.close();
}
} catch (IOException ee) {
LOG.error("Could not close output stream from failed write attempt", ee);
}
try {
if (client != null) {
client.close();
}
} catch (IOException ee) {
LOG.error("Could not close socket from failed write attempt", ee);
}
// try again
retries++;
try {
// initialize a new connection
createConnection();
// re-try the write
outputStream.write(msg);
// success!
return;
} catch (IOException ee) {
lastException = ee;
LOG.error(
"Re-connect to socket server and send message failed. Retry time(s): "
+ retries,
ee);
}
// wait before re-attempting to connect
lock.wait(CONNECTION_RETRY_DELAY);
}
// throw an exception if the task is still running, otherwise simply leave the
// method
if (isRunning) {
throw new IOException(
"Failed to send message '"
+ value
+ "' to socket server at "
+ hostName
+ ":"
+ port
+ ". Failed after "
+ retries
+ " retries.",
lastException);
}
}
}
} | 3.68 |
hadoop_Quota_aggregateQuota | /**
* Aggregate quota that queried from sub-clusters.
* @param path Federation path of the results.
* @param results Quota query result.
* @return Aggregated Quota.
*/
QuotaUsage aggregateQuota(String path,
Map<RemoteLocation, QuotaUsage> results) throws IOException {
long nsCount = 0;
long ssCount = 0;
long[] typeCount = new long[StorageType.values().length];
long nsQuota = HdfsConstants.QUOTA_RESET;
long ssQuota = HdfsConstants.QUOTA_RESET;
long[] typeQuota = new long[StorageType.values().length];
eachByStorageType(t -> typeQuota[t.ordinal()] = HdfsConstants.QUOTA_RESET);
boolean hasQuotaUnset = false;
boolean isMountEntry = isMountEntry(path);
for (Map.Entry<RemoteLocation, QuotaUsage> entry : results.entrySet()) {
RemoteLocation loc = entry.getKey();
QuotaUsage usage = entry.getValue();
if (isMountEntry) {
nsCount += usage.getFileAndDirectoryCount();
ssCount += usage.getSpaceConsumed();
eachByStorageType(
t -> typeCount[t.ordinal()] += usage.getTypeConsumed(t));
} else if (usage != null) {
// If quota is not set in real FileSystem, the usage
// value will return -1.
if (!RouterQuotaManager.isQuotaSet(usage)) {
hasQuotaUnset = true;
}
nsQuota = usage.getQuota();
ssQuota = usage.getSpaceQuota();
eachByStorageType(t -> typeQuota[t.ordinal()] = usage.getTypeQuota(t));
nsCount += usage.getFileAndDirectoryCount();
ssCount += usage.getSpaceConsumed();
eachByStorageType(
t -> typeCount[t.ordinal()] += usage.getTypeConsumed(t));
LOG.debug("Get quota usage for path: nsId: {}, dest: {},"
+ " nsCount: {}, ssCount: {}, typeCount: {}.",
loc.getNameserviceId(), loc.getDest(),
usage.getFileAndDirectoryCount(), usage.getSpaceConsumed(),
usage.toString(false, true, Arrays.asList(StorageType.values())));
}
}
if (isMountEntry) {
QuotaUsage quota = getGlobalQuota(path);
nsQuota = quota.getQuota();
ssQuota = quota.getSpaceQuota();
eachByStorageType(t -> typeQuota[t.ordinal()] = quota.getTypeQuota(t));
}
QuotaUsage.Builder builder =
new QuotaUsage.Builder().fileAndDirectoryCount(nsCount)
.spaceConsumed(ssCount).typeConsumed(typeCount);
if (hasQuotaUnset) {
builder.quota(HdfsConstants.QUOTA_RESET)
.spaceQuota(HdfsConstants.QUOTA_RESET);
eachByStorageType(t -> builder.typeQuota(t, HdfsConstants.QUOTA_RESET));
} else {
builder.quota(nsQuota).spaceQuota(ssQuota);
eachByStorageType(t -> builder.typeQuota(t, typeQuota[t.ordinal()]));
}
return builder.build();
} | 3.68 |
hbase_ProcedureMember_controllerConnectionFailure | /**
* The connection to the rest of the procedure group (member and coordinator) has been
* broken/lost/failed. This should fail any interested subprocedure, but not attempt to notify
* other members since we cannot reach them anymore.
* @param message description of the error
* @param cause the actual cause of the failure
* @param procName the name of the procedure we'd cancel due to the error.
*/
public void controllerConnectionFailure(final String message, final Throwable cause,
final String procName) {
LOG.error(message, cause);
if (procName == null) {
return;
}
Subprocedure toNotify = subprocs.get(procName);
if (toNotify != null) {
toNotify.cancel(message, cause);
}
} | 3.68 |
framework_VColorPickerArea_getText | /**
* Gets the caption's contents as text.
*
* @return the caption's text
*/
@Override
public String getText() {
return caption.getText();
} | 3.68 |
flink_StringData_fromString | /** Creates an instance of {@link StringData} from the given {@link String}. */
static StringData fromString(String str) {
return BinaryStringData.fromString(str);
} | 3.68 |
flink_CompactingHashTable_fillCache | /**
* utility function that inserts all entries from a bucket and its overflow buckets into the
* cache
*
* @return true if last bucket was not reached yet
* @throws IOException
*/
private boolean fillCache() throws IOException {
if (currentBucketIndex >= table.numBuckets) {
return false;
}
MemorySegment bucket = table.buckets[currentSegmentIndex];
// get the basic characteristics of the bucket
final int partitionNumber = bucket.get(currentBucketOffset + HEADER_PARTITION_OFFSET);
final InMemoryPartition<T> partition = table.partitions.get(partitionNumber);
final MemorySegment[] overflowSegments = partition.overflowSegments;
int countInSegment = bucket.getInt(currentBucketOffset + HEADER_COUNT_OFFSET);
int numInSegment = 0;
int posInSegment = currentBucketOffset + BUCKET_POINTER_START_OFFSET;
int bucketOffset = currentBucketOffset;
// loop over all segments that are involved in the bucket (original bucket plus overflow
// buckets)
while (true) {
while (numInSegment < countInSegment) {
long pointer = bucket.getLong(posInSegment);
posInSegment += POINTER_LEN;
numInSegment++;
T target = table.buildSideSerializer.createInstance();
try {
target = partition.readRecordAt(pointer, target);
cache.add(target);
} catch (IOException e) {
throw new RuntimeException(
"Error deserializing record from the Hash Table: " + e.getMessage(),
e);
}
}
// this segment is done. check if there is another chained bucket
final long forwardPointer = bucket.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
break;
}
final int overflowSegNum = (int) (forwardPointer >>> 32);
bucket = overflowSegments[overflowSegNum];
bucketOffset = (int) forwardPointer;
countInSegment = bucket.getInt(bucketOffset + HEADER_COUNT_OFFSET);
posInSegment = bucketOffset + BUCKET_POINTER_START_OFFSET;
numInSegment = 0;
}
currentBucketIndex++;
if (currentBucketIndex % bucketsPerSegment == 0) {
currentSegmentIndex++;
currentBucketOffset = 0;
} else {
currentBucketOffset += HASH_BUCKET_SIZE;
}
return true;
} | 3.68 |
morf_DataValueLookup_defaultHashCode | /**
* Default hashCode implementation for instances.
*
* @param obj The object.
* @return The hashCode.
*/
public static int defaultHashCode(DataValueLookup obj) {
final int prime = 31;
int result = 1;
for (DataValue value : obj.getValues()) {
result = prime * result + value.hashCode();
}
return result;
} | 3.68 |
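
The 31-based fold above is the same accumulation the JDK collections use for their hash codes; a small standalone check of that equivalence (plain Java, not morf API), assuming the values have stable hashCode implementations.

```java
import java.util.List;

public class Prime31HashDemo {
  static int accumulate(List<?> values) {
    final int prime = 31;
    int result = 1;
    for (Object v : values) {
      result = prime * result + v.hashCode();   // same fold as defaultHashCode above
    }
    return result;
  }

  public static void main(String[] args) {
    List<String> values = List.of("a", "b");
    // List.hashCode() is specified as the same 31-based fold, so the results match.
    System.out.println(accumulate(values) == values.hashCode()); // true
  }
}
```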
morf_SqlParameter_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
SqlParameter other = (SqlParameter) obj;
return new EqualsBuilder()
.appendSuper(super.equals(obj))
.append(name, other.name)
.append(scale, other.scale)
.append(width, other.width)
.append(type, other.type)
.isEquals();
} | 3.68 |
framework_SelectAllConstantViewport_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "The scroll position of a table with many items should remain constant if all items are selected.";
} | 3.68 |
hbase_HRegion_batchMutate | /**
* Perform a batch of mutations.
* <p/>
* Operations in a batch are stored with highest durability specified of for all operations in a
* batch, except for {@link Durability#SKIP_WAL}.
* <p/>
* This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with
* {@link ReplayBatchOperation} instance and {@link #batchMutate(Mutation[])} with
* {@link MutationBatchOperation} instance as an argument. As the processing of replay batch and
* mutation batch is very similar, lot of code is shared by providing generic methods in base
* class {@link BatchOperation}. The logic for this method and
* {@link #doMiniBatchMutate(BatchOperation)} is implemented using methods in base class which are
* overridden by derived classes to implement special behavior.
* @param batchOp contains the list of mutations
* @return an array of OperationStatus which internally contains the OperationStatusCode and the
* exceptionMessage if any.
* @throws IOException if an IO problem is encountered
*/
private OperationStatus[] batchMutate(BatchOperation<?> batchOp) throws IOException {
boolean initialized = false;
batchOp.startRegionOperation();
try {
while (!batchOp.isDone()) {
if (!batchOp.isInReplay()) {
checkReadOnly();
}
checkResources();
if (!initialized) {
this.writeRequestsCount.add(batchOp.size());
// validate and prepare batch for write, for MutationBatchOperation it also calls CP
// prePut()/preDelete()/preIncrement()/preAppend() hooks
batchOp.checkAndPrepare();
initialized = true;
}
doMiniBatchMutate(batchOp);
requestFlushIfNeeded();
}
} finally {
if (rsServices != null && rsServices.getMetrics() != null) {
rsServices.getMetrics().updateWriteQueryMeter(this, batchOp.size());
}
batchOp.closeRegionOperation();
}
return batchOp.retCodeDetails;
} | 3.68 |
framework_MenuBarFocus_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final MenuBar bar = buildMenu();
Button focusButton = buildButton(bar);
addComponent(bar);
addComponent(focusButton);
getLayout().setSpacing(true);
} | 3.68 |
dubbo_LfuCache_get | /**
* API to return stored value using a key against the calling thread specific store.
* @param key Unique identifier for cache lookup
* @return Return stored object against key
*/
@SuppressWarnings("unchecked")
@Override
public Object get(Object key) {
return store.get(key);
} | 3.68 |
flink_MemoryMappedBoundedData_getSize | /**
* Gets the number of bytes of all written data (including the metadata in the buffer headers).
*/
@Override
public long getSize() {
long size = 0L;
for (ByteBuffer bb : fullBuffers) {
size += bb.remaining();
}
if (currentBuffer != null) {
size += currentBuffer.position();
}
return size;
} | 3.68 |
hibernate-validator_TypeResolutionHelper_getTypeResolver | /**
* @return the typeResolver
*/
public TypeResolver getTypeResolver() {
return typeResolver;
} | 3.68 |
hmily_DateUtils_parseDate | /**
     * Formats the given date as a local date-time string.
     *
     * @param date the date
     * @return the formatted date-time string
*/
public static String parseDate(final Date date) {
Instant instant = date.toInstant();
ZoneId zone = ZoneId.systemDefault();
LocalDateTime localDateTime = LocalDateTime.ofInstant(instant, zone);
return formatLocalDateTime(localDateTime);
} | 3.68 |
querydsl_JTSGeometryExpression_distance | /**
* Returns the shortest distance between any two Points in the two geometric objects as
* calculated in the spatial reference system of this geometric object. Because the geometries
* are closed, it is possible to find a point on each geometric object involved, such that the
* distance between these 2 points is the returned distance between their geometric objects.
*
* @param geometry other geometry
* @return distance
*/
public NumberExpression<Double> distance(Expression<? extends Geometry> geometry) {
return Expressions.numberOperation(Double.class, SpatialOps.DISTANCE, mixin, geometry);
} | 3.68 |
flink_AbstractStreamOperatorV2_getRuntimeContext | /**
* Returns a context that allows the operator to query information about the execution and also
* to interact with systems such as broadcast variables and managed state. This also allows to
* register timers.
*/
public StreamingRuntimeContext getRuntimeContext() {
return runtimeContext;
} | 3.68 |
flink_BatchTask_openChainedTasks | /**
* Opens all chained tasks, in the order as they are stored in the array. The opening process
* creates a standardized log info message.
*
* @param tasks The tasks to be opened.
* @param parent The parent task, used to obtain parameters to include in the log message.
* @throws Exception Thrown, if the opening encounters an exception.
*/
public static void openChainedTasks(List<ChainedDriver<?, ?>> tasks, AbstractInvokable parent)
throws Exception {
// start all chained tasks
for (ChainedDriver<?, ?> task : tasks) {
if (LOG.isDebugEnabled()) {
LOG.debug(constructLogString("Start task code", task.getTaskName(), parent));
}
task.openTask();
}
} | 3.68 |
hudi_AvroInternalSchemaConverter_convertToField | /** Convert an avro schema into internal type. */
public static Type convertToField(Schema schema) {
return buildTypeFromAvroSchema(schema);
} | 3.68 |
hbase_EncodedDataBlock_getSize | /**
* Find the size of minimal buffer that could store compressed data.
* @return Size in bytes of compressed data.
*/
public int getSize() {
return getEncodedData().length;
} | 3.68 |
hadoop_TimelineWriteResponse_addError | /**
* Add a single {@link TimelineWriteError} instance into the existing list.
*
* @param error
* a single {@link TimelineWriteError} instance
*/
public void addError(TimelineWriteError error) {
errors.add(error);
} | 3.68 |
morf_Deployment_writeUpgradeSteps | /**
* Add an upgrade step for each upgrade step.
*
* <p>The {@link Deployment} class ensures that all contributed domain tables are written in the database, but we need to
   * ensure that the upgrade steps that were used to create them are also written, so that next time the application is run the upgrade
* doesn't try to recreate/upgrade them.</p>
*
* @param upgradeSteps All available upgrade steps.
* @param upgradePath Recipient for the deployment statements.
*/
private void writeUpgradeSteps(Collection<Class<? extends UpgradeStep>> upgradeSteps, UpgradePath upgradePath) {
for(Class<? extends UpgradeStep> upgradeStep : upgradeSteps) {
UUID uuid = UpgradePathFinder.readUUID(upgradeStep);
InsertStatement insertStatement = AuditRecordHelper.createAuditInsertStatement(uuid, upgradeStep.getName());
upgradePath.writeSql(connectionResources.sqlDialect().convertStatementToSQL(insertStatement));
}
} | 3.68 |
dubbo_RpcUtils_attachInvocationIdIfAsync | /**
* Idempotent operation: invocation id will be added in async operation by default
*
     * @param url the consumer url
     * @param inv the invocation
*/
public static void attachInvocationIdIfAsync(URL url, Invocation inv) {
if (isAttachInvocationId(url, inv) && getInvocationId(inv) == null && inv instanceof RpcInvocation) {
inv.setAttachment(ID_KEY, String.valueOf(INVOKE_ID.getAndIncrement()));
}
} | 3.68 |
hbase_MiniHBaseCluster_flushcache | /**
* Call flushCache on all regions of the specified table.
*/
public void flushcache(TableName tableName) throws IOException {
for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
if (r.getTableDescriptor().getTableName().equals(tableName)) {
executeFlush(r);
}
}
}
} | 3.68 |
hbase_HBaseTestingUtility_moveRegionAndWait | /**
* Move region to destination server and wait till region is completely moved and online
* @param destRegion region to move
* @param destServer destination server of the region
*/
public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
throws InterruptedException, IOException {
HMaster master = getMiniHBaseCluster().getMaster();
// TODO: Here we start the move. The move can take a while.
getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
while (true) {
ServerName serverName =
master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion);
if (serverName != null && serverName.equals(destServer)) {
assertRegionOnServer(destRegion, serverName, 2000);
break;
}
Thread.sleep(10);
}
} | 3.68 |
hbase_EncryptionTest_testKeyProvider | /**
* Check that the configured key provider can be loaded and initialized, or throw an exception.
*/
public static void testKeyProvider(final Configuration conf) throws IOException {
String providerClassName =
conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
Boolean result = keyProviderResults.get(providerClassName);
if (result == null) {
try {
Encryption.getKeyProvider(conf);
keyProviderResults.put(providerClassName, true);
} catch (Exception e) { // most likely a RuntimeException
keyProviderResults.put(providerClassName, false);
throw new IOException(
"Key provider " + providerClassName + " failed test: " + e.getMessage(), e);
}
} else if (!result) {
throw new IOException("Key provider " + providerClassName + " previously failed test");
}
} | 3.68 |
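
The memoization pattern above (run an expensive check once per provider, then replay the cached outcome) in a generic, hypothetical form; the names here are illustrative and not HBase API.

```java
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class CachedCheckDemo {
  private static final Map<String, Boolean> results = new ConcurrentHashMap<>();

  /** Runs an expensive check once per name and replays the cached outcome on later calls. */
  static void checkOnce(String name, Runnable check) throws IOException {
    Boolean previous = results.get(name);
    if (previous == null) {
      try {
        check.run();
        results.put(name, true);
      } catch (Exception e) {          // most likely a RuntimeException, as in the original
        results.put(name, false);
        throw new IOException(name + " failed test: " + e.getMessage(), e);
      }
    } else if (!previous) {
      throw new IOException(name + " previously failed test");
    }
  }
}
```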
rocketmq-connect_RecordOffsetManagement_awaitAllMessages | /**
* await all messages
*
* @param timeout
* @param timeUnit
* @return
*/
public boolean awaitAllMessages(long timeout, TimeUnit timeUnit) {
// Create a new message drain latch as a local variable to avoid SpotBugs warnings about inconsistent synchronization
// on an instance variable when invoking CountDownLatch::await outside a synchronized block
CountDownLatch messageDrainLatch;
synchronized (this) {
messageDrainLatch = new CountDownLatch(numUnackedMessages.get());
this.messageDrainLatch = messageDrainLatch;
}
try {
return messageDrainLatch.await(timeout, timeUnit);
} catch (InterruptedException e) {
return false;
}
} | 3.68 |
hadoop_FileSystemReleaseFilter_doFilter | /**
* It delegates the incoming request to the <code>FilterChain</code>, and
* at its completion (in a finally block) releases the filesystem instance
* back to the {@link FileSystemAccess} service.
*
* @param servletRequest servlet request.
* @param servletResponse servlet response.
* @param filterChain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
throws IOException, ServletException {
try {
filterChain.doFilter(servletRequest, servletResponse);
} finally {
FileSystem fs = FILE_SYSTEM_TL.get();
if (fs != null) {
FILE_SYSTEM_TL.remove();
getFileSystemAccess().releaseFileSystem(fs);
}
}
} | 3.68 |
hbase_OrderedBytes_decodeBlobCopy | /**
* Decode a Blob value, byte-for-byte copy.
* @see #encodeBlobCopy(PositionedByteRange, byte[], int, int, Order)
*/
public static byte[] decodeBlobCopy(PositionedByteRange src) {
byte header = src.get();
if (header == NULL || header == DESCENDING.apply(NULL)) {
return null;
}
assert header == BLOB_COPY || header == DESCENDING.apply(BLOB_COPY);
Order ord = header == BLOB_COPY ? ASCENDING : DESCENDING;
final int length = src.getRemaining() - (ASCENDING == ord ? 0 : 1);
byte[] ret = new byte[length];
src.get(ret);
ord.apply(ret, 0, ret.length);
// DESCENDING ordered BlobCopy requires a termination bit to preserve
// sort-order semantics of null values.
if (DESCENDING == ord) src.get();
return ret;
} | 3.68 |
hadoop_ServerWebApp_contextInitialized | /**
* Initializes the <code>ServletContextListener</code> which initializes
* the Server.
*
   * @param event servlet context event.
*/
@Override
public void contextInitialized(ServletContextEvent event) {
try {
init();
} catch (ServerException ex) {
event.getServletContext().log("ERROR: " + ex.getMessage());
throw new RuntimeException(ex);
}
} | 3.68 |
morf_AlteredTable_columns | /**
* @see org.alfasoftware.morf.metadata.Table#columns()
*/
@Override
public List<Column> columns() {
return columns;
} | 3.68 |
flink_ParameterTool_mergeWith | /**
* Merges two {@link ParameterTool}.
*
* @param other Other {@link ParameterTool} object
* @return The Merged {@link ParameterTool}
*/
public ParameterTool mergeWith(ParameterTool other) {
final Map<String, String> resultData =
CollectionUtil.newHashMapWithExpectedSize(data.size() + other.data.size());
resultData.putAll(data);
resultData.putAll(other.data);
final ParameterTool ret = new ParameterTool(resultData);
final HashSet<String> requestedParametersLeft = new HashSet<>(data.keySet());
requestedParametersLeft.removeAll(unrequestedParameters);
final HashSet<String> requestedParametersRight = new HashSet<>(other.data.keySet());
requestedParametersRight.removeAll(other.unrequestedParameters);
ret.unrequestedParameters.removeAll(requestedParametersLeft);
ret.unrequestedParameters.removeAll(requestedParametersRight);
return ret;
} | 3.68 |
hadoop_DistributedCache_setArchiveTimestamps | /**
* This is to check the timestamp of the archives to be localized.
* Used by internal MapReduce code.
* @param conf Configuration which stores the timestamp's
* @param timestamps comma separated list of timestamps of archives.
* The order should be the same as the order in which the archives are added.
*/
@Deprecated
public static void setArchiveTimestamps(Configuration conf, String timestamps) {
conf.set(CACHE_ARCHIVES_TIMESTAMPS, timestamps);
} | 3.68 |
framework_CssLayout_writeDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#writeDesign(org.jsoup.nodes.Element
* , com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void writeDesign(Element design, DesignContext designContext) {
// write default attributes
super.writeDesign(design, designContext);
CssLayout def = designContext.getDefaultInstance(this);
// handle children
if (!designContext.shouldWriteChildren(this, def)) {
return;
}
Element designElement = design;
for (Component child : this) {
Element childNode = designContext.createElement(child);
designElement.appendChild(childNode);
}
} | 3.68 |
hudi_HoodieLogBlock_deflate | /**
* After the content bytes is converted into the required DataStructure by a logBlock, deflate the content to release
* byte [] and relieve memory pressure when GC kicks in. NOTE: This still leaves the heap fragmented
*/
protected void deflate() {
content = Option.empty();
} | 3.68 |
hudi_CleanPlanner_getPartitionPathsForCleanByCommits | /**
* Return partition paths for cleaning by commits mode.
* @param instantToRetain Earliest Instant to retain
* @return list of partitions
* @throws IOException
*/
private List<String> getPartitionPathsForCleanByCommits(Option<HoodieInstant> instantToRetain) throws IOException {
if (!instantToRetain.isPresent()) {
LOG.info("No earliest commit to retain. No need to scan partitions !!");
return Collections.emptyList();
}
if (config.incrementalCleanerModeEnabled()) {
Option<HoodieInstant> lastClean = hoodieTable.getCleanTimeline().filterCompletedInstants().lastInstant();
if (lastClean.isPresent()) {
if (hoodieTable.getActiveTimeline().isEmpty(lastClean.get())) {
hoodieTable.getActiveTimeline().deleteEmptyInstantIfExists(lastClean.get());
} else {
HoodieCleanMetadata cleanMetadata = TimelineMetadataUtils
.deserializeHoodieCleanMetadata(hoodieTable.getActiveTimeline().getInstantDetails(lastClean.get()).get());
if ((cleanMetadata.getEarliestCommitToRetain() != null)
&& (cleanMetadata.getEarliestCommitToRetain().length() > 0)
&& !hoodieTable.getActiveTimeline().getCommitsTimeline().isBeforeTimelineStarts(cleanMetadata.getEarliestCommitToRetain())) {
return getPartitionPathsForIncrementalCleaning(cleanMetadata, instantToRetain);
}
}
}
}
return getPartitionPathsForFullCleaning();
} | 3.68 |
hudi_BaseHoodieWriteClient_preCommit | /**
* Any pre-commit actions like conflict resolution goes here.
* @param inflightInstant instant of inflight operation.
* @param metadata commit metadata for which pre commit is being invoked.
*/
protected void preCommit(HoodieInstant inflightInstant, HoodieCommitMetadata metadata) {
// Create a Hoodie table after startTxn which encapsulated the commits and files visible.
// Important to create this after the lock to ensure the latest commits show up in the timeline without need for reload
HoodieTable table = createTable(config, hadoopConf);
resolveWriteConflict(table, metadata, this.pendingInflightAndRequestedInstants);
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations4 | /**
* Test for proper SQL mathematics operation generation from DSL expressions.
* <p>
* Bracket should be generated for subexpression "b+100". Even without explicit
* {@link org.alfasoftware.morf.sql.SqlUtils#bracket(MathsField)} call.
* </p>
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations4() {
String result = testDialect.getSqlFrom(field("a").divideBy(field("b").plus(literal(100))));
assertEquals(expectedSqlForMathOperations4(), result);
} | 3.68 |
morf_SqlDialect_getSqlForTrim | /**
* Converts the TRIM function into SQL.
*
* @param function the function to convert.
* @return a string representation of the SQL.
*/
protected String getSqlForTrim(Function function) {
return "TRIM(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
hbase_TraceUtil_tracedFuture | /**
* Trace an asynchronous operation.
*/
public static <T> CompletableFuture<T> tracedFuture(Supplier<CompletableFuture<T>> action,
String spanName) {
Span span = createSpan(spanName);
try (Scope ignored = span.makeCurrent()) {
CompletableFuture<T> future = action.get();
endSpan(future, span);
return future;
}
} | 3.68 |
framework_UIDL_getStringArrayVariableAsSet | /**
* Gets the value of the named String[] variable as a Set of Strings.
*
* @param name
* the name of the variable
* @return the value of the variable
*/
public Set<String> getStringArrayVariableAsSet(final String name) {
final HashSet<String> s = new HashSet<>();
JsArrayString a = var().getJSStringArray(name);
for (int i = 0; i < a.length(); i++) {
s.add(a.get(i));
}
return s;
} | 3.68 |
flink_ShuffleMaster_unregisterJob | /**
* Unregisters the target job from this shuffle master, which means the corresponding job has
* reached a global termination state and all the allocated resources except for the cluster
* partitions can be cleared.
*
* @param jobID ID of the target job to be unregistered.
*/
default void unregisterJob(JobID jobID) {} | 3.68 |
hadoop_RMStateStoreUtils_readRMDelegationTokenIdentifierData | /**
* Returns the RM Delegation Token data from the {@link DataInputStream} as a
* {@link RMDelegationTokenIdentifierData}. It can handle both the current
* and old (non-protobuf) formats.
*
* @param fsIn The {@link DataInputStream} containing RM Delegation Token data
* @return An {@link RMDelegationTokenIdentifierData} containing the read in
* RM Delegation Token
* @throws IOException an I/O exception has occurred.
*/
public static RMDelegationTokenIdentifierData
readRMDelegationTokenIdentifierData(DataInputStream fsIn)
throws IOException {
RMDelegationTokenIdentifierData identifierData =
new RMDelegationTokenIdentifierData();
try {
identifierData.readFields(fsIn);
} catch (InvalidProtocolBufferException e) {
LOG.warn("Recovering old formatted token");
fsIn.reset();
YARNDelegationTokenIdentifier identifier =
new RMDelegationTokenIdentifier();
identifier.readFieldsInOldFormat(fsIn);
identifierData.setIdentifier(identifier);
identifierData.setRenewDate(fsIn.readLong());
}
return identifierData;
} | 3.68 |
zxing_GeoParsedResult_getLongitude | /**
* @return longitude in degrees
*/
public double getLongitude() {
return longitude;
} | 3.68 |
hbase_Timer_updateMillis | /**
* Update the timer with the given duration in milliseconds
* @param durationMillis the duration of the event in ms
*/
default void updateMillis(long durationMillis) {
    update(durationMillis, TimeUnit.MILLISECONDS);
} | 3.68 |
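A small usage sketch for the millisecond convenience method, assuming Timer refers to the metrics interface above; the wrapper class and method names are hypothetical.

class TimedOperation {
  // Measures the wall-clock duration of the given work and records it on the metrics
  // Timer, using the millisecond-based convenience method.
  static void timeIt(Timer timer, Runnable work) {
    long start = System.currentTimeMillis();
    try {
      work.run();
    } finally {
      timer.updateMillis(System.currentTimeMillis() - start);
    }
  }
}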
hbase_BackupAdminImpl_finalizeDelete | /**
* Updates incremental backup set for every backupRoot
* @param tablesMap map [backupRoot: {@code Set<TableName>}]
* @param table backup system table
* @throws IOException if a table operation fails
*/
private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
throws IOException {
for (String backupRoot : tablesMap.keySet()) {
Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
Map<TableName, ArrayList<BackupInfo>> tableMap =
table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
if (entry.getValue() == null) {
// No more backups for a table
incrTableSet.remove(entry.getKey());
}
}
if (!incrTableSet.isEmpty()) {
table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
} else { // empty
table.deleteIncrementalBackupTableSet(backupRoot);
}
}
} | 3.68 |
hudi_SchedulerConfGenerator_getSparkSchedulingConfigs | /**
 * Helper to build Spark scheduling configs dynamically.
 *
 * @param cfg Config for HoodieDeltaStreamer
 * @return additional Spark configs to apply, including the scheduler allocation file
 *         when FAIR scheduling is in effect for a continuous MERGE_ON_READ job
 * @throws Exception if the scheduling config file cannot be generated
 */
public static Map<String, String> getSparkSchedulingConfigs(HoodieStreamer.Config cfg) throws Exception {
scala.Option<String> scheduleModeKeyOption = new SparkConf().getOption(SPARK_SCHEDULER_MODE_KEY);
final Option<String> sparkSchedulerMode =
scheduleModeKeyOption.isDefined() ? Option.of(scheduleModeKeyOption.get()) : Option.empty();
Map<String, String> additionalSparkConfigs = new HashMap<>(1);
if (sparkSchedulerMode.isPresent() && SPARK_SCHEDULER_FAIR_MODE.equals(sparkSchedulerMode.get())
&& cfg.continuousMode && cfg.tableType.equals(HoodieTableType.MERGE_ON_READ.name())) {
String sparkSchedulingConfFile = generateAndStoreConfig(cfg.deltaSyncSchedulingWeight,
cfg.compactSchedulingWeight, cfg.deltaSyncSchedulingMinShare, cfg.compactSchedulingMinShare,
cfg.clusterSchedulingWeight, cfg.clusterSchedulingMinShare);
LOG.warn("Spark scheduling config file " + sparkSchedulingConfFile);
additionalSparkConfigs.put(SparkConfigs.SPARK_SCHEDULER_ALLOCATION_FILE_KEY(), sparkSchedulingConfFile);
} else {
LOG.warn("Job Scheduling Configs will not be in effect as spark.scheduler.mode "
+ "is not set to FAIR at instantiation time. Continuing without scheduling configs");
}
return additionalSparkConfigs;
} | 3.68 |
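A hedged sketch of how the returned map might be applied, assuming cfg is an already-populated HoodieStreamer.Config and that the Hudi imports are on the classpath (omitted here); the class name and app name are illustrative.

import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

class SchedulingConfExample {
  // Folds the generated scheduling configs into the SparkConf before the context is
  // created, so a FAIR allocation file (when one was generated) actually takes effect.
  static JavaSparkContext buildContext(HoodieStreamer.Config cfg) throws Exception {
    Map<String, String> extra = SchedulerConfGenerator.getSparkSchedulingConfigs(cfg);
    SparkConf sparkConf = new SparkConf().setAppName("hoodie-streamer-example");
    extra.forEach(sparkConf::set);
    return new JavaSparkContext(sparkConf);
  }
}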
hibernate-validator_PlatformResourceBundleLocator_determineAvailabilityOfResourceBundleControl | /**
* Check whether ResourceBundle.Control is available, which is needed for bundle aggregation. If not, we'll skip
* resource aggregation.
* <p>
* It is *not* available
* <ul>
* <li>in the Google App Engine environment</li>
* <li>when running HV as Java 9 named module (which would be the case when adding a module-info descriptor to the
* HV JAR)</li>
* </ul>
*
* @see <a href="http://code.google.com/appengine/docs/java/jrewhitelist.html">GAE JRE whitelist</a>
* @see <a href="https://hibernate.atlassian.net/browse/HV-1023">HV-1023</a>
* @see <a href="http://download.java.net/java/jdk9/docs/api/java/util/ResourceBundle.Control.html">ResourceBundle.Control</a>
*/
private static boolean determineAvailabilityOfResourceBundleControl() {
try {
ResourceBundle.Control dummyControl = AggregateResourceBundleControl.CONTROL;
if ( dummyControl == null ) {
return false;
}
Method getModule = run( GetMethod.action( Class.class, "getModule" ) );
// not on Java 9
if ( getModule == null ) {
return true;
}
// on Java 9, check whether HV is a named module
Object module = getModule.invoke( PlatformResourceBundleLocator.class );
Method isNamedMethod = run( GetMethod.action( module.getClass(), "isNamed" ) );
boolean isNamed = (Boolean) isNamedMethod.invoke( module );
return !isNamed;
}
catch (Throwable e) {
LOG.info( MESSAGES.unableToUseResourceBundleAggregation() );
return false;
}
} | 3.68 |
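For contrast, a sketch of the equivalent check on a Java 9+ baseline, where the module can be queried directly instead of through reflection; the method name is hypothetical, and this is not how the library itself is written, since it must still run on Java 8.

private static boolean resourceBundleControlUsable() {
	try {
		// bundle aggregation is only safe when this class lives in an unnamed module
		return AggregateResourceBundleControl.CONTROL != null
				&& !PlatformResourceBundleLocator.class.getModule().isNamed();
	}
	catch (Throwable e) {
		// e.g. restricted environments such as Google App Engine
		return false;
	}
}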
hadoop_TFile_compareTo | /**
   * Compare an entry with a RawComparable object. This is useful when
   * entries are stored in a collection and we want to compare them against a
   * user-supplied key.
*/
@Override
public int compareTo(RawComparable key) {
return reader.compareKeys(keyBuffer, 0, getKeyLength(), key.buffer(),
key.offset(), key.size());
} | 3.68 |
hbase_AsyncBufferedMutatorBuilder_disableWriteBufferPeriodicFlush | /**
   * Disable the periodic flush, i.e., set the timeout to 0.
*/
default AsyncBufferedMutatorBuilder disableWriteBufferPeriodicFlush() {
return setWriteBufferPeriodicFlush(0, TimeUnit.NANOSECONDS);
} | 3.68 |
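A short usage sketch, assuming an open AsyncConnection; the class name and the table name "demo" are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncBufferedMutator;
import org.apache.hadoop.hbase.client.AsyncConnection;

class MutatorBuilderExample {
  // Builds a mutator that flushes only when the write buffer fills up or flush() is
  // called explicitly, never on a background timer.
  static AsyncBufferedMutator build(AsyncConnection conn) {
    return conn.getBufferedMutatorBuilder(TableName.valueOf("demo"))
        .disableWriteBufferPeriodicFlush()
        .build();
  }
}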
hbase_BitSetNode_canGrow | // ========================================================================
// Grow/Merge Helpers
// ========================================================================
public boolean canGrow(long procId) {
if (procId <= start) {
return getEnd() - procId < MAX_NODE_SIZE;
} else {
return procId - start < MAX_NODE_SIZE;
}
} | 3.68 |
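For intuition, with hypothetical values start = 4096, getEnd() = 5119 and MAX_NODE_SIZE = 4096: growing upwards, canGrow(8191) is true (8191 - 4096 = 4095 < 4096) while canGrow(8192) is false; growing downwards, canGrow(1024) is true (5119 - 1024 = 4095) while canGrow(1023) is false, since the node would then have to span more than MAX_NODE_SIZE procedure ids.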
framework_StaticSection_getCell | /**
* Returns the cell in this section that corresponds to the given
* column.
*
* @param column
* the column
* @return the cell for the given column
*
* @throws IllegalArgumentException
* if no cell was found for the column
*/
public CELL getCell(Column<?, ?> column) {
return internalGetCell(section.getInternalIdForColumn(column));
} | 3.68 |
hadoop_FederationPolicyInitializationContext_getHomeSubcluster | /**
* Returns the current home sub-cluster. Useful for default policy behaviors.
*
* @return the home sub-cluster.
*/
public SubClusterId getHomeSubcluster() {
return homeSubcluster;
} | 3.68 |
flink_BinaryArrayWriter_reset | /** Resets the writer so the array can be re-written: clears the null bits and restores the element count in the header. */
@Override
public void reset() {
this.cursor = fixedSize;
for (int i = 0; i < nullBitsSizeInBytes; i += 8) {
segment.putLong(i, 0L);
}
this.segment.putInt(0, numElements);
} | 3.68 |
hadoop_AbfsHttpOperation_getConnResponseMessage | /**
* Gets the connection response message.
* @return response message.
   * @throws IOException if an error occurs while reading the response message.
*/
String getConnResponseMessage() throws IOException {
return connection.getResponseMessage();
} | 3.68 |
querydsl_JPAExpressions_type | /**
* Create a type(path) expression
*
* @param path entity
* @return type(path)
*/
public static StringExpression type(EntityPath<?> path) {
return Expressions.stringOperation(JPQLOps.TYPE, path);
} | 3.68 |
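A usage sketch projecting the discriminator type with a JPAQuery; QAnimal is a hypothetical Querydsl-generated query type for an Animal entity and is not part of the original snippet.

import java.util.List;

import javax.persistence.EntityManager;

import com.querydsl.jpa.JPAExpressions;
import com.querydsl.jpa.impl.JPAQuery;

class TypeProjectionExample {
  // Projects the discriminator type of each row of an inheritance hierarchy.
  static List<String> entityTypes(EntityManager em) {
    return new JPAQuery<Void>(em)
        .select(JPAExpressions.type(QAnimal.animal))
        .from(QAnimal.animal)
        .fetch();
  }
}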
hbase_ReflectionUtils_logThreadInfo | /**
* Log the current thread stacks at INFO level.
* @param log the logger that logs the stack trace
* @param title a descriptive title for the call stacks
   * @param minInterval the minimum interval, in seconds, since the stacks were last logged
*/
public static void logThreadInfo(Logger log, String title, long minInterval) {
boolean dumpStack = false;
if (log.isInfoEnabled()) {
synchronized (ReflectionUtils.class) {
long now = EnvironmentEdgeManager.currentTime();
if (now - previousLogTime >= minInterval * 1000) {
previousLogTime = now;
dumpStack = true;
}
}
if (dumpStack) {
try {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title);
log.info(buffer.toString(Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException ignored) {
log.warn(
"Could not write thread info about '" + title + "' due to a string encoding issue.");
}
}
}
} | 3.68 |
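A usage sketch, assuming ReflectionUtils refers to the class above and an SLF4J logger; the class name and message are illustrative, and minInterval is interpreted in seconds.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class SlowOperationMonitor {
  private static final Logger LOG = LoggerFactory.getLogger(SlowOperationMonitor.class);

  // Dumps all thread stacks at INFO level when a stall is suspected, but at most once
  // every 60 seconds no matter how often this method is invoked.
  static void reportPossibleStall() {
    ReflectionUtils.logThreadInfo(LOG, "possible stall detected", 60);
  }
}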
pulsar_ConsumerConfiguration_getMessageListener | /**
* @return the configured {@link MessageListener} for the consumer
*/
public MessageListener<byte[]> getMessageListener() {
return messageListener;
} | 3.68 |