name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
morf_DatabaseMetaDataProvider_loadAllTableNames | /**
* Creates a map of all table names,
* indexed by their case-agnostic names.
*
* @return Map of real table names.
*/
protected Map<AName, RealName> loadAllTableNames() {
final ImmutableMap.Builder<AName, RealName> tableNameMappings = ImmutableMap.builder();
try {
final DatabaseMetaData databaseMetaData = connection.getMetaData();
try (ResultSet tableResultSet = databaseMetaData.getTables(null, schemaName, null, tableTypesForTables())) {
while (tableResultSet.next()) {
RealName tableName = readTableName(tableResultSet);
try {
String tableSchemaName = tableResultSet.getString(TABLE_SCHEM);
String tableType = tableResultSet.getString(TABLE_TYPE);
boolean systemTable = isSystemTable(tableName);
boolean ignoredTable = isIgnoredTable(tableName);
if (log.isDebugEnabled()) {
log.debug("Found table [" + tableName + "] of type [" + tableType + "] in schema [" + tableSchemaName + "]"
+ (systemTable ? " - SYSTEM TABLE" : "") + (ignoredTable ? " - IGNORED" : ""));
}
if (!systemTable && !ignoredTable) {
tableNameMappings.put(tableName, tableName);
}
}
catch (SQLException e) {
throw new RuntimeSqlException("Error reading metadata for table ["+tableName+"]", e);
}
}
return tableNameMappings.build();
}
}
catch (SQLException e) {
throw new RuntimeSqlException(e);
}
} | 3.68 |
hadoop_UnmanagedApplicationManager_getAMRMClientRelayer | /**
* Returns the rmProxy relayer of this UAM.
*
* @return rmProxy relayer of the UAM
*/
public AMRMClientRelayer getAMRMClientRelayer() {
return this.rmProxyRelayer;
} | 3.68 |
hbase_CacheConfig_shouldEvictOnClose | /**
* @return true if blocks should be evicted from the cache when an HFile reader is closed, false
* if not
*/
public boolean shouldEvictOnClose() {
return this.evictOnClose;
} | 3.68 |
hbase_Scan_addFamily | /**
* Get all columns from the specified family.
* <p>
* Overrides previous calls to addColumn for this family.
* @param family family name
*/
public Scan addFamily(byte[] family) {
familyMap.remove(family);
familyMap.put(family, null);
return this;
} | 3.68 |
flink_AdaptiveScheduler_computeReactiveModeVertexParallelismStore | /**
* Creates the parallelism store for a set of vertices, optionally with a flag to leave the
* vertex parallelism unchanged. If the flag is set, the parallelisms must be valid for
* execution.
*
* <p>We need to set parallelism to the max possible value when requesting resources, but when
* executing the graph we should respect what we are actually given.
*
* @param vertices The vertices to store parallelism information for
* @param adjustParallelism Whether to adjust the parallelism
* @param defaultMaxParallelismFunc a function for computing a default max parallelism if none
* is specified on a given vertex
* @return The parallelism store.
*/
@VisibleForTesting
static VertexParallelismStore computeReactiveModeVertexParallelismStore(
Iterable<JobVertex> vertices,
Function<JobVertex, Integer> defaultMaxParallelismFunc,
boolean adjustParallelism) {
DefaultVertexParallelismStore store = new DefaultVertexParallelismStore();
for (JobVertex vertex : vertices) {
// if no max parallelism was configured by the user, we calculate and set a default
final int maxParallelism =
vertex.getMaxParallelism() == JobVertex.MAX_PARALLELISM_DEFAULT
? defaultMaxParallelismFunc.apply(vertex)
: vertex.getMaxParallelism();
// If the parallelism has already been adjusted, respect what has been configured in the
// vertex. Otherwise, scale it to the max parallelism to attempt to be "as parallel as
// possible"
final int parallelism;
if (adjustParallelism) {
parallelism = maxParallelism;
} else {
parallelism = vertex.getParallelism();
}
VertexParallelismInformation parallelismInfo =
new DefaultVertexParallelismInfo(
parallelism,
maxParallelism,
// Allow rescaling if the new desired max parallelism
// is not less than what was declared here during scheduling.
// This prevents the situation where more resources are requested
// based on the computed default, when actually fewer are necessary.
(newMax) ->
newMax >= maxParallelism
? Optional.empty()
: Optional.of(
"Cannot lower max parallelism in Reactive mode."));
store.setParallelismInfo(vertex.getID(), parallelismInfo);
}
return store;
} | 3.68 |
hmily_HmilyTacLocalParticipantExecutor_cancel | /**
* Do cancel.
*
* @param participant hmily participant
*/
public static void cancel(final HmilyParticipant participant) {
List<HmilyParticipantUndo> undoList = HmilyParticipantUndoCacheManager.getInstance().get(participant.getParticipantId());
for (HmilyParticipantUndo undo : undoList) {
boolean success = UndoHook.INSTANCE.run(undo);
if (success) {
cleanUndo(undo);
}
}
cleanHmilyParticipant(participant);
} | 3.68 |
flink_ExceptionUtils_tryEnrichOutOfMemoryError | /**
* Tries to enrich OutOfMemoryErrors being part of the passed root Throwable's cause tree.
*
* <p>This method improves error messages for direct and metaspace {@link OutOfMemoryError}. It
* adds description about the possible causes and ways of resolution.
*
* @param root The Throwable of which the cause tree shall be traversed.
* @param jvmMetaspaceOomNewErrorMessage The message being used for JVM metaspace-related
* OutOfMemoryErrors. Passing <code>null</code> will disable handling this class of error.
* @param jvmDirectOomNewErrorMessage The message being used for direct memory-related
* OutOfMemoryErrors. Passing <code>null</code> will disable handling this class of error.
* @param jvmHeapSpaceOomNewErrorMessage The message being used for Heap space-related
* OutOfMemoryErrors. Passing <code>null</code> will disable handling this class of error.
*/
public static void tryEnrichOutOfMemoryError(
@Nullable Throwable root,
@Nullable String jvmMetaspaceOomNewErrorMessage,
@Nullable String jvmDirectOomNewErrorMessage,
@Nullable String jvmHeapSpaceOomNewErrorMessage) {
updateDetailMessage(
root,
t -> {
if (isMetaspaceOutOfMemoryError(t)) {
return jvmMetaspaceOomNewErrorMessage;
} else if (isDirectOutOfMemoryError(t)) {
return jvmDirectOomNewErrorMessage;
} else if (isHeapSpaceOutOfMemoryError(t)) {
return jvmHeapSpaceOomNewErrorMessage;
}
return null;
});
} | 3.68 |
hadoop_ResourceUsageMetrics_setVirtualMemoryUsage | /**
* Set the virtual memory usage.
*/
public void setVirtualMemoryUsage(long usage) {
virtualMemoryUsage = usage;
} | 3.68 |
hbase_ScheduledChore_initialChore | /**
* Override to run a task before we start looping.
* @return true if initial chore was successful
*/
protected boolean initialChore() {
// Default does nothing
return true;
} | 3.68 |
framework_Table_getUpdatedRowCount | /**
* Subclass and override this to enable partial row updates, bypassing the
* normal caching and lazy loading mechanism. This is useful for updating
* the state of certain rows, e.g. in the TreeTable the collapsed state of a
* single node is updated using this mechanism.
*
* @return the number of rows to update, starting at the index returned by
* {@link #getFirstUpdatedItemIndex()}. For plain table it is always
* 0.
*/
protected int getUpdatedRowCount() {
return 0;
} | 3.68 |
rocketmq-connect_ConfigManagementService_configure | /**
* Configure class with the given key-value pairs
*
* @param config can be DistributedConfig or StandaloneConfig
*/
default void configure(WorkerConfig config) {
} | 3.68 |
framework_TouchScrollDelegate_isMoved | /**
* Has the user moved the touch.
*
* @return {@code true} if the user has moved the touch, {@code false} otherwise
*/
public boolean isMoved() {
return moved;
} | 3.68 |
hbase_ClientTokenUtil_obtainAndCacheToken | /**
* Obtain an authentication token for the given user and add it to the user's credentials.
* @param conn The HBase cluster connection
* @param user The user for whom to obtain the token
* @throws IOException If making a remote call to the authentication service fails
* @throws InterruptedException If executing as the given user is interrupted
*/
public static void obtainAndCacheToken(final Connection conn, User user)
throws IOException, InterruptedException {
try {
Token<AuthenticationTokenIdentifier> token = obtainToken(conn, user);
if (token == null) {
throw new IOException("No token returned for user " + user.getName());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName());
}
user.addToken(token);
} catch (IOException | InterruptedException | RuntimeException e) {
throw e;
} catch (Exception e) {
throw new UndeclaredThrowableException(e,
"Unexpected exception obtaining token for user " + user.getName());
}
} | 3.68 |
hbase_HFileReaderImpl_getCachedBlock | /**
* Retrieve block from cache. Validates the retrieved block's type vs {@code expectedBlockType}
* and its encoding vs. {@code expectedDataBlockEncoding}. Unpacks the block as necessary.
*/
private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock,
boolean updateCacheMetrics, BlockType expectedBlockType,
DataBlockEncoding expectedDataBlockEncoding) throws IOException {
// Check cache for block. If found return.
BlockCache cache = cacheConf.getBlockCache().orElse(null);
if (cache != null) {
HFileBlock cachedBlock = (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock,
updateCacheMetrics, expectedBlockType);
if (cachedBlock != null) {
if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
HFileBlock compressedBlock = cachedBlock;
cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
// In case of compressed block after unpacking we can release the compressed block
if (compressedBlock != cachedBlock) {
compressedBlock.release();
}
}
try {
validateBlockType(cachedBlock, expectedBlockType);
} catch (IOException e) {
returnAndEvictBlock(cache, cacheKey, cachedBlock);
throw e;
}
if (expectedDataBlockEncoding == null) {
return cachedBlock;
}
DataBlockEncoding actualDataBlockEncoding = cachedBlock.getDataBlockEncoding();
// Block types other than data blocks always have
// DataBlockEncoding.NONE. To avoid false negative cache misses, only
// perform this check if cached block is a data block.
if (
cachedBlock.getBlockType().isData()
&& !actualDataBlockEncoding.equals(expectedDataBlockEncoding)
) {
// This mismatch may happen if a Scanner, which is used for say a
// compaction, tries to read an encoded block from the block cache.
// The reverse might happen when an EncodedScanner tries to read
// un-encoded blocks which were cached earlier.
//
// Because returning a data block with an implicit BlockType mismatch
// will cause the requesting scanner to throw, a disk read should be
// forced here. This will potentially cause a significant number of
// cache misses, so we should keep track of this as it might justify
// the work on a CompoundScanner.
if (
!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE)
&& !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)
) {
// If the block is encoded but the encoding does not match the
// expected encoding it is likely the encoding was changed but the
// block was not yet evicted. Evictions on file close happen async
// so blocks with the old encoding still linger in cache for some
// period of time. This event should be rare as it only happens on
// schema definition change.
LOG.info(
"Evicting cached block with key {} because data block encoding mismatch; "
+ "expected {}, actual {}, path={}",
cacheKey, actualDataBlockEncoding, expectedDataBlockEncoding, path);
// This is an error scenario. so here we need to release the block.
returnAndEvictBlock(cache, cacheKey, cachedBlock);
}
return null;
}
return cachedBlock;
}
}
return null;
} | 3.68 |
hbase_SegmentScanner_backwardSeek | /**
* Seek the scanner at or before the row of the specified Cell. It first tries to seek the scanner
* at or after the specified Cell and returns if the peek KeyValue of the scanner has the same row
* as the specified Cell; otherwise it seeks the scanner at the first Cell of the row preceding the
* specified KeyValue.
* @param key seek Cell
* @return true if the scanner is at the valid KeyValue, false if such Cell does not exist
*/
@Override
public boolean backwardSeek(Cell key) throws IOException {
if (closed) {
return false;
}
seek(key); // seek forward then go backward
if (peek() == null || segment.compareRows(peek(), key) > 0) {
return seekToPreviousRow(key);
}
return true;
} | 3.68 |
hbase_VersionInfo_getUrl | /**
* Get the subversion URL for the root hbase directory.
* @return the url
*/
public static String getUrl() {
return Version.url;
} | 3.68 |
hadoop_Histogram_getCDF | /**
* Produces a discrete approximation of the CDF. The user provides the points
* on the {@code Y} axis he wants, and we give the corresponding points on the
* {@code X} axis, plus the minimum and maximum from the data.
*
* @param scale
* the denominator applied to every element of buckets. For example,
* if {@code scale} is {@code 1000}, a {@code buckets} element of 500
* will specify the median in that output slot.
* @param buckets
* an array of int, all less than scale and each strictly greater
* than its predecessor if any. We don't check these requirements.
* @return a {@code long[]}, with two more elements than {@code buckets} has.
* The first resp. last element is the minimum resp. maximum value
* that was ever {@code enter}ed. The rest of the elements correspond
* to the elements of {@code buckets} and carry the first element
* whose rank is no less than {@code #content elements * scale /
* bucket}.
*
*/
public long[] getCDF(int scale, int[] buckets) {
if (totalCount == 0) {
return null;
}
long[] result = new long[buckets.length + 2];
// fill in the min and the max
result[0] = content.firstEntry().getKey();
result[buckets.length + 1] = content.lastEntry().getKey();
Iterator<Map.Entry<Long, Long>> iter = content.entrySet().iterator();
long cumulativeCount = 0;
int bucketCursor = 0;
// Loop invariant: the item at buckets[bucketCursor] can still be reached
// from iter, and the number of logged elements no longer available from
// iter is cumulativeCount.
//
// cumulativeCount/totalCount is therefore strictly less than
// buckets[bucketCursor]/scale .
while (iter.hasNext()) {
long targetCumulativeCount = buckets[bucketCursor] * totalCount / scale;
Map.Entry<Long, Long> elt = iter.next();
cumulativeCount += elt.getValue();
while (cumulativeCount >= targetCumulativeCount) {
result[bucketCursor + 1] = elt.getKey();
++bucketCursor;
if (bucketCursor < buckets.length) {
targetCumulativeCount = buckets[bucketCursor] * totalCount / scale;
} else {
break;
}
}
if (bucketCursor == buckets.length) {
break;
}
}
return result;
} | 3.68 |
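As a quick illustration of the getCDF contract above, here is a minimal usage sketch; the enter(long) population method and the sample values are assumptions for illustration, not taken from the snippet itself.

```java
// Hypothetical usage sketch: populate a histogram, then ask for quartiles.
// enter(long) is assumed to be the population method of this Histogram class.
Histogram latencies = new Histogram();
for (long sample : new long[] {5, 7, 9, 12, 20, 31, 44}) {
  latencies.enter(sample);
}
// scale = 1000, buckets = {250, 500, 750} request the 25th/50th/75th percentiles.
long[] cdf = latencies.getCDF(1000, new int[] {250, 500, 750});
// cdf[0] is the minimum ever entered, cdf[4] the maximum,
// cdf[1..3] are the requested percentile boundaries.
System.out.println(java.util.Arrays.toString(cdf));
```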
flink_BinaryRowWriter_setNullAt | /** Default not null. */
@Override
public void setNullAt(int pos) {
setNullBit(pos);
segment.putLong(getFieldOffset(pos), 0L);
} | 3.68 |
hadoop_SinglePendingCommit_destinationPath | /**
* Build the destination path of the object.
* @return the path
* @throws IllegalStateException if the URI is invalid
*/
public Path destinationPath() {
Preconditions.checkState(StringUtils.isNotEmpty(uri), "Empty uri");
try {
return new Path(new URI(uri));
} catch (URISyntaxException e) {
throw new IllegalStateException("Cannot parse URI " + uri);
}
} | 3.68 |
pulsar_PulsarAdminException_clone | /**
* This method is meant to be overridden by all subclasses.
* We cannot make it 'abstract' because it would be a breaking change in the public API.
* @return a new PulsarAdminException
*/
protected PulsarAdminException clone() {
return new PulsarAdminException(getMessage(), getCause(), httpError, statusCode);
} | 3.68 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_prepare | // return false if the scan has already been resumed. See the comment above for ScanResumerImpl
// for more details.
synchronized boolean prepare(ScanResponse resp, int numberOfCompleteRows) {
if (state == ScanResumerState.RESUMED) {
// user calls resume before we actually suspend the scan, just continue;
return false;
}
state = ScanResumerState.SUSPENDED;
this.resp = resp;
this.numberOfCompleteRows = numberOfCompleteRows;
// if there are no more results in region then the scanner at RS side will be closed
// automatically so we do not need to renew lease.
if (resp.getMoreResultsInRegion()) {
// schedule renew lease task
scheduleRenewLeaseTask();
}
return true;
} | 3.68 |
framework_AbstractComponent_getWidthUnits | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Sizeable#getWidthUnits()
*/
@Override
public Unit getWidthUnits() {
return widthUnit;
} | 3.68 |
hbase_ZKConfig_getClientZKQuorumServersString | /**
* Get the client ZK Quorum servers string
* @param conf the configuration to read
* @return Client quorum servers, or null if not specified
*/
public static String getClientZKQuorumServersString(Configuration conf) {
setZooKeeperClientSystemProperties(HConstants.ZK_CFG_PROPERTY_PREFIX, conf);
String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);
if (clientQuorumServers == null) {
return null;
}
int defaultClientPort =
conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT);
String clientZkClientPort =
Integer.toString(conf.getInt(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT, defaultClientPort));
// Build the ZK quorum server string with "server:clientport" list, separated by ','
final String[] serverHosts = StringUtils.getStrings(clientQuorumServers);
return buildZKQuorumServerString(serverHosts, clientZkClientPort);
} | 3.68 |
flink_TaskDeploymentDescriptor_getAttemptNumber | /** Returns the attempt number of the subtask. */
public int getAttemptNumber() {
return executionId.getAttemptNumber();
} | 3.68 |
querydsl_GeometryExpression_overlaps | /**
* Returns 1 (TRUE) if this geometric object “spatially overlaps” anotherGeometry.
*
* @param geometry other geometry
* @return true, if overlaps
*/
public BooleanExpression overlaps(Expression<? extends Geometry> geometry) {
return Expressions.booleanOperation(SpatialOps.OVERLAPS, mixin, geometry);
} | 3.68 |
flink_CommonTestUtils_assertThrows | /** Checks whether an exception with a message occurs when running a piece of code. */
public static void assertThrows(
String msg, Class<? extends Exception> expected, Callable<?> code) {
try {
Object result = code.call();
Assert.fail("Previous method call should have failed but it returned: " + result);
} catch (Exception e) {
assertThat(e, instanceOf(expected));
assertThat(e.getMessage(), containsString(msg));
}
} | 3.68 |
hibernate-validator_PlatformResourceBundleLocator_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
shardingsphere-elasticjob_TaskContext_getIdForUnassignedSlave | /**
* Get the unassigned task ID before the job executes.
*
* @param id task ID
* @return unassigned task ID before the job executes
*/
public static String getIdForUnassignedSlave(final String id) {
return id.replaceAll(TaskContext.from(id).getSlaveId(), UNASSIGNED_SLAVE_ID);
} | 3.68 |
morf_SchemaChangeSequence_applyToSchema | /**
* Applies the changes to the given schema.
*
* @param initialSchema The schema to apply changes to.
* @return the resulting schema after applying changes in this sequence
*/
public Schema applyToSchema(Schema initialSchema) {
Schema currentSchema = initialSchema;
for (UpgradeStepWithChanges changesForStep : allChanges) {
for (SchemaChange change : changesForStep.getChanges()) {
try {
currentSchema = change.apply(currentSchema);
} catch (RuntimeException rte) {
throw new RuntimeException("Failed to apply change [" + change + "] from upgrade step " + changesForStep.getUpgradeClass(), rte);
}
}
}
return currentSchema;
} | 3.68 |
hadoop_SuccessData_serializer | /**
* Get a JSON serializer for this class.
* @return a serializer.
*/
public static JsonSerialization<SuccessData> serializer() {
return new JsonSerialization<>(SuccessData.class, false, false);
} | 3.68 |
flink_CachingLookupFunction_open | /**
* Open the {@link CachingLookupFunction}.
*
* <p>In order to reduce the memory usage of the cache, {@link LookupCacheManager} is used to
* provide a shared cache instance across subtasks of this function. Here we use {@link
* #functionIdentifier()} as the id of the cache, which is generated by MD5 of serialized bytes
* of this function. As different subtasks of the function will generate the same MD5, this
* could promise that they will be served with the same cache instance.
*
* @see #functionIdentifier()
*/
@Override
public void open(FunctionContext context) throws Exception {
// Get the shared cache from manager
cacheIdentifier = functionIdentifier();
cache = LookupCacheManager.getInstance().registerCacheIfAbsent(cacheIdentifier, cache);
// Register metrics
cacheMetricGroup =
new InternalCacheMetricGroup(
context.getMetricGroup(), LOOKUP_CACHE_METRIC_GROUP_NAME);
if (!(cache instanceof LookupFullCache)) {
loadCounter = new SimpleCounter();
cacheMetricGroup.loadCounter(loadCounter);
numLoadFailuresCounter = new SimpleCounter();
cacheMetricGroup.numLoadFailuresCounter(numLoadFailuresCounter);
} else {
initializeFullCache(((LookupFullCache) cache), context);
}
// Initialize cache and the delegating function
cache.open(cacheMetricGroup);
if (delegate != null) {
delegate.open(context);
}
} | 3.68 |
hbase_MetaTableAccessor_fullScanTables | /**
* Performs a full scan of <code>hbase:meta</code> for tables.
* @param connection connection we're using
* @param visitor Visitor invoked against each row in tables family.
*/
public static void fullScanTables(Connection connection,
final ClientMetaTableAccessor.Visitor visitor) throws IOException {
scanMeta(connection, null, null, QueryType.TABLE, visitor);
} | 3.68 |
hbase_TableMapReduceUtil_addHBaseDependencyJars | /**
* Add HBase and its dependencies (only) to the job configuration.
* <p>
* This is intended as a low-level API, facilitating code reuse between this class and its mapred
* counterpart. It is also of use to external tools that need to build a MapReduce job that interacts
* with HBase but want fine-grained control over the jars shipped to the cluster.
* </p>
* @param conf The Configuration object to extend with dependencies.
* @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil
* @see <a href="https://issues.apache.org/jira/browse/PIG-3285">PIG-3285</a>
*/
public static void addHBaseDependencyJars(Configuration conf) throws IOException {
addDependencyJarsForClasses(conf,
// explicitly pull a class from each module
org.apache.hadoop.hbase.HConstants.class, // hbase-common
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded
org.apache.hadoop.hbase.client.Put.class, // hbase-client
org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server
org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat
org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat
org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce
org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics
org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api
org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication
org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http
org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure
org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper
org.apache.hbase.thirdparty.com.google.common.collect.Lists.class, // hb-shaded-miscellaneous
org.apache.hbase.thirdparty.com.google.gson.GsonBuilder.class, // hbase-shaded-gson
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.class, // hb-sh-protobuf
org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent.class, // hbase-unsafe
org.apache.zookeeper.ZooKeeper.class, // zookeeper
com.codahale.metrics.MetricRegistry.class, // metrics-core
org.apache.commons.lang3.ArrayUtils.class, // commons-lang
io.opentelemetry.api.trace.Span.class, // opentelemetry-api
io.opentelemetry.semconv.trace.attributes.SemanticAttributes.class, // opentelemetry-semconv
io.opentelemetry.context.Context.class); // opentelemetry-context
} | 3.68 |
hadoop_DynoInfraUtils_getNameNodeWebUri | /**
* Get the URI that can be used to access the launched NameNode's web UI, e.g.
* for JMX calls.
*
* @param nameNodeProperties The set of properties representing the
* information about the launched NameNode.
* @return The URI to the web UI.
*/
static URI getNameNodeWebUri(Properties nameNodeProperties) {
return URI.create(String.format("http://%s:%s/",
nameNodeProperties.getProperty(DynoConstants.NN_HOSTNAME),
nameNodeProperties.getProperty(DynoConstants.NN_HTTP_PORT)));
} | 3.68 |
shardingsphere-elasticjob_JobConfigurationPOJO_toJobConfiguration | /**
* Convert to job configuration.
*
* @return job configuration
*/
public JobConfiguration toJobConfiguration() {
JobConfiguration result = JobConfiguration.newBuilder(jobName, shardingTotalCount)
.cron(cron).timeZone(timeZone).shardingItemParameters(shardingItemParameters).jobParameter(jobParameter)
.monitorExecution(monitorExecution).failover(failover).misfire(misfire)
.maxTimeDiffSeconds(maxTimeDiffSeconds).reconcileIntervalMinutes(reconcileIntervalMinutes)
.jobShardingStrategyType(jobShardingStrategyType).jobExecutorThreadPoolSizeProviderType(jobExecutorThreadPoolSizeProviderType)
.jobErrorHandlerType(jobErrorHandlerType).jobListenerTypes(jobListenerTypes.toArray(new String[]{})).description(description)
.disabled(disabled).overwrite(overwrite).label(label).staticSharding(staticSharding).build();
jobExtraConfigurations.stream().map(YamlConfiguration::toConfiguration).forEach(result.getExtraConfigurations()::add);
for (Object each : props.keySet()) {
result.getProps().setProperty(each.toString(), props.get(each.toString()).toString());
}
return result;
} | 3.68 |
pulsar_ManagedLedgerConfig_getEnsembleSize | /**
* @return the ensembleSize
*/
public int getEnsembleSize() {
return ensembleSize;
} | 3.68 |
hbase_IncrementalBackupManager_getLogFilesForNewBackup | /**
* For each region server: get all log files newer than the last timestamps but not newer than the
* newest timestamps.
* @param olderTimestamps the timestamp for each region server of the last backup.
* @param newestTimestamps the timestamp for each region server that the backup should lead to.
* @param conf the Hadoop and Hbase configuration
* @param savedStartCode the startcode (timestamp) of last successful backup.
* @return a list of log files to be backed up
* @throws IOException exception
*/
private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
Map<String, Long> newestTimestamps, Configuration conf, String savedStartCode)
throws IOException {
LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps
+ "\n newestTimestamps: " + newestTimestamps);
Path walRootDir = CommonFSUtils.getWALRootDir(conf);
Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
FileSystem fs = walRootDir.getFileSystem(conf);
NewestLogFilter pathFilter = new NewestLogFilter();
List<String> resultLogFiles = new ArrayList<>();
List<String> newestLogs = new ArrayList<>();
/*
* The old region servers and timestamps info we kept in the backup system table may be out of sync
* if a new region server is added or an existing one is lost. We'll deal with it here when processing
* the logs. If data in the backup system table has more hosts, just ignore it. If the .logs
* directory includes more hosts, the additional hosts will not have old timestamps to compare
* with. We'll just use all the logs in that directory. We always write up-to-date region server
* and timestamp info to backup system table at the end of successful backup.
*/
FileStatus[] rss;
Path p;
String host;
Long oldTimeStamp;
String currentLogFile;
long currentLogTS;
// Get the files in .logs.
rss = fs.listStatus(logDir);
for (FileStatus rs : rss) {
p = rs.getPath();
host = BackupUtils.parseHostNameFromLogFile(p);
if (host == null) {
continue;
}
FileStatus[] logs;
oldTimeStamp = olderTimestamps.get(host);
// It is possible that there is no old timestamp in backup system table for this host if
// this region server is newly added after our last backup.
if (oldTimeStamp == null) {
logs = fs.listStatus(p);
} else {
pathFilter.setLastBackupTS(oldTimeStamp);
logs = fs.listStatus(p, pathFilter);
}
for (FileStatus log : logs) {
LOG.debug("currentLogFile: " + log.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip hbase:meta log file: " + log.getPath().getName());
}
continue;
}
currentLogFile = log.getPath().toString();
resultLogFiles.add(currentLogFile);
currentLogTS = BackupUtils.getCreationTime(log.getPath());
// If newestTimestamps.get(host) is null, it means that
// either RS (host) has been restarted recently with a different port number
// or RS is down (was decommissioned). In any case, we treat this
// log file as eligible for inclusion into the incremental backup log list.
Long ts = newestTimestamps.get(host);
if (ts == null) {
LOG.warn("ORPHAN log found: " + log + " host=" + host);
LOG.debug("Known hosts (from newestTimestamps):");
for (String s : newestTimestamps.keySet()) {
LOG.debug(s);
}
}
if (ts == null || currentLogTS > ts) {
newestLogs.add(currentLogFile);
}
}
}
// Include the .oldlogs files too.
FileStatus[] oldlogs = fs.listStatus(oldLogDir);
for (FileStatus oldlog : oldlogs) {
p = oldlog.getPath();
currentLogFile = p.toString();
if (AbstractFSWALProvider.isMetaFile(p)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip .meta log file: " + currentLogFile);
}
continue;
}
host = BackupUtils.parseHostFromOldLog(p);
if (host == null) {
continue;
}
currentLogTS = BackupUtils.getCreationTime(p);
oldTimeStamp = olderTimestamps.get(host);
/*
* It is possible that there is no old timestamp in backup system table for this host. At the
* time of our last backup operation, this rs did not exist. The reason can be one of the two:
* 1. The rs already left/crashed. Its logs were moved to .oldlogs. 2. The rs was added after
* our last backup.
*/
if (oldTimeStamp == null) {
if (currentLogTS < Long.parseLong(savedStartCode)) {
// This log file is really old, its region server was before our last backup.
continue;
} else {
resultLogFiles.add(currentLogFile);
}
} else if (currentLogTS > oldTimeStamp) {
resultLogFiles.add(currentLogFile);
}
// It is possible that a host in .oldlogs is an obsolete region server
// so newestTimestamps.get(host) here can be null.
// Even if these logs belong to an obsolete region server, we still need
// to include them to avoid loss of edits for backup.
Long newTimestamp = newestTimestamps.get(host);
if (newTimestamp == null || currentLogTS > newTimestamp) {
newestLogs.add(currentLogFile);
}
}
// remove newest log per host because they are still in use
resultLogFiles.removeAll(newestLogs);
return resultLogFiles;
} | 3.68 |
graphhopper_Frequency_getId | /**
* Frequency entries have no ID in GTFS so we define one based on the fields in the frequency entry.
*
* It is possible to have two identical frequency entries in the GTFS, which under our understanding of the situation
* would mean that two sets of vehicles were randomly running the same trip at the same headway, but uncorrelated
* with each other, which is almost certain to be an error.
*/
public String getId() {
StringBuilder sb = new StringBuilder();
sb.append(trip_id);
sb.append('_');
sb.append(convertToGtfsTime(start_time));
sb.append("_to_");
sb.append(convertToGtfsTime(end_time));
sb.append("_every_");
sb.append(String.format(Locale.getDefault(), "%dm%02ds", headway_secs / 60, headway_secs % 60));
if (exact_times == 1) sb.append("_exact");
return sb.toString();
} | 3.68 |
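For illustration, the ID format assembled above roughly looks like the following; the values are made up and convertToGtfsTime is stubbed as a plain HH:MM:SS formatter, which is an assumption.

```java
// Stand-alone sketch of the ID format; trip, times and the formatter are hypothetical.
String tripId = "trip_42";
int startTime = 6 * 3600, endTime = 10 * 3600, headwaySecs = 900; // 06:00-10:00, every 15 minutes
java.util.function.IntFunction<String> toGtfsTime =
    s -> String.format("%02d:%02d:%02d", s / 3600, (s % 3600) / 60, s % 60);
String id = tripId + '_' + toGtfsTime.apply(startTime) + "_to_" + toGtfsTime.apply(endTime)
    + "_every_" + String.format(java.util.Locale.getDefault(), "%dm%02ds",
        headwaySecs / 60, headwaySecs % 60);
System.out.println(id); // trip_42_06:00:00_to_10:00:00_every_15m00s
```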
hadoop_AuxServiceRecord_launchTime | /**
* The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.
**/
public AuxServiceRecord launchTime(Date time) {
this.launchTime = time == null ? null : (Date) time.clone();
return this;
} | 3.68 |
hibernate-validator_AbstractConstraintValidatorManagerImpl_resolveAssignableTypes | /**
* Tries to reduce all assignable classes down to a single class.
*
* @param assignableTypes The set of all classes which are assignable to the class of the value to be validated and
* which are handled by at least one of the validators for the specified constraint.
*/
private void resolveAssignableTypes(List<Type> assignableTypes) {
if ( assignableTypes.size() == 0 || assignableTypes.size() == 1 ) {
return;
}
List<Type> typesToRemove = new ArrayList<>();
do {
typesToRemove.clear();
Type type = assignableTypes.get( 0 );
for ( int i = 1; i < assignableTypes.size(); i++ ) {
if ( TypeHelper.isAssignable( type, assignableTypes.get( i ) ) ) {
typesToRemove.add( type );
}
else if ( TypeHelper.isAssignable( assignableTypes.get( i ), type ) ) {
typesToRemove.add( assignableTypes.get( i ) );
}
}
assignableTypes.removeAll( typesToRemove );
} while ( typesToRemove.size() > 0 );
} | 3.68 |
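The reduction loop above can be easier to follow with a simplified, self-contained sketch that uses Class&lt;?&gt; in place of Type and Class.isAssignableFrom in place of TypeHelper.isAssignable; the example types are hypothetical.

```java
// Simplified sketch of the same reduction: keep only the most specific types.
java.util.List<Class<?>> assignable = new java.util.ArrayList<>(
    java.util.Arrays.asList(Number.class, Integer.class, java.io.Serializable.class));
java.util.List<Class<?>> toRemove = new java.util.ArrayList<>();
do {
  toRemove.clear();
  Class<?> first = assignable.get(0);
  for (int i = 1; i < assignable.size(); i++) {
    if (assignable.get(i).isAssignableFrom(first)) {
      toRemove.add(assignable.get(i)); // assignable.get(i) is more general than first
    } else if (first.isAssignableFrom(assignable.get(i))) {
      toRemove.add(first);             // first is more general than assignable.get(i)
    }
  }
  assignable.removeAll(toRemove);
} while (!toRemove.isEmpty());
System.out.println(assignable); // [class java.lang.Integer]
```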
framework_VMenuBar_setSelected | /**
* Set the currently selected item of this menu.
*
* @param item the item to select, or {@code null} to clear the selection
*/
public void setSelected(CustomMenuItem item) {
// If we had something selected, unselect
if (item != selected && selected != null) {
selected.setSelected(false);
}
// If we have a valid selection, select it
if (item != null) {
item.setSelected(true);
}
selected = item;
} | 3.68 |
hbase_SnapshotScannerHDFSAclHelper_addTableAcl | /**
* Add table user acls
* @param tableName the table
* @param users the table users with READ permission
* @return false if an error occurred, otherwise true
*/
public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
try {
long start = EnvironmentEdgeManager.currentTime();
if (users.size() > 0) {
HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
operationType);
}
LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
EnvironmentEdgeManager.currentTime() - start);
return true;
} catch (Exception e) {
LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
return false;
}
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_getJobRootNodeData | /**
* Get job root node data.
*
* @return data of job node
*/
public String getJobRootNodeData() {
return regCenter.get("/" + jobName);
} | 3.68 |
hbase_HBaseZKTestingUtility_setupClusterTestDir | /**
* Creates a directory for the cluster, under the test data directory.
*/
protected void setupClusterTestDir() {
if (clusterTestDir != null) {
return;
}
// Using randomUUID ensures that multiple clusters can be launched by
// the same test, if it stops & starts them
Path testDir = getDataTestDir("cluster_" + getRandomUUID().toString());
clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
// Have it cleaned up on exit
boolean b = deleteOnExit();
if (b) {
clusterTestDir.deleteOnExit();
}
LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
} | 3.68 |
hbase_HBaseTestingUtility_createWal | /**
* Create an unmanaged WAL. Be sure to close it when you're through.
*/
public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
throws IOException {
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
Configuration confForWAL = new Configuration(conf);
confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
} | 3.68 |
hadoop_MawoConfiguration_getZKAddress | /**
* Get ZooKeeper Address.
* @return value of ZooKeeper.address
*/
public String getZKAddress() {
return configsMap.get(ZK_ADDRESS);
} | 3.68 |
hadoop_OBSDataBlocks_flush | /**
* Flush operation will flush to disk.
*
* @throws IOException IOE raised on FileOutputStream
*/
@Override
void flush() throws IOException {
super.flush();
out.flush();
} | 3.68 |
hadoop_NativeTaskOutputFiles_getOutputFile | /**
* Return the path to local map output file created earlier
*/
public Path getOutputFile() throws IOException {
String path = String.format(OUTPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.68 |
hadoop_FlowActivityColumnPrefix_getColumnPrefix | /**
* @return the column prefix value
*/
public String getColumnPrefix() {
return columnPrefix;
} | 3.68 |
hadoop_AzureBlobFileSystem_getCanonicalServiceName | /**
* If Delegation tokens are enabled, the canonical service name of
* this filesystem is the filesystem URI.
* @return either the filesystem URI as a string, or null.
*/
@Override
public String getCanonicalServiceName() {
String name = null;
if (delegationTokenManager != null) {
name = delegationTokenManager.getCanonicalServiceName();
}
return name != null ? name : super.getCanonicalServiceName();
} | 3.68 |
hibernate-validator_AbstractMethodOverrideCheck_getEnclosingTypeElementQualifiedName | /**
* Find a {@link String} representation of the qualified name ({@link Name}) of the corresponding {@link TypeElement} that
* contains a given {@link ExecutableElement}.
*
* @param currentMethod a method
* @return a class/interface qualified name represented by {@link String} to which a method belongs to
*/
protected String getEnclosingTypeElementQualifiedName(ExecutableElement currentMethod) {
return getEnclosingTypeElement( currentMethod ).getQualifiedName().toString();
} | 3.68 |
shardingsphere-elasticjob_JobShutdownHookPlugin_start | /**
* <p>
* Called when the associated <code>Scheduler</code> is started, in order
* to let the plug-in know it can now make calls into the scheduler if it
* needs to.
* </p>
*/
@Override
public void start() {
} | 3.68 |
hbase_ThriftConnection_getAdmin | /**
* Get a ThriftAdmin. ThriftAdmin is NOT thread safe.
* @return a ThriftAdmin
* @throws IOException IOException
*/
@Override
public Admin getAdmin() throws IOException {
Pair<THBaseService.Client, TTransport> client = clientBuilder.getClient();
return new ThriftAdmin(client.getFirst(), client.getSecond(), conf);
} | 3.68 |
morf_InsertStatement_from | /**
* Specifies the table to source the data from
*
* @param sourceTable the table to source the data from
* @return a statement with the changes applied.
*
*/
public InsertStatement from(TableReference sourceTable) {
return copyOnWriteOrMutate(
b -> b.from(sourceTable),
() -> {
if (selectStatement != null) {
throw new UnsupportedOperationException("Cannot specify both a source table and a source SelectStatement");
}
if (!fields.isEmpty()) {
throw new UnsupportedOperationException("Cannot specify both a source table and a list of fields");
}
if (!values.isEmpty()) {
throw new UnsupportedOperationException("Cannot specify both a source table and a set of literal field values.");
}
this.fromTable = sourceTable;
}
);
} | 3.68 |
druid_CharsetConvert_decode | /**
* Decode a string using the configured server and client encodings.
*
* @param s String
* @return String
* @throws UnsupportedEncodingException
*/
public String decode(String s) throws UnsupportedEncodingException {
if (enable && !isEmpty(s)) {
s = new String(s.getBytes(serverEncoding), clientEncoding);
}
return s;
} | 3.68 |
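A rough sketch of the byte-level round trip that decode performs; the charsets here are hypothetical stand-ins for the converter's configured serverEncoding and clientEncoding.

```java
// Sketch: a UTF-8 value decoded with the wrong (server) charset on arrival,
// then recovered exactly the way decode() does it.
java.nio.charset.Charset server = java.nio.charset.StandardCharsets.ISO_8859_1; // stand-in for serverEncoding
java.nio.charset.Charset client = java.nio.charset.StandardCharsets.UTF_8;      // stand-in for clientEncoding
String garbled = new String("caf\u00e9".getBytes(client), server);
// decode() re-encodes with the server charset and decodes with the client charset:
String recovered = new String(garbled.getBytes(server), client);
System.out.println(garbled + " -> " + recovered); // cafÃ© -> café
```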
flink_AbstractBytesMultiMap_checkSkipReadForPointer | /** For pointer needing update, skip unaligned part (4 bytes) for convenient updating. */
private void checkSkipReadForPointer(AbstractPagedInputView source) throws IOException {
// skip if there is no enough size.
// Note: Use currentSegmentLimit instead of segmentSize.
int available = source.getCurrentSegmentLimit() - source.getCurrentPositionInSegment();
if (available < ELEMENT_POINT_LENGTH) {
source.advance();
}
} | 3.68 |
hbase_HRegionServer_setupWALAndReplication | /**
* Setup WAL log and replication if enabled. Replication setup is done in here because it wants to
* be hooked up to WAL.
*/
private void setupWALAndReplication() throws IOException {
WALFactory factory = new WALFactory(conf, serverName, this);
// TODO Replication make assumptions here based on the default filesystem impl
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString());
Path logDir = new Path(walRootDir, logName);
LOG.debug("logDir={}", logDir);
if (this.walFs.exists(logDir)) {
throw new RegionServerRunningException(
"Region server has already created directory at " + this.serverName.toString());
}
// Always create wal directory as now we need this when master restarts to find out the live
// region servers.
if (!this.walFs.mkdirs(logDir)) {
throw new IOException("Can not create wal directory " + logDir);
}
// Instantiate replication if replication enabled. Pass it the log directories.
createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory);
WALActionsListener walEventListener = getWALEventTrackerListener(conf);
if (walEventListener != null && factory.getWALProvider() != null) {
factory.getWALProvider().addWALActionsListener(walEventListener);
}
this.walFactory = factory;
} | 3.68 |
framework_AbstractSplitPanelElement_getFirstComponent | /**
* Gets the first component of a split panel and wraps it in given class.
*
* @param clazz
* Components element class
* @return First component wrapped in given class
*/
public <T extends AbstractElement> T getFirstComponent(Class<T> clazz) {
return getContainedComponent(clazz, byFirstContainer);
} | 3.68 |
hadoop_SuccessData_joinMap | /**
* Join any map of string to value into a string, sorting the keys first.
* @param map map to join
* @param prefix prefix before every entry
* @param middle string between key and value
* @param suffix suffix to each entry
* @return a string for reporting.
*/
protected static String joinMap(Map<String, ?> map,
String prefix,
String middle, String suffix) {
if (map == null) {
return "";
}
List<String> list = new ArrayList<>(map.keySet());
Collections.sort(list);
StringBuilder sb = new StringBuilder(list.size() * 32);
for (String k : list) {
sb.append(prefix)
.append(k)
.append(middle)
.append(map.get(k))
.append(suffix);
}
return sb.toString();
} | 3.68 |
morf_TableOutputter_spreadsheetifyName | /**
* Converts camel capped names to something we can show in a spreadsheet.
*
* @param name Name to convert.
* @return A human readable version of the name with camel caps replaced by spaces.
*/
private String spreadsheetifyName(String name) {
return StringUtils.capitalize(name).replaceAll("([A-Z][a-z])", " $1").trim();
} | 3.68 |
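For illustration, the same transformation without the commons-lang dependency (a minimal sketch; the input name is hypothetical):

```java
// Equivalent of spreadsheetifyName("agreementNumber") using only java.lang.
String name = "agreementNumber"; // hypothetical input
String capitalized = Character.toUpperCase(name.charAt(0)) + name.substring(1);
String readable = capitalized.replaceAll("([A-Z][a-z])", " $1").trim();
System.out.println(readable); // Agreement Number
```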
hbase_ScannerModel_setFilter | /**
* @param filter the filter specification
*/
public void setFilter(String filter) {
this.filter = filter;
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setVersions | /**
* Set minimum and maximum versions to keep.
* @param minVersions minimal number of versions
* @param maxVersions maximum number of versions
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVersions) {
if (minVersions <= 0) {
// TODO: Allow minVersion and maxVersion of 0 to be the way you say "Keep all versions".
// Until there is support, consider 0 or < 0 -- a configuration error.
throw new IllegalArgumentException("Minimum versions must be positive");
}
if (maxVersions < minVersions) {
throw new IllegalArgumentException(
"Unable to set MaxVersion to " + maxVersions + " and set MinVersion to " + minVersions
+ ", as maximum versions must be >= minimum versions.");
}
setMinVersions(minVersions);
setMaxVersions(maxVersions);
return this;
} | 3.68 |
flink_CompletedOperationCache_containsOperation | /** Returns whether this cache contains an operation under the given operation key. */
public boolean containsOperation(final K operationKey) {
return registeredOperationTriggers.containsKey(operationKey)
|| completedOperations.getIfPresent(operationKey) != null;
} | 3.68 |
framework_VaadinService_generateConnectorId | /**
* Generates a unique id to use for a newly attached connector.
*
* @see ConnectorIdGenerator
* @see #initConnectorIdGenerator(List)
*
* @since 8.1
*
* @param session
* the session to which the connector has been attached, not
* <code>null</code>
* @param connector
* the attached connector for which to generate an id, not
* <code>null</code>
* @return a string id that is unique within the session, not
* <code>null</code>
*/
public String generateConnectorId(VaadinSession session,
ClientConnector connector) {
assert session.getService() == this;
String connectorId = connectorIdGenerator.generateConnectorId(
new ConnectorIdGenerationEvent(session, connector));
assert connectorId != null;
return connectorId;
} | 3.68 |
hbase_RegionState_toDescriptiveString | /**
* A slower (but easier-to-read) stringification
*/
public String toDescriptiveString() {
long relTime = EnvironmentEdgeManager.currentTime() - stamp;
return hri.getRegionNameAsString() + " state=" + state + ", ts=" + new Date(stamp) + " ("
+ (relTime / 1000) + "s ago)" + ", server=" + serverName;
} | 3.68 |
flink_PendingCheckpointStats_toFailedCheckpoint | /**
* Reports a failed pending checkpoint.
*
* @param failureTimestamp Timestamp of the failure.
* @param cause Optional cause of the failure.
*/
FailedCheckpointStats toFailedCheckpoint(long failureTimestamp, @Nullable Throwable cause) {
return new FailedCheckpointStats(
checkpointId,
triggerTimestamp,
props,
numberOfSubtasks,
new HashMap<>(taskStats),
currentNumAcknowledgedSubtasks,
currentCheckpointedSize,
currentStateSize,
currentProcessedData,
currentPersistedData,
unalignedCheckpoint,
failureTimestamp,
latestAcknowledgedSubtask,
cause);
} | 3.68 |
hbase_ArrayBackedTag_getValueOffset | /** Returns Offset of actual tag bytes within the backed buffer */
@Override
public int getValueOffset() {
return this.offset + INFRASTRUCTURE_SIZE;
} | 3.68 |
framework_CompositeValidator_getErrorMessage | /**
* Gets the error message for the composite validator. If the error message
* is null, original error messages of the sub-validators are used instead.
*/
public String getErrorMessage() {
if (errorMessage != null) {
return errorMessage;
}
// TODO Return composite error message
return null;
} | 3.68 |
hudi_AbstractTableFileSystemView_mergeCompactionPendingFileSlices | /**
* Helper to merge last 2 file-slices. These 2 file-slices do not have compaction done yet.
*
* @param lastSlice Latest File slice for a file-group
* @param penultimateSlice Penultimate file slice for a file-group in commit timeline order
*/
private static FileSlice mergeCompactionPendingFileSlices(FileSlice lastSlice, FileSlice penultimateSlice) {
FileSlice merged = new FileSlice(penultimateSlice.getPartitionPath(), penultimateSlice.getBaseInstantTime(),
penultimateSlice.getFileId());
if (penultimateSlice.getBaseFile().isPresent()) {
merged.setBaseFile(penultimateSlice.getBaseFile().get());
}
// Add Log files from penultimate and last slices
penultimateSlice.getLogFiles().forEach(merged::addLogFile);
lastSlice.getLogFiles().forEach(merged::addLogFile);
return merged;
} | 3.68 |
AreaShop_Utils_millisToTicks | /**
* Convert milliseconds to ticks.
* @param milliseconds Milliseconds to convert
* @return milliseconds divided by 50 (20 ticks per second)
*/
public static long millisToTicks(long milliseconds) {
return milliseconds / 50;
} | 3.68 |
hmily_XaResourceWrapped_rollback0 | /**
* Implemented by subclasses. Rollback 0.
*
* @param xid the xid
* @throws XAException the xa exception
*/
void rollback0(final Xid xid) throws XAException {
} | 3.68 |
flink_BinaryInputFormat_createStatistics | /**
* Fill in the statistics. The last modification time and the total input size are prefilled.
*
* @param files The files that are associated with this block input format.
* @param stats The pre-filled statistics.
*/
protected SequentialStatistics createStatistics(
List<FileStatus> files, FileBaseStatistics stats) throws IOException {
if (files.isEmpty()) {
return null;
}
BlockInfo blockInfo = new BlockInfo();
long totalCount = 0;
for (FileStatus file : files) {
// invalid file
if (file.getLen() < blockInfo.getInfoSize()) {
continue;
}
FileSystem fs = file.getPath().getFileSystem();
try (FSDataInputStream fdis = fs.open(file.getPath(), blockInfo.getInfoSize())) {
fdis.seek(file.getLen() - blockInfo.getInfoSize());
blockInfo.read(new DataInputViewStreamWrapper(fdis));
totalCount += blockInfo.getAccumulatedRecordCount();
}
}
final float avgWidth =
totalCount == 0 ? 0 : ((float) stats.getTotalInputSize() / totalCount);
return new SequentialStatistics(
stats.getLastModificationTime(), stats.getTotalInputSize(), avgWidth, totalCount);
} | 3.68 |
flink_LimitedConnectionsFileSystem_getMaxNumOpenInputStreams | /** Gets the maximum number of concurrently open input streams. */
public int getMaxNumOpenInputStreams() {
return maxNumOpenInputStreams;
} | 3.68 |
hbase_TablePermission_tableFieldsEqual | /**
* Check if the table fields of this table permission equal those of the given table permission.
* @param tp to be checked table permission
* @return true if equals, false otherwise
*/
public boolean tableFieldsEqual(TablePermission tp) {
if (tp == null) {
return false;
}
boolean tEq = (table == null && tp.table == null) || (table != null && table.equals(tp.table));
boolean fEq = (family == null && tp.family == null) || Bytes.equals(family, tp.family);
boolean qEq =
(qualifier == null && tp.qualifier == null) || Bytes.equals(qualifier, tp.qualifier);
return tEq && fEq && qEq;
} | 3.68 |
pulsar_ModularLoadManagerStrategy_onActiveBrokersChange | /**
* Triggered when active brokers change.
*/
default void onActiveBrokersChange(Set<String> activeBrokers) {
} | 3.68 |
hbase_MapReduceBackupCopyJob_getSubTaskPercntgInWholeTask | /**
* Get the current copy task percentage within the whole task if multiple copies are needed.
* @return the current copy task percentage
*/
public float getSubTaskPercntgInWholeTask() {
return subTaskPercntgInWholeTask;
} | 3.68 |
hadoop_ConsumerRaisingIOE_andThen | /**
* after calling {@link #accept(Object)},
* invoke the next consumer in the chain.
* @param next next consumer
* @return the chain.
*/
default ConsumerRaisingIOE<T> andThen(
ConsumerRaisingIOE<? super T> next) {
return (T t) -> {
accept(t);
next.accept(t);
};
} | 3.68 |
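To show how the chaining default method behaves, here is a self-contained sketch; the interface is re-declared locally rather than imported, so the surrounding class and the example consumers are assumptions.

```java
import java.io.IOException;

public class AndThenDemo {
  // Local replica of the interface shape shown above, only to demonstrate andThen().
  interface ConsumerRaisingIOE<T> {
    void accept(T t) throws IOException;

    default ConsumerRaisingIOE<T> andThen(ConsumerRaisingIOE<? super T> next) {
      return (T t) -> {
        accept(t);
        next.accept(t);
      };
    }
  }

  public static void main(String[] args) throws IOException {
    ConsumerRaisingIOE<String> log = s -> System.out.println("processing " + s);
    ConsumerRaisingIOE<String> validate = s -> {
      if (s.isEmpty()) {
        throw new IOException("empty input");
      }
    };
    // Runs log first, then validate; an IOException from either stops the chain.
    log.andThen(validate).accept("part-0000");
  }
}
```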
graphhopper_GtfsStorage_postInit | // TODO: Refactor initialization
public void postInit() {
LocalDate latestStartDate = LocalDate.ofEpochDay(this.gtfsFeeds.values().stream().mapToLong(f -> f.getStartDate().toEpochDay()).max().getAsLong());
LocalDate earliestEndDate = LocalDate.ofEpochDay(this.gtfsFeeds.values().stream().mapToLong(f -> f.getEndDate().toEpochDay()).min().getAsLong());
LOGGER.info("Calendar range covered by all feeds: {} till {}", latestStartDate, earliestEndDate);
faresByFeed = new HashMap<>();
this.gtfsFeeds.forEach((feed_id, feed) -> faresByFeed.put(feed_id, feed.fares));
} | 3.68 |
framework_Tree_writeItems | /**
* Recursively writes the root items and their children to a design.
*
* @since 7.5.0
* @param design
* the element into which to insert the items
* @param context
* the DesignContext instance used in writing
*/
@Override
protected void writeItems(Element design, DesignContext context) {
for (Object itemId : rootItemIds()) {
writeItem(design, itemId, context);
}
} | 3.68 |
flink_MutableRecordAndPosition_setPosition | /** Sets the position without setting a record. */
public void setPosition(long offset, long recordSkipCount) {
this.offset = offset;
this.recordSkipCount = recordSkipCount;
} | 3.68 |
hudi_BufferedRandomAccessFile_endOfBufferReached | /**
* @return whether currentPosition has reached the end of valid buffer.
*/
private boolean endOfBufferReached() {
return this.currentPosition >= this.validLastPosition;
} | 3.68 |
framework_AbstractComponentConnector_onDropTargetDetached | /**
* Invoked when a {@link DropTargetExtensionConnector} has been removed from
* this component.
* <p>
* By default, does nothing.
* <p>
* This is a framework internal method, and should not be invoked manually.
*
* @since 8.1
* @see #onDropTargetAttached()
*/
public void onDropTargetDetached() {
} | 3.68 |
pulsar_ConnectionPool_createConnection | /**
* Resolve DNS asynchronously and attempt to connect to any IP address returned by DNS server.
*/
private CompletableFuture<Channel> createConnection(InetSocketAddress logicalAddress,
InetSocketAddress unresolvedPhysicalAddress) {
CompletableFuture<List<InetSocketAddress>> resolvedAddress;
try {
if (isSniProxy) {
URI proxyURI = new URI(clientConfig.getProxyServiceUrl());
resolvedAddress =
resolveName(InetSocketAddress.createUnresolved(proxyURI.getHost(), proxyURI.getPort()));
} else {
resolvedAddress = resolveName(unresolvedPhysicalAddress);
}
return resolvedAddress.thenCompose(
inetAddresses -> connectToResolvedAddresses(
logicalAddress,
unresolvedPhysicalAddress,
inetAddresses.iterator(),
isSniProxy ? unresolvedPhysicalAddress : null)
);
} catch (URISyntaxException e) {
log.error("Invalid Proxy url {}", clientConfig.getProxyServiceUrl(), e);
return FutureUtil
.failedFuture(new InvalidServiceURL("Invalid url " + clientConfig.getProxyServiceUrl(), e));
}
} | 3.68 |
flink_TableStats_merge | /**
* Merges two table stats. When either of the stats is unknown, whatever the other is, we need to
* return unknown stats. See {@link #UNKNOWN}.
*
* @param other The other table stats to merge.
* @return The merged table stats.
*/
public TableStats merge(TableStats other, @Nullable Set<String> partitionKeys) {
if (this.rowCount < 0 || other.rowCount < 0) {
return TableStats.UNKNOWN;
}
long rowCount =
this.rowCount >= 0 && other.rowCount >= 0
? this.rowCount + other.rowCount
: UNKNOWN.rowCount;
return new TableStats(rowCount, mergeColumnStates(other, partitionKeys));
} | 3.68 |
querydsl_MetaDataExporter_setGeneratedAnnotationClass | /**
* Set the fully qualified class name of the "generated" annotation added to the generated sources.
*
* @param generatedAnnotationClass the fully qualified class name of the <em>Single-Element Annotation</em> (with {@code String} element) to be used on
* the generated sources, or {@code null} (defaulting to {@code javax.annotation.Generated} or
* {@code javax.annotation.processing.Generated} depending on the java version).
* @see <a href="https://docs.oracle.com/javase/specs/jls/se8/html/jls-9.html#jls-9.7.3">Single-Element Annotation</a>
*/
public void setGeneratedAnnotationClass(@Nullable String generatedAnnotationClass) {
module.bindInstance(CodegenModule.GENERATED_ANNOTATION_CLASS, GeneratedAnnotationResolver.resolve(generatedAnnotationClass));
} | 3.68 |
hbase_Cacheable_release | /**
* Decrease its reference count, and if no reference then free the memory of this object, its
* backend is usually a {@link org.apache.hadoop.hbase.nio.ByteBuff}, and we will put its NIO
* ByteBuffers back to {@link org.apache.hadoop.hbase.io.ByteBuffAllocator}
*/
default boolean release() {
return false;
} | 3.68 |
framework_AbsoluteLayout_getBottomValue | /**
* Gets the 'bottom' attribute's value using current units.
*
* @return The value of the 'bottom' attribute, null if not set
* @see #getBottomUnits()
*/
public Float getBottomValue() {
return bottomValue;
} | 3.68 |
hadoop_TimelineStateStore_serviceStart | /**
* Start the state storage for use
*
* @throws IOException
*/
@Override
public void serviceStart() throws IOException {
startStorage();
} | 3.68 |
hbase_BitComparator_parseFrom | /**
* Parse a serialized representation of {@link BitComparator}
* @param pbBytes A pb serialized {@link BitComparator} instance
* @return An instance of {@link BitComparator} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static BitComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BitComparator proto;
try {
proto = ComparatorProtos.BitComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
BitwiseOp bitwiseOp = BitwiseOp.valueOf(proto.getBitwiseOp().name());
return new BitComparator(proto.getComparable().getValue().toByteArray(), bitwiseOp);
} | 3.68 |
hbase_TableRecordReaderImpl_next | /**
* @param key HStoreKey as input key.
* @param value Result as input value
* @return true if there was more data
*/
public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
Result result;
try {
try {
result = this.scanner.next();
if (logScannerActivity) {
rowcount++;
if (rowcount >= logPerRowCount) {
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
timestamp = now;
rowcount = 0;
}
}
} catch (IOException e) {
// do not retry if the exception tells us not to do so
if (e instanceof DoNotRetryIOException) {
throw e;
}
// try to handle all other IOExceptions by restarting
// the scanner, if the second call fails, it will be rethrown
LOG.debug("recovered from " + StringUtils.stringifyException(e));
if (lastSuccessfulRow == null) {
LOG.warn("We are restarting the first next() invocation,"
+ " if your mapper has restarted a few other times like this"
+ " then you should consider killing this job and investigate"
+ " why it's taking so long.");
}
if (lastSuccessfulRow == null) {
restart(startRow);
} else {
restart(lastSuccessfulRow);
this.scanner.next(); // skip presumed already mapped row
}
result = this.scanner.next();
}
if (result != null && result.size() > 0) {
key.set(result.getRow());
lastSuccessfulRow = key.get();
value.copyFrom(result);
return true;
}
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
LOG.info(ioe.toString(), ioe);
String lastRow =
lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow);
LOG.info("lastSuccessfulRow=" + lastRow);
}
throw ioe;
}
} | 3.68 |
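A hedged driver loop for the reader above; the initialised TableRecordReaderImpl is assumed to be supplied by the caller, and the mapred-style createKey()/createValue() helpers are assumed to exist on it.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

class RowCounterSketch {
    static long countRows(TableRecordReaderImpl reader) throws IOException {
        ImmutableBytesWritable key = reader.createKey();
        Result value = reader.createValue();
        long rows = 0;
        // next() transparently restarts the scanner on recoverable IOExceptions.
        while (reader.next(key, value)) {
            rows++;
        }
        return rows;
    }
}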
flink_Plan_getDataSinks | /**
* Gets all the data sinks of this job.
*
* @return All sinks of the program.
*/
public Collection<? extends GenericDataSinkBase<?>> getDataSinks() {
return this.sinks;
} | 3.68 |
flink_MurmurHashUtils_hashUnsafeBytes | /**
* Hash unsafe bytes.
*
* @param base base unsafe object
* @param offset offset for unsafe object
* @param lengthInBytes length in bytes
* @return hash code
*/
public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes) {
return hashUnsafeBytes(base, offset, lengthInBytes, DEFAULT_SEED);
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withAtomicLongMaximum | /**
* Add a maximum statistic to dynamically return the
* latest value of the source.
* @param key key of this statistic
* @param source atomic long maximum
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicLongMaximum(String key,
AtomicLong source) {
withLongFunctionMaximum(key, s -> source.get());
return this;
} | 3.68 |
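A hedged builder sketch: publishing the live value of an AtomicLong as a maximum statistic. The statistic key is made up, and the IOStatisticsBinding.dynamicIOStatistics() factory is assumed from Hadoop's statistics API.

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.fs.statistics.IOStatistics;
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.dynamicIOStatistics;

class QueueDepthStats {
    private final AtomicLong maxQueueDepth = new AtomicLong();

    IOStatistics statistics() {
        // Each lookup of "max_queue_depth" re-reads the AtomicLong, so the
        // reported maximum always reflects the latest recorded value.
        return dynamicIOStatistics()
            .withAtomicLongMaximum("max_queue_depth", maxQueueDepth)
            .build();
    }
}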
hadoop_SelectBinding_opt | /**
* Resolve an option.
* @param builderOptions the options which came in from the openFile builder.
* @param fsConf configuration of the owning FS.
* @param base base option (no s3a: prefix)
* @param defVal default value. Must not be null.
* @param trim should the result be trimmed.
* @return the possibly trimmed value.
*/
static String opt(Configuration builderOptions,
Configuration fsConf,
String base,
String defVal,
boolean trim) {
String r = builderOptions.get(base, fsConf.get(base, defVal));
return trim ? r.trim() : r;
} | 3.68 |
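Since opt() is package-private, the sketch below simply restates its lookup order with plain Configuration objects: builder option first, then the filesystem configuration, then the supplied default. The key name is only an example.

import org.apache.hadoop.conf.Configuration;

class OptionPrecedenceDemo {
    static String resolve() {
        Configuration fsConf = new Configuration(false);
        fsConf.set("fs.s3a.select.input.csv.header", "use");

        Configuration builderOptions = new Configuration(false); // nothing set here

        // Equivalent to opt(builderOptions, fsConf, base, "none", true):
        return builderOptions.get("fs.s3a.select.input.csv.header",
            fsConf.get("fs.s3a.select.input.csv.header", "none")).trim();
        // -> "use": the builder had no value, so the filesystem conf won.
    }
}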
pulsar_Commands_readChecksum | /**
* Read the checksum and advance the reader index in the buffer.
*
 * <p>Note: This method assumes the presence of the checksum has already been verified.
*/
public static int readChecksum(ByteBuf buffer) {
buffer.skipBytes(2); //skip magic bytes
return buffer.readInt();
} | 3.68 |
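A hedged sketch with an in-memory Netty buffer laid out the way readChecksum expects: two magic bytes followed by a 4-byte checksum. The magic value and the Commands package location are assumptions here, not verified against a specific Pulsar version.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.pulsar.common.protocol.Commands;

class ReadChecksumDemo {
    static int demo() {
        ByteBuf buf = Unpooled.buffer();
        buf.writeShort(0x0e01);  // 2 magic bytes (illustrative value)
        buf.writeInt(42);        // the checksum itself
        // Skips the magic bytes and returns 42; the reader index ends up past the checksum.
        return Commands.readChecksum(buf);
    }
}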
hadoop_MutableStat_resetMinMax | /**
 * Reset the all-time min and max of the metric.
*/
public void resetMinMax() {
minMax.reset();
} | 3.68 |
hudi_CompactionUtils_getPendingCompactionOperations | /**
* Get pending compaction operations for both major and minor compaction.
*/
public static Stream<Pair<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>>> getPendingCompactionOperations(
HoodieInstant instant, HoodieCompactionPlan compactionPlan) {
List<HoodieCompactionOperation> ops = compactionPlan.getOperations();
if (null != ops) {
return ops.stream().map(op -> Pair.of(new HoodieFileGroupId(op.getPartitionPath(), op.getFileId()),
Pair.of(instant.getTimestamp(), op)));
} else {
return Stream.empty();
}
} | 3.68 |
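A hedged usage sketch: flattening a compaction plan into a file-group-to-operation map. The instant and plan are assumed to be already in scope of the caller, and the import package names are assumptions about where these Hudi types live.

import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hudi.avro.model.HoodieCompactionOperation;
import org.apache.hudi.avro.model.HoodieCompactionPlan;
import org.apache.hudi.common.model.HoodieFileGroupId;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.util.CompactionUtils;
import org.apache.hudi.common.util.collection.Pair;

class PendingCompactionIndex {
    static Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> index(
            HoodieInstant instant, HoodieCompactionPlan plan) {
        // Keyed by file group; the value keeps the instant time alongside the operation.
        return CompactionUtils.getPendingCompactionOperations(instant, plan)
                .collect(Collectors.toMap(Pair::getLeft, Pair::getRight));
    }
}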
flink_AdaptiveScheduler_computeVertexParallelismStore | /**
* Creates the parallelism store that should be used for determining scheduling requirements,
* which may choose different parallelisms than set in the {@link JobGraph} depending on the
* execution mode.
*
* @param jobGraph The job graph for execution.
* @param executionMode The mode of scheduler execution.
* @return The parallelism store.
*/
private static VertexParallelismStore computeVertexParallelismStore(
JobGraph jobGraph, SchedulerExecutionMode executionMode) {
if (executionMode == SchedulerExecutionMode.REACTIVE) {
return computeReactiveModeVertexParallelismStore(
jobGraph.getVertices(), SchedulerBase::getDefaultMaxParallelism, true);
}
return SchedulerBase.computeVertexParallelismStore(jobGraph);
} | 3.68 |
zxing_Detector_getBullsEyeCorners | /**
 * Finds the corners of a bull's eye centered on the passed point.
 * This returns the centers of the diagonal points just outside the bull's eye,
 * in the order [topRight, bottomRight, bottomLeft, topLeft].
*
* @param pCenter Center point
* @return The corners of the bull-eye
* @throws NotFoundException If no valid bull-eye can be found
*/
private ResultPoint[] getBullsEyeCorners(Point pCenter) throws NotFoundException {
Point pina = pCenter;
Point pinb = pCenter;
Point pinc = pCenter;
Point pind = pCenter;
boolean color = true;
for (nbCenterLayers = 1; nbCenterLayers < 9; nbCenterLayers++) {
Point pouta = getFirstDifferent(pina, color, 1, -1);
Point poutb = getFirstDifferent(pinb, color, 1, 1);
Point poutc = getFirstDifferent(pinc, color, -1, 1);
Point poutd = getFirstDifferent(pind, color, -1, -1);
//d a
//
//c b
if (nbCenterLayers > 2) {
float q = distance(poutd, pouta) * nbCenterLayers / (distance(pind, pina) * (nbCenterLayers + 2));
if (q < 0.75 || q > 1.25 || !isWhiteOrBlackRectangle(pouta, poutb, poutc, poutd)) {
break;
}
}
pina = pouta;
pinb = poutb;
pinc = poutc;
pind = poutd;
color = !color;
}
if (nbCenterLayers != 5 && nbCenterLayers != 7) {
throw NotFoundException.getNotFoundInstance();
}
compact = nbCenterLayers == 5;
// Expand the square by .5 pixel in each direction so that we're on the border
// between the white square and the black square
ResultPoint pinax = new ResultPoint(pina.getX() + 0.5f, pina.getY() - 0.5f);
ResultPoint pinbx = new ResultPoint(pinb.getX() + 0.5f, pinb.getY() + 0.5f);
ResultPoint pincx = new ResultPoint(pinc.getX() - 0.5f, pinc.getY() + 0.5f);
ResultPoint pindx = new ResultPoint(pind.getX() - 0.5f, pind.getY() - 0.5f);
// Expand the square so that its corners are the centers of the points
// just outside the bull's eye.
return expandSquare(new ResultPoint[]{pinax, pinbx, pincx, pindx},
2 * nbCenterLayers - 3,
2 * nbCenterLayers);
} | 3.68 |
dubbo_ConfigurationUtils_getDynamicGlobalConfiguration | /**
 * Kept for compatibility with single-instance (default application model) usage.
*
* @deprecated Replaced to {@link ConfigurationUtils#getDynamicGlobalConfiguration(ScopeModel)}
*/
@Deprecated
public static Configuration getDynamicGlobalConfiguration() {
return ApplicationModel.defaultModel()
.getDefaultModule()
.modelEnvironment()
.getDynamicGlobalConfiguration();
} | 3.68 |
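A hedged sketch of the non-deprecated replacement named in the @deprecated note, which takes an explicit ScopeModel instead of reaching for the default singleton; the class wrapper is illustrative.

import org.apache.dubbo.common.config.Configuration;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.ModuleModel;

class DynamicConfigLookup {
    static Configuration lookup() {
        // Equivalent to the deprecated no-arg call, but with the scope made explicit.
        ModuleModel module = ApplicationModel.defaultModel().getDefaultModule();
        return ConfigurationUtils.getDynamicGlobalConfiguration(module);
    }
}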