name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_StageConfig_withOperations | /**
* Set builder value.
* @param value new value
* @return this
*/
public StageConfig withOperations(final ManifestStoreOperations value) {
checkOpen();
operations = value;
return this;
} | 3.68 |
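The snippet above is a typical fluent setter: mutate one field, then return `this` so calls can be chained. A minimal, self-contained sketch of the same pattern follows, using hypothetical names (`ExampleConfig`, `withName`, `withRetries`) rather than the real `StageConfig` API.

```java
// Sketch of the fluent "with*" setter pattern; all names here are hypothetical.
public class ExampleConfig {
    private String name;
    private int retries;

    public ExampleConfig withName(String value) {
        this.name = value;
        return this; // returning "this" allows calls to be chained
    }

    public ExampleConfig withRetries(int value) {
        this.retries = value;
        return this;
    }

    @Override
    public String toString() {
        return "ExampleConfig{name=" + name + ", retries=" + retries + "}";
    }

    public static void main(String[] args) {
        // Chained calls read like a declaration of the final configuration.
        ExampleConfig cfg = new ExampleConfig().withName("stage-1").withRetries(3);
        System.out.println(cfg);
    }
}
```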
flink_JobManagerRunnerResult_getInitializationFailure | /**
* This method returns the initialization failure.
*
* @return the initialization failure
* @throws IllegalStateException if the result is not an initialization failure
*/
public Throwable getInitializationFailure() {
Preconditions.checkState(isInitializationFailure());
return failure;
} | 3.68 |
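The `Preconditions.checkState(isInitializationFailure())` guard means callers are expected to check the state before reading the failure. A small plain-Java sketch of that check-then-access pattern is shown below; the result type and field names are hypothetical stand-ins, not Flink's real classes.

```java
// Hypothetical result type illustrating the check-state-before-access pattern.
public class ExampleResult {
    private final Throwable failure; // null when the run succeeded

    public ExampleResult(Throwable failure) {
        this.failure = failure;
    }

    public boolean isInitializationFailure() {
        return failure != null;
    }

    public Throwable getInitializationFailure() {
        // Plain-Java equivalent of Preconditions.checkState(...): fail fast on misuse.
        if (!isInitializationFailure()) {
            throw new IllegalStateException("Result is not an initialization failure");
        }
        return failure;
    }

    public static void main(String[] args) {
        ExampleResult result = new ExampleResult(new RuntimeException("boom"));
        if (result.isInitializationFailure()) {
            System.out.println("Failed with: " + result.getInitializationFailure().getMessage());
        }
    }
}
```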
pulsar_SimpleLoadManagerImpl_getTotalAllocatedQuota | /**
* Get the sum of allocated resource for the list of namespace bundles.
*/
private ResourceQuota getTotalAllocatedQuota(Set<String> bundles) {
ResourceQuota totalQuota = new ResourceQuota();
for (String bundle : bundles) {
ResourceQuota quota = this.getResourceQuota(bundle);
totalQuota.add(quota);
}
return totalQuota;
} | 3.68 |
hadoop_AbfsHttpOperation_isNullInputStream | /**
 * Check for a null stream; this exists to satisfy findbugs' redundant null check.
 * @param stream InputStream to check
 * @return true if the stream is null, false otherwise
 */
private boolean isNullInputStream(InputStream stream) {
return stream == null;
} | 3.68 |
flink_NetworkBufferPool_getAvailableFuture | /** Returns a future that is completed when there are free segments in this pool. */
@Override
public CompletableFuture<?> getAvailableFuture() {
return availabilityHelper.getAvailableFuture();
} | 3.68 |
hbase_MasterProcedureScheduler_getServerQueue | // ============================================================================
// Server Queue Lookup Helpers
// ============================================================================
private ServerQueue getServerQueue(ServerName serverName, ServerProcedureInterface proc) {
final int index = getBucketIndex(serverBuckets, serverName.hashCode());
ServerQueue node = AvlTree.get(serverBuckets[index], serverName, SERVER_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
int priority;
if (proc != null) {
priority = MasterProcedureUtil.getServerPriority(proc);
} else {
priority = 1;
}
node = new ServerQueue(serverName, priority, locking.getServerLock(serverName));
serverBuckets[index] = AvlTree.insert(serverBuckets[index], node);
return node;
} | 3.68 |
dubbo_CollectionUtils_isEmptyMap | /**
* Return {@code true} if the supplied Map is {@code null} or empty.
* Otherwise, return {@code false}.
*
* @param map the Map to check
* @return whether the given Map is empty
*/
public static boolean isEmptyMap(Map map) {
return map == null || map.size() == 0;
} | 3.68 |
morf_UpgradeStatusTableServiceImpl_resultSetProcessor | /**
* Returns a {@link ResultSetProcessor} which converts the first value in the current row of a {@link ResultSet}
* to an {@link UpgradeStatus}.
*/
private ResultSetProcessor<UpgradeStatus> resultSetProcessor() {
return new ResultSetProcessor<UpgradeStatus>() {
@Override
public UpgradeStatus process(ResultSet resultSet) throws SQLException {
resultSet.next();
return UpgradeStatus.valueOf(resultSet.getString(1));
}
};
} | 3.68 |
streampipes_SQLStatementUtils_checkRegEx | /**
* Checks if the input string is allowed (regEx match and length > 0)
*
* @param input String which is getting matched with the regEx
* @param regExIdentifier Information about the use of the input. Gets included in the exception message
* @throws SpRuntimeException If {@code input} does not match with {@link DbDescription#getAllowedRegEx()}
* or if the length of {@code input} is 0
*/
public static final void checkRegEx(String input, String regExIdentifier, DbDescription dbDescription)
throws SpRuntimeException {
if (!input.matches(dbDescription.getAllowedRegEx()) || input.length() == 0) {
throw new SpRuntimeException(regExIdentifier + " '" + input
+ "' not allowed (allowed: '" + dbDescription.getAllowedRegEx() + "') with a min length of 1");
}
} | 3.68 |
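A runnable sketch of the same regex-plus-length validation is given below. The `^[a-zA-Z_][a-zA-Z0-9_]*$` pattern and the plain `RuntimeException` are assumptions for illustration only; they stand in for `DbDescription#getAllowedRegEx()` and `SpRuntimeException`, whose actual values are not shown in the snippet.

```java
// Standalone sketch of the regex-plus-length validation above (assumed example regex).
public class IdentifierValidator {
    private static final String ALLOWED_REGEX = "^[a-zA-Z_][a-zA-Z0-9_]*$"; // assumption for illustration

    public static void checkRegEx(String input, String regExIdentifier) {
        if (!input.matches(ALLOWED_REGEX) || input.length() == 0) {
            throw new RuntimeException(regExIdentifier + " '" + input
                + "' not allowed (allowed: '" + ALLOWED_REGEX + "') with a min length of 1");
        }
    }

    public static void main(String[] args) {
        checkRegEx("sensor_data", "Table name");      // passes
        try {
            checkRegEx("drop table;", "Table name");  // rejected by the pattern
        } catch (RuntimeException e) {
            System.out.println(e.getMessage());
        }
    }
}
```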
hbase_HFileCleaner_stopHFileDeleteThreads | /**
* Stop threads for hfile deletion
*/
private void stopHFileDeleteThreads() {
running = false;
LOG.debug("Stopping file delete threads");
for (Thread thread : threads) {
thread.interrupt();
}
} | 3.68 |
framework_TestWidgetConnector_register | // Called by generated sub class
protected void register(String widgetClass, Invoker creator) {
creators.put(widgetClass, creator);
} | 3.68 |
flink_RecordWriter_close | /** Closes the writer. This stops the flushing thread (if there is one). */
public void close() {
// make sure we terminate the thread in any case
if (outputFlusher != null) {
outputFlusher.terminate();
try {
outputFlusher.join();
} catch (InterruptedException e) {
// ignore on close
// restore interrupt flag to fast exit further blocking calls
Thread.currentThread().interrupt();
}
}
} | 3.68 |
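The close() above uses the standard terminate-interrupt-join sequence, restoring the interrupt flag if the join itself is interrupted. A compact sketch of that shutdown sequence with a hypothetical flusher thread:

```java
// Sketch of stopping a background flusher thread the way close() above does.
public class FlusherShutdownExample {
    public static void main(String[] args) {
        Thread outputFlusher = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    Thread.sleep(100); // pretend to flush buffered records periodically
                } catch (InterruptedException e) {
                    return; // interrupted: exit the flush loop
                }
            }
        });
        outputFlusher.start();

        // Shutdown: make sure the thread terminates in any case.
        outputFlusher.interrupt();
        try {
            outputFlusher.join();
        } catch (InterruptedException e) {
            // ignore on close, but restore the interrupt flag so callers
            // further up the stack can exit blocking calls quickly
            Thread.currentThread().interrupt();
        }
        System.out.println("flusher stopped");
    }
}
```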
hbase_AccessController_hasPermission | /**
* @deprecated since 2.2.0 and will be removed in 4.0.0. Use
* {@link Admin#hasUserPermissions(String, List)} instead.
* @see Admin#hasUserPermissions(String, List)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-22117">HBASE-22117</a>
*/
@Deprecated
@Override
public void hasPermission(RpcController controller, HasPermissionRequest request,
RpcCallback<HasPermissionResponse> done) {
// Converts proto to a TablePermission object.
TablePermission tPerm = AccessControlUtil.toTablePermission(request.getTablePermission());
// Check input user name
if (!request.hasUserName()) {
throw new IllegalStateException("Input username cannot be empty");
}
final String inputUserName = request.getUserName().toStringUtf8();
AccessControlProtos.HasPermissionResponse response = null;
try {
User caller = RpcServer.getRequestUser().orElse(null);
List<Permission> permissions = Lists.newArrayList(tPerm);
preHasUserPermissions(caller, inputUserName, permissions);
boolean hasPermission =
regionEnv.getConnection().getAdmin().hasUserPermissions(inputUserName, permissions).get(0);
response = ResponseConverter.buildHasPermissionResponse(hasPermission);
} catch (IOException ioe) {
ResponseConverter.setControllerException(controller, ioe);
}
done.run(response);
} | 3.68 |
hudi_HoodieAvroUtils_unwrapAvroValueWrapper | /**
* Unwraps Avro value wrapper into Java value.
*
* @param avroValueWrapper A wrapped value with Avro type wrapper.
* @return Java value.
*/
public static Comparable<?> unwrapAvroValueWrapper(Object avroValueWrapper) {
if (avroValueWrapper == null) {
return null;
} else if (avroValueWrapper instanceof DateWrapper) {
return LocalDate.ofEpochDay(((DateWrapper) avroValueWrapper).getValue());
} else if (avroValueWrapper instanceof DecimalWrapper) {
Schema valueSchema = DecimalWrapper.SCHEMA$.getField("value").schema();
return AVRO_DECIMAL_CONVERSION.fromBytes(((DecimalWrapper) avroValueWrapper).getValue(), valueSchema, valueSchema.getLogicalType());
} else if (avroValueWrapper instanceof TimestampMicrosWrapper) {
return microsToInstant(((TimestampMicrosWrapper) avroValueWrapper).getValue());
} else if (avroValueWrapper instanceof BooleanWrapper) {
return ((BooleanWrapper) avroValueWrapper).getValue();
} else if (avroValueWrapper instanceof IntWrapper) {
return ((IntWrapper) avroValueWrapper).getValue();
} else if (avroValueWrapper instanceof LongWrapper) {
return ((LongWrapper) avroValueWrapper).getValue();
} else if (avroValueWrapper instanceof FloatWrapper) {
return ((FloatWrapper) avroValueWrapper).getValue();
} else if (avroValueWrapper instanceof DoubleWrapper) {
return ((DoubleWrapper) avroValueWrapper).getValue();
} else if (avroValueWrapper instanceof BytesWrapper) {
return ((BytesWrapper) avroValueWrapper).getValue();
} else if (avroValueWrapper instanceof StringWrapper) {
return ((StringWrapper) avroValueWrapper).getValue();
} else if (avroValueWrapper instanceof GenericRecord) {
// NOTE: This branch could be hit because Avro records could be reconstructed
// as {@code GenericRecord}
// TODO add logical type decoding
GenericRecord record = (GenericRecord) avroValueWrapper;
return (Comparable<?>) record.get("value");
} else {
throw new UnsupportedOperationException(String.format("Unsupported type of the value (%s)", avroValueWrapper.getClass()));
}
} | 3.68 |
flink_RegisterApplicationMasterResponseReflector_getContainersFromPreviousAttempts | /**
* Checks if a YARN application still has registered containers. If the application master
* registered at the ResourceManager for the first time, this list will be empty. If the
* application master registered a repeated time (after a failure and recovery), this list will
* contain the containers that were previously allocated.
*
* @param response The response object from the registration at the ResourceManager.
* @return A list with containers from previous application attempt.
*/
List<Container> getContainersFromPreviousAttempts(
final RegisterApplicationMasterResponse response) {
return getContainersFromPreviousAttemptsUnsafe(response);
} | 3.68 |
framework_ContainerHierarchicalWrapper_setChildrenAllowed | /**
* <p>
 * Sets the given Item's capability to have children. If the Item identified
 * by the itemId already has children and areChildrenAllowed is false,
 * this method fails and <code>false</code> is returned; the children must
* be first explicitly removed with
* {@link #setParent(Object itemId, Object newParentId)} or
* {@link Container#removeItem(Object itemId)}.
* </p>
*
* @param itemId
* the ID of the Item in the container whose child capability is
* to be set.
* @param childrenAllowed
* the boolean value specifying if the Item can have children or
* not.
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
*/
@Override
public boolean setChildrenAllowed(Object itemId, boolean childrenAllowed) {
// If the wrapped container implements the method directly, use it
if (hierarchical) {
return ((Container.Hierarchical) container)
.setChildrenAllowed(itemId, childrenAllowed);
}
// Check that the item is in the container
if (!containsId(itemId)) {
return false;
}
// Update status
if (childrenAllowed) {
noChildrenAllowed.remove(itemId);
} else {
noChildrenAllowed.add(itemId);
}
return true;
} | 3.68 |
hadoop_TaskPool_setStatisticsContext | /**
* Set the statistics context for this thread.
*/
private void setStatisticsContext() {
if (ioStatisticsContext != null) {
IOStatisticsContext.setThreadIOStatisticsContext(ioStatisticsContext);
}
} | 3.68 |
pulsar_AbstractHierarchicalLedgerManager_getLedgerId | // get ledger from all level nodes
long getLedgerId(String... levelNodes) throws IOException {
return StringUtils.stringToHierarchicalLedgerId(levelNodes);
} | 3.68 |
hbase_MetricsTableRequests_updateDeleteBatch | /**
* Update the batch Delete time histogram
* @param t time it took
*/
public void updateDeleteBatch(long t) {
if (isEnableTableLatenciesMetrics()) {
deleteBatchTimeHistogram.update(t);
}
} | 3.68 |
hudi_HoodieRepairTool_checkBackupPathAgainstBasePath | /**
* Verifies the backup path against table base path.
* If the backup path is within the table base path, throws an error.
*
* @return {@code 0} if successful; {@code -1} otherwise.
*/
int checkBackupPathAgainstBasePath() {
if (cfg.backupPath == null) {
LOG.error("Backup path is not configured");
return -1;
}
if (cfg.backupPath.contains(cfg.basePath)) {
LOG.error(String.format("Cannot use backup path %s: it resides in the base path %s",
cfg.backupPath, cfg.basePath));
return -1;
}
return 0;
} | 3.68 |
hmily_EtcdPassiveConfig_fileName | /**
* File name string.
*
* @return the string
*/
public String fileName() {
return key + "." + fileExtension;
} | 3.68 |
morf_Function_rowNumber | /**
*
 * @return an instance of the ROW_NUMBER function
*/
public static Function rowNumber() {
return new Function(FunctionType.ROW_NUMBER);
} | 3.68 |
streampipes_ImageZipAdapter_stop | /**
* Stops the running thread that publishes the images
*/
public void stop() {
task.interrupt();
running = false;
} | 3.68 |
flink_TypeInferenceUtil_validateArgumentCount | /**
* Validates argument counts.
*
* @param argumentCount expected argument count
* @param actualCount actual argument count
* @param throwOnFailure if true, the function throws a {@link ValidationException} if the
* actual value does not meet the expected argument count
* @return a boolean indicating if expected argument counts match the actual counts
*/
public static boolean validateArgumentCount(
ArgumentCount argumentCount, int actualCount, boolean throwOnFailure) {
final int minCount = argumentCount.getMinCount().orElse(0);
if (actualCount < minCount) {
if (throwOnFailure) {
throw new ValidationException(
String.format(
"Invalid number of arguments. At least %d arguments expected but %d passed.",
minCount, actualCount));
}
return false;
}
final int maxCount = argumentCount.getMaxCount().orElse(Integer.MAX_VALUE);
if (actualCount > maxCount) {
if (throwOnFailure) {
throw new ValidationException(
String.format(
"Invalid number of arguments. At most %d arguments expected but %d passed.",
maxCount, actualCount));
}
return false;
}
if (!argumentCount.isValidCount(actualCount)) {
if (throwOnFailure) {
throw new ValidationException(
String.format(
"Invalid number of arguments. %d arguments passed.", actualCount));
}
return false;
}
return true;
} | 3.68 |
flink_StreamTaskCancellationContext_alwaysRunning | /**
* Factory for a context that always returns {@code false} when {@link #isCancelled()} is
* called.
*
* @return context
*/
static StreamTaskCancellationContext alwaysRunning() {
return () -> false;
} | 3.68 |
morf_AbstractSelectStatement_getTable | /**
* Gets the first table
*
* @return the table
*/
public TableReference getTable() {
return table;
} | 3.68 |
hadoop_RouterQuotaManager_updateQuota | /**
* Update quota in cache. The usage will be preserved.
* @param path Mount table path.
* @param quota Corresponding quota value.
*/
public void updateQuota(String path, RouterQuotaUsage quota) {
writeLock.lock();
try {
RouterQuotaUsage.Builder builder = new RouterQuotaUsage.Builder()
.quota(quota.getQuota()).spaceQuota(quota.getSpaceQuota());
RouterQuotaUsage current = this.cache.get(path);
if (current != null) {
builder.fileAndDirectoryCount(current.getFileAndDirectoryCount())
.spaceConsumed(current.getSpaceConsumed());
}
this.cache.put(path, builder.build());
} finally {
writeLock.unlock();
}
} | 3.68 |
hbase_ScannerContext_getMetrics | /**
* Get the metrics instance. Should only be called after a call to {@link #isTrackingMetrics()}
* has been made to confirm that metrics are indeed being tracked.
* @return {@link ServerSideScanMetrics} instance that is tracking metrics for this scan
*/
public ServerSideScanMetrics getMetrics() {
assert isTrackingMetrics();
return this.metrics;
} | 3.68 |
graphhopper_RoutingExampleTC_createGraphHopperInstance | // see RoutingExample for more details
static GraphHopper createGraphHopperInstance(String ghLoc) {
GraphHopper hopper = new GraphHopper();
hopper.setOSMFile(ghLoc);
hopper.setGraphHopperLocation("target/routing-tc-graph-cache");
Profile profile = new Profile("car").setVehicle("car")
// enabling turn costs means OSM turn restriction constraints like 'no_left_turn' will be taken into account
.setTurnCosts(true)
// we can also set u_turn_costs (in seconds). By default no u-turns are allowed, but with this setting
// we will consider u-turns at all junctions with a 40s time penalty
.putHint("u_turn_costs", 40);
hopper.setProfiles(profile);
// enable CH for our profile. since turn costs are enabled this will take more time and memory to prepare than
// without turn costs.
hopper.getCHPreparationHandler().setCHProfiles(new CHProfile(profile.getName()));
hopper.importOrLoad();
return hopper;
} | 3.68 |
hadoop_S3ClientFactory_getHeaders | /**
* Get the map of headers.
* @return (mutable) header map
*/
public Map<String, String> getHeaders() {
return headers;
} | 3.68 |
AreaShop_GeneralRegion_getLandlord | /**
* Get the landlord of this region (the player that receives any revenue from this region).
* @return The UUID of the landlord of this region
*/
public UUID getLandlord() {
String landlord = getStringSetting("general.landlord");
if(landlord != null && !landlord.isEmpty()) {
try {
return UUID.fromString(landlord);
} catch(IllegalArgumentException e) {
// Incorrect UUID
}
}
String landlordName = getStringSetting("general.landlordName");
if(landlordName != null && !landlordName.isEmpty()) {
@SuppressWarnings("deprecation")
OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(landlordName);
if(offlinePlayer != null) {
return offlinePlayer.getUniqueId();
}
}
return null;
} | 3.68 |
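The parse-or-fall-back structure above can be shown without the Bukkit API. The sketch below keeps only the UUID handling and uses a hypothetical name-to-UUID map in place of `Bukkit.getOfflinePlayer`.

```java
import java.util.Map;
import java.util.UUID;

// Sketch of the "try UUID string first, fall back to a name lookup" pattern above.
public class LandlordLookupExample {
    // Hypothetical stand-in for Bukkit's offline-player lookup.
    private static final Map<String, UUID> NAME_TO_UUID = Map.of("alice", UUID.randomUUID());

    static UUID resolveLandlord(String uuidSetting, String nameSetting) {
        if (uuidSetting != null && !uuidSetting.isEmpty()) {
            try {
                return UUID.fromString(uuidSetting);
            } catch (IllegalArgumentException e) {
                // Incorrect UUID string: fall through to the name-based lookup
            }
        }
        if (nameSetting != null && !nameSetting.isEmpty()) {
            return NAME_TO_UUID.get(nameSetting);
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(resolveLandlord("not-a-uuid", "alice")); // falls back to the name lookup
    }
}
```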
druid_SQLFunctionBuilder_length | // for character function
public SQLMethodInvokeExpr length(SQLExpr expr) {
return new SQLMethodInvokeExpr("length", null, expr);
} | 3.68 |
rocketmq-connect_Worker_maintainTaskState | /**
* maintain task state
*
* @throws Exception
*/
public void maintainTaskState() throws Exception {
Map<String, List<ConnectKeyValue>> connectorConfig = new HashMap<>();
synchronized (latestTaskConfigs) {
connectorConfig.putAll(latestTaskConfigs);
}
// STEP 0: clean up errored or stopped tasks
clearErrorOrStopedTask();
// STEP 1: check running tasks and put to error status
checkRunningTasks(connectorConfig);
// get new Tasks
Map<String, List<ConnectKeyValue>> newTasks = newTasks(connectorConfig);
// STEP 2: try to create new tasks
startTask(newTasks);
// STEP 3: check all pending state
checkPendingTask();
// STEP 4 check stopping tasks
checkStoppingTasks();
// STEP 5 check error tasks
checkErrorTasks();
// STEP 6: check errored and stopped tasks
checkStoppedTasks();
} | 3.68 |
hbase_ProcedureStoreTracker_isEmpty | /** Returns true, if no procedure is active, else false. */
public boolean isEmpty() {
for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
if (!entry.getValue().isEmpty()) {
return false;
}
}
return true;
} | 3.68 |
hadoop_BaseRecord_generateMashupKey | /**
* Generates a cache key from a map of values.
*
* @param keys Map of values.
* @return String mashup of key values.
*/
protected static String generateMashupKey(final Map<String, String> keys) {
StringBuilder builder = new StringBuilder();
for (Object value : keys.values()) {
if (builder.length() > 0) {
builder.append("-");
}
builder.append(value);
}
return builder.toString();
} | 3.68 |
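Because the whole implementation is visible above, its behaviour can be demonstrated directly: with an insertion-ordered map of `{"ns": "tmp", "path": "/data"}` the generated key is `tmp-/data`.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Demonstrates the key-mashup logic above on a small, insertion-ordered map.
public class MashupKeyExample {
    static String generateMashupKey(Map<String, String> keys) {
        StringBuilder builder = new StringBuilder();
        for (Object value : keys.values()) {
            if (builder.length() > 0) {
                builder.append("-");
            }
            builder.append(value);
        }
        return builder.toString();
    }

    public static void main(String[] args) {
        Map<String, String> keys = new LinkedHashMap<>();
        keys.put("ns", "tmp");
        keys.put("path", "/data");
        System.out.println(generateMashupKey(keys)); // prints "tmp-/data"
    }
}
```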
flink_SinkTransformationTranslator_addFailOverRegion | /**
* Adds a batch exchange that materializes the output first. This is a no-op in STREAMING.
*/
private <I> DataStream<I> addFailOverRegion(DataStream<I> input) {
return new DataStream<>(
executionEnvironment,
new PartitionTransformation<>(
input.getTransformation(),
new ForwardPartitioner<>(),
StreamExchangeMode.BATCH));
} | 3.68 |
hbase_MetricsAssignmentManager_incrementOperationCounter | /*
* TODO: Remove. This may not be required as assign and unassign operations are tracked separately
* Increment the count of assignment operation (assign/unassign).
*/
public void incrementOperationCounter() {
assignmentManagerSource.incrementOperationCounter();
} | 3.68 |
hadoop_FederationStateStoreUtils_setUsername | /**
* Sets a specific username for <code>HikariDataSource</code> SQL connections.
*
* @param dataSource the <code>HikariDataSource</code> connections
* @param userNameDB the value to set
*/
public static void setUsername(HikariDataSource dataSource,
String userNameDB) {
if (userNameDB != null) {
dataSource.setUsername(userNameDB);
LOG.debug("Setting non NULL Username for Store connection");
} else {
LOG.debug("NULL Username specified for Store connection, so ignoring");
}
} | 3.68 |
flink_HiveParserDDLSemanticAnalyzer_getColPath | // get the column path
// return column name if exists, column could be DOT separated.
// example: lintString.$elem$.myint
// return table name for column name if no column has been specified.
public static String getColPath(
HiveParserASTNode node,
String dbName,
String tableName,
Map<String, String> partSpec) {
// if this ast has only one child, then no column name specified.
if (node.getChildCount() == 1) {
return tableName;
}
HiveParserASTNode columnNode = null;
// Second child node could be partitionspec or column
if (node.getChildCount() > 1) {
if (partSpec == null) {
columnNode = (HiveParserASTNode) node.getChild(1);
} else {
columnNode = (HiveParserASTNode) node.getChild(2);
}
}
if (columnNode != null) {
if (dbName == null) {
return tableName + "." + QualifiedNameUtil.getFullyQualifiedName(columnNode);
} else {
return tableName.substring(dbName.length() + 1)
+ "."
+ QualifiedNameUtil.getFullyQualifiedName(columnNode);
}
} else {
return tableName;
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithDbLink | /**
* Tests the generation of SQL string for a query with a DB-link.
*/
@Test
public void testSelectWithDbLink() {
assumeTrue("for dialects with no EXCEPT operation support the test will be skipped.", expectedSelectWithDbLink() != null);
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(null, TEST_TABLE, DBLINK_NAME));
String result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedSelectWithDbLink(), result);
} | 3.68 |
hadoop_BaseTableRW_getResultScanner | /**
* @param hbaseConf used to read settings that override defaults
* @param conn used to create table from
* @param scan that specifies what you want to read from this table.
* @return scanner for the table.
* @throws IOException if any exception occurs while getting the scanner.
*/
public ResultScanner getResultScanner(Configuration hbaseConf,
Connection conn, Scan scan) throws IOException {
Table table = conn.getTable(getTableName(hbaseConf));
return table.getScanner(scan);
} | 3.68 |
hmily_TableMetaData_isPrimaryKey | /**
* Judge column whether primary key.
*
* @param columnIndex column index
* @return true if the column is primary key, otherwise false
*/
public boolean isPrimaryKey(final int columnIndex) {
return columnIndex < columnNames.size() && columns.get(columnNames.get(columnIndex)).isPrimaryKey();
} | 3.68 |
hudi_BaseHoodieWriteClient_scheduleLogCompactionAtInstant | /**
* Schedules a new log compaction instant with passed-in instant time.
* @param instantTime Log Compaction Instant Time
* @param extraMetadata Extra Metadata to be stored
*/
public boolean scheduleLogCompactionAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.LOG_COMPACT).isPresent();
} | 3.68 |
morf_ResultSetIterator_hasNext | /**
* @see java.util.Iterator#hasNext()
*/
@Override
public boolean hasNext() {
return hasNext;
} | 3.68 |
flink_MemoryMappedBoundedData_createWithRegionSize | /**
* Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path. Each
* mapped region (= ByteBuffer) will be of the given size.
*/
public static MemoryMappedBoundedData createWithRegionSize(
Path memMappedFilePath, int regionSize) throws IOException {
final FileChannel fileChannel =
FileChannel.open(
memMappedFilePath,
StandardOpenOption.READ,
StandardOpenOption.WRITE,
StandardOpenOption.CREATE_NEW);
return new MemoryMappedBoundedData(memMappedFilePath, fileChannel, regionSize);
} | 3.68 |
flink_HsSubpartitionMemoryDataManager_trimHeadingReleasedBuffers | /**
 * Remove all released buffers from the head of the queue until the queue is empty or an
 * unreleased buffer is reached.
*/
@GuardedBy("subpartitionLock")
private void trimHeadingReleasedBuffers(Deque<HsBufferContext> bufferQueue) {
while (!bufferQueue.isEmpty() && bufferQueue.peekFirst().isReleased()) {
bufferQueue.removeFirst();
}
} | 3.68 |
framework_DateResolution_getResolutionsLowerThan | /**
* Returns the resolutions that are lower than the given resolution,
 * starting from the given resolution. In other words, passing DAY to this
 * method returns HOUR, MINUTE, SECOND.
*
* @param r
* The resolution to start from
* @return An iterable for the resolutions lower than r
*/
public static List<DateResolution> getResolutionsLowerThan(
DateResolution r) {
List<DateResolution> resolutions = new ArrayList<>();
DateResolution[] values = DateResolution.values();
for (int i = r.ordinal() - 1; i >= 0; i--) {
resolutions.add(values[i]);
}
return resolutions;
} | 3.68 |
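The ordinal walk can be checked with a tiny stand-in enum ordered from finest to coarsest; the SECOND..YEAR ordering below is an assumption inferred from the javadoc example, not the real Vaadin enum declaration.

```java
import java.util.ArrayList;
import java.util.List;

// Stand-in enum ordered from finest to coarsest, as the javadoc example implies.
enum Resolution { SECOND, MINUTE, HOUR, DAY, MONTH, YEAR }

public class LowerResolutionsExample {
    static List<Resolution> getResolutionsLowerThan(Resolution r) {
        List<Resolution> resolutions = new ArrayList<>();
        Resolution[] values = Resolution.values();
        // Walk the ordinals downwards, collecting everything finer than r.
        for (int i = r.ordinal() - 1; i >= 0; i--) {
            resolutions.add(values[i]);
        }
        return resolutions;
    }

    public static void main(String[] args) {
        // Prints [HOUR, MINUTE, SECOND], matching the javadoc example for DAY.
        System.out.println(getResolutionsLowerThan(Resolution.DAY));
    }
}
```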
morf_ColumnBean_isPrimaryKey | /**
* @see org.alfasoftware.morf.metadata.Column#isPrimaryKey()
*/
@Override
public boolean isPrimaryKey() {
return primaryKey;
} | 3.68 |
flink_HiveTableUtil_makePartitionFilter | /**
* Generates a filter string for partition columns from the given filter expressions.
*
* @param partColOffset The number of non-partition columns -- used to shift field reference
* index
* @param partColNames The names of all partition columns
* @param expressions The filter expressions in CNF form
* @return an Optional filter string equivalent to the expressions, which is empty if the
* expressions can't be handled
*/
public static Optional<String> makePartitionFilter(
int partColOffset,
List<String> partColNames,
List<Expression> expressions,
HiveShim hiveShim) {
List<String> filters = new ArrayList<>(expressions.size());
ExpressionExtractor extractor =
new ExpressionExtractor(partColOffset, partColNames, hiveShim);
for (Expression expression : expressions) {
String str = expression.accept(extractor);
if (str == null) {
return Optional.empty();
}
filters.add(str);
}
return Optional.of(String.join(" and ", filters));
} | 3.68 |
framework_NestedMethodProperty_writeObject | /* Special serialization to handle method references */
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
// getMethods and setMethod are reconstructed on read based on
// propertyName
} | 3.68 |
flink_HsSubpartitionMemoryDataManager_releaseSubpartitionBuffers | /**
* Release this subpartition's buffers in a decision.
*
 * @param toRelease all buffers belonging to this subpartition that need to be released in a
 * decision.
*/
@SuppressWarnings("FieldAccessNotGuarded")
// Note that: runWithLock ensure that code block guarded by resultPartitionReadLock and
// subpartitionLock.
public void releaseSubpartitionBuffers(List<BufferIndexAndChannel> toRelease) {
runWithLock(
() ->
toRelease.forEach(
(indexAndChannel) -> {
int bufferIndex = indexAndChannel.getBufferIndex();
HsBufferContext bufferContext =
bufferIndexToContexts.get(bufferIndex);
if (bufferContext != null) {
checkAndMarkBufferReadable(bufferContext);
releaseBuffer(bufferIndex);
}
}));
} | 3.68 |
hbase_LruAdaptiveBlockCache_getBlock | /**
* Get the buffer of the block with the specified name.
* @param cacheKey block's cache key
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block (used to avoid
* double counting cache misses when doing double-check locking)
* @param updateCacheMetrics Whether to update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> {
// It will be referenced by the RPC path, so increase here. NOTICE: Must do the retain inside
// this block, because if we retain outside the map#computeIfPresent, the evictBlock may remove
// the block and release it; then we'd be retaining a block with refCnt=0, which is disallowed.
// see HBASE-22422.
val.getBuffer().retain();
return val;
});
if (cb == null) {
if (!repeat && updateCacheMetrics) {
stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
// If there is another block cache then try and read there.
// However if this is a retry ( second time in double checked locking )
// And it's already a miss then the l2 will also be a miss.
if (victimHandler != null && !repeat) {
// The handler will increase result's refCnt for RPC, so need no extra retain.
Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
// Promote this to L1.
if (result != null) {
if (caching) {
cacheBlock(cacheKey, result, /* inMemory = */ false);
}
}
return result;
}
return null;
}
if (updateCacheMetrics) {
stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
cb.access(count.incrementAndGet());
return cb.getBuffer();
} | 3.68 |
framework_Responsive_makeResponsive | /**
* Enable responsive width and height range styling for the target component
* or UI instance.
*
* @param components
* The components which should be able to respond to width and/or
* height changes.
*/
public static void makeResponsive(Component... components) {
for (Component c : components) {
if (c instanceof AbstractClientConnector) {
new Responsive().extend((AbstractClientConnector) c);
}
}
} | 3.68 |
morf_DatabaseMetaDataProviderUtils_getAutoIncrementStartValue | /**
* Get the auto increment start value (if available) from the column comments
*
* @param columnComment The column comment
* @return the auto increment start value
*/
public static int getAutoIncrementStartValue(String columnComment) {
if (StringUtils.isNotEmpty(columnComment)) {
Matcher matcher = AUTONUM_START_REGEX.matcher(columnComment);
if (matcher.find()) {
return Integer.parseInt(matcher.group(1));
}
}
return -1;
} | 3.68 |
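A runnable sketch of the comment-parsing flow is given below. The `AUTONUMSTART:[n]` marker and the regex are illustrative guesses, since `AUTONUM_START_REGEX` itself is not shown in the snippet, and a plain null/empty check stands in for `StringUtils.isNotEmpty`.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch of pulling an auto-increment start value out of a column comment.
public class AutoIncrementCommentExample {
    // Assumed marker format, e.g. "AUTONUMSTART:[5]"; not necessarily Morf's real convention.
    private static final Pattern AUTONUM_START_REGEX = Pattern.compile("AUTONUMSTART:\\[(\\d+)\\]");

    static int getAutoIncrementStartValue(String columnComment) {
        if (columnComment != null && !columnComment.isEmpty()) { // stand-in for StringUtils.isNotEmpty
            Matcher matcher = AUTONUM_START_REGEX.matcher(columnComment);
            if (matcher.find()) {
                return Integer.parseInt(matcher.group(1));
            }
        }
        return -1; // no start value recorded in the comment
    }

    public static void main(String[] args) {
        System.out.println(getAutoIncrementStartValue("AUTONUMSTART:[5]")); // 5
        System.out.println(getAutoIncrementStartValue("no marker here"));   // -1
    }
}
```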
framework_LegacyLocatorStrategy_getWidgetFromPath | /**
* Locates the widget based on a String locator.
*
* @param path
* The String locator that identifies the widget.
* @param baseWidget
* the widget to which the path is relative, null if relative to
* root
* @return The Widget identified by the String locator or null if the widget
* could not be identified.
*/
@SuppressWarnings("unchecked")
private Widget getWidgetFromPath(String path, Widget baseWidget) {
Widget w = baseWidget;
String[] parts = path.split(PARENTCHILD_SEPARATOR);
for (int i = 0; i < parts.length; i++) {
String part = parts[i];
if (part.equals(ROOT_ID)) {
w = RootPanel.get();
} else if (part.isEmpty()) {
if (w == null) {
w = client.getUIConnector().getWidget();
}
} else if (w == null) {
String id = part;
// Must be old static pid (PID_S*)
ServerConnector connector = ConnectorMap.get(client)
.getConnector(id);
if (connector == null) {
// Lookup by component id
// TODO Optimize this
connector = findConnectorById(client.getUIConnector(),
id.substring(5));
}
if (connector instanceof ComponentConnector) {
w = ((ComponentConnector) connector).getWidget();
} else {
// Not found
return null;
}
} else if (part.startsWith("domChild[")) {
// The target widget has been found and the rest identifies the
// element
break;
} else if (w instanceof Iterable) {
// W identifies a widget that contains other widgets, as it
// should. Try to locate the child
Iterable<?> parent = (Iterable<?>) w;
// Part is of type "VVerticalLayout[0]", split this into
// VVerticalLayout and 0
String[] split = part.split("\\[", 2);
String widgetClassName = split[0];
String indexString = split[1].substring(0,
split[1].length() - 1);
int widgetPosition;
try {
widgetPosition = Integer.parseInt(indexString);
} catch (NumberFormatException e) {
// We've probably been fed a new-style Vaadin locator with a
// string-form predicate, that doesn't match anything in the
// search space.
return null;
}
// AbsolutePanel in GridLayout has been removed -> skip it
if (w instanceof VGridLayout
&& "AbsolutePanel".equals(widgetClassName)) {
continue;
}
// FlowPane in CSSLayout has been removed -> skip it
if (w instanceof VCssLayout
&& "VCssLayout$FlowPane".equals(widgetClassName)) {
continue;
}
// ChildComponentContainer and VOrderedLayout$Slot have been
// replaced with Slot
if (w instanceof VAbstractOrderedLayout
&& ("ChildComponentContainer".equals(widgetClassName)
|| "VOrderedLayout$Slot"
.equals(widgetClassName))) {
widgetClassName = "Slot";
}
if (w instanceof VTabsheetPanel && widgetPosition != 0) {
// TabSheetPanel now only contains 1 connector => the index
// is always 0 which indicates the widget in the active tab
widgetPosition = 0;
}
if (w instanceof VOverlay
&& "VCalendarPanel".equals(widgetClassName)) {
// Vaadin 7.1 adds a wrapper for datefield popups
parent = (Iterable<?>) ((Iterable<?>) parent).iterator()
.next();
}
/*
* The new grid and ordered layouts do not contain
* ChildComponentContainer widgets. This is instead simulated by
* constructing a path step that would find the desired widget
* from the layout and injecting it as the next search step
* (which would originally have found the widget inside the
* ChildComponentContainer)
*/
if ((w instanceof VGridLayout)
&& "ChildComponentContainer".equals(widgetClassName)
&& i + 1 < parts.length) {
HasWidgets layout = (HasWidgets) w;
String nextPart = parts[i + 1];
String[] nextSplit = nextPart.split("\\[", 2);
String nextWidgetClassName = nextSplit[0];
// Find the n:th child and count the number of children with
// the same type before it
int nextIndex = 0;
for (Widget child : layout) {
boolean matchingType = nextWidgetClassName
.equals(Util.getSimpleName(child));
if (matchingType && widgetPosition == 0) {
// This is the n:th child that we looked for
break;
} else if (widgetPosition < 0) {
// Error if we're past the desired position without
// a match
return null;
} else if (matchingType) {
// If this was another child of the expected type,
// increase the count for the next step
nextIndex++;
}
// Don't count captions
if (!(child instanceof VCaption)) {
widgetPosition--;
}
}
// Advance to the next step, this time checking for the
// actual child widget
parts[i + 1] = nextWidgetClassName + '[' + nextIndex + ']';
continue;
}
// Locate the child
Iterable<? extends Widget> iterable;
/*
* VWindow and VContextMenu workarounds for backwards
* compatibility
*/
if (widgetClassName.equals("VWindow")) {
List<WindowConnector> windows = client.getUIConnector()
.getSubWindows();
List<VWindow> windowWidgets = new ArrayList<>(
windows.size());
for (WindowConnector wc : windows) {
windowWidgets.add(wc.getWidget());
}
iterable = windowWidgets;
} else if (widgetClassName.equals("VContextMenu")) {
return client.getContextMenu();
} else {
iterable = (Iterable<? extends Widget>) parent;
}
boolean ok = false;
// Find the widgetPosition:th child of type "widgetClassName"
for (Widget child : iterable) {
String simpleName2 = Util.getSimpleName(child);
if (!widgetClassName.equals(simpleName2)
&& child instanceof Slot) {
/*
* Support legacy tests without any selector for the
* Slot widget (i.e. /VVerticalLayout[0]/VButton[0]) by
* directly checking the stuff inside the slot
*/
child = ((Slot) child).getWidget();
simpleName2 = Util.getSimpleName(child);
}
if (widgetClassName.equals(simpleName2)) {
if (widgetPosition == 0) {
w = child;
ok = true;
break;
}
widgetPosition--;
}
}
if (!ok) {
// Did not find the child
return null;
}
} else {
// W identifies something that is not a "HasWidgets". This
// should not happen as all widget containers should implement
// HasWidgets.
return null;
}
}
return w;
} | 3.68 |
hadoop_Lz4Codec_createDecompressor | /**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
@Override
public Decompressor createDecompressor() {
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
return new Lz4Decompressor(bufferSize);
} | 3.68 |
flink_StaticFileServerHandler_sendNotModified | /**
* Send the "304 Not Modified" response. This response can be used when the file timestamp is
* the same as what the browser is sending up.
*
* @param ctx The channel context to write the response to.
*/
public static void sendNotModified(ChannelHandlerContext ctx) {
FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, NOT_MODIFIED);
setDateHeader(response);
// close the connection as soon as the error message is sent.
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} | 3.68 |
hadoop_ManifestCommitter_enterStage | /**
* Callback on stage entry.
* Sets {@link #activeStage} and updates the
* common context.
* @param stage new stage
*/
@Override
public void enterStage(String stage) {
activeStage = stage;
AuditingIntegration.enterStage(stage);
} | 3.68 |
MagicPlugin_MagicController_isMeleeWeapon | /**
* Checks if an item is a melee material, as specified by the {@code melee}
* list in {@code materials.yml}. This is primarily used to detect if left
* clicking an entity should indicate melee damage or a spell being cast.
*
* @param item The item to check.
* @return Whether or not this is a melee weapon.
*/
public boolean isMeleeWeapon(ItemStack item) {
return item != null && meleeMaterials.testItem(item);
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_incrementPendingBytes | /**
 * Increment the total amount of pending bytes for all streams. This method should be called
 * whenever any stream's pending bytes change.
* @param delta The amount to increment by.
*/
final void incrementPendingBytes(int delta) {
totalPendingBytes += delta;
// Notification of writability change should be delayed until the end of the top level event.
// This is to ensure the flow controller is in a more consistent state before calling external listener methods.
} | 3.68 |
hadoop_StagingCommitter_buildWorkPath | /**
* Get the work path for a task.
* @param context job/task complex
* @param uuid UUID
* @return a path or null if the context is not of a task
* @throws IOException failure to build the path
*/
private static Path buildWorkPath(JobContext context, String uuid)
throws IOException {
if (context instanceof TaskAttemptContext) {
return taskAttemptWorkingPath((TaskAttemptContext) context, uuid);
} else {
return null;
}
} | 3.68 |
hbase_StoreFileScanner_seekAtOrAfter | /** Returns false if not found or if k is after the end. */
public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException {
int result = s.seekTo(k);
if (result < 0) {
if (result == HConstants.INDEX_KEY_MAGIC) {
// using faked key
return true;
}
// Passed KV is smaller than first KV in file, work from start of file
return s.seekTo();
} else if (result > 0) {
// Passed KV is larger than current KV in file, if there is a next
// it is the "after", if not then this scanner is done.
return s.next();
}
// Seeked to the exact key
return true;
} | 3.68 |
morf_AbstractSqlDialectTest_testYYYYMMDDToDate | /**
* Test that YYYYMMDDToDate functionality behaves as expected.
*/
@Test
public void testYYYYMMDDToDate() {
String result = testDialect.getSqlFrom(yyyymmddToDate(new FieldLiteral("20100101")));
assertEquals(expectedYYYYMMDDToDate(), result);
} | 3.68 |
incubator-hugegraph-toolchain_SplicingIdGenerator_parse | /**
* Parse a single id into multiple parts with ID_SPLITOR
*
* @param id the id object to be parsed
* @return parsed string id parts
*/
public static String[] parse(Id id) {
return IdUtil.unescape(id.asString(), ID_SPLITOR_STR, ESCAPE_STR);
} | 3.68 |
hadoop_ProducerConsumer_take | /**
* Blocking take from ProducerConsumer output queue that can be interrupted.
*
* @throws InterruptedException if interrupted before an element becomes
* available.
* @return item returned by processor's processItem().
*/
public WorkReport<R> take() throws InterruptedException {
WorkReport<R> report = outputQueue.take();
workCnt.decrementAndGet();
return report;
} | 3.68 |
framework_VAbsoluteLayout_setWrapperStyleNames | /**
* Sets the style names of the wrapper. Will be prefixed with the
* v-absolutelayout-wrapper prefix
*
* @param stylenames
* The wrapper style names
*/
public void setWrapperStyleNames(String... stylenames) {
extraStyleNames = stylenames;
updateStyleNames();
} | 3.68 |
morf_SqlDateUtils_castAsDateNullIfZero | /**
 * Returns null if the supplied expression is zero, otherwise returns the expression cast to a date.
*
* @param expression the expression to evaluate
* @return null or cast expression as a date
*/
public static AliasedField castAsDateNullIfZero(AliasedField expression) {
return castAsDate(nullLiteralIfZero(expression));
} | 3.68 |
framework_VAbstractCalendarPanel_handleNavigationMonthMode | /**
* Handle the keyboard navigation when the resolution is set to MONTH.
*
* @param keycode
* The keycode to handle
* @param ctrl
* Was the ctrl key pressed?
* @param shift
* Was the shift key pressed?
* @return {@code true} if the navigation was handled successfully,
* {@code false} otherwise
*/
protected boolean handleNavigationMonthMode(int keycode, boolean ctrl,
boolean shift) {
// Ctrl selection not supported
if (ctrl) {
return false;
} else if (keycode == getPreviousKey()) {
focusNextYear(1); // Add 1 year
return true;
} else if (keycode == getForwardKey()) {
focusNextMonth(); // Add 1 month
return true;
} else if (keycode == getNextKey()) {
focusPreviousYear(1); // Subtract 1 year
return true;
} else if (keycode == getBackwardKey()) {
focusPreviousMonth(); // Subtract 1 month
return true;
} else if (keycode == getSelectKey()) {
value = (Date) focusedDate.clone();
onSubmit();
return true;
} else if (keycode == getResetKey()) {
// Restore showing value the selected value
focusedDate.setTime(value.getTime());
renderCalendar();
return true;
} else if (keycode == getCloseKey()) {
onCancel();
// TODO fire close event
return true;
}
return false;
} | 3.68 |
morf_AbstractSqlDialectTest_getTestDialect | /**
* @return the testDialect
*/
public SqlDialect getTestDialect() {
return testDialect;
} | 3.68 |
hadoop_AbstractRESTRequestInterceptor_init | /**
* Initializes the {@link RESTRequestInterceptor}.
*/
@Override
public void init(String userName) {
this.user = RouterServerUtil.setupUser(userName);
if (this.nextInterceptor != null) {
this.nextInterceptor.init(userName);
}
} | 3.68 |
framework_ContainerHierarchicalWrapper_getContainerPropertyIds | /*
 * Gets the IDs of all Properties stored in the Container. Don't add a
 * JavaDoc comment here; we use the default documentation from the implemented
 * interface.
*/
@Override
public Collection<?> getContainerPropertyIds() {
return container.getContainerPropertyIds();
} | 3.68 |
hbase_ShutdownHookManager_addShutdownHook | // priority is ignored in hadoop versions earlier than 2.0
@Override
public void addShutdownHook(Thread shutdownHookThread, int priority) {
Runtime.getRuntime().addShutdownHook(shutdownHookThread);
} | 3.68 |
hbase_RSGroupAdminClient_moveServers | /**
* Move given set of servers to the specified target RegionServer group.
*/
public void moveServers(Set<Address> servers, String targetGroup) throws IOException {
Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
for (Address el : servers) {
hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname())
.setPort(el.getPort()).build());
}
MoveServersRequest request =
MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts).build();
try {
stub.moveServers(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.68 |
rocketmq-connect_Serdes_Double | /**
* A serde for nullable {@code Double} type.
*/
static public Serde<Double> Double() {
return new DoubleSerde();
} | 3.68 |
morf_AbstractSelectStatement_getFromSelects | /**
* @return the fromSelects
*/
public List<SelectStatement> getFromSelects() {
return fromSelects;
} | 3.68 |
morf_DataSetUtils_statementParameters | /**
* Build a set of parameters for a single call of a parameterised SQL
* statement.
*
* @see RecordBuilder
* @return A {@link RecordBuilder}.
*/
public static StatementParametersBuilder statementParameters() {
return new StatementParametersBuilderImpl();
} | 3.68 |
querydsl_TypeResolver_resolve | /**
* Resolve type declared in declaringType for context
*
* @param type type to be resolved
* @param declaringType declaration context of type
* @param context target context of type
* @return resolved type
*/
public static Type resolve(Type type, Type declaringType, EntityType context) {
Type resolved = unwrap(type);
String varName = getVarName(resolved);
if (varName != null) {
resolved = resolveVar(resolved, varName, declaringType, context);
} else if (!resolved.getParameters().isEmpty()) {
resolved = resolveWithParameters(resolved, declaringType, context);
}
// rewrap entity type
if (type instanceof EntityType) {
if (!unwrap(type).equals(resolved)) {
resolved = new EntityType(resolved, ((EntityType) type).getSuperTypes());
} else {
// reset to original type
resolved = type;
}
}
return resolved;
} | 3.68 |
morf_ChangePrimaryKeyColumns_apply | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema apply(Schema schema) {
return applyChange(schema, oldPrimaryKeyColumns, newPrimaryKeyColumns);
} | 3.68 |
hadoop_DoubleValueSum_reset | /**
* reset the aggregator
*/
public void reset() {
sum = 0;
} | 3.68 |
hudi_BaseHoodieTableServiceClient_scheduleClustering | /**
* Schedules a new clustering instant.
*
* @param extraMetadata Extra Metadata to be stored
*/
public Option<String> scheduleClustering(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleClusteringAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
} | 3.68 |
flink_BackendRestorerProcedure_createAndRestore | /**
* Creates a new state backend and restores it from the provided set of state snapshot
* alternatives.
*
* @param restoreOptions list of prioritized state snapshot alternatives for recovery.
* @return the created (and restored) state backend.
* @throws Exception if the backend could not be created or restored.
*/
@Nonnull
public T createAndRestore(@Nonnull List<? extends Collection<S>> restoreOptions)
throws Exception {
if (restoreOptions.isEmpty()) {
restoreOptions = Collections.singletonList(Collections.emptyList());
}
int alternativeIdx = 0;
Exception collectedException = null;
while (alternativeIdx < restoreOptions.size()) {
Collection<S> restoreState = restoreOptions.get(alternativeIdx);
++alternativeIdx;
// IMPORTANT: please be careful when modifying the log statements because they are used
// for validation in
// the automatic end-to-end tests. Those tests might fail if they are not aligned with
// the log message!
if (restoreState.isEmpty()) {
LOG.debug("Creating {} with empty state.", logDescription);
} else {
if (LOG.isTraceEnabled()) {
LOG.trace(
"Creating {} and restoring with state {} from alternative ({}/{}).",
logDescription,
restoreState,
alternativeIdx,
restoreOptions.size());
} else {
LOG.debug(
"Creating {} and restoring with state from alternative ({}/{}).",
logDescription,
alternativeIdx,
restoreOptions.size());
}
}
try {
return attemptCreateAndRestore(restoreState);
} catch (Exception ex) {
collectedException = ExceptionUtils.firstOrSuppressed(ex, collectedException);
if (backendCloseableRegistry.isClosed()) {
throw new FlinkException(
"Stopping restore attempts for already cancelled task.",
collectedException);
}
LOG.warn(
"Exception while restoring {} from alternative ({}/{}), will retry while more "
+ "alternatives are available.",
logDescription,
alternativeIdx,
restoreOptions.size(),
ex);
}
}
throw new FlinkException(
"Could not restore "
+ logDescription
+ " from any of the "
+ restoreOptions.size()
+ " provided restore options.",
collectedException);
} | 3.68 |
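The method above walks a prioritized list of restore alternatives, keeping the first exception and attaching later ones. A stripped-down sketch of that recovery loop in plain Java follows; `ExceptionUtils.firstOrSuppressed` is replaced with `addSuppressed`, and the backend/state types are hypothetical.

```java
import java.util.List;
import java.util.function.Function;

// Sketch of trying prioritized restore alternatives until one succeeds.
public class RestoreLoopExample {
    static <S, T> T createAndRestore(List<S> restoreOptions, Function<S, T> attempt) throws Exception {
        Exception collected = null;
        for (int i = 0; i < restoreOptions.size(); i++) {
            S alternative = restoreOptions.get(i);
            try {
                return attempt.apply(alternative); // first successful restore wins
            } catch (Exception ex) {
                // Keep the first failure and attach later ones as suppressed exceptions.
                if (collected == null) {
                    collected = ex;
                } else {
                    collected.addSuppressed(ex);
                }
                System.out.printf("Restore attempt %d/%d failed, trying next alternative%n",
                        i + 1, restoreOptions.size());
            }
        }
        throw new Exception("Could not restore from any of the "
                + restoreOptions.size() + " provided restore options.", collected);
    }

    public static void main(String[] args) throws Exception {
        String backend = createAndRestore(List.of("corrupt-snapshot", "good-snapshot"), s -> {
            if (s.startsWith("corrupt")) {
                throw new RuntimeException("cannot read " + s);
            }
            return "backend restored from " + s;
        });
        System.out.println(backend);
    }
}
```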
graphhopper_TranslationMap_getWithFallBack | /**
* Returns the Translation object for the specified locale and falls back to English if the
* locale was not found.
*/
public Translation getWithFallBack(Locale locale) {
Translation tr = get(locale.toString());
if (tr == null) {
tr = get(locale.getLanguage());
if (tr == null)
tr = get("en");
}
return tr;
} | 3.68 |
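The fallback chain (full locale string, then language, then English) can be shown with a plain map standing in for the translation registry; the map contents are made up for the demo.

```java
import java.util.Locale;
import java.util.Map;

// Sketch of the locale -> language -> "en" fallback chain above.
public class TranslationFallbackExample {
    // Hypothetical registry keyed the same way the snippet looks translations up.
    private static final Map<String, String> TRANSLATIONS = Map.of(
            "de", "German bundle",
            "en", "English bundle");

    static String getWithFallBack(Locale locale) {
        String tr = TRANSLATIONS.get(locale.toString());      // e.g. "de_AT"
        if (tr == null) {
            tr = TRANSLATIONS.get(locale.getLanguage());       // e.g. "de"
            if (tr == null) {
                tr = TRANSLATIONS.get("en");                   // final fallback
            }
        }
        return tr;
    }

    public static void main(String[] args) {
        System.out.println(getWithFallBack(new Locale("de", "AT"))); // German bundle
        System.out.println(getWithFallBack(Locale.JAPANESE));        // English bundle
    }
}
```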
pulsar_BytesSchemaVersion_get | /**
* Get the data from the Bytes.
* @return The underlying byte array
*/
public byte[] get() {
return this.bytes;
} | 3.68 |
hibernate-validator_ClassHierarchyHelper_getDirectlyImplementedInterfaces | /**
* Gets all interfaces (and recursively their super-interfaces) which the
 * given class directly implements. Interfaces implemented by super-classes
 * are not included.
*
* @param clazz the class for which to retrieve the implemented interfaces
* @param <T> the type of the class
*
* @return Set of all interfaces implemented by the class represented by
* this hierarchy. The empty list is returned if it does not
* implement any interfaces.
*/
public static <T> Set<Class<? super T>> getDirectlyImplementedInterfaces(Class<T> clazz) {
Contracts.assertNotNull( clazz );
Set<Class<? super T>> classes = newHashSet();
getImplementedInterfaces( clazz, classes );
return classes;
} | 3.68 |
flink_InMemoryPartition_setPartitionNumber | /**
* overwrites partition number and should only be used on compaction partition
*
* @param number new partition
*/
public void setPartitionNumber(int number) {
this.partitionNumber = number;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_findPredecessor | /**
* Find the predecessor node for the given key at the given level. The key is in the memory
* segment positioning at the given offset.
*
* @param keySegment memory segment which contains the key.
* @param keyOffset offset of the key in the memory segment.
* @param level the level.
* @return node id before the key at the given level.
*/
private long findPredecessor(MemorySegment keySegment, int keyOffset, int level) {
return SkipListUtils.findPredecessor(
keySegment, keyOffset, level, levelIndexHeader, spaceAllocator);
} | 3.68 |
hbase_ProtobufMagic_lengthOfPBMagic | /** Returns Length of {@link #PB_MAGIC} */
public static int lengthOfPBMagic() {
return PB_MAGIC.length;
} | 3.68 |
hadoop_S3AReadOpContext_withInputPolicy | /**
* Set builder value.
* @param value new value
* @return the builder
*/
public S3AReadOpContext withInputPolicy(final S3AInputPolicy value) {
inputPolicy = value;
return this;
} | 3.68 |
pulsar_ReaderListener_reachedEndOfTopic | /**
* Get the notification when a topic is terminated.
*
* @param reader
* the Reader object associated with the terminated topic
*/
default void reachedEndOfTopic(Reader reader) {
// By default ignore the notification
} | 3.68 |
hbase_RequestConverter_buildSetTableStateInMetaRequest | /**
* Creates a protocol buffer SetTableStateInMetaRequest
* @param state table state to update in Meta
* @return a SetTableStateInMetaRequest
*/
public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final TableState state) {
return SetTableStateInMetaRequest.newBuilder().setTableState(state.convert())
.setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build();
} | 3.68 |
framework_VTabsheet_getPreviousVisibleTab | /**
* Find the previous tab that is visible on the server. Being scrolled
* out of view or clipped on the client does not make a difference.
* Returns -1 if none is found.
*
* @param i
* the index to start the search from
* @return the index of the first visible tab to the left from the
* starting point, or {@code -1} if not found
*
* @see Tab#isHiddenOnServer()
* @see VTabsheet#scrolledOutOfView(int)
* @see VTabsheet#isClipped(Tab)
*/
private int getPreviousVisibleTab(int i) {
i = Math.min(i, getTabCount());
do {
i--;
} while (i >= 0 && getTab(i).isHiddenOnServer());
return i;
} | 3.68 |
hadoop_AbfsInputStream_length | /**
* Returns the length of the file that this stream refers to. Note that the length returned is the length
* as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
 * they won't be reflected in the returned length.
*
* @return length of the file.
* @throws IOException if the stream is closed
*/
public long length() throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
return contentLength;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_beforeMarshalling | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void beforeMarshalling(Context.BeforeMarshalling context,
ExecutionAttributes executionAttributes) {
span.beforeMarshalling(context, executionAttributes);
} | 3.68 |
flink_StringUtils_arrayAwareToString | /**
* Converts the given object into a string representation by calling {@link Object#toString()}
* and formatting (possibly nested) arrays and {@code null}.
*
* <p>See {@link Arrays#deepToString(Object[])} for more information about the used format.
*/
public static String arrayAwareToString(Object o) {
final String arrayString = Arrays.deepToString(new Object[] {o});
return arrayString.substring(1, arrayString.length() - 1);
} | 3.68 |
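Since the method body is fully shown, its behaviour can be demonstrated directly: wrapping the argument in a one-element array lets `Arrays.deepToString` handle nulls and nested arrays, and the outer brackets are then stripped.

```java
import java.util.Arrays;

// Demonstrates the deepToString trick used by arrayAwareToString above.
public class ArrayAwareToStringExample {
    static String arrayAwareToString(Object o) {
        final String arrayString = Arrays.deepToString(new Object[] {o});
        return arrayString.substring(1, arrayString.length() - 1);
    }

    public static void main(String[] args) {
        System.out.println(arrayAwareToString(null));                      // null
        System.out.println(arrayAwareToString("plain string"));            // plain string
        System.out.println(arrayAwareToString(new int[][] {{1, 2}, {3}})); // [[1, 2], [3]]
    }
}
```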
hadoop_DecayRpcSchedulerDetailedMetrics_getProcessingName | /**
 * @param priority input priority.
 * @return the rate name inside the metric.
*/
public String getProcessingName(int priority) {
return "DecayRPCSchedulerPriority."+priority+".RpcProcessingTime";
} | 3.68 |
hbase_RpcCallContext_getRequestUserName | /** Returns Current request's user name or not present if none ongoing. */
default Optional<String> getRequestUserName() {
return getRequestUser().map(User::getShortName);
} | 3.68 |
framework_Escalator_moveViewportAndContent | /**
* Adjust the scroll position and move the contained rows.
* <p>
* The difference between using this method and simply scrolling is that
* this method "takes the rows and spacers with it" and renders them
* appropriately. The viewport may be scrolled any arbitrary amount, and
* the contents are moved appropriately, but always snapped into a
* plausible place.
* <p>
* <dl>
* <dt>Example 1</dt>
* <dd>An Escalator with default row height 20px. Adjusting the scroll
* position with 7.5px will move the viewport 7.5px down, but leave the
* row where it is.</dd>
* <dt>Example 2</dt>
* <dd>An Escalator with default row height 20px. Adjusting the scroll
* position with 27.5px will move the viewport 27.5px down, and place
* the row at 20px.</dd>
* </dl>
*
* @param yDelta
* the delta of pixels by which to move the viewport and
* content. A positive value moves everything downwards,
* while a negative value moves everything upwards
*/
public void moveViewportAndContent(final double yDelta) {
if (yDelta == 0) {
return;
}
double newTop = tBodyScrollTop + yDelta;
verticalScrollbar.setScrollPos(newTop);
final double defaultRowHeight = getDefaultRowHeight();
double rowPxDelta = yDelta - (yDelta % defaultRowHeight);
int rowIndexDelta = (int) (yDelta / defaultRowHeight);
if (!WidgetUtil.pixelValuesEqual(rowPxDelta, 0)) {
Collection<SpacerContainer.SpacerImpl> spacers = spacerContainer
.getSpacersAfterPx(tBodyScrollTop,
SpacerInclusionStrategy.PARTIAL);
for (SpacerContainer.SpacerImpl spacer : spacers) {
spacer.setPositionDiff(0, rowPxDelta);
spacer.setRowIndex(spacer.getRow() + rowIndexDelta);
}
for (TableRowElement tr : visualRowOrder) {
setRowPosition(tr, 0, getRowTop(tr) + rowPxDelta);
}
}
setBodyScrollPosition(tBodyScrollLeft, newTop);
} | 3.68 |
hadoop_LocalJobOutputFiles_getOutputFile | /**
* Return the path to local map output file created earlier
*/
public Path getOutputFile() throws IOException {
String path = String.format(OUTPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.68 |
pulsar_PulsarAdminImpl_source | /**
* @return the sources management object
* @deprecated in favor of {@link #sources()}
*/
@Deprecated
public Source source() {
return (Source) sources;
} | 3.68 |
hbase_BaseSourceImpl_removeMetric | /**
* Remove a named gauge.
* @param key the key of the gauge to remove
*/
@Override
public void removeMetric(String key) {
metricsRegistry.removeMetric(key);
JmxCacheBuster.clearJmxCache();
} | 3.68 |