name | code_snippet | score |
---|---|---|
framework_SharedUtil_upperCaseUnderscoreToHumanFriendly | /**
* Converts an UPPER_CASE_STRING to a human friendly format (Upper Case
* String).
* <p>
* Splits words on {@code _}. Examples:
* <p>
* {@literal MY_BEAN_CONTAINER} becomes {@literal My Bean Container}
* {@literal AWESOME_URL_FACTORY} becomes {@literal Awesome Url Factory}
* {@literal SOMETHING} becomes {@literal Something}
*
* @since 7.7.4
* @param upperCaseUnderscoreString
* The input string in UPPER_CASE_UNDERSCORE format
* @return A human friendly version of the input
*/
public static String upperCaseUnderscoreToHumanFriendly(
String upperCaseUnderscoreString) {
String[] parts = upperCaseUnderscoreString.replaceFirst("^_*", "")
.split("_");
for (int i = 0; i < parts.length; i++) {
parts[i] = capitalize(parts[i].toLowerCase(Locale.ROOT));
}
return join(parts, " ");
} | 3.68 |
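A minimal usage sketch for the conversion above, assuming the method is exposed as a public static on the framework's `SharedUtil` class (the import path and demo class are illustrative, not part of the snippet):

```java
import com.vaadin.shared.util.SharedUtil;

public class HumanFriendlyDemo {
    public static void main(String[] args) {
        // Leading underscores are stripped before splitting on "_".
        System.out.println(SharedUtil.upperCaseUnderscoreToHumanFriendly("MY_BEAN_CONTAINER"));  // My Bean Container
        System.out.println(SharedUtil.upperCaseUnderscoreToHumanFriendly("AWESOME_URL_FACTORY")); // Awesome Url Factory
        System.out.println(SharedUtil.upperCaseUnderscoreToHumanFriendly("_SOMETHING"));          // Something
    }
}
```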
hadoop_Duration_getLimitExceeded | /**
* return true if the limit has been exceeded
* @return true if a limit was set and the current time
* exceeds it.
*/
public boolean getLimitExceeded() {
return limit >= 0 && ((now() - start) > limit);
} | 3.68 |
hbase_HRegionServer_closeMetaTableRegions | /**
* Close meta region if we carry it
* @param abort Whether we're running an abort.
*/
private void closeMetaTableRegions(final boolean abort) {
HRegion meta = null;
this.onlineRegionsLock.writeLock().lock();
try {
for (Map.Entry<String, HRegion> e : onlineRegions.entrySet()) {
RegionInfo hri = e.getValue().getRegionInfo();
if (hri.isMetaRegion()) {
meta = e.getValue();
}
if (meta != null) {
break;
}
}
} finally {
this.onlineRegionsLock.writeLock().unlock();
}
if (meta != null) {
closeRegionIgnoreErrors(meta.getRegionInfo(), abort);
}
} | 3.68 |
hmily_HmilyLockCacheManager_getInstance | /**
* Hmily lock cache manager.
*
* @return Hmily lock cache manager instance
*/
public static HmilyLockCacheManager getInstance() {
return INSTANCE;
} | 3.68 |
hbase_SnapshotScannerHDFSAclHelper_getUsersWithNamespaceReadAction | /**
* Return users with namespace read permission
* @param namespace the namespace
* @param includeGlobal true if include users with global read action
* @return users with namespace read permission
* @throws IOException if an error occurred
*/
Set<String> getUsersWithNamespaceReadAction(String namespace, boolean includeGlobal)
throws IOException {
Set<String> users =
getUsersWithReadAction(PermissionStorage.getNamespacePermissions(conf, namespace));
if (includeGlobal) {
users.addAll(getUsersWithGlobalReadAction());
}
return users;
} | 3.68 |
hadoop_BooleanWritable_compareTo | /**
   * Compares this writable to another {@link BooleanWritable}; {@code false} sorts before {@code true}.
   */
@Override
public int compareTo(BooleanWritable o) {
boolean a = this.value;
boolean b = o.value;
return ((a == b) ? 0 : (a == false) ? -1 : 1);
} | 3.68 |
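An illustrative check of the ordering implied by the comparison above (false sorts before true); a small sketch assuming the standard `org.apache.hadoop.io.BooleanWritable` boolean constructor:

```java
import org.apache.hadoop.io.BooleanWritable;

public class BooleanWritableOrdering {
    public static void main(String[] args) {
        BooleanWritable f = new BooleanWritable(false);
        BooleanWritable t = new BooleanWritable(true);
        System.out.println(f.compareTo(t)); // -1: false sorts before true
        System.out.println(t.compareTo(f)); //  1
        System.out.println(t.compareTo(new BooleanWritable(true))); // 0: equal values
    }
}
```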
dubbo_IOUtils_writeLines | /**
 * Write lines to a file.
 *
 * @param file  target file.
 * @param lines lines to write.
* @throws IOException If an I/O error occurs
*/
public static void writeLines(File file, String[] lines) throws IOException {
if (file == null) {
throw new IOException("File is null.");
}
writeLines(new FileOutputStream(file), lines);
} | 3.68 |
hadoop_AccessTokenProvider_setConf | /**
* Set the conf.
*
* @param configuration New configuration.
*/
@Override
public void setConf(Configuration configuration) {
this.conf = configuration;
} | 3.68 |
morf_InsertStatement_getValues | /**
   * @return List of literal field values to insert when not selecting data from another table.
*/
public List<AliasedField> getValues() {
return values;
} | 3.68 |
hbase_RegionStates_getOrCreateServer | /**
   * Be judicious calling this method. Do it on server register ONLY, otherwise you could mess up
   * online server accounting. TODO: Review usage and convert to {@link #getServerNode(ServerName)}
   * where we can.
*/
public ServerStateNode getOrCreateServer(final ServerName serverName) {
return serverMap.computeIfAbsent(serverName, key -> new ServerStateNode(key));
} | 3.68 |
hadoop_AbstractS3ACommitter_commitJobInternal | /**
* Internal Job commit operation: where the S3 requests are made
* (potentially in parallel).
* @param commitContext commit context
* @param pending pending commits
* @throws IOException any failure
*/
protected void commitJobInternal(
final CommitContext commitContext,
final ActiveCommit pending)
throws IOException {
trackDurationOfInvocation(committerStatistics,
COMMITTER_COMMIT_JOB.getSymbol(),
() -> commitPendingUploads(commitContext, pending));
} | 3.68 |
flink_TableSink_getTableSchema | /**
* Returns the schema of the consumed table.
*
* @return The {@link TableSchema} of the consumed table.
*/
default TableSchema getTableSchema() {
final String[] fieldNames = getFieldNames();
final TypeInformation[] legacyFieldTypes = getFieldTypes();
if (fieldNames == null || legacyFieldTypes == null) {
throw new TableException("Table sink does not implement a table schema.");
}
return new TableSchema(fieldNames, legacyFieldTypes);
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperationsForExistingDataFix3 | /**
   * Regression test that checks whether the DSL with Math expressions, as used in the
   * ReportingSchema module, produces the expected SQL.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperationsForExistingDataFix3() {
AliasedField dsl = max(field("assetLocationDate").multiplyBy(literal(100000)).plus(field("assetLocationTime")));
String sql = testDialect.getSqlFrom(dsl);
assertEquals(expectedSqlForMathOperationsForExistingDataFix3(), sql);
} | 3.68 |
flink_ExceptionUtils_tryRethrowException | /**
* Tries to throw the given exception if not null.
*
* @param e exception to throw if not null.
     * @throws Exception the given exception, if it is not null.
*/
public static void tryRethrowException(@Nullable Exception e) throws Exception {
if (e != null) {
throw e;
}
} | 3.68 |
flink_ZooKeeperUtils_splitZooKeeperPath | /**
* Splits the given ZooKeeper path into its parts.
*
* @param path path to split
     * @return the split path
*/
public static String[] splitZooKeeperPath(String path) {
return path.split("/");
} | 3.68 |
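Because the method simply delegates to String#split, an absolute path yields a leading empty element; a small illustrative sketch follows (the path literal is made up, and the import path is assumed):

```java
import java.util.Arrays;

import org.apache.flink.runtime.util.ZooKeeperUtils;

public class SplitPathDemo {
    public static void main(String[] args) {
        String[] parts = ZooKeeperUtils.splitZooKeeperPath("/flink/default/resource_manager");
        // Prints [, flink, default, resource_manager] – note the leading empty
        // element that String#split produces for an absolute path.
        System.out.println(Arrays.toString(parts));
    }
}
```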
hbase_NamespacesInstanceModel_addProperty | /**
* Add property to the namespace.
* @param key attribute name
* @param value attribute value
*/
public void addProperty(String key, String value) {
if (properties == null) {
properties = new HashMap<>();
}
properties.put(key, value);
} | 3.68 |
hbase_DeadServer_cleanOldServerName | /**
* @param newServerName Server to match port and hostname against.
* @param deadServerIterator Iterator primed so can call 'next' on it.
* @return True if <code>newServerName</code> and current primed iterator ServerName have same
* host and port and we removed old server from iterator and from processing list.
*/
private boolean cleanOldServerName(ServerName newServerName,
Iterator<ServerName> deadServerIterator) {
ServerName sn = deadServerIterator.next();
if (ServerName.isSameAddress(sn, newServerName)) {
// Remove from dead servers list. Don't remove from the processing list --
// let the SCP do it when it is done.
deadServerIterator.remove();
return true;
}
return false;
} | 3.68 |
flink_MutableHashTable_clearPartitions | /**
* This method clears all partitions currently residing (partially) in memory. It releases all
* memory and deletes all spilled partitions.
*
* <p>This method is intended for a hard cleanup in the case that the join is aborted.
*/
protected void clearPartitions() {
for (int i = this.partitionsBeingBuilt.size() - 1; i >= 0; --i) {
final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i);
try {
p.clearAllMemory(this.availableMemory);
} catch (Exception e) {
LOG.error("Error during partition cleanup.", e);
}
}
this.partitionsBeingBuilt.clear();
} | 3.68 |
hadoop_SchedulerHealth_getAggregateAllocationCount | /**
* Get the aggregate of all the allocations count.
*
* @return aggregate allocation count
*/
public Long getAggregateAllocationCount() {
return getAggregateOperationCount(Operation.ALLOCATION);
} | 3.68 |
pulsar_AuthenticationDataProvider_getTlsTrustStoreStream | /**
*
     * @return an input-stream of the trust store, or null if the trust store configured at
     *         {@link ClientConfigurationData#getTlsTrustStorePath()} should be used instead
*/
default InputStream getTlsTrustStoreStream() {
return null;
} | 3.68 |
hbase_StoreFileScanner_seekToPreviousRowWithoutHint | /**
* This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires two seeks
* and one reseek. The extra expense/seek is with the intent of speeding up subsequent calls by
* using the {@link StoreFileScanner#seekToPreviousRowWithHint} which this method seeds the state
* for by setting {@link StoreFileScanner#previousRow}
*/
private boolean seekToPreviousRowWithoutHint(Cell originalKey) throws IOException {
// Rewind to the cell before the beginning of this row
Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(originalKey);
if (!seekBefore(keyAtBeginningOfRow)) {
return false;
}
// Rewind before this row and save what we find as a seek hint
Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell());
seekBeforeAndSaveKeyToPreviousRow(firstKeyOfPreviousRow);
// Seek back to the start of the previous row
if (!reseekAtOrAfter(firstKeyOfPreviousRow)) {
return false;
}
// If after skipping newer Kvs, we're still in what we thought was the previous
// row, then we can exit
if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) {
return true;
}
// Skipping newer kvs resulted in skipping the entire row that we thought was the
// previous row. If we've set a seek hint, then we can use that to go backwards
// further
if (previousRow != null) {
return seekToPreviousRowWithHint();
}
// If we've made it here, then we weren't able to set a seek hint. This can happen
// only if we're at the beginning of the storefile i.e. there is no row before this
// one
return false;
} | 3.68 |
rocketmq-connect_AbstractStateManagementService_remove | /**
     * Remove the status entries for a connector and all of its tasks.
     *
     * @param connector the connector name
*/
private synchronized void remove(String connector) {
ConnAndTaskStatus.CacheEntry<ConnectorStatus> removed = connAndTaskStatus.getConnectors().remove(connector);
if (removed != null) {
removed.delete();
}
Map<Integer, ConnAndTaskStatus.CacheEntry<TaskStatus>> tasks = connAndTaskStatus.getTasks().remove(connector);
if (tasks != null) {
for (ConnAndTaskStatus.CacheEntry<TaskStatus> taskEntry : tasks.values()) {
taskEntry.delete();
}
}
} | 3.68 |
flink_StreamExecutionEnvironment_fromData | /**
     * Creates a new data stream that contains the given elements. The framework determines the
     * type according to the base type supplied by the user. The elements must be of that base
     * type or a subclass of it. The sequence of elements must not be empty.
*
* <p>NOTE: This creates a non-parallel data stream source by default (parallelism of one).
* Adjustment of parallelism is supported via {@code setParallelism()} on the result.
*
     * @param type The base class type of the elements in the collection.
* @param data The array of elements to create the data stream from.
* @param <OUT> The type of the returned data stream
* @return The data stream representing the given array of elements
*/
@SafeVarargs
public final <OUT> DataStreamSource<OUT> fromData(Class<OUT> type, OUT... data) {
if (data.length == 0) {
throw new IllegalArgumentException(
"fromElements needs at least one element as argument");
}
TypeInformation<OUT> typeInfo;
try {
typeInfo = TypeExtractor.getForClass(type);
} catch (Exception e) {
throw new RuntimeException(
"Could not create TypeInformation for type "
+ type.getName()
+ "; please specify the TypeInformation manually via "
+ "StreamExecutionEnvironment#fromData(Collection, TypeInformation)",
e);
}
return fromData(Arrays.asList(data), typeInfo);
} | 3.68 |
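A minimal usage sketch of the variant above; the job wiring and element values are illustrative, not part of the snippet:

```java
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class FromDataDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Elements must share the declared base type; the source is non-parallel by default.
        DataStreamSource<Number> numbers = env.fromData(Number.class, 1, 2.5, 3L);
        numbers.print();
        env.execute("fromData demo");
    }
}
```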
hbase_BlockCacheUtil_getLoadedCachedBlocksByFile | /**
* Get a {@link CachedBlocksByFile} instance and load it up by iterating content in
* {@link BlockCache}.
* @param conf Used to read configurations
* @param bc Block Cache to iterate.
   * @return Loaded up instance of CachedBlocksByFile
*/
public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf,
final BlockCache bc) {
CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf);
for (CachedBlock cb : bc) {
if (cbsbf.update(cb)) break;
}
return cbsbf;
} | 3.68 |
hadoop_TypedBytesInput_readRawFloat | /**
* Reads the raw bytes following a <code>Type.FLOAT</code> code.
   * @return the obtained byte sequence
* @throws IOException
*/
public byte[] readRawFloat() throws IOException {
byte[] bytes = new byte[5];
bytes[0] = (byte) Type.FLOAT.code;
in.readFully(bytes, 1, 4);
return bytes;
} | 3.68 |
morf_AbstractConnectionResources_getXADataSource | /**
* @return {@link XADataSource} created for this {@link ConnectionResources}
*/
public final XADataSource getXADataSource() {
Preconditions.checkNotNull(getDatabaseType(), "Cannot create XADataSource without defined DatabaseType");
return findDatabaseType().getXADataSource(getJdbcUrl(), getUserName(), getPassword());
} | 3.68 |
pulsar_ConsumerInterceptors_onAckTimeoutSend | /**
* This is called when a redelivery from an acknowledge timeout occurs.
* <p>
* This method calls {@link ConsumerInterceptor#onAckTimeoutSend(Consumer, Set)
* onAckTimeoutSend(Consumer, Set<MessageId>)} method for each interceptor.
* <p>
     * This method does not throw exceptions. Exceptions thrown by any of the interceptors in the chain are logged, but not
* propagated.
*
* @param consumer the consumer which contains the interceptors.
     * @param messageIds set of message IDs being redelivered due to an acknowledge timeout.
*/
public void onAckTimeoutSend(Consumer<T> consumer, Set<MessageId> messageIds) {
for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
try {
interceptors.get(i).onAckTimeoutSend(consumer, messageIds);
} catch (Throwable e) {
log.warn("Error executing interceptor onAckTimeoutSend callback", e);
}
}
} | 3.68 |
flink_CliFrontend_info | /**
* Executes the info action.
*
* @param args Command line arguments for the info action.
*/
protected void info(String[] args) throws Exception {
LOG.info("Running 'info' command.");
final Options commandOptions = CliFrontendParser.getInfoCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, true);
final ProgramOptions programOptions = ProgramOptions.create(commandLine);
// evaluate help flag
if (commandLine.hasOption(HELP_OPTION.getOpt())) {
CliFrontendParser.printHelpForInfo();
return;
}
// -------- build the packaged program -------------
LOG.info("Building program from JAR file");
PackagedProgram program = null;
try {
LOG.info("Creating program plan dump");
final CustomCommandLine activeCommandLine =
validateAndGetActiveCommandLine(checkNotNull(commandLine));
final Configuration effectiveConfiguration =
getEffectiveConfiguration(
activeCommandLine,
commandLine,
programOptions,
getJobJarAndDependencies(programOptions));
program = buildProgram(programOptions, effectiveConfiguration);
int parallelism = programOptions.getParallelism();
if (ExecutionConfig.PARALLELISM_DEFAULT == parallelism) {
parallelism = getDefaultParallelism(effectiveConfiguration);
}
Pipeline pipeline =
PackagedProgramUtils.getPipelineFromProgram(
program, effectiveConfiguration, parallelism, true);
String jsonPlan =
FlinkPipelineTranslationUtil.translateToJSONExecutionPlan(
program.getUserCodeClassLoader(), pipeline);
if (jsonPlan != null) {
System.out.println(
"----------------------- Execution Plan -----------------------");
System.out.println(jsonPlan);
System.out.println(
"--------------------------------------------------------------");
} else {
System.out.println("JSON plan could not be generated.");
}
String description = program.getDescription();
if (description != null) {
System.out.println();
System.out.println(description);
} else {
System.out.println();
System.out.println("No description provided.");
}
} finally {
if (program != null) {
program.close();
}
}
} | 3.68 |
flink_FlinkHints_getQueryBlockAliasHints | /**
* Get all query block alias hints.
*
     * <p>Because query block alias hints are propagated from the root to the leaves, one node may
     * contain multiple alias hints. Only the first one is the real query block name for the block
     * this node belongs to.
*/
public static List<RelHint> getQueryBlockAliasHints(List<RelHint> allHints) {
return allHints.stream()
.filter(hint -> hint.hintName.equals(FlinkHints.HINT_ALIAS))
.collect(Collectors.toList());
} | 3.68 |
hbase_QuotaObserverChore_isDisableSpaceViolationPolicy | /**
* Method to check whether we are dealing with DISABLE {@link SpaceViolationPolicy}. In such a
   * case, currPolicy and/or targetPolicy is set to the DISABLE policy.
* @param currPolicy currently set space violation policy
* @param targetPolicy new space violation policy
* @return true if is DISABLE space violation policy; otherwise false
*/
private boolean isDisableSpaceViolationPolicy(final SpaceViolationPolicy currPolicy,
final SpaceViolationPolicy targetPolicy) {
return SpaceViolationPolicy.DISABLE == currPolicy
|| SpaceViolationPolicy.DISABLE == targetPolicy;
} | 3.68 |
hudi_RocksDBDAO_getRocksDB | /**
   * Returns the RocksDB handle.
*/
private RocksDB getRocksDB() {
return rocksDB;
} | 3.68 |
flink_JobGraph_setExecutionConfig | /**
     * Sets the execution config. This method eagerly serializes the ExecutionConfig for future RPC
* transport. Further modification of the referenced ExecutionConfig object will not affect this
* serialized copy.
*
* @param executionConfig The ExecutionConfig to be serialized.
* @throws IOException Thrown if the serialization of the ExecutionConfig fails
*/
public void setExecutionConfig(ExecutionConfig executionConfig) throws IOException {
checkNotNull(executionConfig, "ExecutionConfig must not be null.");
setSerializedExecutionConfig(new SerializedValue<>(executionConfig));
} | 3.68 |
framework_HierarchyMapper_getChildrenStream | /**
     * The method to recursively fetch the children of a given parent. Used with
* {@link Stream#flatMap} to expand a stream of parent nodes into a
* flattened hierarchy.
*
* @param parent
* the parent node
* @param includeParent
* {@code true} to include the parent in the stream;
* {@code false} if not
* @return the stream of all children under the parent
*/
private Stream<T> getChildrenStream(T parent, boolean includeParent) {
List<T> childList = Collections.emptyList();
if (isExpanded(parent)) {
childList = getDirectChildren(parent).collect(Collectors.toList());
if (childList.isEmpty()) {
removeChildren(parent == null ? null
: getDataProvider().getId(parent));
} else {
registerChildren(parent, childList);
}
}
return combineParentAndChildStreams(parent,
childList.stream().flatMap(this::getChildrenStream),
includeParent);
} | 3.68 |
zxing_OneDimensionalCodeWriter_checkNumeric | /**
* @param contents string to check for numeric characters
* @throws IllegalArgumentException if input contains characters other than digits 0-9.
*/
protected static void checkNumeric(String contents) {
if (!NUMERIC.matcher(contents).matches()) {
throw new IllegalArgumentException("Input should only contain digits 0-9");
}
} | 3.68 |
hudi_HoodieExampleDataGenerator_generateUpdates | /**
* Generates new updates, randomly distributed across the keys above. There can be duplicates within the returned
* list
*
* @param commitTime Commit Timestamp
* @param n Number of updates (including dups)
* @return list of hoodie record updates
*/
public List<HoodieRecord<T>> generateUpdates(String commitTime, Integer n) {
List<HoodieRecord<T>> updates = new ArrayList<>();
for (int i = 0; i < n; i++) {
KeyPartition kp = existingKeys.get(RAND.nextInt(numExistingKeys - 1));
HoodieRecord<T> record = generateUpdateRecord(kp.key, commitTime);
updates.add(record);
}
return updates;
} | 3.68 |
framework_FieldGroup_build | /**
* Creates a field based on the given data type.
* <p>
* The data type is the type that we want to edit using the field. The field
     * type is the type of field we want to create; it can be {@link Field} if any
     * Field is acceptable.
* </p>
*
* @param caption
* The caption for the new field
* @param dataType
* The data model type that we want to edit using the field
* @param fieldType
* The type of field that we want to create
* @return A Field capable of editing the given type
* @throws BindException
* If the field could not be created
*/
protected <T extends Field> T build(String caption, Class<?> dataType,
Class<T> fieldType) throws BindException {
T field = getFieldFactory().createField(dataType, fieldType);
if (field == null) {
throw new BindException(
"Unable to build a field of type " + fieldType.getName()
+ " for editing " + dataType.getName());
}
field.setCaption(caption);
return field;
} | 3.68 |
framework_CvalChecker_deleteCache | /*
* used in tests
*/
static void deleteCache(String productName) {
Preferences p = Preferences.userNodeForPackage(CvalInfo.class);
p.remove(productName);
} | 3.68 |
hbase_SimpleServerRpcConnection_isIdle | /* Return true if the connection has no outstanding rpc */
boolean isIdle() {
return rpcCount.sum() == 0;
} | 3.68 |
morf_SqlDialect_dropTables | /**
* Creates SQL to drop the named tables.
*
   * @param tables The tables to drop
   * @param ifExists Should check if table exists before dropping
   * @param cascade If supported by the dialect, will drop tables/views that depend on any of the provided tables
* @return The SQL statements as strings.
*/
public Collection<String> dropTables(List<Table> tables, boolean ifExists, boolean cascade) {
return ImmutableList.of(
"DROP TABLE "
+ (ifExists ? "IF EXISTS " : "")
+ tables.stream().map(table -> schemaNamePrefix(table) + table.getName()).collect(Collectors.joining(", "))
+ (cascade ? " CASCADE" : "")
);
} | 3.68 |
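For illustration, a hypothetical call against the method above; the dialect instance, the two Table metadata objects and the package names in the imports are assumed, and the shown output presumes an empty schema prefix:

```java
import java.util.Collection;

import org.alfasoftware.morf.jdbc.SqlDialect;
import org.alfasoftware.morf.metadata.Table;

import com.google.common.collect.ImmutableList;

public class DropTablesDemo {
    // Hypothetical helper: the dialect and table metadata are supplied by the caller.
    static void printDropSql(SqlDialect dialect, Table foo, Table bar) {
        Collection<String> statements = dialect.dropTables(ImmutableList.of(foo, bar), true, true);
        // With no schema prefix this yields a single statement such as:
        //   DROP TABLE IF EXISTS Foo, Bar CASCADE
        statements.forEach(System.out::println);
    }
}
```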
hbase_BackupManager_getIncrementalBackupTableSet | /**
* Return the current tables covered by incremental backup.
* @return set of tableNames
* @throws IOException exception
*/
public Set<TableName> getIncrementalBackupTableSet() throws IOException {
return systemTable.getIncrementalBackupTableSet(backupInfo.getBackupRootDir());
} | 3.68 |
hadoop_NamenodePriorityComparator_compareModDates | /**
* Compare the modification dates.
*
* @param o1 Context 1.
* @param o2 Context 2.
* @return Comparison between dates.
*/
private int compareModDates(FederationNamenodeContext o1,
FederationNamenodeContext o2) {
// Reverse sort, lowest position is highest priority.
return (int) (o2.getDateModified() - o1.getDateModified());
} | 3.68 |
hbase_BucketAllocator_fullBuckets | /**
* How many buckets have been completely filled by blocks for this bucket size. These buckets
* can't accept any more blocks unless some existing are freed.
*/
public int fullBuckets() {
return fullBuckets;
} | 3.68 |
flink_Tuple23_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple23)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple23 tuple = (Tuple23) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) {
return false;
}
if (f20 != null ? !f20.equals(tuple.f20) : tuple.f20 != null) {
return false;
}
if (f21 != null ? !f21.equals(tuple.f21) : tuple.f21 != null) {
return false;
}
if (f22 != null ? !f22.equals(tuple.f22) : tuple.f22 != null) {
return false;
}
return true;
} | 3.68 |
flink_Transformation_getUid | /**
* Returns the user-specified ID of this transformation.
*
* @return The unique user-specified ID of this transformation.
*/
public String getUid() {
return uid;
} | 3.68 |
hadoop_SingleFilePerBlockCache_deleteCacheFiles | /**
* Delete cache files as part of the close call.
*/
private void deleteCacheFiles() {
int numFilesDeleted = 0;
for (Entry entry : blocks.values()) {
boolean lockAcquired =
entry.takeLock(Entry.LockType.WRITE, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT,
PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
if (!lockAcquired) {
LOG.error("Cache file {} deletion would not be attempted as write lock could not"
+ " be acquired within {} {}", entry.path,
PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT,
PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
continue;
}
try {
if (Files.deleteIfExists(entry.path)) {
prefetchingStatistics.blockRemovedFromFileCache();
numFilesDeleted++;
}
} catch (IOException e) {
LOG.warn("Failed to delete cache file {}", entry.path, e);
} finally {
entry.releaseLock(Entry.LockType.WRITE);
}
}
LOG.debug("Prefetch cache close: Deleted {} cache files", numFilesDeleted);
} | 3.68 |
hbase_BlockCacheUtil_getCount | /** Returns count of blocks in the cache */
public int getCount() {
return count;
} | 3.68 |
shardingsphere-elasticjob_ShardingService_getCrashedShardingItems | /**
* Get crashed sharding items.
*
* @param jobInstanceId crashed job instance ID
* @return crashed sharding items
*/
public List<Integer> getCrashedShardingItems(final String jobInstanceId) {
String serverIp = jobInstanceId.substring(0, jobInstanceId.indexOf(JobInstance.DELIMITER));
if (!serverService.isEnableServer(serverIp)) {
return Collections.emptyList();
}
List<Integer> result = new LinkedList<>();
int shardingTotalCount = configService.load(true).getShardingTotalCount();
for (int i = 0; i < shardingTotalCount; i++) {
if (isRunningItem(i) && jobInstanceId.equals(jobNodeStorage.getJobNodeData(ShardingNode.getInstanceNode(i)))) {
result.add(i);
}
}
return result;
} | 3.68 |
hbase_MasterSnapshotVerifier_verifySnapshot | /**
* Verify that the snapshot in the directory is a valid snapshot
   * @param snapshotDir snapshot directory to check
   * @param verifyRegions whether to also verify each region in the snapshot
* @throws CorruptedSnapshotException if the snapshot is invalid
* @throws IOException if there is an unexpected connection issue to the filesystem
*/
public void verifySnapshot(Path snapshotDir, boolean verifyRegions)
throws CorruptedSnapshotException, IOException {
SnapshotManifest manifest =
SnapshotManifest.open(services.getConfiguration(), workingDirFs, snapshotDir, snapshot);
// verify snapshot info matches
verifySnapshotDescription(snapshotDir);
// check that tableinfo is a valid table description
verifyTableInfo(manifest);
// check that each region is valid
verifyRegions(manifest, verifyRegions);
} | 3.68 |
hadoop_TFile_makeComparator | /**
* Make a raw comparator from a string name.
*
* @param name
* Comparator name
* @return A RawComparable comparator.
*/
static public Comparator<RawComparable> makeComparator(String name) {
return TFileMeta.makeComparator(name);
} | 3.68 |
framework_AbstractComponent_setActionManagerViewer | /**
* Set a viewer for the action manager to be the parent sub window (if the
* component is in a window) or the UI (otherwise). This is still a
* simplification of the real case as this should be handled by the parent
     * VOverlay (on the client side) if the component is inside a VOverlay
* component.
*/
private void setActionManagerViewer() {
if (actionManager != null && getUI() != null) {
// Attached and has action manager
Window w = findAncestor(Window.class);
if (w != null) {
actionManager.setViewer(w);
} else {
actionManager.setViewer(getUI());
}
}
} | 3.68 |
framework_VCalendar_setDisabled | /**
     * Sets whether the component is disabled.
*
* @param disabled
* True if disabled
*/
public void setDisabled(boolean disabled) {
this.disabled = disabled;
} | 3.68 |
hadoop_BufferPool_getAll | /**
* Gets a list of all blocks in this pool.
* @return a list of all blocks in this pool.
*/
public List<BufferData> getAll() {
synchronized (allocated) {
return Collections.unmodifiableList(new ArrayList<>(allocated.keySet()));
}
} | 3.68 |
hbase_HRegion_getSmallestReadPoint | /**
* @return The smallest mvcc readPoint across all the scanners in this region. Writes older than
* this readPoint, are included in every read operation.
*/
public long getSmallestReadPoint() {
// We need to ensure that while we are calculating the smallestReadPoint
// no new RegionScanners can grab a readPoint that we are unaware of.
smallestReadPointCalcLock.lock(ReadPointCalculationLock.LockType.CALCULATION_LOCK);
try {
long minimumReadPoint = mvcc.getReadPoint();
for (Long readPoint : this.scannerReadPoints.values()) {
minimumReadPoint = Math.min(minimumReadPoint, readPoint);
}
return minimumReadPoint;
} finally {
smallestReadPointCalcLock.unlock(ReadPointCalculationLock.LockType.CALCULATION_LOCK);
}
} | 3.68 |
streampipes_AdapterConfigurationBuilder_create | /**
* Creates a new adapter configuration using the builder pattern.
   * @param appId A unique identifier of the new adapter, e.g., com.mycompany.processor.mynewdataprocessor
   * @param version version of the processing element for migration purposes. Should be 0 in standard cases.
   *                Only in case there exist migrations for the specific element the version needs to be aligned.
   * @param supplier instance of the adapter to be described
*/
public static AdapterConfigurationBuilder create(String appId,
int version,
Supplier<StreamPipesAdapter> supplier) {
return new AdapterConfigurationBuilder(appId, version, supplier);
} | 3.68 |
rocketmq-connect_ConnAndTaskStatus_delete | /**
* if it has been deleted, it is meaningless to send it again
*/
public void delete() {
this.deleted = true;
} | 3.68 |
morf_AbstractSqlDialectTest_testRepairAutoNumberStartPositionUnderRepairLimit | /**
* Tests for {@link SqlDialect#repairAutoNumberStartPosition(Table, SqlScriptExecutor, Connection)}
*/
@Test
public void testRepairAutoNumberStartPositionUnderRepairLimit() {
setMaxIdOnAutonumberTable(MAX_ID_UNDER_REPAIR_LIMIT);
testDialect.repairAutoNumberStartPosition(metadata.getTable(TEST_TABLE), sqlScriptExecutor,connection);
testDialect.repairAutoNumberStartPosition(metadata.getTable(AUTO_NUMBER_TABLE), sqlScriptExecutor,connection);
verifyRepairAutoNumberStartPosition(sqlScriptExecutor,connection);
} | 3.68 |
flink_InMemoryPartition_allocateSegments | /**
     * Attempts to allocate the specified number of segments. Should only be used by the compaction
     * partition. Fails silently if not enough segments are available, since the next compaction
     * could still succeed.
*
* @param numberOfSegments allocation count
*/
public void allocateSegments(int numberOfSegments) {
while (getBlockCount() < numberOfSegments) {
MemorySegment next = this.availableMemory.nextSegment();
if (next != null) {
this.partitionPages.add(next);
} else {
return;
}
}
} | 3.68 |
framework_ConnectorHelper_writeHierarchyInformation | /**
* Creates a string containing hierarchy information for the connector.
*
* @since 7.1
* @param connector
* The connector to get hierarchy information for
* @param builder
* The StringBuilder where the information should be written
*/
public static void writeHierarchyInformation(ClientConnector connector,
StringBuilder builder) {
LinkedList<ClientConnector> h = new LinkedList<>();
h.add(connector);
ClientConnector parent = connector.getParent();
while (parent != null) {
h.addFirst(parent);
parent = parent.getParent();
}
builder.append("\nConnector hierarchy:\n");
int l = 0;
for (ClientConnector connector2 : h) {
if (l != 0) {
builder.append("\n");
for (int i = 0; i < l; i++) {
builder.append(" ");
}
}
l++;
Class<? extends ClientConnector> connectorClass = connector2
.getClass();
Class<?> topClass = connectorClass;
while (topClass.getEnclosingClass() != null) {
topClass = topClass.getEnclosingClass();
}
builder.append(connectorClass.getName());
builder.append('(');
builder.append(topClass.getSimpleName());
builder.append(".java:1)");
}
} | 3.68 |
framework_VDragAndDropManager_findDragTarget | /**
     * First seeks the widget from this element, then iterates widgets until one
     * implements HasDropHandler, and returns the DropHandler from that widget.
*
* @param element
* the topmost element that is a potential drag target
* @return the drop handler from the given element or its closest ancestor
* that has one, or {@code null} if there is no such thing
*/
protected VDropHandler findDragTarget(Element element) {
try {
Widget w = WidgetUtil.findWidget(element);
if (w == null) {
return null;
}
while (!(w instanceof VHasDropHandler)
|| !isDropEnabled((VHasDropHandler) w)) {
w = w.getParent();
if (w == null) {
break;
}
}
if (w == null) {
return null;
} else {
VDropHandler dh = ((VHasDropHandler) w).getDropHandler();
return dh;
}
} catch (Exception e) {
// ApplicationConnection.getConsole().log(
// "FIXME: Exception when detecting drop handler");
// e.printStackTrace();
return null;
}
} | 3.68 |
flink_ResolveCallByArgumentsRule_prepareInlineUserDefinedFunction | /** Validates and cleans an inline, unregistered {@link UserDefinedFunction}. */
private FunctionDefinition prepareInlineUserDefinedFunction(FunctionDefinition definition) {
if (definition instanceof ScalarFunctionDefinition) {
final ScalarFunctionDefinition sf = (ScalarFunctionDefinition) definition;
UserDefinedFunctionHelper.prepareInstance(
resolutionContext.configuration(), sf.getScalarFunction());
return new ScalarFunctionDefinition(sf.getName(), sf.getScalarFunction());
} else if (definition instanceof TableFunctionDefinition) {
final TableFunctionDefinition tf = (TableFunctionDefinition) definition;
UserDefinedFunctionHelper.prepareInstance(
resolutionContext.configuration(), tf.getTableFunction());
return new TableFunctionDefinition(
tf.getName(), tf.getTableFunction(), tf.getResultType());
} else if (definition instanceof AggregateFunctionDefinition) {
final AggregateFunctionDefinition af = (AggregateFunctionDefinition) definition;
UserDefinedFunctionHelper.prepareInstance(
resolutionContext.configuration(), af.getAggregateFunction());
return new AggregateFunctionDefinition(
af.getName(),
af.getAggregateFunction(),
af.getResultTypeInfo(),
af.getAccumulatorTypeInfo());
} else if (definition instanceof TableAggregateFunctionDefinition) {
final TableAggregateFunctionDefinition taf =
(TableAggregateFunctionDefinition) definition;
UserDefinedFunctionHelper.prepareInstance(
resolutionContext.configuration(), taf.getTableAggregateFunction());
return new TableAggregateFunctionDefinition(
taf.getName(),
taf.getTableAggregateFunction(),
taf.getResultTypeInfo(),
taf.getAccumulatorTypeInfo());
} else if (definition instanceof UserDefinedFunction) {
UserDefinedFunctionHelper.prepareInstance(
resolutionContext.configuration(), (UserDefinedFunction) definition);
}
return definition;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_suppressRetryPolicyInClientIfNeeded | /**
* If we're asked by unit tests to not retry, set the retry policy factory in
* the client accordingly.
*/
private void suppressRetryPolicyInClientIfNeeded() {
if (suppressRetryPolicy) {
storageInteractionLayer.setRetryPolicyFactory(new RetryNoRetry());
}
} | 3.68 |
morf_AbstractSqlDialectTest_expectedHints4a | /**
* @return The expected SQL for the {@link InsertStatement#avoidDirectPath()} directive.
*/
protected String expectedHints4a() {
return "INSERT INTO " + tableName("Foo") + " SELECT a, b FROM " + tableName("Foo_1");
} | 3.68 |
hadoop_HdfsLocatedFileStatus_getSymlinkInBytes | /**
   * Opaque referent for the symlink, to be resolved at the client.
*/
@Override
public byte[] getSymlinkInBytes() {
return uSymlink;
} | 3.68 |
flink_BlockCompressionFactory_createBlockCompressionFactory | /**
* Creates {@link BlockCompressionFactory} according to the configuration.
*
* @param compressionFactoryName supported compression codecs or user-defined class name
* inherited from {@link BlockCompressionFactory}.
*/
static BlockCompressionFactory createBlockCompressionFactory(String compressionFactoryName) {
checkNotNull(compressionFactoryName);
CompressionFactoryName compressionName;
try {
compressionName = CompressionFactoryName.valueOf(compressionFactoryName.toUpperCase());
} catch (IllegalArgumentException e) {
compressionName = null;
}
BlockCompressionFactory blockCompressionFactory;
if (compressionName != null) {
switch (compressionName) {
case LZ4:
blockCompressionFactory = new Lz4BlockCompressionFactory();
break;
case LZO:
blockCompressionFactory =
new AirCompressorFactory(new LzoCompressor(), new LzoDecompressor());
break;
case ZSTD:
blockCompressionFactory =
new AirCompressorFactory(new ZstdCompressor(), new ZstdDecompressor());
break;
default:
throw new IllegalStateException("Unknown CompressionMethod " + compressionName);
}
} else {
Object factoryObj;
try {
factoryObj = Class.forName(compressionFactoryName).newInstance();
} catch (ClassNotFoundException e) {
throw new IllegalConfigurationException(
"Cannot load class " + compressionFactoryName, e);
} catch (Exception e) {
throw new IllegalConfigurationException(
"Cannot create object for class " + compressionFactoryName, e);
}
if (factoryObj instanceof BlockCompressionFactory) {
blockCompressionFactory = (BlockCompressionFactory) factoryObj;
} else {
throw new IllegalArgumentException(
"CompressionFactoryName should inherit from"
+ " interface BlockCompressionFactory, or use the default compression codec.");
}
}
checkNotNull(blockCompressionFactory);
return blockCompressionFactory;
} | 3.68 |
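A small usage sketch for the factory method above; the built-in codec name comes from the snippet, the import path is assumed, and the custom class name is purely hypothetical:

```java
import org.apache.flink.runtime.io.compression.BlockCompressionFactory;

public class CompressionFactoryDemo {
    public static void main(String[] args) {
        // Built-in codec, matched case-insensitively against the enum in the factory method.
        BlockCompressionFactory lz4 = BlockCompressionFactory.createBlockCompressionFactory("LZ4");
        System.out.println(lz4.getClass().getSimpleName());

        // A user-defined factory is loaded by fully-qualified class name and must implement
        // BlockCompressionFactory ("com.example.MyCompressionFactory" is made up here).
        // BlockCompressionFactory custom =
        //         BlockCompressionFactory.createBlockCompressionFactory("com.example.MyCompressionFactory");
    }
}
```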
flink_RegisterApplicationMasterResponseReflector_getSchedulerResourceTypeNames | /**
* Get names of resource types that are considered by the Yarn scheduler.
*
* @param response The response object from the registration at the ResourceManager.
* @return A set of resource type names, or {@link Optional#empty()} if the Yarn version does
* not support this API.
*/
Optional<Set<String>> getSchedulerResourceTypeNames(
final RegisterApplicationMasterResponse response) {
return getSchedulerResourceTypeNamesUnsafe(response);
} | 3.68 |
flink_WindowTableFunctionUtil_createWindowAssigner | /**
* Creates window assigner based on input window strategy.
*
* @param windowingStrategy input window strategy
* @return new created window assigner
*/
public static WindowAssigner<TimeWindow> createWindowAssigner(
TimeAttributeWindowingStrategy windowingStrategy) {
WindowSpec windowSpec = windowingStrategy.getWindow();
boolean isProctime = windowingStrategy.isProctime();
if (windowSpec instanceof TumblingWindowSpec) {
TumblingWindowSpec tumblingWindowSpec = (TumblingWindowSpec) windowSpec;
TumblingWindowAssigner windowAssigner =
TumblingWindowAssigner.of(tumblingWindowSpec.getSize());
if (isProctime) {
windowAssigner = windowAssigner.withProcessingTime();
}
if (tumblingWindowSpec.getOffset() != null) {
windowAssigner = windowAssigner.withOffset(tumblingWindowSpec.getOffset());
}
return windowAssigner;
} else if (windowSpec instanceof HoppingWindowSpec) {
HoppingWindowSpec hoppingWindowSpec = (HoppingWindowSpec) windowSpec;
SlidingWindowAssigner windowAssigner =
SlidingWindowAssigner.of(
hoppingWindowSpec.getSize(), hoppingWindowSpec.getSlide());
if (isProctime) {
windowAssigner = windowAssigner.withProcessingTime();
}
if (hoppingWindowSpec.getOffset() != null) {
windowAssigner = windowAssigner.withOffset(hoppingWindowSpec.getOffset());
}
return windowAssigner;
} else if (windowSpec instanceof CumulativeWindowSpec) {
CumulativeWindowSpec cumulativeWindowSpec = (CumulativeWindowSpec) windowSpec;
CumulativeWindowAssigner windowAssigner =
CumulativeWindowAssigner.of(
cumulativeWindowSpec.getMaxSize(), cumulativeWindowSpec.getStep());
if (isProctime) {
windowAssigner = windowAssigner.withProcessingTime();
}
if (cumulativeWindowSpec.getOffset() != null) {
windowAssigner = windowAssigner.withOffset(cumulativeWindowSpec.getOffset());
}
return windowAssigner;
} else {
throw new TableException(
String.format(
"Unknown window spec: %s", windowSpec.getClass().getSimpleName()));
}
} | 3.68 |
cron-utils_SecondsDescriptor_nominalValue | /**
     * Given an int, returns a nominal value. Example:
     * 1 in the weeks context may mean "Monday",
     * so the nominal value for 1 would be "Monday".
     * By default, returns the int as a String.
*
* @param fieldValue - some FieldValue
* @return String
*/
protected String nominalValue(final FieldValue<?> fieldValue) {
Preconditions.checkNotNull(fieldValue, "FieldValue must not be null");
if (fieldValue instanceof IntegerFieldValue) {
return StringUtils.EMPTY + ((IntegerFieldValue) fieldValue).getValue();
}
return fieldValue.toString();
} | 3.68 |
hudi_Pipelines_bulkInsert | /**
* Bulk insert the input dataset at once.
*
   * <p>By default, the input dataset is shuffled by the partition path first and then
   * sorted by the partition path before being passed to the write function.
* The whole pipeline looks like the following:
*
* <pre>
* | input1 | ===\ /=== |sorter| === | task1 | (p1, p2)
* shuffle
* | input2 | ===/ \=== |sorter| === | task2 | (p3, p4)
*
* Note: Both input1 and input2's dataset come from partitions: p1, p2, p3, p4
* </pre>
*
* <p>The write task switches to new file handle each time it receives a record
* from the different partition path, the shuffle and sort would reduce small files.
*
* <p>The bulk insert should be run in batch execution mode.
*
* @param conf The configuration
* @param rowType The input row type
* @param dataStream The input data stream
* @return the bulk insert data stream sink
*/
public static DataStreamSink<Object> bulkInsert(Configuration conf, RowType rowType, DataStream<RowData> dataStream) {
WriteOperatorFactory<RowData> operatorFactory = BulkInsertWriteOperator.getFactory(conf, rowType);
if (OptionsResolver.isBucketIndexType(conf)) {
// TODO support bulk insert for consistent bucket index
if (OptionsResolver.isConsistentHashingBucketIndexType(conf)) {
throw new HoodieException(
"Consistent hashing bucket index does not work with bulk insert using FLINK engine. Use simple bucket index or Spark engine.");
}
String indexKeys = OptionsResolver.getIndexKeyField(conf);
int numBuckets = conf.getInteger(FlinkOptions.BUCKET_INDEX_NUM_BUCKETS);
BucketIndexPartitioner<HoodieKey> partitioner = new BucketIndexPartitioner<>(numBuckets, indexKeys);
RowDataKeyGen keyGen = RowDataKeyGen.instance(conf, rowType);
RowType rowTypeWithFileId = BucketBulkInsertWriterHelper.rowTypeWithFileId(rowType);
InternalTypeInfo<RowData> typeInfo = InternalTypeInfo.of(rowTypeWithFileId);
boolean needFixedFileIdSuffix = OptionsResolver.isNonBlockingConcurrencyControl(conf);
Map<String, String> bucketIdToFileId = new HashMap<>();
dataStream = dataStream.partitionCustom(partitioner, keyGen::getHoodieKey)
.map(record -> BucketBulkInsertWriterHelper.rowWithFileId(bucketIdToFileId, keyGen, record, indexKeys, numBuckets, needFixedFileIdSuffix), typeInfo)
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS)); // same parallelism as write task to avoid shuffle
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SORT_INPUT)) {
SortOperatorGen sortOperatorGen = BucketBulkInsertWriterHelper.getFileIdSorterGen(rowTypeWithFileId);
dataStream = dataStream.transform("file_sorter", typeInfo, sortOperatorGen.createSortOperator(conf))
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS)); // same parallelism as write task to avoid shuffle
ExecNodeUtil.setManagedMemoryWeight(dataStream.getTransformation(),
conf.getInteger(FlinkOptions.WRITE_SORT_MEMORY) * 1024L * 1024L);
}
return dataStream
.transform(opName("bucket_bulk_insert", conf), TypeInformation.of(Object.class), operatorFactory)
.uid(opUID("bucket_bulk_insert", conf))
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS))
.addSink(DummySink.INSTANCE)
.name("dummy");
}
final String[] partitionFields = FilePathUtils.extractPartitionKeys(conf);
if (partitionFields.length > 0) {
RowDataKeyGen rowDataKeyGen = RowDataKeyGen.instance(conf, rowType);
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SHUFFLE_INPUT)) {
// shuffle by partition keys
// use #partitionCustom instead of #keyBy to avoid duplicate sort operations,
// see BatchExecutionUtils#applyBatchExecutionSettings for details.
Partitioner<String> partitioner = (key, channels) -> KeyGroupRangeAssignment.assignKeyToParallelOperator(key,
KeyGroupRangeAssignment.computeDefaultMaxParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS)), channels);
dataStream = dataStream.partitionCustom(partitioner, rowDataKeyGen::getPartitionPath);
}
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SORT_INPUT)) {
String[] sortFields = partitionFields;
String operatorName = "sorter:(partition_key)";
if (conf.getBoolean(FlinkOptions.WRITE_BULK_INSERT_SORT_INPUT_BY_RECORD_KEY)) {
String[] recordKeyFields = conf.getString(FlinkOptions.RECORD_KEY_FIELD).split(",");
ArrayList<String> sortList = new ArrayList<>(Arrays.asList(partitionFields));
Collections.addAll(sortList, recordKeyFields);
sortFields = sortList.toArray(new String[0]);
operatorName = "sorter:(partition_key, record_key)";
}
SortOperatorGen sortOperatorGen = new SortOperatorGen(rowType, sortFields);
// sort by partition keys or (partition keys and record keys)
dataStream = dataStream
.transform(operatorName,
InternalTypeInfo.of(rowType),
sortOperatorGen.createSortOperator(conf))
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
ExecNodeUtil.setManagedMemoryWeight(dataStream.getTransformation(),
conf.getInteger(FlinkOptions.WRITE_SORT_MEMORY) * 1024L * 1024L);
}
}
return dataStream
.transform(opName("hoodie_bulk_insert_write", conf),
TypeInformation.of(Object.class),
operatorFactory)
// follow the parallelism of upstream operators to avoid shuffle
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS))
.addSink(DummySink.INSTANCE)
.name("dummy");
} | 3.68 |
hbase_HRegionFileSystem_createDirOnFileSystem | /**
* Creates a directory for a filesystem and configuration object. Assumes the user has already
   * checked for this directory's existence.
* @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
* whether the directory exists or not, and returns true if it exists.
*/
private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
throws IOException {
int i = 0;
IOException lastIOE = null;
int hdfsClientRetriesNumber =
conf.getInt("hdfs.client.retries.number", DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
int baseSleepBeforeRetries =
conf.getInt("hdfs.client.sleep.before.retries", DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
do {
try {
return fs.mkdirs(dir);
} catch (IOException ioe) {
lastIOE = ioe;
if (fs.exists(dir)) return true; // directory is present
try {
sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries,
hdfsClientRetriesNumber);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
}
} while (++i <= hdfsClientRetriesNumber);
throw new IOException("Exception in createDir", lastIOE);
} | 3.68 |
pulsar_PulsarClientImplementationBindingImpl_jsonifySchemaInfo | /**
* Jsonify the schema info.
*
* @param schemaInfo the schema info
* @return the jsonified schema info
*/
public String jsonifySchemaInfo(SchemaInfo schemaInfo) {
return SchemaUtils.jsonifySchemaInfo(schemaInfo);
} | 3.68 |
framework_HasValue_clear | /**
* Resets the value to the empty one.
* <p>
* This is just a shorthand for resetting the value, see the methods
* {@link #setValue(Object)} and {@link #getEmptyValue()}.
*
* @see #setValue(Object)
* @see #getEmptyValue()
*/
public default void clear() {
setValue(getEmptyValue());
} | 3.68 |
hadoop_ReadBufferManager_tryEvict | /**
   * If any buffer in the completed list can be reclaimed then reclaim it and return the buffer to the free list.
   * The objective is to find just one buffer - there is no advantage to evicting more than one.
   *
   * @return whether the eviction succeeded - i.e., were we able to free up one buffer
*/
private synchronized boolean tryEvict() {
ReadBuffer nodeToEvict = null;
if (completedReadList.size() <= 0) {
return false; // there are no evict-able buffers
}
long currentTimeInMs = currentTimeMillis();
// first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed)
for (ReadBuffer buf : completedReadList) {
if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) {
nodeToEvict = buf;
break;
}
}
if (nodeToEvict != null) {
return evict(nodeToEvict);
}
// next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see)
for (ReadBuffer buf : completedReadList) {
if (buf.isAnyByteConsumed()) {
nodeToEvict = buf;
break;
}
}
if (nodeToEvict != null) {
return evict(nodeToEvict);
}
// next, try any old nodes that have not been consumed
// Failed read buffers (with buffer index=-1) that are older than
// thresholdAge should be cleaned up, but at the same time should not
// report successful eviction.
// Queue logic expects that a buffer is freed up for read ahead when
// eviction is successful, whereas a failed ReadBuffer would have released
// its buffer when its status was set to READ_FAILED.
long earliestBirthday = Long.MAX_VALUE;
ArrayList<ReadBuffer> oldFailedBuffers = new ArrayList<>();
for (ReadBuffer buf : completedReadList) {
if ((buf.getBufferindex() != -1)
&& (buf.getTimeStamp() < earliestBirthday)) {
nodeToEvict = buf;
earliestBirthday = buf.getTimeStamp();
} else if ((buf.getBufferindex() == -1)
&& (currentTimeInMs - buf.getTimeStamp()) > thresholdAgeMilliseconds) {
oldFailedBuffers.add(buf);
}
}
for (ReadBuffer buf : oldFailedBuffers) {
evict(buf);
}
if ((currentTimeInMs - earliestBirthday > thresholdAgeMilliseconds) && (nodeToEvict != null)) {
return evict(nodeToEvict);
}
LOGGER.trace("No buffer eligible for eviction");
// nothing can be evicted
return false;
} | 3.68 |
hadoop_FedBalance_main | /**
* Main function of the FedBalance program. Parses the input arguments and
* invokes the FedBalance::run() method, via the ToolRunner.
* @param argv Command-line arguments sent to FedBalance.
*/
public static void main(String[] argv) {
Configuration conf = getDefaultConf();
FedBalance fedBalance = new FedBalance();
fedBalance.setConf(conf);
int exitCode;
try {
exitCode = ToolRunner.run(fedBalance, argv);
} catch (Exception e) {
LOG.warn("Couldn't complete FedBalance operation.", e);
exitCode = -1;
}
System.exit(exitCode);
} | 3.68 |
open-banking-gateway_ConsentAccessFactory_consentForPsuAndAspsp | /**
* Consent access for PSU-ASPSP tuple.
* @param psu Fintech user/PSU to grant consent for
* @param aspsp ASPSP(bank) that grants consent
* @param session Service session for this consent
* @return New consent access template
*/
public ConsentAccess consentForPsuAndAspsp(Psu psu, Bank aspsp, ServiceSession session) {
PsuAspspPrvKey prvKey = prvKeyRepository.findByPsuIdAndAspspId(psu.getId(), aspsp.getId())
.orElseThrow(() -> new IllegalStateException("No public key for: " + psu.getId()));
return new PsuConsentAccess(psu, aspsp, psuEncryption.forPublicKey(prvKey.getId(), prvKey.getPubKey().getKey()), session, consentRepository);
} | 3.68 |
morf_UnionSetOperator_deepCopy | /**
* @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation)
*/
@Override
public Builder<SetOperator> deepCopy(DeepCopyTransformation transformer) {
return TempTransitionalBuilderWrapper.<SetOperator>wrapper(new UnionSetOperator(getUnionStrategy(),transformer.deepCopy(getSelectStatement())));
} | 3.68 |
pulsar_JavaInstanceRunnable_setup | /**
* NOTE: this method should be called in the instance thread, in order to make class loading work.
*/
private synchronized void setup() throws Exception {
this.instanceCache = InstanceCache.getInstanceCache();
if (this.collectorRegistry == null) {
this.collectorRegistry = FunctionCollectorRegistry.getDefaultImplementation();
}
this.stats = ComponentStatsManager.getStatsManager(this.collectorRegistry, this.metricsLabels,
this.instanceCache.getScheduledExecutorService(),
this.componentType);
// initialize the thread context
ThreadContext.put("function", FunctionCommon.getFullyQualifiedName(instanceConfig.getFunctionDetails()));
ThreadContext.put("functionname", instanceConfig.getFunctionDetails().getName());
ThreadContext.put("instance", instanceConfig.getInstanceName());
log.info("Starting Java Instance {} : \n Details = {}",
instanceConfig.getFunctionDetails().getName(), instanceConfig.getFunctionDetails());
Object object;
if (instanceConfig.getFunctionDetails().getClassName()
.equals(org.apache.pulsar.functions.windowing.WindowFunctionExecutor.class.getName())) {
object = Reflections.createInstance(
instanceConfig.getFunctionDetails().getClassName(),
instanceClassLoader);
} else {
object = Reflections.createInstance(
instanceConfig.getFunctionDetails().getClassName(),
functionClassLoader);
}
if (!(object instanceof Function) && !(object instanceof java.util.function.Function)) {
throw new RuntimeException("User class must either be Function or java.util.Function");
}
// start the state table
setupStateStore();
ContextImpl contextImpl = setupContext();
// start the output producer
setupOutput(contextImpl);
// start the input consumer
setupInput(contextImpl);
// start any log topic handler
setupLogHandler();
if (!(object instanceof IdentityFunction) && !(sink instanceof PulsarSink)) {
sinkSchemaInfoProvider = new SinkSchemaInfoProvider();
}
javaInstance = new JavaInstance(contextImpl, object, instanceConfig);
try {
Thread.currentThread().setContextClassLoader(functionClassLoader);
javaInstance.initialize();
} finally {
Thread.currentThread().setContextClassLoader(instanceClassLoader);
}
// to signal member variables are initialized
isInitialized = true;
} | 3.68 |
hbase_ReplicationSourceManager_getReplicationPeers | /**
* Get the ReplicationPeers used by this ReplicationSourceManager
* @return the ReplicationPeers used by this ReplicationSourceManager
*/
public ReplicationPeers getReplicationPeers() {
return this.replicationPeers;
} | 3.68 |
flink_DefaultLeaderElectionService_getLeaderSessionID | /**
* Returns the current leader session ID for the given {@code componentId} or {@code null}, if
* the session wasn't confirmed.
*/
@VisibleForTesting
@Nullable
public UUID getLeaderSessionID(String componentId) {
synchronized (lock) {
return leaderContenderRegistry.containsKey(componentId)
? confirmedLeaderInformation
.forComponentIdOrEmpty(componentId)
.getLeaderSessionID()
: null;
}
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getCurrentState | /**
* Gets current Status.
*
   * @return - list of {@link DiskBalancerWorkEntry} describing the current state
*/
public List<DiskBalancerWorkEntry> getCurrentState() {
return currentState;
} | 3.68 |
flink_OverWindowPartitionedOrderedPreceding_as | /**
* Assigns an alias for this window that the following {@code select()} clause can refer to.
*
* @param alias alias for this over window
* @return the fully defined over window
*/
public OverWindow as(Expression alias) {
return new OverWindow(alias, partitionBy, orderBy, preceding, optionalFollowing);
} | 3.68 |
zxing_GenericGF_log | /**
* @return base 2 log of a in GF(size)
*/
int log(int a) {
if (a == 0) {
throw new IllegalArgumentException();
}
return logTable[a];
} | 3.68 |
hadoop_ReadaheadPool_getInstance | /**
* @return Return the singleton instance for the current process.
*/
public static ReadaheadPool getInstance() {
synchronized (ReadaheadPool.class) {
if (instance == null && NativeIO.isAvailable()) {
instance = new ReadaheadPool();
}
return instance;
}
} | 3.68 |
hbase_Segment_getCellLength | /**
   * Get the cell length after being serialized in {@link KeyValue}.
*/
static int getCellLength(Cell cell) {
return cell.getSerializedSize();
} | 3.68 |
flink_UnionInputGate_getNumberOfInputChannels | /** Returns the total number of input channels across all unioned input gates. */
@Override
public int getNumberOfInputChannels() {
return inputChannelToInputGateIndex.length;
} | 3.68 |
framework_VAbstractOrderedLayout_setSpacing | /**
* Turn on or off spacing in the layout.
*
* @param spacing
* True if spacing should be used, false if not
*/
public void setSpacing(boolean spacing) {
Profiler.enter("VAOL.onConnectorHierarchyChange setSpacing");
this.spacing = spacing;
// first widget does not have spacing on
// optimization to avoid looking up widget indices on every iteration
Widget firstSlot = null;
if (getWidgetCount() > 0) {
firstSlot = getWidget(0);
}
for (Slot slot : widgetToSlot.values()) {
slot.setSpacing(spacing && firstSlot != slot);
}
Profiler.leave("VAOL.onConnectorHierarchyChange setSpacing");
} | 3.68 |
hbase_CheckAndMutate_getFilter | /** Returns the filter to check */
public Filter getFilter() {
return filter;
} | 3.68 |
hbase_MultiVersionConcurrencyControl_await | /**
* Wait until the read point catches up to the write point; i.e. wait on all outstanding mvccs to
* complete.
*/
public void await() {
// Add a write and then wait on reads to catch up to it.
completeAndWait(begin());
} | 3.68 |
flink_Rowtime_watermarksPeriodicBounded | /**
* Sets a built-in watermark strategy for rowtime attributes which are out-of-order by a bounded
* time interval.
*
* <p>Emits watermarks which are the maximum observed timestamp minus the specified delay.
*
* @param delay delay in milliseconds
*/
public Rowtime watermarksPeriodicBounded(long delay) {
internalProperties.putString(
ROWTIME_WATERMARKS_TYPE, ROWTIME_WATERMARKS_TYPE_VALUE_PERIODIC_BOUNDED);
internalProperties.putLong(ROWTIME_WATERMARKS_DELAY, delay);
return this;
} | 3.68 |
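A hedged usage sketch of this (legacy) descriptor method; the event-time field name "ts", the 60-second delay, and the surrounding Schema wiring are assumptions for illustration.

// Illustrative: rowtime attribute whose watermark trails the max observed timestamp by 60 s.
Rowtime rowtime = new Rowtime()
        .timestampsFromField("ts")          // assumed event-time field in the source schema
        .watermarksPeriodicBounded(60_000); // delay in milliseconds
// typically attached via new Schema().rowtime(rowtime) when defining the table source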
hadoop_MutableCounterInt_incr | /**
* Increment the value by a delta
 * @param delta the amount to increment the counter by
*/
public synchronized void incr(int delta) {
value.addAndGet(delta);
setChanged();
} | 3.68 |
hudi_ClusteringUtils_getClusteringPlan | /**
 * Get the clustering plan for a pending replacecommit instant from the timeline.
 * @param metaClient the table meta client used to read the requested replace metadata
 * @param pendingReplaceInstant the pending replacecommit instant to inspect
 * @return the instant paired with its clustering plan if it is a clustering operation, otherwise {@code Option.empty()}
 */
public static Option<Pair<HoodieInstant, HoodieClusteringPlan>> getClusteringPlan(HoodieTableMetaClient metaClient, HoodieInstant pendingReplaceInstant) {
try {
Option<HoodieRequestedReplaceMetadata> requestedReplaceMetadata = getRequestedReplaceMetadata(metaClient, pendingReplaceInstant);
if (requestedReplaceMetadata.isPresent() && WriteOperationType.CLUSTER.name().equals(requestedReplaceMetadata.get().getOperationType())) {
return Option.of(Pair.of(pendingReplaceInstant, requestedReplaceMetadata.get().getClusteringPlan()));
}
return Option.empty();
} catch (IOException e) {
throw new HoodieIOException("Error reading clustering plan " + pendingReplaceInstant.getTimestamp(), e);
}
} | 3.68 |
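A hedged caller-side sketch; metaClient and pendingReplaceInstant are assumed to come from the table's active timeline, and only the method above plus Hudi's Option/Pair accessors are used.

// Illustrative: resolve the clustering plan for a pending replacecommit instant, if it has one.
Option<Pair<HoodieInstant, HoodieClusteringPlan>> planOpt =
        ClusteringUtils.getClusteringPlan(metaClient, pendingReplaceInstant);
if (planOpt.isPresent()) {
    HoodieClusteringPlan plan = planOpt.get().getRight();
    // inspect the plan (input file groups, strategy, ...) before scheduling or executing clustering
} else {
    // pending replacecommit, but not a clustering operation
}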
morf_TruncateStatement_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser.dispatch(getTable());
} | 3.68 |
streampipes_TagActionMap_setTagAction | /**
* Sets a particular {@link TagAction} for a given tag. Any existing TagAction for that tag will
* be removed and overwritten.
*
* @param tag The tag (will be stored internally 1. as it is, 2. lower-case, 3. upper-case)
* @param action The {@link TagAction}
*/
protected void setTagAction(final String tag, final TagAction action) {
put(tag.toUpperCase(), action);
put(tag.toLowerCase(), action);
put(tag, action);
} | 3.68 |
hbase_SchemaLocking_clear | /**
* Removes all locks by clearing the maps. Used when procedure executor is stopped for failure and
* recovery testing.
*/
void clear() {
serverLocks.clear();
namespaceLocks.clear();
tableLocks.clear();
regionLocks.clear();
peerLocks.clear();
} | 3.68 |
hbase_MobUtils_deserializeMobFileRefs | /**
* Deserialize the set of referenced mob hfiles from store file metadata.
* @param bytes compatibly serialized data. can not be null
* @return a setmultimap of original table to list of hfile names. will be empty if no values.
* @throws IllegalStateException if there are values but no table name
*/
public static ImmutableSetMultimap.Builder<TableName, String> deserializeMobFileRefs(byte[] bytes)
throws IllegalStateException {
ImmutableSetMultimap.Builder<TableName, String> map = ImmutableSetMultimap.builder();
if (bytes.length > 1) {
// TODO avoid turning the tablename pieces in to strings.
String s = Bytes.toString(bytes);
String[] tables = s.split("//");
for (String tableEnc : tables) {
final int delim = tableEnc.indexOf('/');
if (delim <= 0) {
throw new IllegalStateException("MOB reference data does not match expected encoding: "
+ "no table name included before list of mob refs.");
}
TableName table = TableName.valueOf(tableEnc.substring(0, delim));
String[] refs = tableEnc.substring(delim + 1).split(",");
map.putAll(table, refs);
}
} else {
if (LOG.isDebugEnabled()) {
// array length 1 should be the NULL_VALUE.
if (!Arrays.equals(HStoreFile.NULL_VALUE, bytes)) {
LOG.debug(
"Serialized MOB file refs array was treated as the placeholder 'no entries' but"
+ " didn't have the expected placeholder byte. expected={} and actual={}",
Arrays.toString(HStoreFile.NULL_VALUE), Arrays.toString(bytes));
}
}
}
return map;
} | 3.68 |
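The wire format the method expects is easiest to see with a tiny example; the table and hfile names below are made up, and finishing the returned builder with build() is an assumption based on the standard Guava builder API.

// Illustrative: entries are "<table>/<ref1>,<ref2>,..." joined by "//".
byte[] refs = Bytes.toBytes("ns1:tableA/hfile-1,hfile-2//tableB/hfile-3");
ImmutableSetMultimap<TableName, String> parsed =
        MobUtils.deserializeMobFileRefs(refs).build();
// parsed maps ns1:tableA -> {hfile-1, hfile-2} and tableB -> {hfile-3}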
hadoop_ClasspathConstructor_append | /**
* Append an entry
* @param path path
*/
public void append(String path) {
pathElements.add(path);
} | 3.68 |
querydsl_SQLExpressions_regrSyy | /**
* REGR_SYY makes the following computation after the elimination of null (arg1, arg2) pairs:
*
* <p>{@code REGR_COUNT(arg1, arg2) * VAR_POP(arg1)}</p>
*
* @param arg1 first arg
* @param arg2 second arg
* @return regr_syy(arg1, arg2)
*/
public static WindowOver<Double> regrSyy(Expression<? extends Number> arg1, Expression<? extends Number> arg2) {
return new WindowOver<Double>(Double.class, SQLOps.REGR_SYY, arg1, arg2);
} | 3.68 |
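A hedged Querydsl usage sketch; the employee metamodel, its columns, and the already-configured query are hypothetical.

// Illustrative: regr_syy of salary on years of service, computed per department.
query.select(
            SQLExpressions.regrSyy(employee.salary, employee.yearsOfService)
                    .over()
                    .partitionBy(employee.departmentId))
     .from(employee);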
graphhopper_EdgeChangeBuilder_addVirtualEdges | /**
* Adds the virtual edges adjacent to the real tower nodes
*/
private void addVirtualEdges(boolean base, int node, int virtNode) {
QueryOverlay.EdgeChanges edgeChanges = edgeChangesAtRealNodes.get(node);
if (edgeChanges == null) {
edgeChanges = new QueryOverlay.EdgeChanges(2, 2);
edgeChangesAtRealNodes.put(node, edgeChanges);
}
EdgeIteratorState edge = base
? getVirtualEdge(virtNode * 4 + BASE_SNAP)
: getVirtualEdge(virtNode * 4 + ADJ_SNAP);
edgeChanges.getAdditionalEdges().add(edge);
} | 3.68 |
hbase_BinaryComparator_parseFrom | /**
* Parse a serialized representation of {@link BinaryComparator}
* @param pbBytes A pb serialized {@link BinaryComparator} instance
* @return An instance of {@link BinaryComparator} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static BinaryComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BinaryComparator proto;
try {
proto = ComparatorProtos.BinaryComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new BinaryComparator(proto.getComparable().getValue().toByteArray());
} | 3.68 |
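A hedged round-trip sketch pairing parseFrom with toByteArray(); the row key is arbitrary.

// Illustrative: serialize a comparator to its protobuf form and parse it back.
BinaryComparator original = new BinaryComparator(Bytes.toBytes("row-0001"));
byte[] pb = original.toByteArray();
BinaryComparator restored = BinaryComparator.parseFrom(pb);
// restored.getValue() holds the same bytes as the original row key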
hadoop_DistributedCache_setLocalArchives | /**
* Set the conf to contain the location for localized archives. Used
* by internal DistributedCache code.
* @param conf The conf to modify to contain the localized caches
* @param str a comma separated list of local archives
*/
@Deprecated
public static void setLocalArchives(Configuration conf, String str) {
conf.set(CACHE_LOCALARCHIVES, str);
} | 3.68 |
Activiti_DelegateExpressionTaskListener_getExpressionText | /**
* returns the expression text for this task listener. Comes in handy if you want to check which listeners you already have.
*/
public String getExpressionText() {
return expression.getExpressionText();
} | 3.68 |