name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_KerberosAuthException_getTicketCacheFile | /** @return The ticket cache file path, or null if not set. */
public String getTicketCacheFile() {
return ticketCacheFile;
} | 3.68 |
Activiti_TablePage_getSize | /**
* @return the actual number of rows in this page.
*/
public long getSize() {
return rowData.size();
} | 3.68 |
hbase_Addressing_parseHostname | /**
* Parse the hostname portion of a host-and-port string
* @param hostAndPort Formatted as <code><hostname> ':' <port></code>
* @return The hostname portion of <code>hostAndPort</code>
*/
public static String parseHostname(final String hostAndPort) {
int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR);
if (colonIndex < 0) {
throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
}
return hostAndPort.substring(0, colonIndex);
} | 3.68 |
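A minimal call sketch for the parser above; the address literal is invented for illustration.

```java
String hostAndPort = "regionserver-1.example.org:16020";  // hypothetical host-and-port string
String host = Addressing.parseHostname(hostAndPort);      // "regionserver-1.example.org"
// A string without a ':' separator makes parseHostname throw IllegalArgumentException.
```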
flink_Tuple6_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple6<T0, T1, T2, T3, T4, T5> copy() {
return new Tuple6<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5);
} | 3.68 |
flink_CopyOnWriteStateTable_stateSnapshot | /**
* Creates a snapshot of this {@link CopyOnWriteStateTable}, to be written in checkpointing.
*
* @return a snapshot from this {@link CopyOnWriteStateTable}, for checkpointing.
*/
@Nonnull
@Override
public CopyOnWriteStateTableSnapshot<K, N, S> stateSnapshot() {
return new CopyOnWriteStateTableSnapshot<>(
this,
getKeySerializer().duplicate(),
getNamespaceSerializer().duplicate(),
getStateSerializer().duplicate(),
getMetaInfo()
.getStateSnapshotTransformFactory()
.createForDeserializedState()
.orElse(null));
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredSingleValueSelection | /**
* Defines a configuration parameter that lets preprocessing developers
* select from a list of pre-defined configuration options.
* The parameter will be rendered as a RadioGroup in the StreamPipes UI.
*
* @param label The {@link org.apache.streampipes.sdk.helpers.Label}
* that describes why this parameter is needed in a user-friendly manner.
* @param options A list of {@link org.apache.streampipes.model.staticproperty.Option} elements. Use
*                {@link org.apache.streampipes.sdk.helpers.Options}
*                to create option elements from string values.
* @param horizontalRendering when set to true, the options are rendered horizontally.
* @return this
*/
public K requiredSingleValueSelection(Label label,
List<Option> options,
boolean horizontalRendering) {
OneOfStaticProperty osp =
new OneOfStaticProperty(label.getInternalId(), label.getLabel(), label.getDescription(), horizontalRendering);
osp.setOptions(options);
this.staticProperties.add(osp);
return me();
} | 3.68 |
hbase_CommonFSUtils_logFileSystemState | /**
* Log the current state of the filesystem from a certain root directory
* @param fs filesystem to investigate
* @param root root file/directory to start logging from
* @param log log to output information
* @throws IOException if an unexpected exception occurs
*/
public static void logFileSystemState(final FileSystem fs, final Path root, Logger log)
throws IOException {
log.debug("File system contents for path {}", root);
logFSTree(log, fs, root, "|-");
} | 3.68 |
flink_HiveParserTypeCheckCtx_getOuterRR | /** @return the outerRR */
public HiveParserRowResolver getOuterRR() {
return outerRR;
} | 3.68 |
flink_ObjectArrayTypeInfo_getInfoFor | /**
* Creates a new {@link org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo} from a {@link
* TypeInformation} for the component type.
*
* <p>This must be used in cases where the complete type of the array is not available as a
* {@link java.lang.reflect.Type} or {@link java.lang.Class}.
*/
@SuppressWarnings("unchecked")
@PublicEvolving
public static <T, C> ObjectArrayTypeInfo<T, C> getInfoFor(TypeInformation<C> componentInfo) {
checkNotNull(componentInfo);
return new ObjectArrayTypeInfo<T, C>(
(Class<T>) Array.newInstance(componentInfo.getTypeClass(), 0).getClass(),
componentInfo);
} | 3.68 |
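A short usage sketch, assuming Flink's BasicTypeInfo constants are on the classpath:

```java
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo;

// Array type information derived only from the component type (String here).
ObjectArrayTypeInfo<String[], String> arrayInfo =
        ObjectArrayTypeInfo.getInfoFor(BasicTypeInfo.STRING_TYPE_INFO);
```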
flink_EmbeddedLeaderService_addContender | /** Callback from leader contenders when they start their service. */
private void addContender(
EmbeddedLeaderElection embeddedLeaderElection, LeaderContender contender) {
synchronized (lock) {
checkState(!shutdown, "leader election is shut down");
checkState(!embeddedLeaderElection.running, "leader election is already started");
try {
if (!allLeaderContenders.add(embeddedLeaderElection)) {
throw new IllegalStateException(
"leader election was added to this service multiple times");
}
embeddedLeaderElection.contender = contender;
embeddedLeaderElection.running = true;
updateLeader()
.whenComplete(
(aVoid, throwable) -> {
if (throwable != null) {
fatalError(throwable);
}
});
} catch (Throwable t) {
fatalError(t);
}
}
} | 3.68 |
flink_OptimizableHashSet_containsNull | /** Is there a null key. */
public boolean containsNull() {
return containsNull;
} | 3.68 |
hadoop_AWSRequestAnalyzer_isRequestNotAlwaysInSpan | /**
* Predicate which returns true if the request is of a kind which
* could be outside a span because of how the AWS SDK generates them.
* @param request request
* @return true if the transfer manager creates them.
*/
public static boolean
isRequestNotAlwaysInSpan(final Object request) {
return request instanceof UploadPartCopyRequest
|| request instanceof CompleteMultipartUploadRequest
|| request instanceof GetBucketLocationRequest;
} | 3.68 |
morf_AbstractSqlDialectTest_compareStatements | /**
* Helper method to compare one statement to another.
*
* @param expected The expected statement.
* @param actual Actual statement.
*/
@SuppressWarnings("unchecked")
protected void compareStatements(String expected, Iterable<String> actual) {
compareStatements(Arrays.asList(expected), actual);
} | 3.68 |
flink_TieredStorageConfiguration_getEachTierExclusiveBufferNum | /**
* Get exclusive buffer number of each tier.
*
* @return buffer number of each tier.
*/
public List<Integer> getEachTierExclusiveBufferNum() {
return tierExclusiveBuffers;
} | 3.68 |
hadoop_CommonAuditContext_getGlobalContextEntries | /**
* Get an iterator over the global entries.
* Thread safe.
* @return an iterable to enumerate the values.
*/
public static Iterable<Map.Entry<String, String>>
getGlobalContextEntries() {
return new GlobalIterable();
} | 3.68 |
hadoop_StringKeyConverter_encode | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
* #encode(java.lang.Object)
*/
@Override
public byte[] encode(String key) {
return Separator.encode(key, Separator.SPACE, Separator.TAB);
} | 3.68 |
hadoop_TimelinePutResponse_getEntityId | /**
* Get the entity Id
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
} | 3.68 |
hadoop_NodePlan_toJson | /**
* Returns a Json representation of NodePlan.
*
* @return - json String
* @throws IOException
*/
public String toJson() throws IOException {
return WRITER.writeValueAsString(this);
} | 3.68 |
flink_RocksDBNativeMetricOptions_getMonitorTickerTypes | /** @return the enabled RocksDB statistics metrics. */
public Collection<TickerType> getMonitorTickerTypes() {
return Collections.unmodifiableCollection(monitorTickerTypes);
} | 3.68 |
rocketmq-connect_DebeziumTimeTypes_maybeBindDebeziumLogical | /**
* Binds the given value to the prepared statement as a Debezium logical (temporal) type, if the schema name matches one.
*
* @param statement the prepared statement to bind into
* @param index the parameter index to bind at
* @param schema the schema describing the value
* @param value the value to bind
* @param timeZone the time zone used to build the binding calendar
* @return true if the value was bound as a Debezium temporal type, false otherwise
* @throws SQLException if binding the parameter fails
*/
public static boolean maybeBindDebeziumLogical(
PreparedStatement statement,
int index,
Schema schema,
Object value,
TimeZone timeZone
) throws SQLException {
if (schema.getName() != null) {
switch (schema.getName()) {
case Date.SCHEMA_NAME:
statement.setDate(index,
new java.sql.Date(
(long) DebeziumTimeTypes.toMillsTimestamp(Date.SCHEMA_NAME, value)
),
DateTimeUtils.getTimeZoneCalendar(timeZone)
);
return true;
case io.debezium.time.Timestamp.SCHEMA_NAME:
statement.setTimestamp(index,
new java.sql.Timestamp((long) DebeziumTimeTypes.toMillsTimestamp(io.debezium.time.Timestamp.SCHEMA_NAME, value)),
DateTimeUtils.getTimeZoneCalendar(timeZone)
);
return true;
case ZonedTimestamp.SCHEMA_NAME:
statement.setTimestamp(index,
new java.sql.Timestamp((long) toMillsTimestamp(ZonedTimestamp.SCHEMA_NAME, value)),
DateTimeUtils.getTimeZoneCalendar(timeZone)
);
return true;
default:
return false;
}
}
return false;
} | 3.68 |
morf_FieldReference_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return new HashCodeBuilder()
.appendSuper(super.hashCode())
.append(direction)
.append(name)
.append(nullValueHandling)
.append(table)
.toHashCode();
} | 3.68 |
flink_TimestampedFileInputSplit_getModificationTime | /** @return The modification time of the file this split belongs to. */
public long getModificationTime() {
return this.modificationTime;
} | 3.68 |
flink_HsSubpartitionFileReaderImpl_getNextToLoad | /** Returns a negative value if nothing should be loaded. */
private int getNextToLoad() {
int nextToLoad = Math.max(lastLoaded, lastConsumed) + 1;
int maxToLoad = lastConsumed + maxBuffersReadAhead;
return nextToLoad <= maxToLoad ? nextToLoad : -1;
} | 3.68 |
hbase_ScannerModel_setStartTime | /**
* @param startTime the lower bound on timestamps of values of interest
*/
public void setStartTime(long startTime) {
this.startTime = startTime;
} | 3.68 |
hadoop_PlacementConstraints_targetCardinality | /**
* This constraint generalizes the cardinality and target constraints.
*
* Consider a set of nodes N that belongs to the scope specified in the
* constraint. If the target expressions are satisfied at least minCardinality
* times and at most maxCardinality times in the node set N, then the
* constraint is satisfied.
*
* For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))},
* requires an allocation to be placed within a rack that has at least 2 and
* at most 10 other allocations with tag "zk".
*
* @param scope the scope of the constraint
* @param minCardinality the minimum number of times the target expressions
* have to be satisfied with the given scope
* @param maxCardinality the maximum number of times the target expressions
* have to be satisfied with the given scope
* @param targetExpressions the target expressions
* @return the resulting placement constraint
*/
public static AbstractConstraint targetCardinality(String scope,
int minCardinality, int maxCardinality,
TargetExpression... targetExpressions) {
return new SingleConstraint(scope, minCardinality, maxCardinality,
targetExpressions);
} | 3.68 |
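A compact sketch of the Javadoc example above, assuming the RACK scope constant and the PlacementTargets.allocationTag helper that the example refers to:

```java
// At least 2 and at most 10 allocations tagged "zk" are required per rack.
AbstractConstraint zkPerRack =
        PlacementConstraints.targetCardinality(
                PlacementConstraints.RACK, 2, 10,
                PlacementConstraints.PlacementTargets.allocationTag("zk"));
```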
flink_CliFrontend_cancel | /**
* Executes the CANCEL action.
*
* @param args Command line arguments for the cancel action.
*/
protected void cancel(String[] args) throws Exception {
LOG.info("Running 'cancel' command.");
final Options commandOptions = CliFrontendParser.getCancelCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
CancelOptions cancelOptions = new CancelOptions(commandLine);
// evaluate help flag
if (cancelOptions.isPrintHelp()) {
CliFrontendParser.printHelpForCancel(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = validateAndGetActiveCommandLine(commandLine);
final String[] cleanedArgs = cancelOptions.getArgs();
if (cancelOptions.isWithSavepoint()) {
logAndSysout(
"DEPRECATION WARNING: Cancelling a job with savepoint is deprecated. Use \"stop\" instead.");
final JobID jobId;
final String targetDirectory;
if (cleanedArgs.length > 0) {
jobId = parseJobId(cleanedArgs[0]);
targetDirectory = cancelOptions.getSavepointTargetDirectory();
} else {
jobId = parseJobId(cancelOptions.getSavepointTargetDirectory());
targetDirectory = null;
}
final SavepointFormatType formatType = cancelOptions.getFormatType();
if (targetDirectory == null) {
logAndSysout(
"Cancelling job "
+ jobId
+ " with "
+ formatType
+ " savepoint to default savepoint directory.");
} else {
logAndSysout(
"Cancelling job "
+ jobId
+ " with "
+ formatType
+ " savepoint to "
+ targetDirectory
+ '.');
}
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) -> {
final String savepointPath;
try {
savepointPath =
clusterClient
.cancelWithSavepoint(jobId, targetDirectory, formatType)
.get(
getClientTimeout(effectiveConfiguration)
.toMillis(),
TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new FlinkException("Could not cancel job " + jobId + '.', e);
}
logAndSysout(
"Cancelled job "
+ jobId
+ ". Savepoint stored in "
+ savepointPath
+ '.');
});
} else {
final JobID jobId;
if (cleanedArgs.length > 0) {
jobId = parseJobId(cleanedArgs[0]);
} else {
throw new CliArgsException("Missing JobID. Specify a JobID to cancel a job.");
}
logAndSysout("Cancelling job " + jobId + '.');
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) -> {
try {
clusterClient
.cancel(jobId)
.get(
getClientTimeout(effectiveConfiguration).toMillis(),
TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new FlinkException("Could not cancel job " + jobId + '.', e);
}
});
logAndSysout("Cancelled job " + jobId + '.');
}
} | 3.68 |
hudi_ExternalSpillableMap_iterator | /**
* A custom iterator to wrap over iterating in-memory + disk spilled data.
*/
public Iterator<R> iterator() {
return new IteratorWrapper<>(inMemoryMap.values().iterator(), getDiskBasedMap().iterator());
} | 3.68 |
framework_ApplicationConnection_getResource | /**
* Gets a resource that has been pre-loaded via UIDL, such as custom
* layouts.
*
* @param name
* identifier of the resource to get
* @return the resource
*/
public String getResource(String name) {
return resourcesMap.get(name);
} | 3.68 |
hbase_MobFileCache_evict | /**
* Evicts the lru cached mob files when the count of the cached files is larger than the
* threshold.
*/
public void evict() {
if (isCacheEnabled) {
// Ensure only one eviction at a time
if (!evictionLock.tryLock()) {
return;
}
printStatistics();
List<CachedMobFile> evictedFiles = new ArrayList<>();
try {
if (map.size() <= mobFileMaxCacheSize) {
return;
}
List<CachedMobFile> files = new ArrayList<>(map.values());
Collections.sort(files);
int start = (int) (mobFileMaxCacheSize * evictRemainRatio);
if (start >= 0) {
for (int i = start; i < files.size(); i++) {
String name = files.get(i).getFileName();
CachedMobFile evictedFile = map.remove(name);
if (evictedFile != null) {
evictedFiles.add(evictedFile);
}
}
}
} finally {
evictionLock.unlock();
}
// EvictionLock is released. Close the evicted files one by one.
// The closes are sync in the closeFile method.
for (CachedMobFile evictedFile : evictedFiles) {
closeFile(evictedFile);
}
evictedFileCount.add(evictedFiles.size());
}
} | 3.68 |
framework_AbstractConnector_ensureHandlerManager | /**
* Ensure there is a handler manager. If one doesn't exist before this
* method is called, it gets created.
*
* @return the handler manager
*/
protected HandlerManager ensureHandlerManager() {
if (handlerManager == null) {
handlerManager = new HandlerManager(this);
}
return handlerManager;
} | 3.68 |
morf_DataSourceAdapter_getLogWriter | /**
* @see javax.sql.CommonDataSource#getLogWriter()
*/
@Override
public PrintWriter getLogWriter() throws SQLException {
throw new UnsupportedOperationException("Log writer not supported");
} | 3.68 |
hbase_HRegion_checkAndPrepareMutation | /**
* Helper method that checks and prepares only one mutation. This can be used to implement
* {@link #checkAndPrepare()} for entire Batch. NOTE: As CP
* prePut()/preDelete()/preIncrement()/preAppend() hooks may modify mutations, this method
* should be called after prePut()/preDelete()/preIncrement()/preAppend() CP hooks are run for
* the mutation
*/
protected void checkAndPrepareMutation(Mutation mutation, final long timestamp)
throws IOException {
region.checkRow(mutation.getRow(), "batchMutate");
if (mutation instanceof Put) {
// Check the families in the put. If bad, skip this one.
checkAndPreparePut((Put) mutation);
region.checkTimestamps(mutation.getFamilyCellMap(), timestamp);
} else if (mutation instanceof Delete) {
region.prepareDelete((Delete) mutation);
} else if (mutation instanceof Increment || mutation instanceof Append) {
region.checkFamilies(mutation.getFamilyCellMap().keySet(), mutation.getDurability());
}
} | 3.68 |
querydsl_BooleanBuilder_andAnyOf | /**
* Create the intersection of this and the union of the given args
* {@code (this && (arg1 || arg2 ... || argN))}
*
* @param args union of predicates
* @return the current object
*/
public BooleanBuilder andAnyOf(Predicate... args) {
if (args.length > 0) {
and(ExpressionUtils.anyOf(args));
}
return this;
} | 3.68 |
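A usage sketch with a hypothetical generated Q-type (QUser); it builds the predicate (active && (role == "admin" || role == "owner")):

```java
// QUser is a hypothetical generated Querydsl Q-type with 'active' (boolean) and 'role' (string) paths.
QUser user = QUser.user;
BooleanBuilder builder = new BooleanBuilder(user.active.isTrue());
builder.andAnyOf(user.role.eq("admin"), user.role.eq("owner"));
// builder.getValue() now yields: user.active = true && (user.role = 'admin' || user.role = 'owner')
```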
flink_InputTypeStrategies_or | /**
* Strategy for a disjunction of multiple {@link ArgumentTypeStrategy}s into one like {@code
* f(NUMERIC || STRING)}.
*
* <p>Some {@link ArgumentTypeStrategy}s cannot contribute an inferred type that is different
* from the input type (e.g. {@link #LITERAL}). Therefore, the order {@code f(X || Y)} or {@code
* f(Y || X)} matters as it defines the precedence in case the result must be casted to a more
* specific type.
*
* <p>This strategy aims to infer a type that is equal to the input type (to prevent unnecessary
* casting) or (if this is not possible) the first more specific, casted type.
*/
public static OrArgumentTypeStrategy or(ArgumentTypeStrategy... strategies) {
return new OrArgumentTypeStrategy(Arrays.asList(strategies));
} | 3.68 |
flink_JoinOperator_projectTuple13 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>
ProjectJoin<I1, I2, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>
projectTuple13() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> tType =
new TupleTypeInfo<
Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(fTypes);
return new ProjectJoin<
I1, I2, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
flink_GroupReduceOperatorBase_setCombinable | /**
* Marks the group reduce operation as combinable. Combinable operations may pre-reduce the data
* before the actual group reduce operations. Combinable user-defined functions must implement
* the interface {@link GroupCombineFunction}.
*
* @param combinable Flag to mark the group reduce operation as combinable.
*/
public void setCombinable(boolean combinable) {
// sanity check
if (combinable
&& !GroupCombineFunction.class.isAssignableFrom(
this.userFunction.getUserCodeClass())) {
throw new IllegalArgumentException(
"Cannot set a UDF as combinable if it does not implement the interface "
+ GroupCombineFunction.class.getName());
} else {
this.combinable = combinable;
}
} | 3.68 |
flink_FileSourceSplit_path | /** Gets the file's path. */
public Path path() {
return filePath;
} | 3.68 |
AreaShop_FileManager_getRent | /**
* Get a rental region.
* @param name The name of the rental region (will be normalized)
* @return RentRegion if it could be found, otherwise null
*/
public RentRegion getRent(String name) {
GeneralRegion region = regions.get(name.toLowerCase());
if(region instanceof RentRegion) {
return (RentRegion)region;
}
return null;
} | 3.68 |
flink_TestcontainersSettings_getDependencies | /** @return The dependencies (other containers). */
public Collection<GenericContainer<?>> getDependencies() {
return dependencies;
} | 3.68 |
framework_DateTimeFieldElement_setValue | /**
* Set value of the date field element.
*
* @param chars
* new value of the date field
* @throws ReadOnlyException
* if the date field is in readonly mode
*/
public void setValue(CharSequence chars) throws ReadOnlyException {
if (isReadOnly()) {
throw new ReadOnlyException();
}
WebElement elem = findElement(By.tagName("input"));
TestBenchElement tbElement = (TestBenchElement) elem;
clearElementClientSide(tbElement);
tbElement.sendKeys(chars);
tbElement.sendKeys(Keys.TAB);
} | 3.68 |
hudi_HoodieTableConfig_create | /**
* Initialize the hoodie meta directory and any necessary files inside the meta (including the hoodie.properties).
*/
public static void create(FileSystem fs, Path metadataFolder, Properties properties)
throws IOException {
if (!fs.exists(metadataFolder)) {
fs.mkdirs(metadataFolder);
}
HoodieConfig hoodieConfig = new HoodieConfig(properties);
Path propertyPath = new Path(metadataFolder, HOODIE_PROPERTIES_FILE);
try (FSDataOutputStream outputStream = fs.create(propertyPath)) {
if (!hoodieConfig.contains(NAME)) {
throw new IllegalArgumentException(NAME.key() + " property needs to be specified");
}
hoodieConfig.setDefaultValue(TYPE);
if (hoodieConfig.getString(TYPE).equals(HoodieTableType.MERGE_ON_READ.name())) {
hoodieConfig.setDefaultValue(PAYLOAD_TYPE);
hoodieConfig.setDefaultValue(RECORD_MERGER_STRATEGY);
}
hoodieConfig.setDefaultValue(ARCHIVELOG_FOLDER);
if (!hoodieConfig.contains(TIMELINE_LAYOUT_VERSION)) {
// Use latest Version as default unless forced by client
hoodieConfig.setValue(TIMELINE_LAYOUT_VERSION, TimelineLayoutVersion.CURR_VERSION.toString());
}
if (hoodieConfig.contains(BOOTSTRAP_BASE_PATH)) {
// Use the default bootstrap index class.
hoodieConfig.setDefaultValue(BOOTSTRAP_INDEX_CLASS_NAME, BootstrapIndexType.getDefaultBootstrapIndexClassName(hoodieConfig));
}
if (hoodieConfig.contains(TIMELINE_TIMEZONE)) {
HoodieInstantTimeGenerator.setCommitTimeZone(HoodieTimelineTimeZone.valueOf(hoodieConfig.getString(TIMELINE_TIMEZONE)));
}
hoodieConfig.setDefaultValue(DROP_PARTITION_COLUMNS);
storeProperties(hoodieConfig.getProps(), outputStream);
}
} | 3.68 |
flink_Tuple19_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple19)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple19 tuple = (Tuple19) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
return true;
} | 3.68 |
hbase_KeyValue_hashCode | /**
* In line with {@link #equals(Object)}, only uses the key portion, not the value.
*/
@Override
public int hashCode() {
return calculateHashForKey(this);
} | 3.68 |
flink_MapView_isEmpty | /**
* Returns true if the map view contains no key-value mappings, otherwise false.
*
* @return True if the map view contains no key-value mappings, otherwise false.
* @throws Exception Thrown if the system cannot access the state.
*/
public boolean isEmpty() throws Exception {
return map.isEmpty();
} | 3.68 |
hbase_TableBackupClient_completeBackup | /**
* Complete the overall backup.
* @param backupInfo backup info
* @throws IOException exception
*/
protected void completeBackup(final Connection conn, BackupInfo backupInfo,
BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
// set the complete timestamp of the overall backup
backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime());
// set overall backup status: complete
backupInfo.setState(BackupState.COMPLETE);
backupInfo.setProgress(100);
// add and store the manifest for the backup
addManifest(backupInfo, backupManager, type, conf);
// compose the backup complete data
String backupCompleteData =
obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() + ",completets="
+ backupInfo.getCompleteTs() + ",bytescopied=" + backupInfo.getTotalBytesCopied();
if (LOG.isDebugEnabled()) {
LOG.debug("Backup " + backupInfo.getBackupId() + " finished: " + backupCompleteData);
}
// when full backup is done:
// - delete HBase snapshot
// - clean up directories with prefix "exportSnapshot-", which are generated when exporting
// snapshots
// incremental backups use distcp, which handles cleaning up its own directories
if (type == BackupType.FULL) {
deleteSnapshots(conn, backupInfo, conf);
cleanupExportSnapshotLog(conf);
}
BackupSystemTable.deleteSnapshot(conn);
backupManager.updateBackupInfo(backupInfo);
// Finish active session
backupManager.finishBackupSession();
LOG.info("Backup " + backupInfo.getBackupId() + " completed.");
} | 3.68 |
druid_NameResolveVisitor_isRowNumColumn | /**
* Determines whether the expression is rownum or an alias of rownum.
*
* @param x the expression to check for being rownum or a rownum alias
* @param source the query block that is searched to resolve the alias
* @return true if it is, false if it is not
*/
public boolean isRowNumColumn(SQLExpr x, SQLSelectQueryBlock source) {
if (x instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identifierExpr = (SQLIdentifierExpr) x;
long nameHashCode64 = identifierExpr.nameHashCode64();
if (nameHashCode64 == FnvHash.Constants.ROWNUM) {
return true;
}
SQLSelectQueryBlock queryBlock = source;
if (queryBlock.getFrom() instanceof SQLSubqueryTableSource
&& ((SQLSubqueryTableSource) queryBlock.getFrom()).getSelect().getQuery() instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock subQueryBlock = ((SQLSubqueryTableSource) queryBlock.getFrom()).getSelect().getQueryBlock();
SQLSelectItem selectItem = subQueryBlock.findSelectItem(nameHashCode64);
if (selectItem != null && isRowNumColumn(selectItem.getExpr(), subQueryBlock)) {
return true;
}
}
}
return false;
} | 3.68 |
hadoop_ProtobufHelper_tokenFromProto | /**
* Get a token from a TokenProto payload.
* @param tokenProto marshalled token
* @return the token.
*/
public static Token<? extends TokenIdentifier> tokenFromProto(
TokenProto tokenProto) {
return ShadedProtobufHelper.tokenFromProto(tokenProto);
} | 3.68 |
hadoop_FederationStateStoreFacade_removeStoredMasterKey | /**
* The Router supports removing a master key ({@link RouterMasterKey}).
*
* @param newKey Key used for generating and verifying delegation tokens
* @throws YarnException if the call to the state store is unsuccessful
* @throws IOException An IO Error occurred
*/
public void removeStoredMasterKey(DelegationKey newKey) throws YarnException, IOException {
LOG.info("Removing master key with keyID {}.", newKey.getKeyId());
ByteBuffer keyBytes = ByteBuffer.wrap(newKey.getEncodedKey());
RouterMasterKey masterKey = RouterMasterKey.newInstance(newKey.getKeyId(),
keyBytes, newKey.getExpiryDate());
RouterMasterKeyRequest keyRequest = RouterMasterKeyRequest.newInstance(masterKey);
stateStore.removeStoredMasterKey(keyRequest);
} | 3.68 |
hbase_StoreFileInfo_getReferencedFileStatus | /**
* Get the {@link FileStatus} of the file referenced by this StoreFileInfo
* @param fs The current file system to use.
* @return The {@link FileStatus} of the file referenced by this StoreFileInfo
*/
public FileStatus getReferencedFileStatus(final FileSystem fs) throws IOException {
FileStatus status;
if (this.reference != null) {
if (this.link != null) {
FileNotFoundException exToThrow = null;
for (int i = 0; i < this.link.getLocations().length; i++) {
// HFileLink Reference
try {
return link.getFileStatus(fs);
} catch (FileNotFoundException ex) {
// try the other location
exToThrow = ex;
}
}
throw exToThrow;
} else {
// HFile Reference
Path referencePath = getReferredToFile(this.getPath());
status = fs.getFileStatus(referencePath);
}
} else {
if (this.link != null) {
FileNotFoundException exToThrow = null;
for (int i = 0; i < this.link.getLocations().length; i++) {
// HFileLink
try {
return link.getFileStatus(fs);
} catch (FileNotFoundException ex) {
// try the other location
exToThrow = ex;
}
}
throw exToThrow;
} else {
status = fs.getFileStatus(initialPath);
}
}
return status;
} | 3.68 |
hbase_ActiveMasterManager_fetchAndSetActiveMasterServerName | /**
* Fetches the active master's ServerName from zookeeper.
*/
private void fetchAndSetActiveMasterServerName() {
LOG.debug("Attempting to fetch active master sn from zk");
try {
activeMasterServerName = MasterAddressTracker.getMasterAddress(watcher);
} catch (IOException | KeeperException e) {
// Log and ignore for now and re-fetch later if needed.
LOG.error("Error fetching active master information", e);
}
} | 3.68 |
open-banking-gateway_Xs2aValidator_validate | /**
* Validates that all parameters necessary to perform the ASPSP API call are present.
* In {@link de.adorsys.opba.protocol.bpmnshared.dto.context.ContextMode#MOCK_REAL_CALLS} mode it
* reports all violations into {@link BaseContext#getViolations()} (merging them with already existing ones).
*
* @param exec Current execution that will be updated with violations if present.
* @param dtosToValidate ASPSP API call parameter objects to validate.
*/
public <T> void validate(DelegateExecution exec, Xs2aContext context, Class<T> invokerClass, Object... dtosToValidate) {
Set<ConstraintViolation<Object>> allErrors = new HashSet<>();
FieldsToIgnoreLoader fieldsToIgnoreLoader = context.getRequestScoped().fieldsToIgnoreLoader();
Map<FieldCode, IgnoreValidationRule> rulesMap = fieldsToIgnoreLoader.getIgnoreValidationRules(
invokerClass,
context.getActiveScaApproach()
);
for (Object value : dtosToValidate) {
Set<ConstraintViolation<Object>> errors = validator.validate(value)
.stream()
.filter(f -> isFieldMandatory(f, context, rulesMap))
.collect(Collectors.toSet());
allErrors.addAll(errors);
}
if (allErrors.isEmpty()) {
return;
}
ContextUtil.getAndUpdateContext(
exec,
(BaseContext ctx) -> {
ctx.getViolations().addAll(allErrors.stream().map(this::toIssue).collect(Collectors.toSet()));
// Only when doing real calls validations cause termination of flow
// TODO: Those validation in real call should be propagated and handled
if (REAL_CALLS == ctx.getMode()) {
log.error("Fatal validation error for requestId={},sagaId={} - violations {}", ctx.getRequestId(), ctx.getSagaId(), allErrors);
throw new ValidationIssueException();
}
}
);
} | 3.68 |
cron-utils_FieldDayOfWeekDefinitionBuilder_withMondayDoWValue | /**
* Registers the integer value that represents Monday for the day-of-week field,
* shifting the existing string mapping accordingly.
*
* @param mondayDoW the integer value to be used for Monday
* @return this FieldDayOfWeekDefinitionBuilder instance
*/
public FieldDayOfWeekDefinitionBuilder withMondayDoWValue(final int mondayDoW) {
constraints.withShiftedStringMapping(mondayDoW - mondayDoWValue);
mondayDoWValue = mondayDoW;
return this;
} | 3.68 |
flink_NFACompiler_createGroupPatternState | /**
* Create all the states for the group pattern.
*
* @param groupPattern the group pattern to create the states for
* @param sinkState the state that the group pattern being converted should point to
* @param proceedState the state that the group pattern being converted should proceed to
* @param isOptional whether the group pattern being converted is optional
* @return the first state of the states of the group pattern
*/
private State<T> createGroupPatternState(
final GroupPattern<T, ?> groupPattern,
final State<T> sinkState,
final State<T> proceedState,
final boolean isOptional) {
final IterativeCondition<T> proceedCondition = getTrueFunction();
Pattern<T, ?> oldCurrentPattern = currentPattern;
Pattern<T, ?> oldFollowingPattern = followingPattern;
GroupPattern<T, ?> oldGroupPattern = currentGroupPattern;
State<T> lastSink = sinkState;
currentGroupPattern = groupPattern;
currentPattern = groupPattern.getRawPattern();
lastSink = createMiddleStates(lastSink);
lastSink = convertPattern(lastSink);
if (isOptional) {
// for the first state of a group pattern, its PROCEED edge should point to
// the following state of that group pattern
lastSink.addProceed(proceedState, proceedCondition);
}
currentPattern = oldCurrentPattern;
followingPattern = oldFollowingPattern;
currentGroupPattern = oldGroupPattern;
return lastSink;
} | 3.68 |
morf_UpgradePathFinder_schemasNotMatch | /**
* Wraps calls to the {@link SchemaHomology} to provide debug logging.
*
* @param targetSchema First schema to compare.
* @param trialSchema Second schema to compare.
* @param firstSchemaContext Context of the target schema for logging.
* @param secondScehmaContext Context of the trial schema for logging.
* @param exceptionRegexes Regular expressions for the table exceptions.
* @return True if the schemas don't match.
*/
private boolean schemasNotMatch(Schema targetSchema, Schema trialSchema, String firstSchemaContext, String secondScehmaContext, Collection<String> exceptionRegexes) {
log.info("Comparing schema [" + firstSchemaContext + "] to [" + secondScehmaContext + "]");
DifferenceWriter differenceWriter = log::warn;
SchemaHomology homology = new SchemaHomology(differenceWriter, firstSchemaContext, secondScehmaContext );
if (homology.schemasMatch(targetSchema, trialSchema, exceptionRegexes)) {
log.info("Schemas match");
return false;
} else {
log.info("Schema differences found");
return true;
}
} | 3.68 |
hadoop_HashPartitioner_getPartition | /** Use {@link Object#hashCode()} to partition. */
public int getPartition(K2 key, V2 value,
int numReduceTasks) {
return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
} | 3.68 |
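The & Integer.MAX_VALUE mask clears the sign bit, so keys with a negative hashCode() still map to a valid partition; a standalone illustration of the arithmetic:

```java
int numReduceTasks = 4;
int hash = -17;                                               // stands in for a negative hashCode()
int partition = (hash & Integer.MAX_VALUE) % numReduceTasks;  // 3
// The result is always in [0, numReduceTasks), whereas -17 % 4 alone would yield -1.
```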
hudi_Hive3Shim_getTimestampWriteable | /**
* Gets a timestamp writable object from a long value.
* Hive 3 uses TimestampWritableV2 to build timestamp objects while Hive 2 uses TimestampWritable,
* so the timestamp has to be initialized according to the Hive version in use.
*/
public Writable getTimestampWriteable(long value, boolean timestampMillis) {
try {
Object timestamp = TIMESTAMP_CLASS.newInstance();
SET_TIME_IN_MILLIS.invoke(timestamp, timestampMillis ? value : value / 1000);
return (Writable) TIMESTAMP_WRITEABLE_V2_CONSTRUCTOR.newInstance(timestamp);
} catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
throw new HoodieException("can not create writable v2 class!", e);
}
} | 3.68 |
hadoop_SelectBinding_isEnabled | /**
* Is the service supported?
* @return true iff select is enabled.
*/
public boolean isEnabled() {
return enabled;
} | 3.68 |
hadoop_IOStatisticsBinding_passthroughFn | /**
* A passthrough copy operation suitable for immutable
* types, including numbers.
*
* @param <E> type of values.
* @param src source object
* @return the source object
*/
public static <E extends Serializable> E passthroughFn(E src) {
return src;
} | 3.68 |
flink_EnvironmentSettings_inBatchMode | /** Sets that the components should work in a batch mode. Streaming mode by default. */
public Builder inBatchMode() {
configuration.set(RUNTIME_MODE, BATCH);
return this;
} | 3.68 |
hadoop_ContainerId_toString | /**
* @return A string representation of containerId. The format is
* container_e*epoch*_*clusterTimestamp*_*appId*_*attemptId*_*containerId*
* when epoch is larger than 0
* (e.g. container_e17_1410901177871_0001_01_000005).
* *epoch* is increased when RM restarts or fails over.
* When epoch is 0, epoch is omitted
* (e.g. container_1410901177871_0001_01_000005).
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder(64);
sb.append(CONTAINER_PREFIX);
long epoch = getContainerId() >> 40;
if (epoch > 0) {
sb.append(EPOCH_PREFIX);
FastNumberFormat.format(sb, epoch, EPOCH_MIN_DIGITS);
sb.append('_');
}
ApplicationId appId = getApplicationAttemptId().getApplicationId();
sb.append(appId.getClusterTimestamp())
.append('_');
FastNumberFormat.format(sb, appId.getId(), APP_ID_MIN_DIGITS);
sb.append('_');
FastNumberFormat.format(sb, getApplicationAttemptId().getAttemptId(),
ATTEMPT_ID_MIN_DIGITS);
sb.append('_');
FastNumberFormat.format(sb, CONTAINER_ID_BITMASK & getContainerId(),
CONTAINER_ID_MIN_DIGITS);
return sb.toString();
} | 3.68 |
dubbo_DubboBeanUtils_registerInfrastructureBean | /**
* Register Infrastructure Bean
*
* @param beanDefinitionRegistry {@link BeanDefinitionRegistry}
* @param beanType the type of bean
* @param beanName the name of bean
* @return <code>true</code> if this is the first time the bean is registered, otherwise <code>false</code>
*/
static boolean registerInfrastructureBean(
BeanDefinitionRegistry beanDefinitionRegistry, String beanName, Class<?> beanType) {
boolean registered = false;
if (!beanDefinitionRegistry.containsBeanDefinition(beanName)) {
RootBeanDefinition beanDefinition = new RootBeanDefinition(beanType);
beanDefinition.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
beanDefinitionRegistry.registerBeanDefinition(beanName, beanDefinition);
registered = true;
if (log.isDebugEnabled()) {
log.debug("The Infrastructure bean definition [" + beanDefinition + "with name [" + beanName
+ "] has been registered.");
}
}
return registered;
} | 3.68 |
hadoop_TypedBytesOutput_writeString | /**
* Writes a string as a typed bytes sequence.
*
* @param s the string to be written
* @throws IOException
*/
public void writeString(String s) throws IOException {
out.write(Type.STRING.code);
WritableUtils.writeString(out, s);
} | 3.68 |
flink_VertexFlameGraphFactory_createOnCpuFlameGraph | /**
* Converts {@link VertexThreadInfoStats} into a FlameGraph representing actively running
* (On-CPU) threads.
*
* <p>Includes threads in states Thread.State.[RUNNABLE, NEW].
*
* @param sample Thread details sample containing stack traces.
* @return FlameGraph data structure
*/
public static VertexFlameGraph createOnCpuFlameGraph(VertexThreadInfoStats sample) {
EnumSet<Thread.State> included = EnumSet.of(Thread.State.RUNNABLE, Thread.State.NEW);
return createFlameGraphFromSample(sample, included);
} | 3.68 |
druid_SpringIbatisBeanTypeAutoProxyCreator_isMatch | /**
* Return if the given bean name matches the mapped name.
* <p>
* The default implementation checks for "xxx*", "*xxx" and "*xxx*" matches, as well as direct equality. Can be
* overridden in subclasses.
*
* @param beanName the bean name to check
* @param mappedName the name in the configured list of names
* @return if the names match
* @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
*/
protected boolean isMatch(String beanName, String mappedName) {
return PatternMatchUtils.simpleMatch(mappedName, beanName);
} | 3.68 |
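The wildcard semantics this delegates to, illustrated (pattern first, candidate bean name second):

```java
import org.springframework.util.PatternMatchUtils;

boolean a = PatternMatchUtils.simpleMatch("*Dao", "userDao");        // true  ("*xxx")
boolean b = PatternMatchUtils.simpleMatch("sql*", "sqlMapClient");   // true  ("xxx*")
boolean c = PatternMatchUtils.simpleMatch("*Map*", "sqlMapClient");  // true  ("*xxx*")
boolean d = PatternMatchUtils.simpleMatch("userDao", "orderDao");    // false (direct equality only)
```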
flink_ThreadDumpInfo_stringifyThreadInfo | /**
* Custom stringify format of JVM thread info to bypass the MAX_FRAMES = 8 limitation.
*
* <p>This method is based on
* https://github.com/openjdk/jdk/blob/master/src/java.management/share/classes/java/lang/management/ThreadInfo.java#L597
*/
@VisibleForTesting
protected static String stringifyThreadInfo(
java.lang.management.ThreadInfo threadInfo, int maxDepth) {
StringBuilder sb =
new StringBuilder(
"\""
+ threadInfo.getThreadName()
+ "\""
+ " Id="
+ threadInfo.getThreadId()
+ " "
+ threadInfo.getThreadState());
if (threadInfo.getLockName() != null) {
sb.append(" on " + threadInfo.getLockName());
}
if (threadInfo.getLockOwnerName() != null) {
sb.append(
" owned by \""
+ threadInfo.getLockOwnerName()
+ "\" Id="
+ threadInfo.getLockOwnerId());
}
if (threadInfo.isSuspended()) {
sb.append(" (suspended)");
}
if (threadInfo.isInNative()) {
sb.append(" (in native)");
}
sb.append('\n');
int i = 0;
StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
for (; i < stackTraceElements.length && i < maxDepth; i++) {
StackTraceElement ste = stackTraceElements[i];
sb.append("\tat " + ste.toString());
sb.append('\n');
if (i == 0 && threadInfo.getLockInfo() != null) {
Thread.State ts = threadInfo.getThreadState();
switch (ts) {
case BLOCKED:
sb.append("\t- blocked on " + threadInfo.getLockInfo());
sb.append('\n');
break;
case WAITING:
case TIMED_WAITING:
sb.append("\t- waiting on " + threadInfo.getLockInfo());
sb.append('\n');
break;
default:
}
}
for (MonitorInfo mi : threadInfo.getLockedMonitors()) {
if (mi.getLockedStackDepth() == i) {
sb.append("\t- locked " + mi);
sb.append('\n');
}
}
}
if (i < threadInfo.getStackTrace().length) {
sb.append("\t...");
sb.append('\n');
}
LockInfo[] locks = threadInfo.getLockedSynchronizers();
if (locks.length > 0) {
sb.append("\n\tNumber of locked synchronizers = " + locks.length);
sb.append('\n');
for (LockInfo li : locks) {
sb.append("\t- " + li);
sb.append('\n');
}
}
sb.append('\n');
return sb.toString();
} | 3.68 |
framework_Heartbeat_send | /**
* Sends a heartbeat to the server.
*/
public void send() {
timer.cancel();
final RequestBuilder rb = new RequestBuilder(RequestBuilder.POST, uri);
XhrConnection.addXsrfHeaderFromCookie(rb);
final RequestCallback callback = new RequestCallback() {
@Override
public void onResponseReceived(Request request, Response response) {
int status = response.getStatusCode();
if (status == Response.SC_OK) {
connection.getConnectionStateHandler().heartbeatOk();
} else {
// Handler should stop the application if heartbeat should
// no longer be sent
connection.getConnectionStateHandler()
.heartbeatInvalidStatusCode(request, response);
}
schedule();
}
@Override
public void onError(Request request, Throwable exception) {
// Handler should stop the application if heartbeat should no
// longer be sent
connection.getConnectionStateHandler()
.heartbeatException(request, exception);
schedule();
}
};
rb.setCallback(callback);
try {
getLogger().fine("Sending heartbeat request...");
rb.send();
} catch (RequestException re) {
callback.onError(null, re);
}
} | 3.68 |
framework_LegacyCommunicationManager_registerDependency | /**
* Resolves a dependency URI, registering the URI with this
* {@code LegacyCommunicationManager} if needed and returns a fully
* qualified URI.
*
* @deprecated As of 7.1. See #11413.
*/
@Deprecated
public String registerDependency(String resourceUri, Class<?> context) {
try {
URI uri = new URI(resourceUri);
String protocol = uri.getScheme();
if (ApplicationConstants.PUBLISHED_PROTOCOL_NAME.equals(protocol)) {
// Strip initial slash
String resourceName = uri.getPath().substring(1);
return registerPublishedFile(resourceName, context);
}
if (protocol != null || uri.getHost() != null) {
return resourceUri;
}
// Bare path interpreted as published file
return registerPublishedFile(resourceUri, context);
} catch (URISyntaxException e) {
getLogger().log(Level.WARNING,
"Could not parse resource url " + resourceUri, e);
return resourceUri;
}
} | 3.68 |
hadoop_HdfsLocatedFileStatus_makeQualifiedLocated | /**
* This function is used to transform the underlying HDFS LocatedBlocks to
* BlockLocations. This method must be invoked before
* {@link #getBlockLocations()}.
*
* The returned BlockLocation will have different formats for replicated
* and erasure coded file.
* Please refer to
* {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations
* (FileStatus, long, long)}
* for examples.
*/
public LocatedFileStatus makeQualifiedLocated(URI defaultUri, Path path) {
makeQualified(defaultUri, path);
setBlockLocations(
DFSUtilClient.locatedBlocks2Locations(getLocatedBlocks()));
return this;
} | 3.68 |
flink_MemorySize_getMebiBytes | /** Gets the memory size in Mebibytes (= 1024 Kibibytes). */
public int getMebiBytes() {
return (int) (bytes >> 20);
} | 3.68 |
hbase_HBaseTestingUtility_startMiniCluster | /**
* Start up a mini cluster of hbase, optionally dfs and zookeeper if needed. It modifies
* Configuration. It homes the cluster data directory under a random subdirectory in a directory
* under System property test.build.data, to be cleaned up on exit.
* @see #shutdownMiniDFSCluster()
*/
public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
LOG.info("Starting up minicluster with option: {}", option);
// If we already put up a cluster, fail.
if (miniClusterRunning) {
throw new IllegalStateException("A mini-cluster is already running");
}
miniClusterRunning = true;
setupClusterTestDir();
System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
// Bring up mini dfs cluster. This spews a bunch of warnings about missing
// scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
if (dfsCluster == null) {
LOG.info("STARTING DFS");
dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
} else {
LOG.info("NOT STARTING DFS");
}
// Start up a zk cluster.
if (getZkCluster() == null) {
startMiniZKCluster(option.getNumZkServers());
}
// Start the MiniHBaseCluster
return startMiniHBaseCluster(option);
} | 3.68 |
hbase_BloomFilterUtil_actualErrorRate | /**
* Computes the actual error rate for the given number of elements, number of bits, and number of
* hash functions. Taken directly from the
* <a href= "http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives" > Wikipedia
* Bloom filter article</a>.
* @return the actual error rate
*/
public static double actualErrorRate(long maxKeys, long bitSize, int functionCount) {
return Math
.exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 / bitSize)) * functionCount);
} | 3.68 |
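Written out with n = maxKeys, m = bitSize and k = functionCount, the method evaluates the standard false-positive formula, rearranged through exp/ln instead of Math.pow:

P_{fp} = \left(1 - e^{-k n / m}\right)^{k} = \exp\!\left(k \cdot \ln\!\left(1 - e^{-k n / m}\right)\right)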
flink_SqlGatewayOptionsParser_printHelpSqlGateway | /** Prints the help for the client. */
public static void printHelpSqlGateway(PrintStream writer) {
writer.println();
printHelpForStart(writer);
} | 3.68 |
druid_Utils_murmurhash2_64 | /**
* Murmur hash 2.0. The murmur hash is a relatively fast hash function from http://murmurhash.googlepages.com/ for
* platforms with efficient multiplication.
*
* @author Viliam Holub
*/
public static long murmurhash2_64(final byte[] data, int length, int seed) {
final long m = 0xc6a4a7935bd1e995L;
final int r = 47;
long h = (seed & 0xffffffffL) ^ (length * m);
int length8 = length / 8;
for (int i = 0; i < length8; i++) {
final int i8 = i * 8;
long k = ((long) data[i8 + 0] & 0xff) //
+ (((long) data[i8 + 1] & 0xff) << 8) //
+ (((long) data[i8 + 2] & 0xff) << 16)//
+ (((long) data[i8 + 3] & 0xff) << 24) //
+ (((long) data[i8 + 4] & 0xff) << 32)//
+ (((long) data[i8 + 5] & 0xff) << 40)//
+ (((long) data[i8 + 6] & 0xff) << 48) //
+ (((long) data[i8 + 7] & 0xff) << 56);
k *= m;
k ^= k >>> r;
k *= m;
h ^= k;
h *= m;
}
switch (length % 8) {
case 7:
h ^= (long) (data[(length & ~7) + 6] & 0xff) << 48;
case 6:
h ^= (long) (data[(length & ~7) + 5] & 0xff) << 40;
case 5:
h ^= (long) (data[(length & ~7) + 4] & 0xff) << 32;
case 4:
h ^= (long) (data[(length & ~7) + 3] & 0xff) << 24;
case 3:
h ^= (long) (data[(length & ~7) + 2] & 0xff) << 16;
case 2:
h ^= (long) (data[(length & ~7) + 1] & 0xff) << 8;
case 1:
h ^= (long) (data[length & ~7] & 0xff);
h *= m;
}
h ^= h >>> r;
h *= m;
h ^= h >>> r;
return h;
} | 3.68 |
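A minimal call sketch; the payload and seed below are arbitrary illustration values:

```java
import java.nio.charset.StandardCharsets;

byte[] data = "example-key".getBytes(StandardCharsets.UTF_8);
long h = Utils.murmurhash2_64(data, data.length, 0x9747b28c);  // 64-bit hash over the whole array
```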
pulsar_TxnLogBufferedWriter_asyncAddData | /**
* Append a new entry to the end of a managed ledger. All writes will be performed in the same thread. Callbacks are
* executed in strict write order, but after {@link #close()}, callbacks that fail the state check will execute
* earlier, and successful callbacks will not be affected.
* @param data data entry to be persisted.
* @param callback Will call {@link AddDataCallback#addComplete(Position, Object)} when
* add complete.
* Will call {@link AddDataCallback#addFailed(ManagedLedgerException, Object)} when
* add failure.
* @throws ManagedLedgerException
*/
public void asyncAddData(T data, AddDataCallback callback, Object ctx){
if (!batchEnabled){
if (state == State.CLOSING || state == State.CLOSED){
callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
return;
}
ByteBuf byteBuf = dataSerializer.serialize(data);
managedLedger.asyncAddEntry(byteBuf, DisabledBatchCallback.INSTANCE,
AsyncAddArgs.newInstance(callback, ctx, System.currentTimeMillis(), byteBuf));
return;
}
CompletableFuture
.runAsync(
() -> internalAsyncAddData(data, callback, ctx), singleThreadExecutorForWrite)
.exceptionally(e -> {
log.warn("Execute 'internalAsyncAddData' fail", e);
return null;
});
} | 3.68 |
hadoop_RouterRMAdminService_getRootInterceptor | /**
* Gets the root request interceptor.
*
* @return the root request interceptor
*/
public synchronized RMAdminRequestInterceptor getRootInterceptor() {
return rootInterceptor;
} | 3.68 |
morf_DeleteStatementBuilder_getTable | /**
* Gets the table being deleted from.
*
* @return the table being deleted from
*/
TableReference getTable() {
return table;
} | 3.68 |
flink_MessageSerializer_writePayload | /**
* Helper for serializing the messages.
*
* @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the
* message into.
* @param requestId The id of the request to which the message refers to.
* @param messageType The {@link MessageType type of the message}.
* @param payload The serialized version of the message.
* @return A {@link ByteBuf} containing the serialized message.
*/
private static ByteBuf writePayload(
final ByteBufAllocator alloc,
final long requestId,
final MessageType messageType,
final byte[] payload) {
final int frameLength = HEADER_LENGTH + REQUEST_ID_SIZE + payload.length;
final ByteBuf buf = alloc.ioBuffer(frameLength + Integer.BYTES);
buf.writeInt(frameLength);
writeHeader(buf, messageType);
buf.writeLong(requestId);
buf.writeBytes(payload);
return buf;
} | 3.68 |
pulsar_LoadManagerShared_getSystemResourceUsage | // Get the system resource usage for this broker.
public static SystemResourceUsage getSystemResourceUsage(final BrokerHostUsage brokerHostUsage) {
SystemResourceUsage systemResourceUsage = brokerHostUsage.getBrokerHostUsage();
// Override System memory usage and limit with JVM heap usage and limit
double maxHeapMemoryInBytes = Runtime.getRuntime().maxMemory();
double memoryUsageInBytes = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
double memoryUsage = memoryUsageInBytes / MIBI;
double memoryLimit = maxHeapMemoryInBytes / MIBI;
systemResourceUsage.setMemory(new ResourceUsage(memoryUsage, memoryLimit));
// Collect JVM direct memory
systemResourceUsage.setDirectMemory(new ResourceUsage((double) (getJvmDirectMemoryUsed() / MIBI),
(double) (DirectMemoryUtils.jvmMaxDirectMemory() / MIBI)));
return systemResourceUsage;
} | 3.68 |
morf_OracleDialect_triggerName | /**
* Form the standard name for a table's autonumber trigger.
*
* @param tableName Name of the table for which the trigger name is required.
* @return Name of trigger.
*/
private String triggerName(String tableName) {
return truncatedTableNameWithSuffix(tableName, "_TG").toUpperCase();
} | 3.68 |
morf_MySqlDialect_getSqlForOrderByField | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForOrderByField(org.alfasoftware.morf.sql.element.FieldReference)
*/
@Override
protected String getSqlForOrderByField(FieldReference orderByField) {
StringBuilder result = new StringBuilder();
String sqlFromField = getSqlFrom(orderByField);
if (orderByField.getNullValueHandling().isPresent()) {
switch (orderByField.getNullValueHandling().get()) {
case FIRST:
result.append("-ISNULL(").append(sqlFromField).append("), ");
break;
case LAST:
result.append("ISNULL(").append(sqlFromField).append("), ");
break;
case NONE:
default:
break;
}
}
result.append(sqlFromField);
switch (orderByField.getDirection()) {
case DESCENDING:
result.append(" DESC");
break;
case ASCENDING:
case NONE:
default:
break;
}
return result.toString().trim();
} | 3.68 |
hadoop_ExternalSPSFilePathCollector_remainingCapacity | /**
* Returns queue remaining capacity.
*/
public int remainingCapacity() {
int size = service.processingQueueSize();
int remainingSize = 0;
if (size < maxQueueLimitToScan) {
remainingSize = maxQueueLimitToScan - size;
}
if (LOG.isDebugEnabled()) {
LOG.debug("SPS processing Q -> maximum capacity:{}, current size:{},"
+ " remaining size:{}", maxQueueLimitToScan, size, remainingSize);
}
return remainingSize;
} | 3.68 |
flink_LatencyMarker_getMarkedTime | /** Returns the timestamp marked by the LatencyMarker. */
public long getMarkedTime() {
return markedTime;
} | 3.68 |
flink_SqlValidatorUtils_adjustTypeForMultisetConstructor | /**
* When the element type does not equal the component type, an explicit cast is added.
*
* @param evenType derived type for element with even index
* @param oddType derived type for element with odd index
* @param sqlCallBinding description of call
*/
private static void adjustTypeForMultisetConstructor(
RelDataType evenType, RelDataType oddType, SqlCallBinding sqlCallBinding) {
SqlCall call = sqlCallBinding.getCall();
List<RelDataType> operandTypes = sqlCallBinding.collectOperandTypes();
List<SqlNode> operands = call.getOperandList();
RelDataType elementType;
for (int i = 0; i < operands.size(); i++) {
if (i % 2 == 0) {
elementType = evenType;
} else {
elementType = oddType;
}
if (operandTypes.get(i).equalsSansFieldNames(elementType)) {
continue;
}
call.setOperand(i, castTo(operands.get(i), elementType));
}
} | 3.68 |
hibernate-validator_ValidationInterceptor_validateConstructorInvocation | /**
* Validates the Bean Validation constraints specified at the parameters and/or return value of the intercepted constructor.
*
* @param ctx The context of the intercepted constructor invocation.
*
* @throws Exception Any exception caused by the intercepted constructor invocation. A {@link ConstraintViolationException}
* in case at least one constraint violation occurred either during parameter or return value validation.
*/
@AroundConstruct
public void validateConstructorInvocation(InvocationContext ctx) throws Exception {
ExecutableValidator executableValidator = validator.forExecutables();
Set<? extends ConstraintViolation<?>> violations = executableValidator.validateConstructorParameters(
ctx.getConstructor(),
ctx.getParameters()
);
if ( !violations.isEmpty() ) {
throw new ConstraintViolationException(
getMessage( ctx.getConstructor(), ctx.getParameters(), violations ),
violations
);
}
ctx.proceed();
Object createdObject = ctx.getTarget();
violations = validator.forExecutables().validateConstructorReturnValue(
ctx.getConstructor(),
createdObject
);
if ( !violations.isEmpty() ) {
throw new ConstraintViolationException(
getMessage( ctx.getConstructor(), ctx.getParameters(), violations ),
violations
);
}
} | 3.68 |
flink_StreamExecutionEnvironment_setMaxParallelism | /**
* Sets the maximum degree of parallelism defined for the program. The upper limit (inclusive)
* is Short.MAX_VALUE + 1.
*
* <p>The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
* defines the number of key groups used for partitioned state.
*
* @param maxParallelism Maximum degree of parallelism to be used for the program, with {@code
* 0 < maxParallelism <= 2^15}.
*/
public StreamExecutionEnvironment setMaxParallelism(int maxParallelism) {
Preconditions.checkArgument(
maxParallelism > 0
&& maxParallelism <= KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM,
"maxParallelism is out of bounds 0 < maxParallelism <= "
+ KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM
+ ". Found: "
+ maxParallelism);
config.setMaxParallelism(maxParallelism);
return this;
} | 3.68 |
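A short configuration sketch; both setters are part of Flink's public StreamExecutionEnvironment API:

```java
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(4);       // parallelism the job starts with
env.setMaxParallelism(128);  // upper bound for rescaling; also fixes the number of key groups
```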
framework_AbstractDateField_setDateStyle | /**
* <p>
* Sets a custom style name for the given date's calendar cell. Setting the
* style name will override any previous style names that have been set for
* that date, but can contain several actual style names separated by space.
* Setting the custom style name {@code null} will only remove the previous
* custom style name.
* </p>
* <p>
* This logic is entirely separate from {@link #setStyleName(String)}
* </p>
* <p>
* Usage examples: <br>
* {@code setDateStyle(LocalDate.now(), "teststyle");} <br>
* {@code setDateStyle(LocalDate.now(), "teststyle1 teststyle2");}
* </p>
*
* @param date
* which date cell to modify, not {@code null}
* @param styleName
* the custom style name(s) for given date, {@code null} to clear
* custom style name(s)
*
* @since 8.3
*/
public void setDateStyle(LocalDate date, String styleName) {
Objects.requireNonNull(date, "Date cannot be null");
if (styleName != null) {
getState().dateStyles.put(date.toString(), styleName);
} else {
getState().dateStyles.remove(date.toString());
}
} | 3.68 |
hadoop_Name_registerExpression | /** Registers this expression with the specified factory. */
public static void registerExpression(ExpressionFactory factory)
throws IOException {
factory.addClass(Name.class, "-name");
factory.addClass(Iname.class, "-iname");
} | 3.68 |
flink_ManagedTableListener_enrichOptions | /** Enrich options for creating managed table. */
private ResolvedCatalogTable enrichOptions(
ObjectIdentifier identifier, ResolvedCatalogBaseTable<?> table, boolean isTemporary) {
if (!(table instanceof ResolvedCatalogTable)) {
throw new UnsupportedOperationException(
"Managed table only supports catalog table, unsupported table type: "
+ table.getClass());
}
ResolvedCatalogTable resolvedTable = (ResolvedCatalogTable) table;
Map<String, String> newOptions =
discoverManagedTableFactory(classLoader)
.enrichOptions(
createTableFactoryContext(identifier, resolvedTable, isTemporary));
return resolvedTable.copy(newOptions);
} | 3.68 |
framework_Notification_setDelayMsec | /**
* Sets the delay before the notification disappears.
*
* @param delayMsec
* the desired delay in milliseconds, {@value #DELAY_FOREVER} to
* require the user to click the message
*/
public void setDelayMsec(int delayMsec) {
getState().delay = delayMsec;
} | 3.68 |
hbase_ConnectionCache_setEffectiveUser | /**
* Set the current thread local effective user
*/
public void setEffectiveUser(String user) {
effectiveUserNames.set(user);
} | 3.68 |
framework_ResourceLoader_loadScript | /**
* Load a script and notify a listener when the script is loaded. Calling
* this method when the script is currently loading or already loaded
* doesn't cause the script to be loaded again, but the listener will still
* be notified when appropriate.
*
* @param scriptUrl
* the url of the script to load
* @param resourceLoadListener
* the listener that will get notified when the script is loaded
*/
public void loadScript(final String scriptUrl,
final ResourceLoadListener resourceLoadListener) {
final String url = WidgetUtil.getAbsoluteUrl(scriptUrl);
ResourceLoadEvent event = new ResourceLoadEvent(this, url);
if (loadedResources.contains(url)) {
if (resourceLoadListener != null) {
resourceLoadListener.onLoad(event);
}
return;
}
if (addListener(url, resourceLoadListener, loadListeners)) {
getLogger().info("Loading script from " + url);
ScriptElement scriptTag = Document.get().createScriptElement();
scriptTag.setSrc(url);
scriptTag.setType("text/javascript");
// async=false causes script injected scripts to be executed in the
// injection order. See e.g.
// https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script
scriptTag.setPropertyBoolean("async", false);
addOnloadHandler(scriptTag, new ResourceLoadListener() {
@Override
public void onLoad(ResourceLoadEvent event) {
fireLoad(event);
}
@Override
public void onError(ResourceLoadEvent event) {
fireError(event);
}
}, event);
head.appendChild(scriptTag);
}
} | 3.68 |
flink_FrontMetricGroup_getLogicalScope | /** @deprecated work against the LogicalScopeProvider interface instead. */
@Override
@Deprecated
public String getLogicalScope(CharacterFilter filter, char delimiter) {
return parentMetricGroup.getLogicalScope(
getDelimiterFilter(this.settings, filter),
delimiter,
this.settings.getReporterIndex());
} | 3.68 |
hudi_OptionsResolver_hasNoSpecificReadCommits | /**
* Returns true if there are no explicit start and end commits.
*/
public static boolean hasNoSpecificReadCommits(Configuration conf) {
return !conf.contains(FlinkOptions.READ_START_COMMIT) && !conf.contains(FlinkOptions.READ_END_COMMIT);
} | 3.68 |
zxing_Version_getVersionForDimensions | /**
* <p>Deduces version information from Data Matrix dimensions.</p>
*
* @param numRows Number of rows in modules
* @param numColumns Number of columns in modules
* @return Version for a Data Matrix Code of those dimensions
* @throws FormatException if dimensions do not correspond to a valid Data Matrix size
*/
public static Version getVersionForDimensions(int numRows, int numColumns) throws FormatException {
if ((numRows & 0x01) != 0 || (numColumns & 0x01) != 0) {
throw FormatException.getFormatInstance();
}
for (Version version : VERSIONS) {
if (version.symbolSizeRows == numRows && version.symbolSizeColumns == numColumns) {
return version;
}
}
throw FormatException.getFormatInstance();
} | 3.68 |
hadoop_CommitContext_getConf | /**
* Job configuration.
* @return configuration (never null)
*/
public Configuration getConf() {
return conf;
} | 3.68 |
hbase_FSTableDescriptors_deleteTableDescriptorFiles | /**
* Deletes files matching the table info file pattern within the given directory whose sequenceId
* is at most the given max sequenceId.
*/
private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
throws IOException {
FileStatus[] status = CommonFSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
for (FileStatus file : status) {
Path path = file.getPath();
int sequenceId = getTableInfoSequenceIdAndFileLength(path).sequenceId;
if (sequenceId <= maxSequenceId) {
boolean success = CommonFSUtils.delete(fs, path, false);
if (success) {
LOG.debug("Deleted {}", path);
} else {
LOG.error("Failed to delete table descriptor at {}", path);
}
}
}
} | 3.68 |
flink_JobMasterId_fromUuidOrNull | /**
* If the given uuid is null, this returns null, otherwise a JobMasterId that corresponds to the
* UUID, via {@link #JobMasterId(UUID)}.
*/
public static JobMasterId fromUuidOrNull(@Nullable UUID uuid) {
return uuid == null ? null : new JobMasterId(uuid);
} | 3.68 |
pulsar_JsonRecordBuilderImpl_set | /**
* Sets the value of a field.
*
* @param field the field to set.
* @param value the value to set.
* @return a reference to the RecordBuilder.
*/
@Override
public GenericRecordBuilder set(Field field, Object value) {
set(field.getName(), value);
return this;
} | 3.68 |
hadoop_CommitContext_revertCommit | /**
* See {@link CommitOperations#revertCommit(SinglePendingCommit)}.
* @param commit pending commit
* @throws IOException failure
*/
public void revertCommit(final SinglePendingCommit commit)
throws IOException {
commitOperations.revertCommit(commit);
} | 3.68 |
flink_EventTimeTriggers_withEarlyFirings | /**
* Creates a new {@code Trigger} like the this, except that it fires repeatedly whenever the
* given {@code Trigger} fires before the watermark has passed the end of the window.
*/
public AfterEndOfWindowNoLate<W> withEarlyFirings(Trigger<W> earlyFirings) {
checkNotNull(earlyFirings);
return new AfterEndOfWindowNoLate<>(earlyFirings);
} | 3.68 |