name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68)
---|---|---|
cron-utils_StringUtils_containsAny | /**
 * <p>Checks if the CharSequence contains any character in the given
* set of characters.</p>
*
* <p>A {@code null} CharSequence will return {@code false}.
* A {@code null} or zero length search array will return {@code false}.</p>
*
* <pre>
* StringUtils.containsAny(null, *) = false
* StringUtils.containsAny("", *) = false
* StringUtils.containsAny(*, null) = false
* StringUtils.containsAny(*, []) = false
* StringUtils.containsAny("zzabyycdxx",['z','a']) = true
* StringUtils.containsAny("zzabyycdxx",['b','y']) = true
* StringUtils.containsAny("aba", ['z']) = false
* </pre>
*
* @param cs the CharSequence to check, may be null
* @param searchChars the chars to search for, may be null
 * @return {@code true} if any of the chars are found,
* {@code false} if no match or null input
* @since 2.4
* @since 3.0 Changed signature from containsAny(String, char[]) to containsAny(CharSequence, char...)
*/
public static boolean containsAny(final CharSequence cs, final char... searchChars) {
if (isEmpty(cs) || searchChars == null || searchChars.length == 0) {
return false;
}
final int csLength = cs.length();
final int searchLength = searchChars.length;
final int csLast = csLength - 1;
final int searchLast = searchLength - 1;
for (int i = 0; i < csLength; i++) {
final char ch = cs.charAt(i);
for (int j = 0; j < searchLength; j++) {
if (searchChars[j] == ch) {
if (Character.isHighSurrogate(ch)) {
if (j == searchLast) {
// missing low surrogate, fine, like String.indexOf(String)
return true;
}
if (i < csLast && searchChars[j + 1] == cs.charAt(i + 1)) {
return true;
}
} else {
// ch is in the Basic Multilingual Plane
return true;
}
}
}
}
return false;
} | 3.68 |
framework_CustomizedSystemMessages_setInternalErrorMessage | /**
 * Sets the message of the notification. Set to null for no message. If both
 * the caption and the message are null, the notification is disabled.
*
* @param internalErrorMessage
* the message
*/
public void setInternalErrorMessage(String internalErrorMessage) {
this.internalErrorMessage = internalErrorMessage;
} | 3.68 |
flink_Tuple19_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
* @param f13 The value for field 13
* @param f14 The value for field 14
* @param f15 The value for field 15
* @param f16 The value for field 16
* @param f17 The value for field 17
* @param f18 The value for field 18
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
this.f18 = f18;
} | 3.68 |
flink_CastRuleProvider_exists | /**
* Returns {@code true} if and only if a {@link CastRule} can be resolved for the provided input
* type and target type.
*/
public static boolean exists(LogicalType inputType, LogicalType targetType) {
return resolve(inputType, targetType) != null;
} | 3.68 |
flink_ConfigOptions_floatType | /** Defines that the value of the option should be of {@link Float} type. */
public TypedConfigOptionBuilder<Float> floatType() {
return new TypedConfigOptionBuilder<>(key, Float.class);
} | 3.68 |
hbase_MiniBatchOperationInProgress_getWalEdit | /** Returns the walEdit for the operation (Mutation) at the specified position. */
public WALEdit getWalEdit(int index) {
return this.walEditsFromCoprocessors[getAbsoluteIndex(index)];
} | 3.68 |
framework_GridMultiSelect_isAllSelected | /**
* Returns whether all items are selected or not.
* <p>
 * This is only {@code true} if the user has selected all rows with the select
* all checkbox on client side, or if {@link #selectAll()} has been used
* from server side.
*
* @return {@code true} if all selected, {@code false} if not
* @since 8.12.0
*/
public boolean isAllSelected() {
return model.isAllSelected();
} | 3.68 |
hadoop_ExportedBlockKeys_write | /**
 * Serializes the block token settings and keys to the given output.
 */
@Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(isBlockTokenEnabled);
out.writeLong(keyUpdateInterval);
out.writeLong(tokenLifetime);
currentKey.write(out);
out.writeInt(allKeys.length);
for (int i = 0; i < allKeys.length; i++) {
allKeys[i].write(out);
}
} | 3.68 |
flink_StateDescriptor_getSerializer | /**
* Returns the {@link TypeSerializer} that can be used to serialize the value in the state. Note
 * that the serializer may be initialized lazily and is only guaranteed to exist after calling
* {@link #initializeSerializerUnlessSet(ExecutionConfig)}.
*/
public TypeSerializer<T> getSerializer() {
TypeSerializer<T> serializer = serializerAtomicReference.get();
if (serializer != null) {
return serializer.duplicate();
} else {
throw new IllegalStateException("Serializer not yet initialized.");
}
} | 3.68 |
flink_TSetClientInfoResp_findByName | /** Find the _Fields constant that matches name, or null if it is not found. */
public static _Fields findByName(java.lang.String name) {
return byName.get(name);
} | 3.68 |
hbase_SortedCompactionPolicy_getNextMajorCompactTime | /** Returns when to run the next major compaction. */
public long getNextMajorCompactTime(Collection<HStoreFile> filesToCompact) {
/** Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_PERIOD}. */
long period = comConf.getMajorCompactionPeriod();
if (period <= 0) {
return period;
}
/**
* Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, that
* is, +/- 3.5 days (7 days * 0.5).
*/
double jitterPct = comConf.getMajorCompactionJitter();
if (jitterPct <= 0) {
return period;
}
// deterministic jitter avoids a major compaction storm on restart
OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
if (seed.isPresent()) {
long jitter = Math.round(period * jitterPct);
// Synchronized to ensure one user of random instance at a time.
synchronized (RNG) {
RNG.setSeed(seed.getAsInt());
return period + jitter - Math.round(2L * jitter * RNG.nextDouble());
}
} else {
return 0L;
}
} | 3.68 |
flink_ResourceID_generate | /**
* Generate a random resource id.
*
* @return A random resource id.
*/
public static ResourceID generate() {
return new ResourceID(new AbstractID().toString());
} | 3.68 |
framework_VCalendar_getBackwardListener | /**
 * Get the listener which listens to backward events from the calendar.
 *
 * @return the backward navigation listener
*/
public BackwardListener getBackwardListener() {
return backwardListener;
} | 3.68 |
hudi_BaseFileUtils_readBloomFilterFromMetadata | /**
* Read the bloom filter from the metadata of the given data file.
* @param configuration Configuration
* @param filePath The data file path
* @return a BloomFilter object
*/
public BloomFilter readBloomFilterFromMetadata(Configuration configuration, Path filePath) {
Map<String, String> footerVals =
readFooter(configuration, false, filePath,
HoodieAvroWriteSupport.HOODIE_AVRO_BLOOM_FILTER_METADATA_KEY,
HoodieAvroWriteSupport.OLD_HOODIE_AVRO_BLOOM_FILTER_METADATA_KEY,
HoodieBloomFilterWriteSupport.HOODIE_BLOOM_FILTER_TYPE_CODE);
String footerVal = footerVals.get(HoodieAvroWriteSupport.HOODIE_AVRO_BLOOM_FILTER_METADATA_KEY);
if (null == footerVal) {
// We use old style key "com.uber.hoodie.bloomfilter"
footerVal = footerVals.get(HoodieAvroWriteSupport.OLD_HOODIE_AVRO_BLOOM_FILTER_METADATA_KEY);
}
BloomFilter toReturn = null;
if (footerVal != null) {
if (footerVals.containsKey(HoodieBloomFilterWriteSupport.HOODIE_BLOOM_FILTER_TYPE_CODE)) {
toReturn = BloomFilterFactory.fromString(footerVal,
footerVals.get(HoodieBloomFilterWriteSupport.HOODIE_BLOOM_FILTER_TYPE_CODE));
} else {
toReturn = BloomFilterFactory.fromString(footerVal, BloomFilterTypeCode.SIMPLE.name());
}
}
return toReturn;
} | 3.68 |
hadoop_Find_registerCommands | /**
* Register the names for the count command
*
* @param factory the command factory that will instantiate this class
*/
public static void registerCommands(CommandFactory factory) {
factory.addClass(Find.class, "-find");
} | 3.68 |
framework_Table_addFooterClickListener | /**
* Adds a footer click listener which handles the click events when the user
* clicks on a column footer cell in the Table.
* <p>
* The listener will receive events which contain information about which
* column was clicked and some details about the mouse event.
* </p>
*
* @param listener
* The handler which should handle the footer click events.
*/
public void addFooterClickListener(FooterClickListener listener) {
addListener(TableConstants.FOOTER_CLICK_EVENT_ID,
FooterClickEvent.class, listener,
FooterClickEvent.FOOTER_CLICK_METHOD);
} | 3.68 |
hbase_VisibilityController_checkForReservedVisibilityTagPresence | /**
* Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This tag type is
* reserved and should not be explicitly set by user.
* @param cell The cell under consideration
 * @param pair An optional pair of type {@code <Boolean, Tag>} which is reused if already
 *             set; a new one is created if null is passed
 * @return a pair whose boolean is false if the cell carries a reserved visibility tag; when the
 *         boolean is true, a non-null tag indicates that a string-modified tag was found
*/
private Pair<Boolean, Tag> checkForReservedVisibilityTagPresence(Cell cell,
Pair<Boolean, Tag> pair) throws IOException {
if (pair == null) {
pair = new Pair<>(false, null);
} else {
pair.setFirst(false);
pair.setSecond(null);
}
// Bypass this check when the operation is done by a system/super user.
    // This is done because, during replication, cells can arrive at the peer cluster with
    // reserved typed tags; these are legitimate and should be added to the peer cluster table.
if (isSystemOrSuperUser()) {
      // Does the cell contain the special tag which indicates that the replicated
      // cell visibility tags have been modified?
Tag modifiedTag = null;
Iterator<Tag> tagsIterator = PrivateCellUtil.tagsIterator(cell);
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
if (tag.getType() == TagType.STRING_VIS_TAG_TYPE) {
modifiedTag = tag;
break;
}
}
pair.setFirst(true);
pair.setSecond(modifiedTag);
return pair;
}
Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(cell);
while (tagsItr.hasNext()) {
if (RESERVED_VIS_TAG_TYPES.contains(tagsItr.next().getType())) {
return pair;
}
}
pair.setFirst(true);
return pair;
} | 3.68 |
hadoop_ResourceEstimatorServer_shutdown | /**
* Stop embedded Hadoop HTTP server.
*
* @throws Exception in case the HTTP server fails to shut down.
*/
public void shutdown() throws Exception {
LOGGER.info("Stopping resourceestimator service at: {}.",
baseURI.toString());
webServer.stop();
} | 3.68 |
flink_PekkoInvocationHandler_ask | /**
* Sends the message to the RPC endpoint and returns a future containing its response.
*
* @param message to send to the RPC endpoint
* @param timeout time to wait until the response future is failed with a {@link
* TimeoutException}
* @return Response future
*/
protected CompletableFuture<?> ask(Object message, Duration timeout) {
final CompletableFuture<?> response =
ScalaFutureUtils.toJava(Patterns.ask(rpcEndpoint, message, timeout.toMillis()));
return guardCompletionWithContextClassLoader(response, flinkClassLoader);
} | 3.68 |
hadoop_AclFeature_getEntryAt | /**
* Get the entry at the specified position
* @param pos Position of the entry to be obtained
* @return integer representation of AclEntry
* @throws IndexOutOfBoundsException if pos out of bound
*/
int getEntryAt(int pos) {
    if (pos < 0 || pos >= entries.length) {
throw new IndexOutOfBoundsException("Invalid position for AclEntry");
}
return entries[pos];
} | 3.68 |
hadoop_RolePolicies_bucketToArn | /**
* From an S3 bucket name, build an ARN to refer to it.
* @param bucket bucket name.
 * @return the ARN to use in statements.
*/
public static String bucketToArn(String bucket) {
return String.format("arn:aws:s3:::%s", bucket);
} | 3.68 |
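
A quick usage sketch for the helper above; the bucket name is made up, and the import path is an assumption based on Hadoop's S3A auth package:

```java
import org.apache.hadoop.fs.s3a.auth.RolePolicies; // assumed package for the class shown above

public class BucketToArnExample {
    public static void main(String[] args) {
        // "my-data-bucket" is only an example bucket name.
        String arn = RolePolicies.bucketToArn("my-data-bucket");
        System.out.println(arn); // prints: arn:aws:s3:::my-data-bucket
    }
}
```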
hbase_HStoreFile_isReferencedInReads | /** Returns true if the file is still used in reads */
public boolean isReferencedInReads() {
int rc = fileInfo.getRefCount();
assert rc >= 0; // we should not go negative.
return rc > 0;
} | 3.68 |
framework_UIDL_getStringVariable | /**
* Gets the value of the named variable.
*
* @param name
* the name of the variable
* @return the value of the variable
*/
public String getStringVariable(String name) {
return var().getString(name);
} | 3.68 |
flink_TemporaryClassLoaderContext_of | /**
* Sets the context class loader to the given ClassLoader and returns a resource that sets it
* back to the current context ClassLoader when the resource is closed.
*
* <pre>{@code
* try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(classloader)) {
* // code that needs the context class loader
* }
* }</pre>
*/
public static TemporaryClassLoaderContext of(ClassLoader cl) {
final Thread t = Thread.currentThread();
final ClassLoader original = t.getContextClassLoader();
t.setContextClassLoader(cl);
return new TemporaryClassLoaderContext(t, original);
} | 3.68 |
hadoop_StringValueMin_getCombinerOutput | /**
 * @return an array of one element. The element is a string
 *         representation of the aggregated value. The return value is
 *         expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>(1);
retv.add(minVal);
return retv;
} | 3.68 |
zxing_PDF417HighLevelEncoder_determineConsecutiveDigitCount | /**
* Determines the number of consecutive characters that are encodable using numeric compaction.
*
* @param input the input
* @param startpos the start position within the input
* @return the requested character count
*/
private static int determineConsecutiveDigitCount(ECIInput input, int startpos) {
int count = 0;
final int len = input.length();
int idx = startpos;
if (idx < len) {
while (idx < len && !input.isECI(idx) && isDigit(input.charAt(idx))) {
count++;
idx++;
}
}
return count;
} | 3.68 |
hudi_RocksDBDAO_init | /**
 * Initializes the RocksDB instance.
*/
private void init() {
try {
LOG.info("DELETING RocksDB persisted at " + rocksDBBasePath);
FileIOUtils.deleteDirectory(new File(rocksDBBasePath));
managedHandlesMap = new ConcurrentHashMap<>();
managedDescriptorMap = new ConcurrentHashMap<>();
// If already present, loads the existing column-family handles
final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true)
.setWalDir(rocksDBBasePath).setStatsDumpPeriodSec(300).setStatistics(new Statistics());
dbOptions.setLogger(new org.rocksdb.Logger(dbOptions) {
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
switch (infoLogLevel) {
case DEBUG_LEVEL:
LOG.debug("From Rocks DB : {}", logMsg);
break;
case WARN_LEVEL:
LOG.warn("From Rocks DB : {}", logMsg);
break;
case ERROR_LEVEL:
case FATAL_LEVEL:
LOG.error("From Rocks DB : {}", logMsg);
break;
case HEADER_LEVEL:
case NUM_INFO_LOG_LEVELS:
case INFO_LEVEL:
default:
LOG.info("From Rocks DB : {}", logMsg);
break;
}
}
});
final List<ColumnFamilyDescriptor> managedColumnFamilies = loadManagedColumnFamilies(dbOptions);
final List<ColumnFamilyHandle> managedHandles = new ArrayList<>(managedColumnFamilies.size());
FileIOUtils.mkdir(new File(rocksDBBasePath));
rocksDB = RocksDB.open(dbOptions, rocksDBBasePath, managedColumnFamilies, managedHandles);
ValidationUtils.checkArgument(managedHandles.size() == managedColumnFamilies.size(),
"Unexpected number of handles are returned");
for (int index = 0; index < managedHandles.size(); index++) {
ColumnFamilyHandle handle = managedHandles.get(index);
ColumnFamilyDescriptor descriptor = managedColumnFamilies.get(index);
String familyNameFromHandle = new String(handle.getName());
String familyNameFromDescriptor = new String(descriptor.getName());
ValidationUtils.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle),
"Family Handles not in order with descriptors");
managedHandlesMap.put(familyNameFromHandle, handle);
managedDescriptorMap.put(familyNameFromDescriptor, descriptor);
}
} catch (RocksDBException | IOException re) {
LOG.error("Got exception opening Rocks DB instance ", re);
throw new HoodieException(re);
}
} | 3.68 |
hadoop_UserDefinedValueAggregatorDescriptor_generateKeyValPairs | /**
* Generate a list of aggregation-id/value pairs for the given
* key/value pairs by delegating the invocation to the real object.
*
* @param key
* input key
* @param val
* input value
* @return a list of aggregation id/value pairs. An aggregation id encodes an
* aggregation type which is used to guide the way to aggregate the
 * value in the reduce/combiner phase of an Aggregate based job.
*/
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
Object val) {
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
if (this.theAggregatorDescriptor != null) {
retv = this.theAggregatorDescriptor.generateKeyValPairs(key, val);
}
return retv;
} | 3.68 |
framework_Button_setHtmlContentAllowed | /**
* Set whether the caption text is rendered as HTML or not. You might need
 * to re-theme the button to allow content taller than the original text style.
*
* If set to true, the captions are passed to the browser as html and the
* developer is responsible for ensuring no harmful html is used. If set to
* false, the content is passed to the browser as plain text.
*
* @param htmlContentAllowed
* <code>true</code> if caption is rendered as HTML,
* <code>false</code> otherwise
*
* @deprecated as of 8.0.0, use {@link #setCaptionAsHtml(boolean)} instead.
*/
@Deprecated
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
getState().captionAsHtml = htmlContentAllowed;
} | 3.68 |
hmily_ConsulPassiveConfig_fileName | /**
* File name string.
*
* @return the string
*/
public String fileName() {
return key + "." + fileExtension;
} | 3.68 |
framework_StreamResource_getStreamSource | /**
* Returns the source for this <code>StreamResource</code>. StreamSource is
* queried when the resource is about to be streamed to the client.
*
* @return Source of the StreamResource.
*/
public StreamSource getStreamSource() {
return streamSource;
} | 3.68 |
hbase_HFileLink_getReferencedHFileName | /**
* Get the HFile name of the referenced link
* @param fileName HFileLink file name
* @return the name of the referenced HFile
*/
public static String getReferencedHFileName(final String fileName) {
Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
if (!m.matches()) {
throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
}
return (m.group(4));
} | 3.68 |
flink_ExecutionConfig_configure | /**
* Sets all relevant options contained in the {@link ReadableConfig} such as e.g. {@link
* PipelineOptions#CLOSURE_CLEANER_LEVEL}.
*
* <p>It will change the value of a setting only if a corresponding option was set in the {@code
* configuration}. If a key is not present, the current value of a field will remain untouched.
*
* @param configuration a configuration to read the values from
* @param classLoader a class loader to use when loading classes
*/
public void configure(ReadableConfig configuration, ClassLoader classLoader) {
configuration
.getOptional(PipelineOptions.AUTO_TYPE_REGISTRATION)
.ifPresent(this::setAutoTypeRegistration);
configuration
.getOptional(PipelineOptions.AUTO_GENERATE_UIDS)
.ifPresent(this::setAutoGeneratedUids);
configuration
.getOptional(PipelineOptions.AUTO_WATERMARK_INTERVAL)
.ifPresent(this::setAutoWatermarkInterval);
configuration
.getOptional(PipelineOptions.CLOSURE_CLEANER_LEVEL)
.ifPresent(this::setClosureCleanerLevel);
configuration.getOptional(PipelineOptions.FORCE_AVRO).ifPresent(this::setForceAvro);
configuration.getOptional(PipelineOptions.GENERIC_TYPES).ifPresent(this::setGenericTypes);
configuration.getOptional(PipelineOptions.FORCE_KRYO).ifPresent(this::setForceKryo);
configuration
.getOptional(PipelineOptions.GLOBAL_JOB_PARAMETERS)
.ifPresent(this::setGlobalJobParameters);
configuration
.getOptional(MetricOptions.LATENCY_INTERVAL)
.ifPresent(this::setLatencyTrackingInterval);
configuration
.getOptional(StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL)
.ifPresent(this::setPeriodicMaterializeIntervalMillis);
configuration
.getOptional(StateChangelogOptions.MATERIALIZATION_MAX_FAILURES_ALLOWED)
.ifPresent(this::setMaterializationMaxAllowedFailures);
configuration
.getOptional(PipelineOptions.MAX_PARALLELISM)
.ifPresent(this::setMaxParallelism);
configuration.getOptional(CoreOptions.DEFAULT_PARALLELISM).ifPresent(this::setParallelism);
configuration.getOptional(PipelineOptions.OBJECT_REUSE).ifPresent(this::setObjectReuse);
configuration
.getOptional(TaskManagerOptions.TASK_CANCELLATION_INTERVAL)
.ifPresent(this::setTaskCancellationInterval);
configuration
.getOptional(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT)
.ifPresent(this::setTaskCancellationTimeout);
configuration
.getOptional(ExecutionOptions.SNAPSHOT_COMPRESSION)
.ifPresent(this::setUseSnapshotCompression);
RestartStrategies.fromConfiguration(configuration).ifPresent(this::setRestartStrategy);
configuration
.getOptional(PipelineOptions.KRYO_DEFAULT_SERIALIZERS)
.map(s -> parseKryoSerializersWithExceptionHandling(classLoader, s))
.ifPresent(s -> this.defaultKryoSerializerClasses = s);
configuration
.getOptional(PipelineOptions.POJO_REGISTERED_CLASSES)
.map(c -> loadClasses(c, classLoader, "Could not load pojo type to be registered."))
.ifPresent(c -> this.registeredPojoTypes = c);
configuration
.getOptional(PipelineOptions.KRYO_REGISTERED_CLASSES)
.map(c -> loadClasses(c, classLoader, "Could not load kryo type to be registered."))
.ifPresent(c -> this.registeredKryoTypes = c);
configuration
.getOptional(JobManagerOptions.SCHEDULER)
.ifPresent(t -> this.configuration.set(JobManagerOptions.SCHEDULER, t));
} | 3.68 |
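
A minimal sketch of how configure(ReadableConfig, ClassLoader) can be driven from a Flink Configuration; the two options set here are illustrative picks, not recommendations:

```java
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;
import org.apache.flink.configuration.PipelineOptions;

public class ExecutionConfigConfigureExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Only options that are explicitly present are applied; unset keys leave the
        // corresponding ExecutionConfig fields untouched.
        conf.set(PipelineOptions.OBJECT_REUSE, true);
        conf.set(CoreOptions.DEFAULT_PARALLELISM, 4);

        ExecutionConfig executionConfig = new ExecutionConfig();
        executionConfig.configure(conf, ExecutionConfigConfigureExample.class.getClassLoader());

        System.out.println(executionConfig.isObjectReuseEnabled()); // true
        System.out.println(executionConfig.getParallelism());       // 4
    }
}
```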
flink_ExternalizedSnapshotLocation_validatePath | /**
* Checks the validity of the path's scheme and path.
*
* @param path The path to check.
* @return The URI as a Path.
* @throws IllegalArgumentException Thrown, if the URI misses scheme or path.
*/
private static Path validatePath(Path path) {
if (path == null) {
return null;
}
Optional.ofNullable(path.toUri().getScheme())
.orElseThrow(
() ->
new IllegalArgumentException(
"The scheme (hdfs://, file://, etc) is null. "
+ "Please specify the file system scheme explicitly in the URI."));
Optional.ofNullable(path.getPath())
.orElseThrow(
() ->
new IllegalArgumentException(
"The path to store the checkpoint data in is null. "
+ "Please specify a directory path for the checkpoint data."));
Optional.ofNullable(path.getParent())
.orElseThrow(
() ->
new IllegalArgumentException(
"Cannot use the root directory for checkpoints."));
return path;
} | 3.68 |
framework_ConnectorTracker_addStreamVariable | /**
* Adds a StreamVariable of the given name to the indicated connector.
*
* @param connectorId
* @param variableName
* @param variable
*/
public void addStreamVariable(String connectorId, String variableName,
StreamVariable variable) {
assert getConnector(connectorId) != null;
if (pidToNameToStreamVariable == null) {
pidToNameToStreamVariable = new HashMap<>();
}
Map<String, StreamVariable> nameToStreamVariable = pidToNameToStreamVariable
.get(connectorId);
if (nameToStreamVariable == null) {
nameToStreamVariable = new HashMap<>();
pidToNameToStreamVariable.put(connectorId, nameToStreamVariable);
}
nameToStreamVariable.put(variableName, variable);
if (streamVariableToSeckey == null) {
streamVariableToSeckey = new HashMap<>();
}
String seckey = streamVariableToSeckey.get(variable);
if (seckey == null) {
/*
* Despite section 6 of RFC 4122, this particular use of UUID *is*
* adequate for security capabilities. Type 4 UUIDs contain 122 bits
* of random data, and UUID.randomUUID() is defined to use a
* cryptographically secure random generator.
*/
seckey = UUID.randomUUID().toString();
streamVariableToSeckey.put(variable, seckey);
}
} | 3.68 |
rocketmq-connect_ExpressionBuilder_appendStringQuoted | /**
* Append to this builder's expression a string surrounded by single quote characters ({@code '}).
* Use {@link #appendIdentifier(String, QuoteMethod)} for identifiers,
* {@link #appendColumnName(String, QuoteMethod)} for column names, or
* {@link #appendTableName(String, QuoteMethod)} for table names.
*
* @param name the object whose string representation is to be appended
* @return this builder to enable methods to be chained; never null
*/
public ExpressionBuilder appendStringQuoted(Object name) {
appendStringQuote();
sb.append(name);
appendStringQuote();
return this;
} | 3.68 |
framework_ComboBoxElement_getPopupSuggestionElements | /**
* Gets the elements of all suggestions on the current page.
* <p>
* Opens the popup if not already open.
*
* @return a list of elements for the suggestions on the current page
*/
public List<WebElement> getPopupSuggestionElements() {
List<WebElement> tables = getSuggestionPopup()
.findElements(By.tagName("table"));
if (tables == null || tables.isEmpty()) {
return Collections.emptyList();
}
WebElement table = tables.get(0);
return table.findElements(By.tagName("td"));
} | 3.68 |
hbase_BucketEntry_release | /**
 * We have three cases for releasing refCnt now: <br>
 * 1. BucketCache#evictBlock: it releases the backingMap's reference by force because we're
 * closing the file, clearing the bucket cache, or some corruption happened. When all rpc
 * references are gone, the area in bucketAllocator is freed. <br>
 * 2. BucketCache#returnBlock: when the rpc has shipped, we release the block; only when the
 * backingMap also releases its refCnt (case 1 does this) and no other rpc reference remains
 * does it free the area in bucketAllocator. <br>
 * 3. Evicting blocks without any rpc reference if the cache size is exceeded; we only free
 * blocks with a zero rpc reference count.
* @return true to indicate we've decreased to zero and do the de-allocation.
*/
@Override
public boolean release() {
return refCnt.release();
} | 3.68 |
hbase_Strings_appendKeyValue | /**
 * Append a key/value pair to a StringBuilder, using the supplied separators.
* @param sb StringBuilder to use
* @param key Key to append.
* @param value Value to append.
* @param separator Value to use between key and value.
* @param keyValueSeparator Value to use between key/value sets.
* @return Passed <code>sb</code> populated with key/value.
*/
public static StringBuilder appendKeyValue(final StringBuilder sb, final String key,
final Object value, final String separator, final String keyValueSeparator) {
if (sb.length() > 0) {
sb.append(keyValueSeparator);
}
return sb.append(key).append(separator).append(value);
} | 3.68 |
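
A small usage sketch for appendKeyValue, assuming HBase's org.apache.hadoop.hbase.util.Strings is on the classpath; the separators below are arbitrary example choices:

```java
import org.apache.hadoop.hbase.util.Strings; // assumed import path for the class shown above

public class AppendKeyValueExample {
    public static void main(String[] args) {
        StringBuilder sb = new StringBuilder();
        // First pair: the builder is empty, so no keyValueSeparator is prepended.
        Strings.appendKeyValue(sb, "host", "rs1.example.com", "=", ", ");
        // Second pair: ", " is inserted before appending "port=16020".
        Strings.appendKeyValue(sb, "port", 16020, "=", ", ");
        System.out.println(sb); // host=rs1.example.com, port=16020
    }
}
```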
hudi_GenericRecordFullPayloadGenerator_convertPartial | /**
 * Create a new {@link GenericRecord} with random values. Not every field is populated; whether a field is set, and the value it gets, are both random.
*
* @param schema Schema to create with.
* @return A {@link GenericRecord} with random value.
*/
protected GenericRecord convertPartial(Schema schema) {
GenericRecord result = new GenericData.Record(schema);
for (Schema.Field f : schema.getFields()) {
if (f.name().equals(DEFAULT_HOODIE_IS_DELETED_COL)) {
result.put(f.name(), false);
} else {
boolean setNull = random.nextBoolean();
if (!setNull) {
result.put(f.name(), typeConvert(f));
} else {
result.put(f.name(), null);
}
}
}
// TODO : pack remaining bytes into a complex field
return result;
} | 3.68 |
flink_ResourceGuard_releaseResource | /**
* Releases access for one client of the guarded resource. This method must only be called after
* a matching call to {@link #acquireResource()}.
*/
private void releaseResource() {
synchronized (lock) {
--leaseCount;
if (closed && leaseCount == 0) {
lock.notifyAll();
}
}
} | 3.68 |
hbase_RegionStates_hasTableRegionStates | // ============================================================================================
// TODO: helpers
// ============================================================================================
public boolean hasTableRegionStates(final TableName tableName) {
// TODO
return !getTableRegionStates(tableName).isEmpty();
} | 3.68 |
dubbo_ApplicationModel_getServiceRepository | /**
* @deprecated Replace to {@link ApplicationModel#getApplicationServiceRepository()}
*/
@Deprecated
public static ServiceRepository getServiceRepository() {
return defaultModel().getApplicationServiceRepository();
} | 3.68 |
hadoop_RegistryTypeUtils_getAddressField | /**
 * Get a specific field from an address, raising an exception if
* the field is not present
* @param address address to query
* @param field field to resolve
* @return the resolved value. Guaranteed to be non-null.
* @throws InvalidRecordException if the field did not resolve
*/
public static String getAddressField(Map<String, String> address,
String field) throws InvalidRecordException {
String val = address.get(field);
if (val == null) {
throw new InvalidRecordException("", "Missing address field: " + field);
}
return val;
} | 3.68 |
flink_AsyncSinkBaseBuilder_setMaxRecordSizeInBytes | /**
* @param maxRecordSizeInBytes the maximum size of each records in bytes. If a record larger
* than this is passed to the sink, it will throw an {@code IllegalArgumentException}.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxRecordSizeInBytes(long maxRecordSizeInBytes) {
this.maxRecordSizeInBytes = maxRecordSizeInBytes;
return (ConcreteBuilderT) this;
} | 3.68 |
hadoop_RolloverSignerSecretProvider_startScheduler | /**
* Starts the scheduler for the rollover to run at an interval.
* @param initialDelay The initial delay in the rollover in milliseconds
* @param period The interval for the rollover in milliseconds
*/
protected synchronized void startScheduler(long initialDelay, long period) {
if (!schedulerRunning) {
schedulerRunning = true;
scheduler = Executors.newSingleThreadScheduledExecutor();
scheduler.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
rollSecret();
}
}, initialDelay, period, TimeUnit.MILLISECONDS);
}
} | 3.68 |
flink_TemporalRowTimeJoinOperator_cleanupState | /**
* The method to be called when a cleanup timer fires.
*
* @param time The timestamp of the fired timer.
*/
@Override
public void cleanupState(long time) {
leftState.clear();
rightState.clear();
nextLeftIndex.clear();
registeredTimer.clear();
} | 3.68 |
flink_KryoUtils_applyRegistrations | /**
* Apply a list of {@link KryoRegistration} to a Kryo instance. The list of registrations is
* assumed to already be a final resolution of all possible registration overwrites.
*
* <p>The registrations are applied in the given order and always specify the registration id,
* using the given {@code firstRegistrationId} and incrementing it for each registration.
*
* @param kryo the Kryo instance to apply the registrations
* @param resolvedRegistrations the registrations, which should already be resolved of all
* possible registration overwrites
* @param firstRegistrationId the first registration id to use
*/
public static void applyRegistrations(
Kryo kryo,
Collection<KryoRegistration> resolvedRegistrations,
int firstRegistrationId) {
int currentRegistrationId = firstRegistrationId;
Serializer<?> serializer;
for (KryoRegistration registration : resolvedRegistrations) {
serializer = registration.getSerializer(kryo);
if (serializer != null) {
kryo.register(registration.getRegisteredClass(), serializer, currentRegistrationId);
} else {
kryo.register(registration.getRegisteredClass(), currentRegistrationId);
}
// if Kryo already had a serializer for that type then it ignores the registration
if (kryo.getRegistration(currentRegistrationId) != null) {
currentRegistrationId++;
}
}
} | 3.68 |
flink_JobExecutionResult_getNetRuntime | /**
* Gets the net execution time of the job, i.e., the execution time in the parallel system,
 * without the pre-flight steps like the optimizer, expressed in the desired time unit.
*
* @param desiredUnit the unit of the <tt>NetRuntime</tt>
* @return The net execution time in the desired unit.
*/
public long getNetRuntime(TimeUnit desiredUnit) {
return desiredUnit.convert(getNetRuntime(), TimeUnit.MILLISECONDS);
} | 3.68 |
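
A short sketch of reading the net runtime after a job finishes; the pipeline is a trivial placeholder just so execute() has work to do:

```java
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class NetRuntimeExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3).print(); // trivial pipeline for illustration only

        JobExecutionResult result = env.execute("net-runtime-example");
        // Convert the millisecond runtime into seconds for reporting.
        long seconds = result.getNetRuntime(TimeUnit.SECONDS);
        System.out.println("Job ran for " + seconds + " s");
    }
}
```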
flink_MessageSerializer_serializeServerFailure | /**
* Serializes the failure message sent to the {@link
* org.apache.flink.queryablestate.network.Client} in case of server related errors.
*
* @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the
* message into.
* @param cause The exception thrown at the server.
* @return The failure message.
*/
public static ByteBuf serializeServerFailure(
final ByteBufAllocator alloc, final Throwable cause) throws IOException {
final ByteBuf buf = alloc.ioBuffer();
// Frame length is set at end
buf.writeInt(0);
writeHeader(buf, MessageType.SERVER_FAILURE);
try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
ObjectOutput out = new ObjectOutputStream(bbos)) {
out.writeObject(cause);
}
// Set frame length
int frameLength = buf.readableBytes() - Integer.BYTES;
buf.setInt(0, frameLength);
return buf;
} | 3.68 |
streampipes_OpcUaUtil_getSchema | /***
 * OPC UA specific implementation of schema guessing.
 * @param extractor the adapter parameter extractor
 * @return the guessed schema
* @throws AdapterException
* @throws ParseException
*/
public static GuessSchema getSchema(IAdapterParameterExtractor extractor)
throws AdapterException, ParseException {
var builder = GuessSchemaBuilder.create();
EventSchema eventSchema = new EventSchema();
Map<String, Object> eventPreview = new HashMap<>();
Map<String, FieldStatusInfo> fieldStatusInfos = new HashMap<>();
List<EventProperty> allProperties = new ArrayList<>();
SpOpcUaClient<OpcUaConfig> spOpcUaClient = new SpOpcUaClient<>(
SpOpcUaConfigExtractor.extractSharedConfig(extractor.getStaticPropertyExtractor(), new OpcUaConfig())
);
try {
spOpcUaClient.connect();
OpcUaNodeBrowser nodeBrowser =
new OpcUaNodeBrowser(spOpcUaClient.getClient(), spOpcUaClient.getSpOpcConfig());
List<OpcNode> selectedNodes = nodeBrowser.findNodes();
if (!selectedNodes.isEmpty()) {
for (OpcNode opcNode : selectedNodes) {
if (opcNode.hasUnitId()) {
allProperties.add(PrimitivePropertyBuilder
.create(opcNode.getType(), opcNode.getLabel())
.label(opcNode.getLabel())
.measurementUnit(new URI(opcNode.getQudtURI()))
.build());
} else {
allProperties.add(PrimitivePropertyBuilder
.create(opcNode.getType(), opcNode.getLabel())
.label(opcNode.getLabel())
.build());
}
}
}
var nodeIds = selectedNodes.stream().map(OpcNode::getNodeId).collect(Collectors.toList());
var response = spOpcUaClient.getClient().readValues(0, TimestampsToReturn.Both, nodeIds);
var returnValues = response.get();
makeEventPreview(selectedNodes, eventPreview, fieldStatusInfos, returnValues);
} catch (Exception e) {
throw new AdapterException("Could not guess schema for opc node: " + e.getMessage(), e);
} finally {
spOpcUaClient.disconnect();
}
eventSchema.setEventProperties(allProperties);
builder.properties(allProperties);
builder.fieldStatusInfos(fieldStatusInfos);
builder.preview(eventPreview);
return builder.build();
} | 3.68 |
morf_ConnectionResourcesBean_setFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming(Integer)
*/
@Override
public void setFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming(Integer fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming) {
this.fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming = fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming;
} | 3.68 |
flink_Types_TUPLE | /**
* Returns type information for typed subclasses of Flink's {@link
 * org.apache.flink.api.java.tuple.Tuple}. Typed subclasses are classes that extend {@link
* org.apache.flink.api.java.tuple.Tuple0} till {@link org.apache.flink.api.java.tuple.Tuple25}
* to provide types for all fields and might add additional getters and setters for better
* readability. Additional member fields must not be added. A tuple must not be null.
*
* <p>A tuple is a fixed-length composite type for storing multiple values in a deterministic
* field order. Fields of a tuple are typed. Tuples are the most efficient composite type; a
* tuple does not support null-valued fields unless the type of the field supports nullability.
*
* <p>The generic types for all fields of the tuple can be defined in a hierarchy of subclasses.
*
* <p>If Flink's type analyzer is unable to extract a tuple type information with type
* information for all fields, an {@link
* org.apache.flink.api.common.functions.InvalidTypesException} is thrown.
*
* <p>Example use:
*
* <pre>{@code
* class MyTuple extends Tuple2<Integer, String> {
*
* public int getId() { return f0; }
*
* public String getName() { return f1; }
* }
* }
*
* Types.TUPLE(MyTuple.class)
* </pre>
*
* @param tupleSubclass A subclass of {@link org.apache.flink.api.java.tuple.Tuple0} till {@link
* org.apache.flink.api.java.tuple.Tuple25} that defines all field types and does not add
* any additional fields
*/
public static <T extends Tuple> TypeInformation<T> TUPLE(Class<T> tupleSubclass) {
final TypeInformation<T> ti = TypeExtractor.createTypeInfo(tupleSubclass);
if (ti instanceof TupleTypeInfo) {
return ti;
}
throw new InvalidTypesException("Tuple type expected but was: " + ti);
} | 3.68 |
hbase_MetricsSource_getSizeOfLogQueue | /**
* Get the sizeOfLogQueue
*/
public int getSizeOfLogQueue() {
return singleSourceSource.getSizeOfLogQueue();
} | 3.68 |
dubbo_Bytes_copyOf | /**
* byte array copy.
*
* @param src src.
* @param length new length.
* @return new byte array.
*/
public static byte[] copyOf(byte[] src, int length) {
byte[] dest = new byte[length];
System.arraycopy(src, 0, dest, 0, Math.min(src.length, length));
return dest;
} | 3.68 |
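
A usage sketch for the copy helper above, showing both the zero-padding and the truncating case; the import path is an assumption based on Dubbo's common io utilities:

```java
import java.util.Arrays;

import org.apache.dubbo.common.io.Bytes; // assumed import path for the class shown above

public class CopyOfExample {
    public static void main(String[] args) {
        byte[] src = {1, 2, 3};
        // Longer target: the extra positions stay at their default value 0.
        System.out.println(Arrays.toString(Bytes.copyOf(src, 5))); // [1, 2, 3, 0, 0]
        // Shorter target: the source is truncated.
        System.out.println(Arrays.toString(Bytes.copyOf(src, 2))); // [1, 2]
    }
}
```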
hadoop_MultipartUploaderBuilderImpl_replication | /**
* Set replication factor.
*/
@Override
public B replication(short replica) {
replication = replica;
return getThisBuilder();
} | 3.68 |
morf_AddTableFrom_getSelectStatement | /**
* @return the {@link SelectStatement} to be used.
*/
public SelectStatement getSelectStatement() {
return selectStatement;
} | 3.68 |
pulsar_TransactionMetadataStoreProvider_newProvider | /**
* Construct a provider from the provided class.
*
* @param providerClassName the provider class name.
* @return an instance of transaction metadata store provider.
*/
static TransactionMetadataStoreProvider newProvider(String providerClassName) throws IOException {
Class<?> providerClass;
try {
providerClass = Class.forName(providerClassName);
Object obj = providerClass.getDeclaredConstructor().newInstance();
checkArgument(obj instanceof TransactionMetadataStoreProvider,
"The factory has to be an instance of "
+ TransactionMetadataStoreProvider.class.getName());
return (TransactionMetadataStoreProvider) obj;
} catch (Exception e) {
throw new IOException(e);
}
} | 3.68 |
hadoop_StoreContext_getAuditor | /**
* Get the auditor.
* @return auditor.
*/
public AuditSpanSource<AuditSpanS3A> getAuditor() {
return auditor;
} | 3.68 |
rocketmq-connect_WorkerDirectTask_pause | /**
 * Pause consumption of messages from the specified partitions.
 *
 * @param partitions the partitions from which to pause consumption
*/
@Override
public void pause(List<RecordPartition> partitions) {
// no-op
} | 3.68 |
framework_SystemError_getHtmlMessage | /**
* Returns the message of the error in HTML.
*
* Note that this API may change in future versions.
*/
protected String getHtmlMessage() {
// TODO wrapping div with namespace? See the old code:
// target.addXMLSection("div", message,
// "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd");
StringBuilder sb = new StringBuilder();
if (getMessage() != null) {
sb.append("<h2>");
sb.append(VaadinServlet.safeEscapeForHtml(getMessage()));
sb.append("</h2>");
}
return sb.toString();
} | 3.68 |
morf_OracleMetaDataProvider_viewNames | /**
* @see org.alfasoftware.morf.metadata.Schema#viewNames()
*/
@Override
public Collection<String> viewNames() {
return viewMap().keySet();
} | 3.68 |
hmily_OriginTrackedPropertiesLoader_getCharacter | /**
* Gets character.
*
* @return the character
*/
public char getCharacter() {
return (char) this.character;
} | 3.68 |
framework_VScrollTable_getNavigationStartKey | /**
 * Get the key that moves the selection to the beginning of the table. By
 * default this is the Home key but by overriding this you can change the
 * key to whatever you want.
 *
 * @return the key code that moves the selection to the beginning of the table
*/
protected int getNavigationStartKey() {
return KeyCodes.KEY_HOME;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_getHTTPScheme | /**
 * Get the appropriate scheme for communicating with
 * Azure depending on whether wasb or wasbs is specified in the target URI.
 *
 * @return scheme - HTTPS or HTTP as appropriate.
*/
private String getHTTPScheme() {
String sessionScheme = sessionUri.getScheme();
// Check if we're on a secure URI scheme: wasbs or the legacy asvs scheme.
if (sessionScheme != null
&& (sessionScheme.equalsIgnoreCase("asvs")
|| sessionScheme.equalsIgnoreCase("wasbs"))) {
return HTTPS_SCHEME;
} else {
// At this point the scheme should be either null or asv or wasb.
// Intentionally I'm not going to validate it though since I don't feel
// it's this method's job to ensure a valid URI scheme for this file
// system.
return HTTP_SCHEME;
}
} | 3.68 |
framework_Navigator_getViewClass | /**
* Get the view class for this provider.
*
* @return {@link View} class
*/
public Class<? extends View> getViewClass() {
return viewClass;
} | 3.68 |
framework_ValueContext_getHasValue | /**
* Returns an {@code Optional} for the {@code HasValue} used in the value
 * conversion. In certain complicated cases, e.g. cross-field validation,
 * the HasValue might not be available.
*
* @return the optional of {@code HasValue}
* @since 8.1
*/
public Optional<HasValue<?>> getHasValue() {
return Optional.ofNullable(hasValue);
} | 3.68 |
hmily_RepositoryPathUtils_buildRedisKey | /**
* Build redis key string.
*
* @param keyPrefix the key prefix
* @param id the id
* @return the string
*/
public static String buildRedisKey(final String keyPrefix, final String id) {
return String.join(":", keyPrefix, id);
} | 3.68 |
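
An illustrative call of buildRedisKey with made-up prefix and id values. The import is omitted because the exact package of RepositoryPathUtils is project-specific; the sketch assumes the class is otherwise available on the classpath:

```java
// RepositoryPathUtils is the hmily utility class shown above; its import is omitted here
// because the package is project-specific.
public class BuildRedisKeyExample {
    public static void main(String[] args) {
        // Joins the prefix and id with ':', e.g. "hmily:tcc:123456".
        String key = RepositoryPathUtils.buildRedisKey("hmily:tcc", "123456");
        System.out.println(key);
    }
}
```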
hbase_JmxCacheBuster_restart | /**
* Restarts the stopped service.
* @see #stop()
*/
public static void restart() {
stopped.set(false);
} | 3.68 |
framework_VTree_setText | /** For internal use only. May be removed or replaced in the future. */
public void setText(String text) {
DOM.setInnerText(nodeCaptionSpan, text);
} | 3.68 |
flink_CheckpointConfig_getMaxConcurrentCheckpoints | /**
* Gets the maximum number of checkpoint attempts that may be in progress at the same time. If
* this value is <i>n</i>, then no checkpoints will be triggered while <i>n</i> checkpoint
* attempts are currently in flight. For the next checkpoint to be triggered, one checkpoint
* attempt would need to finish or expire.
*
* @return The maximum number of concurrent checkpoint attempts.
*/
public int getMaxConcurrentCheckpoints() {
return configuration.get(ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS);
} | 3.68 |
framework_DataCommunicator_beforeClientResponse | /**
 * Initially, and in the case of a reset, all data should be pushed to the
* client.
*/
@Override
public void beforeClientResponse(boolean initial) {
super.beforeClientResponse(initial);
if (initial && getPushRows().isEmpty()) {
// Make sure rows are pushed when component is attached.
setPushRows(Range.withLength(0, getMinPushSize()));
}
sendDataToClient(initial);
} | 3.68 |
hbase_FileLink_tryOpen | /**
* Try to open the file from one of the available locations.
* @return FSDataInputStream stream of the opened file link
* @throws IOException on unexpected error, or file not found.
*/
private FSDataInputStream tryOpen() throws IOException {
IOException exception = null;
for (Path path : fileLink.getLocations()) {
if (path.equals(currentPath)) continue;
try {
in = fs.open(path, bufferSize);
if (pos != 0) in.seek(pos);
assert (in.getPos() == pos) : "Link unable to seek to the right position=" + pos;
if (LOG.isTraceEnabled()) {
if (currentPath == null) {
LOG.debug("link open path=" + path);
} else {
LOG.trace("link switch from path=" + currentPath + " to path=" + path);
}
}
currentPath = path;
return (in);
} catch (FileNotFoundException | AccessControlException | RemoteException e) {
exception = FileLink.handleAccessLocationException(fileLink, e, exception);
}
}
throw exception;
} | 3.68 |
framework_BeanItemContainer_addItemAt | /**
* Adds a new bean at the given index.
*
* The bean is used both as the item contents and as the item identifier.
*
* @param index
* Index at which the bean should be added.
* @param newItemId
* The bean to add to the container.
* @return Returns the new BeanItem or null if the operation fails.
*/
@Override
@SuppressWarnings("unchecked")
public BeanItem<BEANTYPE> addItemAt(int index, Object newItemId)
throws IllegalArgumentException {
return super.addBeanAt(index, (BEANTYPE) newItemId);
} | 3.68 |
morf_DatabaseMetaDataProvider_setColumnNullability | /**
* Sets column nullability from a result set.
*
* @param tableName Name of the table.
* @param column Column builder to set to.
* @param columnResultSet Result set to be read.
* @return Resulting column builder.
* @throws SQLException Upon errors.
*/
@SuppressWarnings("unused")
protected ColumnBuilder setColumnNullability(RealName tableName, ColumnBuilder column, ResultSet columnResultSet) throws SQLException {
boolean nullable = "YES".equals(columnResultSet.getString(COLUMN_IS_NULLABLE));
return nullable ? column.nullable() : column;
} | 3.68 |
Activiti_FlowNodeActivityBehavior_leave | /**
* Default way of leaving a BPMN 2.0 activity: evaluate the conditions on the outgoing sequence flow and take those that evaluate to true.
*/
public void leave(DelegateExecution execution) {
bpmnActivityBehavior.performDefaultOutgoingBehavior((ExecutionEntity) execution);
} | 3.68 |
hadoop_User_setLogin | /**
* Set the login object
* @param login
*/
public void setLogin(LoginContext login) {
this.login = login;
} | 3.68 |
hbase_ServerManager_removeDeletedRegionFromLoadedFlushedSequenceIds | /**
 * Regions may have been removed between the latest persist of FlushedSequenceIds and a master
 * abort. So after loading FlushedSequenceIds from file, and after meta is loaded, we need to
 * remove the deleted regions according to RegionStates.
*/
public void removeDeletedRegionFromLoadedFlushedSequenceIds() {
RegionStates regionStates = master.getAssignmentManager().getRegionStates();
Iterator<byte[]> it = flushedSequenceIdByRegion.keySet().iterator();
while (it.hasNext()) {
byte[] regionEncodedName = it.next();
if (regionStates.getRegionState(Bytes.toStringBinary(regionEncodedName)) == null) {
it.remove();
storeFlushedSequenceIdsByRegion.remove(regionEncodedName);
}
}
} | 3.68 |
hbase_Bytes_random | /**
* Fill given array with random bytes at the specified position.
* <p>
* If you want random bytes generated by a strong source of randomness use
* {@link Bytes#secureRandom(byte[], int, int)}.
* @param b array which needs to be filled with random bytes
* @param offset staring offset in array
* @param length number of bytes to fill
*/
public static void random(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
checkArgument(length > 0, "length must be greater than 0");
checkPositionIndex(offset + length, b.length, "offset + length");
byte[] buf = new byte[length];
RNG.nextBytes(buf);
System.arraycopy(buf, 0, b, offset, length);
} | 3.68 |
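
A small sketch exercising Bytes.random from HBase's util package: fill only the middle of a buffer with random bytes and leave the rest untouched:

```java
import java.util.Arrays;

import org.apache.hadoop.hbase.util.Bytes;

public class RandomBytesExample {
    public static void main(String[] args) {
        byte[] buf = new byte[16];
        // Fill positions 4..11 with random bytes; positions 0..3 and 12..15 stay zero.
        Bytes.random(buf, 4, 8);
        System.out.println(Arrays.toString(buf));
    }
}
```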
framework_AbstractDateField_setValue | /**
* Sets the value of this object. If the new value is not equal to
* {@code getValue()}, fires a {@link ValueChangeEvent} .
*
* @param value
* the new value, may be {@code null}
* @throws IllegalArgumentException
* if the value is not within range bounds
*/
@Override
public void setValue(T value) {
T adjusted = adjustToResolution(value, getResolution());
RangeValidator<T> validator = getRangeValidator();
ValidationResult result = validator.apply(adjusted,
new ValueContext(this, this));
if (result.isError()) {
throw new IllegalArgumentException(
"value is not within acceptable range");
} else {
currentErrorMessage = null;
/*
* First handle special case when the client side component has a
* date string but value is null (e.g. unparsable date string typed
* in by the user). No value changes should happen, but we need to
* do some internal housekeeping.
*/
if (adjusted == null && !getState(false).parsable) {
/*
* Side-effects of doSetValue clears possible previous strings
* and flags about invalid input.
*/
doSetValue(null);
markAsDirty();
return;
}
super.setValue(adjusted);
}
} | 3.68 |
framework_MonthEventLabel_isTimeSpecificEvent | /**
 * Is the event bound to a specific time?
 *
 * @return {@code true} if the event is bound to a specific time, {@code false} otherwise
*/
public boolean isTimeSpecificEvent() {
return timeSpecificEvent;
} | 3.68 |
druid_DruidDriver_getDataSource | /**
 * Parameter definitions: com.alibaba.druid.log.LogFilter=filter com.alibaba.druid.log.LogFilter.p1=prop-value
 * com.alibaba.druid.log.LogFilter.p2=prop-value
*
* @param url
* @return
* @throws SQLException
*/
private DataSourceProxyImpl getDataSource(String url, Properties info) throws SQLException {
DataSourceProxyImpl dataSource = proxyDataSources.get(url);
if (dataSource == null) {
DataSourceProxyConfig config = parseConfig(url, info);
Driver rawDriver = createDriver(config.getRawDriverClassName());
DataSourceProxyImpl newDataSource = new DataSourceProxyImpl(rawDriver, config);
{
String property = System.getProperty("druid.filters");
if (property != null && property.length() > 0) {
for (String filterItem : property.split(",")) {
FilterManager.loadFilter(config.getFilters(), filterItem);
}
}
}
{
int dataSourceId = createDataSourceId();
newDataSource.setId(dataSourceId);
for (Filter filter : config.getFilters()) {
filter.init(newDataSource);
}
}
DataSourceProxy oldDataSource = proxyDataSources.putIfAbsent(url, newDataSource);
if (oldDataSource == null) {
if (config.isJmxOption()) {
JMXUtils.register("com.alibaba.druid:type=JdbcStat", JdbcStatManager.getInstance());
}
}
dataSource = proxyDataSources.get(url);
}
return dataSource;
} | 3.68 |
hadoop_AMRMClientRelayerMetrics_getInstance | /**
* Initialize the singleton instance.
*
* @return the singleton
*/
public static AMRMClientRelayerMetrics getInstance() {
if (!isInitialized.get()) {
synchronized (AMRMClientRelayerMetrics.class) {
if (instance == null) {
instance = new AMRMClientRelayerMetrics();
DefaultMetricsSystem.instance().register(RECORD_INFO.name(),
RECORD_INFO.description(), instance);
isInitialized.set(true);
}
}
}
return instance;
} | 3.68 |
hadoop_AllocateResponse_updatedContainers | /**
* Set the <code>updatedContainers</code> of the response.
* @see AllocateResponse#setUpdatedContainers(List)
* @param updatedContainers <code>updatedContainers</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder updatedContainers(
List<UpdatedContainer> updatedContainers) {
allocateResponse.setUpdatedContainers(updatedContainers);
return this;
} | 3.68 |
framework_FocusUtil_getTabIndex | /**
* Gets the widget's position in the tab index.
*
* @param focusable
* The widget
*
* @return the widget's tab index
*/
public static int getTabIndex(Widget focusable) {
assert (focusable != null && focusable
.getElement() != null) : "Can't getTabIndex for a widget without an element";
return focusable.getElement().getTabIndex();
} | 3.68 |
dubbo_FrameworkModel_defaultModel | /**
 * While the default FrameworkModel is being destroyed, FrameworkModel.defaultModel() or ApplicationModel.defaultModel()
 * will return a broken model, which may cause unpredictable problems.
* Recommendation: Avoid using the default model as much as possible.
* @return the global default FrameworkModel
*/
public static FrameworkModel defaultModel() {
FrameworkModel instance = defaultInstance;
if (instance == null) {
synchronized (globalLock) {
resetDefaultFrameworkModel();
if (defaultInstance == null) {
defaultInstance = new FrameworkModel();
}
instance = defaultInstance;
}
}
Assert.notNull(instance, "Default FrameworkModel is null");
return instance;
} | 3.68 |
hudi_ZeroToOneUpgradeHandler_getFileNameForMarkerFromLogFile | /**
* Curates file name for marker from existing log file path.
* log file format : partitionpath/.fileid_baseInstant.log.writetoken
* marker file format : partitionpath/fileId_writetoken_baseinstant.basefileExtn.marker.APPEND
*
* @param logFilePath log file path for which marker file name needs to be generated.
* @param table {@link HoodieTable} instance to use
* @return the marker file name thus curated.
*/
private static String getFileNameForMarkerFromLogFile(String logFilePath, HoodieTable<?, ?, ?, ?> table) {
Path logPath = new Path(table.getMetaClient().getBasePath(), logFilePath);
String fileId = FSUtils.getFileIdFromLogPath(logPath);
String deltaInstant = FSUtils.getDeltaCommitTimeFromLogPath(logPath);
String writeToken = FSUtils.getWriteTokenFromLogPath(logPath);
return FSUtils.makeBaseFileName(deltaInstant, writeToken, fileId, table.getBaseFileExtension());
} | 3.68 |
hbase_MasterObserver_preAddReplicationPeer | /**
* Called before add a replication peer
* @param ctx the environment to interact with the framework and master
* @param peerId a short name that identifies the peer
* @param peerConfig configuration for the replication peer
*/
default void preAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId, ReplicationPeerConfig peerConfig) throws IOException {
} | 3.68 |
AreaShop_WorldGuardHandler6_buildDomain | /**
* Build a DefaultDomain from a RegionAccessSet.
* @param regionAccessSet RegionAccessSet to read
* @return DefaultDomain containing the entities from the RegionAccessSet
*/
private DefaultDomain buildDomain(RegionAccessSet regionAccessSet) {
DefaultDomain owners = new DefaultDomain();
for(String playerName : regionAccessSet.getPlayerNames()) {
owners.addPlayer(playerName);
}
for(UUID uuid : regionAccessSet.getPlayerUniqueIds()) {
owners.addPlayer(uuid);
}
for(String group : regionAccessSet.getGroupNames()) {
owners.addGroup(group);
}
return owners;
} | 3.68 |
hadoop_AppIdKeyConverter_encode | /*
* (non-Javadoc)
*
* Converts/encodes a string app Id into a byte representation for (row) keys.
* For conversion, we extract cluster timestamp and sequence id from the
* string app id (calls ConverterUtils#toApplicationId(String) for
* conversion) and then store it in a byte array of length 12 (8 bytes (long)
* for cluster timestamp followed 4 bytes(int) for sequence id). Both cluster
* timestamp and sequence id are inverted so that the most recent cluster
* timestamp and highest sequence id appears first in the table (i.e.
* application id appears in a descending order).
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
* #encode(java.lang.Object)
*/
@Override
public byte[] encode(String appIdStr) {
ApplicationId appId = ApplicationId.fromString(appIdStr);
byte[] appIdBytes = new byte[getKeySize()];
byte[] clusterTs = Bytes.toBytes(
LongConverter.invertLong(appId.getClusterTimestamp()));
System.arraycopy(clusterTs, 0, appIdBytes, 0, Bytes.SIZEOF_LONG);
byte[] seqId = Bytes.toBytes(
HBaseTimelineSchemaUtils.invertInt(appId.getId()));
System.arraycopy(seqId, 0, appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT);
return appIdBytes;
} | 3.68 |
graphhopper_Helper_keepIn | /**
 * This method returns the value, or min if it is too small, or max if it is too big.
*/
public static double keepIn(double value, double min, double max) {
return Math.max(min, Math.min(value, max));
} | 3.68 |
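
A tiny demonstration of the clamping helper above, using GraphHopper's com.graphhopper.util.Helper:

```java
import com.graphhopper.util.Helper;

public class KeepInExample {
    public static void main(String[] args) {
        System.out.println(Helper.keepIn(5.0, 0.0, 3.0));  // 3.0  (clamped to max)
        System.out.println(Helper.keepIn(-1.5, 0.0, 3.0)); // 0.0  (clamped to min)
        System.out.println(Helper.keepIn(2.0, 0.0, 3.0));  // 2.0  (already in range)
    }
}
```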
flink_AllocatedSlot_hashCode | /** This always returns a reference hash code. */
@Override
public final int hashCode() {
return super.hashCode();
} | 3.68 |
flink_InPlaceMutableHashTable_insertAfterNoMatch | /**
 * This method can be called after getMatchFor returned null. It inserts the given record into
 * the hash table. Important: The given record should have the same key as the record that
 * was given to getMatchFor! WARNING: Don't do any modifications to the table between
* getMatchFor and insertAfterNoMatch!
*
* @throws IOException (EOFException specifically, if memory ran out)
*/
public void insertAfterNoMatch(T record) throws IOException {
if (closed) {
return;
}
// create new link
long pointerToAppended;
try {
pointerToAppended = recordArea.appendPointerAndRecord(END_OF_LIST, record);
} catch (EOFException ex) {
compactOrThrow();
insert(record);
return;
}
// add new link to the end of the list
if (prevElemPtr == INVALID_PREV_POINTER) {
// list was empty
bucketSegments[bucketSegmentIndex].putLong(bucketOffset, pointerToAppended);
} else {
// update the pointer of the last element of the list.
recordArea.overwritePointerAt(prevElemPtr, pointerToAppended);
}
numElements++;
resizeTableIfNecessary();
} | 3.68 |
morf_Function_average | /**
* Helper method to create an instance of the "average" SQL function.
*
* @param field the field to evaluate in the average function.
 * @return an instance of the average function.
*/
public static Function average(AliasedField field) {
return new Function(FunctionType.AVERAGE, field);
} | 3.68 |
hadoop_JobMetaData_getResourceSkyline | /**
* Get {@link ResourceSkyline}.
*
* @return {@link ResourceSkyline}.
*/
public final ResourceSkyline getResourceSkyline() {
return resourceSkyline;
} | 3.68 |
flink_LongHashPartition_get | /** Returns an iterator for all the values for the given key, or null if no value found. */
public MatchIterator get(long key, int hashCode) {
int bucket = findBucket(hashCode);
int bucketOffset = bucket << 4;
MemorySegment segment = buckets[bucketOffset >>> segmentSizeBits];
int segOffset = bucketOffset & segmentSizeMask;
while (true) {
long address = segment.getLong(segOffset + 8);
if (address != INVALID_ADDRESS) {
if (segment.getLong(segOffset) == key) {
return valueIter(address);
} else {
bucket = (bucket + 1) & numBucketsMask;
if (segOffset + 16 < segmentSize) {
segOffset += 16;
} else {
bucketOffset = bucket << 4;
segOffset = bucketOffset & segmentSizeMask;
segment = buckets[bucketOffset >>> segmentSizeBits];
}
}
} else {
return valueIter(INVALID_ADDRESS);
}
}
} | 3.68 |
zxing_ISBNResultParser_parse | /**
* See <a href="http://www.bisg.org/isbn-13/for.dummies.html">ISBN-13 For Dummies</a>
*/
@Override
public ISBNParsedResult parse(Result result) {
BarcodeFormat format = result.getBarcodeFormat();
if (format != BarcodeFormat.EAN_13) {
return null;
}
String rawText = getMassagedText(result);
int length = rawText.length();
if (length != 13) {
return null;
}
if (!rawText.startsWith("978") && !rawText.startsWith("979")) {
return null;
}
return new ISBNParsedResult(rawText);
} | 3.68 |
zxing_BitMatrix_xor | /**
* Exclusive-or (XOR): Flip the bit in this {@code BitMatrix} if the corresponding
* mask bit is set.
*
* @param mask XOR mask
*/
public void xor(BitMatrix mask) {
if (width != mask.width || height != mask.height || rowSize != mask.rowSize) {
throw new IllegalArgumentException("input matrix dimensions do not match");
}
BitArray rowArray = new BitArray(width);
for (int y = 0; y < height; y++) {
int offset = y * rowSize;
int[] row = mask.getRow(y, rowArray).getBitArray();
for (int x = 0; x < rowSize; x++) {
bits[offset + x] ^= row[x];
}
}
} | 3.68 |
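
A brief sketch of xor on two equally sized ZXing BitMatrix instances; the coordinates are arbitrary:

```java
import com.google.zxing.common.BitMatrix;

public class BitMatrixXorExample {
    public static void main(String[] args) {
        BitMatrix matrix = new BitMatrix(8, 8);
        BitMatrix mask = new BitMatrix(8, 8);
        matrix.set(1, 1);
        mask.set(1, 1); // same bit set in both -> cleared by xor
        mask.set(2, 3); // only set in the mask -> flipped on in matrix

        matrix.xor(mask);
        System.out.println(matrix.get(1, 1)); // false
        System.out.println(matrix.get(2, 3)); // true
    }
}
```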
hbase_FavoredNodeLoadBalancer_generateFavoredNodesForMergedRegion | /*
* Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep
* it simple.
*/
@Override
public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents)
throws IOException {
Map<RegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
regionFNMap.put(merged, getFavoredNodes(mergeParents[0]));
fnm.updateFavoredNodes(regionFNMap);
} | 3.68 |
flink_CliFrontend_getConfiguration | /**
* Getter which returns a copy of the associated configuration.
*
* @return Copy of the associated configuration
*/
public Configuration getConfiguration() {
Configuration copiedConfiguration = new Configuration();
copiedConfiguration.addAll(configuration);
return copiedConfiguration;
} | 3.68 |