name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
framework_RichTextArea_setValue | /**
* Sets the value of this object. If the new value is not equal to
* {@code getValue()}, fires a {@link ValueChangeEvent}. Throws
* {@code NullPointerException} if the value is null.
*
* @param value
* the new value, not {@code null}
* @throws NullPointerException
* if {@code value} is {@code null}
*/
@Override
public void setValue(String value) {
Objects.requireNonNull(value, "value cannot be null");
setValue(value, false);
} | 3.68 |
graphhopper_VectorTile_getTagsCount | /**
* <pre>
* Tags of this feature are encoded as repeated pairs of
* integers.
* A detailed description of tags is located in sections
* 4.2 and 4.4 of the specification
* </pre>
*
* <code>repeated uint32 tags = 2 [packed = true];</code>
*/
public int getTagsCount() {
return tags_.size();
} | 3.68 |
hbase_ScannerContext_setSizeLimitScope | /**
* @param scope The scope in which the size limit will be enforced
*/
void setSizeLimitScope(LimitScope scope) {
limits.setSizeScope(scope);
} | 3.68 |
hbase_AccessControlUtil_revoke | /**
* A utility used to revoke a user's namespace permissions.
* <p>
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param userShortName the short name of the user whose permissions are being revoked
* @param namespace the namespace to revoke permissions in
* @param actions the permissions to be revoked
* @throws ServiceException on failure
* @deprecated Use {@link Admin#revoke(UserPermission)} instead.
*/
@Deprecated
public static void revoke(RpcController controller,
AccessControlService.BlockingInterface protocol, String userShortName, String namespace,
Permission.Action... actions) throws ServiceException {
List<AccessControlProtos.Permission.Action> permActions =
Lists.newArrayListWithCapacity(actions.length);
for (Permission.Action a : actions) {
permActions.add(toPermissionAction(a));
}
AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, namespace,
permActions.toArray(new AccessControlProtos.Permission.Action[actions.length]));
protocol.revoke(controller, request);
} | 3.68 |
hbase_MultiTableSnapshotInputFormatImpl_getSplits | /**
* Return the list of splits extracted from the scans/snapshots pushed to conf by
* {@link #setInput(Configuration, Map, Path)}
* @param conf Configuration to determine splits from
* @return Return the list of splits extracted from the scans/snapshots pushed to conf
*/
public List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf)
throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
List<TableSnapshotInputFormatImpl.InputSplit> rtn = Lists.newArrayList();
Map<String, Collection<Scan>> snapshotsToScans = getSnapshotsToScans(conf);
Map<String, Path> snapshotsToRestoreDirs = getSnapshotDirs(conf);
for (Map.Entry<String, Collection<Scan>> entry : snapshotsToScans.entrySet()) {
String snapshotName = entry.getKey();
Path restoreDir = snapshotsToRestoreDirs.get(snapshotName);
SnapshotManifest manifest =
TableSnapshotInputFormatImpl.getSnapshotManifest(conf, snapshotName, rootDir, fs);
List<RegionInfo> regionInfos =
TableSnapshotInputFormatImpl.getRegionInfosFromManifest(manifest);
for (Scan scan : entry.getValue()) {
List<TableSnapshotInputFormatImpl.InputSplit> splits =
TableSnapshotInputFormatImpl.getSplits(scan, manifest, regionInfos, restoreDir, conf);
rtn.addAll(splits);
}
}
return rtn;
} | 3.68 |
hadoop_ReservationListRequest_newInstance | /**
* The {@link ReservationListRequest} will use the reservationId to search for
* reservations to list if it is provided. Otherwise, it will select active
* reservations within the startTime and endTime (inclusive).
*
* @param queue Required. Cannot be null or empty. Refers to the reservable
* queue in the scheduler that was selected when creating a
* reservation submission {@link ReservationSubmissionRequest}.
* @param reservationId Optional. String representation of
* {@code ReservationId}. If provided, other fields will
* be ignored.
* @return the list of reservations via {@link ReservationListRequest}
*/
@Public
@Unstable
public static ReservationListRequest newInstance(
String queue, String reservationId) {
return newInstance(queue, reservationId, -1, -1, false);
} | 3.68 |
flink_SlotManagerUtils_generateDefaultSlotResourceProfile | /**
* This must be consistent with {@link
* org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils#generateDefaultSlotResourceProfile}.
*/
public static ResourceProfile generateDefaultSlotResourceProfile(
ResourceProfile resourceProfile, int numSlotsPerWorker) {
final ResourceProfile.Builder resourceProfileBuilder =
ResourceProfile.newBuilder()
.setCpuCores(resourceProfile.getCpuCores().divide(numSlotsPerWorker))
.setTaskHeapMemory(
resourceProfile.getTaskHeapMemory().divide(numSlotsPerWorker))
.setTaskOffHeapMemory(
resourceProfile.getTaskOffHeapMemory().divide(numSlotsPerWorker))
.setManagedMemory(
resourceProfile.getManagedMemory().divide(numSlotsPerWorker))
.setNetworkMemory(
resourceProfile.getNetworkMemory().divide(numSlotsPerWorker));
resourceProfile
.getExtendedResources()
.forEach(
(name, resource) ->
resourceProfileBuilder.setExtendedResource(
resource.divide(numSlotsPerWorker)));
return resourceProfileBuilder.build();
} | 3.68 |
hbase_OrderedBytes_encodeNumericSmall | /**
* <p>
* Encode the small magnitude floating point number {@code val} using the key encoding. The caller
* guarantees that 1.0 > abs(val) > 0.0.
* </p>
* <p>
* A floating point value is encoded as an integer exponent {@code E} and a mantissa {@code M}.
* The original value is equal to {@code (M * 100^E)}. {@code E} is set to the smallest value
* possible without making {@code M} greater than or equal to 1.0.
* </p>
* <p>
* For this routine, {@code E} will always be zero or negative, since the original value is less
* than one. The encoding written by this routine is the ones-complement of the varint of the
* negative of {@code E} followed by the mantissa:
*
* <pre>
* Encoding: ~-E M
* </pre>
* </p>
* @param dst The destination to which encoded digits are written.
* @param val The value to encode.
* @return the number of bytes written.
*/
private static int encodeNumericSmall(PositionedByteRange dst, BigDecimal val) {
// TODO: this can be done faster?
// assert 1.0 > abs(val) > 0.0
BigDecimal abs = val.abs();
assert BigDecimal.ZERO.compareTo(abs) < 0 && BigDecimal.ONE.compareTo(abs) > 0;
byte[] a = dst.getBytes();
boolean isNeg = val.signum() == -1;
final int offset = dst.getOffset(), start = dst.getPosition();
if (isNeg) { /* Small negative number: 0x14, -E, ~M */
dst.put(NEG_SMALL);
} else { /* Small positive number: 0x16, ~-E, M */
dst.put(POS_SMALL);
}
// normalize abs(val) to determine E
int zerosBeforeFirstNonZero = abs.scale() - abs.precision();
int lengthToMoveRight =
zerosBeforeFirstNonZero % 2 == 0 ? zerosBeforeFirstNonZero : zerosBeforeFirstNonZero - 1;
int e = lengthToMoveRight / 2;
abs = abs.movePointRight(lengthToMoveRight);
putVaruint64(dst, e, !isNeg); // encode appropriate E value.
// encode M by peeling off centimal digits, encoding x as 2x+1
int startM = dst.getPosition();
encodeToCentimal(dst, abs);
// terminal digit should be 2x
a[offset + dst.getPosition() - 1] = (byte) (a[offset + dst.getPosition() - 1] & 0xfe);
if (isNeg) {
// negative values encoded as ~M
DESCENDING.apply(a, offset + startM, dst.getPosition() - startM);
}
return dst.getPosition() - start;
} | 3.68 |
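The E/M normalization above is easier to follow with concrete numbers. The following is a minimal, self-contained sketch (the class name is hypothetical, not part of HBase) that reproduces only the normalization arithmetic from the snippet and prints E and M for a few inputs; the varint and centimal byte-encoding steps are not reproduced.

```java
import java.math.BigDecimal;

// Hypothetical illustration class: mirrors only the E/M normalization step of
// encodeNumericSmall, where val = M * 100^E with M < 1.0 and E <= 0.
public class NumericSmallNormalizationSketch {

    static void normalize(BigDecimal val) {
        BigDecimal abs = val.abs();
        int zerosBeforeFirstNonZero = abs.scale() - abs.precision();
        int lengthToMoveRight =
            zerosBeforeFirstNonZero % 2 == 0 ? zerosBeforeFirstNonZero : zerosBeforeFirstNonZero - 1;
        int e = lengthToMoveRight / 2;                    // encoded as -E in the real code
        BigDecimal m = abs.movePointRight(lengthToMoveRight);
        System.out.printf("val=%s -> E=%d, M=%s%n", val, -e, m);
    }

    public static void main(String[] args) {
        normalize(new BigDecimal("0.05"));    // E=0,  M=0.05
        normalize(new BigDecimal("0.005"));   // E=-1, M=0.5
        normalize(new BigDecimal("0.0005"));  // E=-1, M=0.05
    }
}
```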
flink_StateBootstrapTransformation_getMaxParallelism | /** @return The max parallelism for this operator. */
int getMaxParallelism(int globalMaxParallelism) {
return operatorMaxParallelism.orElse(globalMaxParallelism);
} | 3.68 |
flink_ExceptionUtils_isDirectOutOfMemoryError | /**
* Checks whether the given exception indicates a JVM direct out-of-memory error.
*
* @param t The exception to check.
* @return True, if the exception is the direct {@link OutOfMemoryError}, false otherwise.
*/
public static boolean isDirectOutOfMemoryError(@Nullable Throwable t) {
return isOutOfMemoryErrorWithMessageContaining(t, "Direct buffer memory");
} | 3.68 |
hbase_HRegion_onConfigurationChange | /**
* {@inheritDoc}
*/
@Override
public void onConfigurationChange(Configuration conf) {
this.storeHotnessProtector.update(conf);
// update coprocessorHost if the configuration has changed.
if (
CoprocessorConfigurationUtil.checkConfigurationChange(getReadOnlyConfiguration(), conf,
CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY)
) {
LOG.info("Update the system coprocessors because the configuration has changed");
decorateRegionConfiguration(conf);
this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, conf);
}
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableBlockCacheCapacity | /** Enables monitoring of the block cache capacity. */
public void enableBlockCacheCapacity() {
this.properties.add(RocksDBProperty.BlockCacheCapacity.getRocksDBProperty());
} | 3.68 |
hbase_MasterObserver_postListSnapshot | /**
* Called after listSnapshots request has been processed.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescription of the snapshot to list
*/
default void postListSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot) throws IOException {
} | 3.68 |
flink_ExecutionConfig_getRegisteredKryoTypes | /** Returns the registered Kryo types. */
public LinkedHashSet<Class<?>> getRegisteredKryoTypes() {
if (isForceKryoEnabled()) {
// if we force kryo, we must also return all the types that
// were previously only registered as POJO
LinkedHashSet<Class<?>> result = new LinkedHashSet<>();
result.addAll(registeredKryoTypes);
for (Class<?> t : registeredPojoTypes) {
if (!result.contains(t)) {
result.add(t);
}
}
return result;
} else {
return registeredKryoTypes;
}
} | 3.68 |
flink_CheckpointedInputGate_getCheckpointStartDelayNanos | /**
* @return the time that elapsed, in nanoseconds, between the creation of the latest checkpoint
* and the time when its first {@link CheckpointBarrier} was received by this {@link
* InputGate}.
*/
@VisibleForTesting
long getCheckpointStartDelayNanos() {
return barrierHandler.getCheckpointStartDelayNanos();
} | 3.68 |
framework_AbstractMedia_setHtmlContentAllowed | /**
* Set whether the alternative text ({@link #setAltText(String)}) is
* rendered as HTML or not.
*
* @param htmlContentAllowed true to render the alternative text as HTML, false to render it as plain text
*/
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
getState().htmlContentAllowed = htmlContentAllowed;
} | 3.68 |
flink_PythonEnvironmentManagerUtils_pipInstallRequirements | /**
* Installs the 3rd party libraries listed in the user-provided requirements file. An optional
* requirements cached directory can be provided to support offline installation. In order not
* to pollute the public environment, the libraries will be installed to the specified
* directory, and added to the PYTHONPATH of the UDF workers.
*
* @param requirementsFilePath The path of the requirements file.
* @param requirementsCacheDir The path of the requirements cached directory.
* @param requirementsInstallDir The target directory of the installation.
* @param pythonExecutable The python interpreter used to launch the pip program.
* @param environmentVariables The environment variables used to launch the pip program.
*/
public static void pipInstallRequirements(
String requirementsFilePath,
@Nullable String requirementsCacheDir,
String requirementsInstallDir,
String pythonExecutable,
Map<String, String> environmentVariables)
throws IOException {
String sitePackagesPath =
getSitePackagesPath(requirementsInstallDir, pythonExecutable, environmentVariables);
String path = String.join(File.pathSeparator, requirementsInstallDir, "bin");
appendToEnvironmentVariable("PYTHONPATH", sitePackagesPath, environmentVariables);
appendToEnvironmentVariable("PATH", path, environmentVariables);
List<String> commands =
new ArrayList<>(
Arrays.asList(
pythonExecutable,
"-m",
"pip",
"install",
"--ignore-installed",
"-r",
requirementsFilePath,
"--prefix",
requirementsInstallDir));
if (requirementsCacheDir != null) {
commands.addAll(Arrays.asList("--no-index", "--find-links", requirementsCacheDir));
}
int retries = 0;
while (true) {
try {
execute(commands.toArray(new String[0]), environmentVariables, true);
break;
} catch (Throwable t) {
retries++;
if (retries < MAX_RETRY_TIMES) {
LOG.warn(
String.format(
"Pip install failed, retrying... (%d/%d)",
retries, MAX_RETRY_TIMES),
t);
} else {
LOG.error(
String.format(
"Pip install failed, already retried %d time...", retries));
throw new IOException(t);
}
}
}
} | 3.68 |
hbase_AbstractWALProvider_getSyncReplicationPeerIdFromWALName | /**
* <p>
* Returns the peer id if the wal file name is in the special group for a sync replication peer.
* </p>
* <p>
* The prefix format is <factoryId>-<ts>-<peerId>.
* </p>
*/
public static Optional<String> getSyncReplicationPeerIdFromWALName(String name) {
if (!name.endsWith(ReplicationUtils.SYNC_WAL_SUFFIX)) {
// fast path to return earlier if the name is not for a sync replication peer.
return Optional.empty();
}
String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(name);
Matcher matcher = LOG_PREFIX_PATTERN.matcher(logPrefix);
if (matcher.matches()) {
return Optional.of(matcher.group(1));
} else {
return Optional.empty();
}
} | 3.68 |
hadoop_SharedKeyCredentials_getCanonicalizedResource | /**
* Gets the canonicalized resource string for a Blob or Queue service request under the Shared Key Lite
* authentication scheme.
*
* @param address the resource URI.
* @param accountName the account name for the request.
* @return the canonicalized resource string.
*/
private static String getCanonicalizedResource(final URL address,
final String accountName) throws UnsupportedEncodingException {
// Resource path
final StringBuilder resourcepath = new StringBuilder(AbfsHttpConstants.FORWARD_SLASH);
resourcepath.append(accountName);
// Note that AbsolutePath starts with a '/'.
resourcepath.append(address.getPath());
final StringBuilder canonicalizedResource = new StringBuilder(resourcepath.toString());
// query parameters
if (address.getQuery() == null || !address.getQuery().contains(AbfsHttpConstants.EQUAL)) {
//no query params.
return canonicalizedResource.toString();
}
final Map<String, String[]> queryVariables = parseQueryString(address.getQuery());
final Map<String, String> lowercasedKeyNameValue = new HashMap<>();
for (final Entry<String, String[]> entry : queryVariables.entrySet()) {
// sort the value and organize it as comma separated values
final List<String> sortedValues = Arrays.asList(entry.getValue());
Collections.sort(sortedValues);
final StringBuilder stringValue = new StringBuilder();
for (final String value : sortedValues) {
if (stringValue.length() > 0) {
stringValue.append(AbfsHttpConstants.COMMA);
}
stringValue.append(value);
}
// key turns out to be null for ?a&b&c&d
lowercasedKeyNameValue.put((entry.getKey()) == null ? null
: entry.getKey().toLowerCase(Locale.ROOT), stringValue.toString());
}
final ArrayList<String> sortedKeys = new ArrayList<String>(lowercasedKeyNameValue.keySet());
Collections.sort(sortedKeys);
for (final String key : sortedKeys) {
final StringBuilder queryParamString = new StringBuilder();
queryParamString.append(key);
queryParamString.append(":");
queryParamString.append(lowercasedKeyNameValue.get(key));
appendCanonicalizedElement(canonicalizedResource, queryParamString.toString());
}
return canonicalizedResource.toString();
} | 3.68 |
hbase_Compactor_postCompactScannerOpen | /**
* Calls coprocessor, if any, to create scanners - after normal scanner creation.
* @param request Compaction request.
* @param scanType Scan type.
* @param scanner The default scanner created for compaction.
* @return Scanner scanner to use (usually the default); null if compaction should not proceed.
*/
private InternalScanner postCompactScannerOpen(CompactionRequestImpl request, ScanType scanType,
InternalScanner scanner, User user) throws IOException {
if (store.getCoprocessorHost() == null) {
return scanner;
}
return store.getCoprocessorHost().preCompact(store, scanner, scanType, request.getTracker(),
request, user);
} | 3.68 |
framework_StaticSection_setStyleName | /**
* Sets a custom style name for this cell.
*
* @param styleName
* the style name to set or null to not use any style name
*/
public void setStyleName(String styleName) {
cellState.styleName = styleName;
row.section.markAsDirty();
} | 3.68 |
hadoop_AbstractS3ACommitter_recoverTask | /**
* Task recovery considered Unsupported: Warn and fail.
* @param taskContext Context of the task whose output is being recovered
* @throws IOException always.
*/
@Override
public void recoverTask(TaskAttemptContext taskContext) throws IOException {
LOG.warn("Cannot recover task {}", taskContext.getTaskAttemptID());
throw new PathCommitException(outputPath,
String.format("Unable to recover task %s",
taskContext.getTaskAttemptID()));
} | 3.68 |
hadoop_ReencryptionHandler_processFileInode | /**
* Process an Inode for re-encryption. Add to current batch if it's a file,
* no-op otherwise.
*
* @param inode
* the inode
* @return true if inode is added to currentBatch and should be
* re-encrypted. false otherwise: could be inode is not a file, or
* inode's edek's key version is not changed.
* @throws IOException
* @throws InterruptedException
*/
@Override
public boolean processFileInode(INode inode, TraverseInfo traverseInfo)
throws IOException, InterruptedException {
assert dir.hasReadLock();
if (LOG.isTraceEnabled()) {
LOG.trace("Processing {} for re-encryption", inode.getFullPathName());
}
if (!inode.isFile()) {
return false;
}
FileEncryptionInfo feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(
dir, INodesInPath.fromINode(inode));
if (feInfo == null) {
LOG.warn("File {} skipped re-encryption because it is not encrypted! "
+ "This is very likely a bug.", inode.getId());
return false;
}
if (traverseInfo instanceof ZoneTraverseInfo
&& ((ZoneTraverseInfo) traverseInfo).getEzKeyVerName().equals(
feInfo.getEzKeyVersionName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("File {} skipped re-encryption because edek's key version"
+ " name is not changed.", inode.getFullPathName());
}
return false;
}
currentBatch.add(inode.asFile());
return true;
} | 3.68 |
hadoop_NFS3Request_readHandle | /**
* Deserialize a handle from an XDR object
*/
static FileHandle readHandle(XDR xdr) throws IOException {
FileHandle handle = new FileHandle();
if (!handle.deserialize(xdr)) {
throw new IOException("can't deserialize file handle");
}
return handle;
} | 3.68 |
dubbo_AbstractConditionMatcher_doPatternMatch | // range, equal or other methods
protected boolean doPatternMatch(
String pattern, String value, URL url, Invocation invocation, boolean isWhenCondition) {
for (ValuePattern valueMatcher : valueMatchers) {
if (valueMatcher.shouldMatch(pattern)) {
return valueMatcher.match(pattern, value, url, invocation, isWhenCondition);
}
}
// this should never happen.
logger.error(
CLUSTER_FAILED_EXEC_CONDITION_ROUTER,
"Executing condition rule value match expression error.",
"pattern is " + pattern + ", value is " + value + ", condition type "
+ (isWhenCondition ? "when" : "then"),
"There should at least has one ValueMatcher instance that applies to all patterns, will force to use wildcard matcher now.");
ValuePattern paramValueMatcher =
model.getExtensionLoader(ValuePattern.class).getExtension("wildcard");
return paramValueMatcher.match(pattern, value, url, invocation, isWhenCondition);
} | 3.68 |
pulsar_SchemaDefinitionImpl_getProperties | /**
* Get the schema properties.
*
* @return an unmodifiable map of the schema properties
*/
public Map<String, String> getProperties() {
return Collections.unmodifiableMap(properties);
} | 3.68 |
morf_MergingDatabaseDataSetConsumer_table | /**
* @see DataSetConsumer#table(Table, Iterable)
*/
@Override
public void table(Table table, Iterable<Record> records) {
TableLoader.builder()
.withConnection(connection)
.withSqlScriptExecutor(sqlExecutor)
.withDialect(sqlDialect)
.explicitCommit(true)
.merge(true)
.insertingWithPresetAutonums()
.forTable(table)
.load(records);
} | 3.68 |
flink_ChannelStatePersister_parseEvent | /**
* Parses the buffer as an event and returns the {@link CheckpointBarrier} if the event is
* indeed a barrier or returns null in all other cases.
*/
@Nullable
protected AbstractEvent parseEvent(Buffer buffer) throws IOException {
if (buffer.isBuffer()) {
return null;
} else {
AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
// reset the buffer because it would be deserialized again in SingleInputGate while
// getting next buffer.
// we can further improve to avoid double deserialization in the future.
buffer.setReaderIndex(0);
return event;
}
} | 3.68 |
hbase_ScannerModel_hasEndRow | /** Returns true if an end row was specified */
public boolean hasEndRow() {
return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW);
} | 3.68 |
flink_MetricStore_addAll | /**
* Add metric dumps to the store.
*
* @param metricDumps the metric dumps to add.
*/
synchronized void addAll(List<MetricDump> metricDumps) {
for (MetricDump metric : metricDumps) {
add(metric);
}
} | 3.68 |
flink_ExternalPythonKeyedCoProcessOperator_processTimer | /**
* Responsible for sending timer data to the Python worker when a registered timer fires. The
* input data is a Row containing 4 fields: the timer flag (0 for processing time, 1 for event
* time), the timestamp of the fired timer, the current watermark, and the key of the timer.
*
* @param timeDomain The type of the timer.
* @param timer The fired timer.
* @throws Exception The runnerInputSerializer might throw exception.
*/
private void processTimer(TimeDomain timeDomain, InternalTimer<Row, VoidNamespace> timer)
throws Exception {
Row timerData =
timerHandler.buildTimerData(
timeDomain,
internalTimerService.currentWatermark(),
timer.getTimestamp(),
timer.getKey(),
null);
timerDataSerializer.serialize(timerData, baosWrapper);
pythonFunctionRunner.processTimer(baos.toByteArray());
baos.reset();
elementCount++;
checkInvokeFinishBundleByCount();
emitResults();
} | 3.68 |
querydsl_BooleanBuilder_andNot | /**
* Create the conjunction of this predicate and the negation of the given predicate
*
* @param right predicate to be negated
* @return the current object
*/
public BooleanBuilder andNot(Predicate right) {
return and(right.not());
} | 3.68 |
dubbo_DynamicConfiguration_publishConfig | /**
* Publish Config mapped to the given key and the given group.
*
* @param key the key to represent a configuration
* @param group the group where the key belongs to
* @param content the content of configuration
* @return <code>true</code> if successful, or <code>false</code> otherwise
* @throws UnsupportedOperationException If the under layer does not support
* @since 2.7.5
*/
default boolean publishConfig(String key, String group, String content) throws UnsupportedOperationException {
return false;
} | 3.68 |
framework_ApplicationConnection_updateComponent | /**
* Method provided for backwards compatibility. Duties previously done by
* this method are now handled by the state change event handler in
* AbstractComponentConnector. The only function this method has is to
* return true if the UIDL is a "cached" update.
*
* @param component
* @param uidl
* @param manageCaption
* @deprecated As of 7.0, no longer serves any purpose
* @return
*/
@Deprecated
public boolean updateComponent(Widget component, UIDL uidl,
boolean manageCaption) {
ComponentConnector connector = getConnectorMap()
.getConnector(component);
if (!AbstractComponentConnector.isRealUpdate(uidl)) {
return true;
}
if (!manageCaption) {
getLogger().warning(Util.getConnectorString(connector)
+ " called updateComponent with manageCaption=false. The parameter was ignored - override delegateCaption() to return false instead. It is however not recommended to use caption this way at all.");
}
return false;
} | 3.68 |
framework_Window_setPositionY | /**
* Sets the distance of the Window's top border in pixels from the top border of
* the containing (main) window. Has effect only in {@link WindowMode#NORMAL}
* mode.
*
* @param positionY
* the distance of the Window's top border in pixels from the top border
* of the containing (main) window, or -1 if unspecified
*
* @since 4.0.0
*/
public void setPositionY(int positionY) {
getState().positionY = positionY;
getState().centered = false;
} | 3.68 |
pulsar_KerberosName_getServiceName | /**
* Get the first component of the name.
* @return the first section of the Kerberos principal name
*/
public String getServiceName() {
return serviceName;
} | 3.68 |
hbase_RegionModeStrategy_selectModeFieldsAndAddCountField | /**
* Forms a new record list containing only the fields provided through fieldInfos, and adds a
* count field with value 1 to each record. Selecting and adding the new field are done in one
* pass to save the CPU cycles of rebuilding each record again.
* @param fieldInfos List of FieldInfos required in the record
* @param records List of records which needs to be processed
* @param countField Field which needs to be added with value 1 for each record
* @return records after selecting required fields and adding count field
*/
List<Record> selectModeFieldsAndAddCountField(List<FieldInfo> fieldInfos, List<Record> records,
Field countField) {
return records.stream().map(
record -> Record.ofEntries(fieldInfos.stream().filter(fi -> record.containsKey(fi.getField()))
.map(fi -> Record.entry(fi.getField(), record.get(fi.getField())))))
.map(record -> Record.builder().putAll(record).put(countField, 1).build())
.collect(Collectors.toList());
} | 3.68 |
dubbo_Bytes_bytes2float | /**
* to float.
*
* @param b byte array.
* @param off offset.
* @return float.
*/
public static float bytes2float(byte[] b, int off) {
int i = ((b[off + 3] & 0xFF) << 0)
+ ((b[off + 2] & 0xFF) << 8)
+ ((b[off + 1] & 0xFF) << 16)
+ ((b[off + 0]) << 24);
return Float.intBitsToFloat(i);
} | 3.68 |
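Since the Javadoc above was mislabeled ("int"), a quick round-trip check makes the assumed big-endian byte order explicit. This is a standalone sketch with a hypothetical class name, not the Dubbo Bytes class; the conversion body mirrors the snippet.

```java
// Hypothetical sketch demonstrating the big-endian layout assumed by bytes2float:
// b[off] is the most significant byte of the IEEE-754 bit pattern.
public class Bytes2FloatSketch {

    static float bytes2float(byte[] b, int off) {
        int i = ((b[off + 3] & 0xFF) << 0)
                + ((b[off + 2] & 0xFF) << 8)
                + ((b[off + 1] & 0xFF) << 16)
                + ((b[off + 0]) << 24);
        return Float.intBitsToFloat(i);
    }

    public static void main(String[] args) {
        int bits = Float.floatToIntBits(1.5f);          // 0x3FC00000
        byte[] buf = {
            (byte) (bits >>> 24), (byte) (bits >>> 16), (byte) (bits >>> 8), (byte) bits
        };
        System.out.println(bytes2float(buf, 0));        // prints 1.5
    }
}
```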
flink_FlinkRexBuilder_makeZeroLiteral | /**
* Creates a literal of the default value for the given type.
*
* <p>This value is:
*
* <ul>
* <li>0 for numeric types;
* <li>FALSE for BOOLEAN;
* <li>The epoch for TIMESTAMP and DATE;
* <li>Midnight for TIME;
* <li>The empty string for string types (CHAR, BINARY, VARCHAR, VARBINARY).
* </ul>
*
* <p>Uses '1970-01-01 00:00:00' (epoch second 0) as the zero value for TIMESTAMP_LTZ; the zero
* value '0000-00-00 00:00:00' used by Calcite is an invalid time whose month and day are
* invalid, so we work around it here. Stop overriding once CALCITE-4555 is fixed.
*
* @param type Type
* @return Simple literal, or cast simple literal
*/
@Override
public RexLiteral makeZeroLiteral(RelDataType type) {
switch (type.getSqlTypeName()) {
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return makeLiteral(new TimestampString(1970, 1, 1, 0, 0, 0), type);
default:
return super.makeZeroLiteral(type);
}
} | 3.68 |
hadoop_BlockBlobInputStream_resetStreamBuffer | /**
* Reset the internal stream buffer but do not release the memory.
* The buffer can be reused to avoid frequent memory allocations of
* a large buffer.
*/
private void resetStreamBuffer() {
streamBufferPosition = 0;
streamBufferLength = 0;
} | 3.68 |
hbase_TableSchemaModel___setIsMeta | /**
* @param value desired value of IS_META attribute
*/
public void __setIsMeta(boolean value) {
attrs.put(IS_META, Boolean.toString(value));
} | 3.68 |
hudi_SparkUtil_getDefaultConf | /**
* Get the default spark configuration.
*
* @param appName - Spark application name
* @param sparkMaster - Spark master node name
* @return Spark configuration
*/
public static SparkConf getDefaultConf(final String appName, final Option<String> sparkMaster) {
final Properties properties = System.getProperties();
SparkConf sparkConf = new SparkConf().setAppName(appName);
// Configure the sparkMaster
String sparkMasterNode = DEFAULT_SPARK_MASTER;
if (properties.getProperty(HoodieCliSparkConfig.CLI_SPARK_MASTER) != null) {
sparkMasterNode = properties.getProperty(HoodieCliSparkConfig.CLI_SPARK_MASTER);
}
if (sparkMaster.isPresent() && !sparkMaster.get().trim().isEmpty()) {
sparkMasterNode = sparkMaster.orElse(sparkMasterNode);
}
sparkConf.setMaster(sparkMasterNode);
// Configure driver
sparkConf.set(HoodieCliSparkConfig.CLI_DRIVER_MAX_RESULT_SIZE, "2g");
sparkConf.set(HoodieCliSparkConfig.CLI_EVENT_LOG_OVERWRITE, "true");
sparkConf.set(HoodieCliSparkConfig.CLI_EVENT_LOG_ENABLED, "false");
sparkConf.set(HoodieCliSparkConfig.CLI_SERIALIZER, "org.apache.spark.serializer.KryoSerializer");
sparkConf.set("spark.kryo.registrator", "org.apache.spark.HoodieSparkKryoRegistrar");
// Configure hadoop conf
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESS, "true");
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESSION_CODEC, "true");
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESSION_CODEC, "org.apache.hadoop.io.compress.GzipCodec");
sparkConf.set(HoodieCliSparkConfig.CLI_MAPRED_OUTPUT_COMPRESSION_TYPE, "BLOCK");
return sparkConf;
} | 3.68 |
hmily_SchemaCache_getInstance | /**
* Gets instance.
*
* @return the instance
*/
protected static SchemaCache getInstance() {
return SchemaCacheHolder.cache;
} | 3.68 |
pulsar_AuthenticationProviderSasl_getAuthState | // return authState if it is in cache.
private AuthenticationState getAuthState(HttpServletRequest request) {
String id = request.getHeader(SASL_STATE_SERVER);
if (id == null) {
return null;
}
try {
return authStates.getIfPresent(Long.parseLong(id));
} catch (NumberFormatException e) {
log.error("[{}] Wrong Id String in Token {}. e:", request.getRequestURI(),
id, e);
return null;
}
} | 3.68 |
dubbo_RpcContext_getLocalAddress | /**
* get local address.
*
* @return local address
*/
public InetSocketAddress getLocalAddress() {
return newRpcContext.getLocalAddress();
} | 3.68 |
hadoop_CacheDirectiveStats_setFilesCached | /**
* Sets the files cached by this directive.
*
* @param filesCached The number of files cached.
* @return This builder, for call chaining.
*/
public Builder setFilesCached(long filesCached) {
this.filesCached = filesCached;
return this;
} | 3.68 |
hadoop_TaskAttemptsInfo_getTaskAttempts | // XmlElementRef annotation should be used to identify the exact type of a list element
// otherwise metadata will be added to XML attributes,
// which can lead to incorrect JSON marshaling
@XmlElementRef
public List<TaskAttemptInfo> getTaskAttempts() {
return taskAttempts;
} | 3.68 |
dubbo_RestRPCInvocationUtil_createBaseRpcInvocation | /**
* build RpcInvocation
*
* @param request
* @param restMethodMetadata
* @return
*/
public static RpcInvocation createBaseRpcInvocation(RequestFacade request, RestMethodMetadata restMethodMetadata) {
RpcInvocation rpcInvocation = new RpcInvocation();
rpcInvocation.setParameterTypes(restMethodMetadata.getReflectMethod().getParameterTypes());
rpcInvocation.setReturnType(restMethodMetadata.getReflectMethod().getReturnType());
rpcInvocation.setMethodName(restMethodMetadata.getMethod().getName());
// TODO: set protocolServiceKey, but there is no setter method for it yet
HttpHeaderUtil.parseRequest(rpcInvocation, request);
String serviceKey = BaseServiceMetadata.buildServiceKey(
request.getHeader(RestHeaderEnum.PATH.getHeader()),
request.getHeader(RestHeaderEnum.GROUP.getHeader()),
request.getHeader(RestHeaderEnum.VERSION.getHeader()));
rpcInvocation.setTargetServiceUniqueName(serviceKey);
return rpcInvocation;
} | 3.68 |
flink_DataStreamAllroundTestJobFactory_createEventSource | /**
* @deprecated This method relies on the {@link
* org.apache.flink.streaming.api.functions.source.SourceFunction} API, which is due to be
* removed. Use the new {@link org.apache.flink.api.connector.source.Source} API instead.
*/
@Deprecated
static SourceFunction<Event> createEventSource(ParameterTool pt) {
return new SequenceGeneratorSource(
pt.getInt(
SEQUENCE_GENERATOR_SRC_KEYSPACE.key(),
SEQUENCE_GENERATOR_SRC_KEYSPACE.defaultValue()),
pt.getInt(
SEQUENCE_GENERATOR_SRC_PAYLOAD_SIZE.key(),
SEQUENCE_GENERATOR_SRC_PAYLOAD_SIZE.defaultValue()),
pt.getLong(
SEQUENCE_GENERATOR_SRC_EVENT_TIME_MAX_OUT_OF_ORDERNESS.key(),
SEQUENCE_GENERATOR_SRC_EVENT_TIME_MAX_OUT_OF_ORDERNESS.defaultValue()),
pt.getLong(
SEQUENCE_GENERATOR_SRC_EVENT_TIME_CLOCK_PROGRESS.key(),
SEQUENCE_GENERATOR_SRC_EVENT_TIME_CLOCK_PROGRESS.defaultValue()),
pt.getLong(
SEQUENCE_GENERATOR_SRC_SLEEP_TIME.key(),
SEQUENCE_GENERATOR_SRC_SLEEP_TIME.defaultValue()),
pt.getLong(
SEQUENCE_GENERATOR_SRC_SLEEP_AFTER_ELEMENTS.key(),
SEQUENCE_GENERATOR_SRC_SLEEP_AFTER_ELEMENTS.defaultValue()));
} | 3.68 |
flink_KeyGroupRangeAssignment_computeOperatorIndexForKeyGroup | /**
* Computes the index of the operator to which a key-group belongs under the given parallelism
* and maximum parallelism.
*
* <p>IMPORTANT: maxParallelism must be <= Short.MAX_VALUE + 1 to avoid rounding problems in
* this method. If we ever want to go beyond this boundary, this method must perform arithmetic
* on long values.
*
* @param maxParallelism Maximal parallelism that the job was initially created with. 0 <
* parallelism <= maxParallelism <= Short.MAX_VALUE + 1 must hold.
* @param parallelism The current parallelism under which the job runs. Must be <=
* maxParallelism.
* @param keyGroupId Id of a key-group. 0 <= keyGroupID < maxParallelism.
* @return The index of the operator to which elements from the given key-group should be routed
* under the given parallelism and maxParallelism.
*/
public static int computeOperatorIndexForKeyGroup(
int maxParallelism, int parallelism, int keyGroupId) {
return keyGroupId * parallelism / maxParallelism;
} | 3.68 |
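A small worked example may help here. The sketch below (hypothetical class name, not part of Flink) re-states the one-line formula from the snippet and shows how key-groups spread over operators for maxParallelism = 128 and parallelism = 4.

```java
// Hypothetical illustration of keyGroupId * parallelism / maxParallelism.
public class KeyGroupAssignmentSketch {

    static int computeOperatorIndexForKeyGroup(int maxParallelism, int parallelism, int keyGroupId) {
        return keyGroupId * parallelism / maxParallelism;
    }

    public static void main(String[] args) {
        int maxParallelism = 128;
        int parallelism = 4;
        // key-groups 0..31 -> operator 0, 32..63 -> 1, 64..95 -> 2, 96..127 -> 3
        for (int keyGroupId : new int[] {0, 31, 32, 64, 127}) {
            System.out.printf("key-group %3d -> operator %d%n", keyGroupId,
                computeOperatorIndexForKeyGroup(maxParallelism, parallelism, keyGroupId));
        }
    }
}
```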
AreaShop_WorldEditSelection_getLength | /**
* Get Z-size.
*
* @return length
*/
public int getLength() {
return maximum.getBlockZ() - minimum.getBlockZ() + 1;
} | 3.68 |
flink_KubernetesUtils_createJobGraphStateHandleStore | /**
* Create a {@link KubernetesStateHandleStore} which storing {@link JobGraph}.
*
* @param configuration configuration to build a RetrievableStateStorageHelper
* @param flinkKubeClient flink kubernetes client
* @param configMapName ConfigMap name
* @param lockIdentity lock identity to check the leadership
* @return a {@link KubernetesStateHandleStore} which storing {@link JobGraph}.
* @throws Exception if creating the storage helper fails
*/
public static KubernetesStateHandleStore<JobGraph> createJobGraphStateHandleStore(
Configuration configuration,
FlinkKubeClient flinkKubeClient,
String configMapName,
String lockIdentity)
throws Exception {
final RetrievableStateStorageHelper<JobGraph> stateStorage =
new FileSystemStateStorageHelper<>(
HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(
configuration),
SUBMITTED_JOBGRAPH_FILE_PREFIX);
return new KubernetesStateHandleStore<>(
flinkKubeClient,
configMapName,
stateStorage,
k -> k.startsWith(JOB_GRAPH_STORE_KEY_PREFIX),
lockIdentity);
} | 3.68 |
hbase_MetricsSink_getStartTimestamp | /**
* Gets the time stamp from when the Sink was initialized.
*/
public long getStartTimestamp() {
return this.startTimestamp;
} | 3.68 |
hadoop_ContainerContext_getUser | /**
* Get user of the container being initialized or stopped.
*
* @return the user
*/
public String getUser() {
return user;
} | 3.68 |
graphhopper_GraphHopper_setUrbanDensityCalculation | /**
* Configures the urban density classification. Each edge will be classified as 'rural', 'residential' or 'city', see {@link UrbanDensity}
*
* @param residentialAreaRadius in meters. The higher this value the longer the calculation will take and the bigger the area for
* which the road density used to identify residential areas is calculated.
* @param residentialAreaSensitivity Use this to find a trade-off between too many roads being classified as residential (too high
* values) and not enough roads being classified as residential (too small values)
* @param cityAreaRadius in meters. The higher this value the longer the calculation will take and the bigger the area for
* which the road density used to identify city areas is calculated. Set this to zero
* to skip the city classification.
* @param cityAreaSensitivity Use this to find a trade-off between too many roads being classified as city (too high values)
* and not enough roads being classified as city (too small values)
* @param threads the number of threads used for the calculation. If this is zero the urban density
* calculation is skipped entirely
*/
public GraphHopper setUrbanDensityCalculation(double residentialAreaRadius, double residentialAreaSensitivity,
double cityAreaRadius, double cityAreaSensitivity, int threads) {
ensureNotLoaded();
this.residentialAreaRadius = residentialAreaRadius;
this.residentialAreaSensitivity = residentialAreaSensitivity;
this.cityAreaRadius = cityAreaRadius;
this.cityAreaSensitivity = cityAreaSensitivity;
this.urbanDensityCalculationThreads = threads;
return this;
} | 3.68 |
hbase_NamespaceDescriptor_removeConfiguration | /**
* Remove a config setting represented by the key from the {@link #configuration} map
*/
public void removeConfiguration(final String key) {
configuration.remove(key);
} | 3.68 |
querydsl_DateTimeExpression_week | /**
* Create a week expression
*
* @return week
*/
public NumberExpression<Integer> week() {
if (week == null) {
week = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.WEEK, mixin);
}
return week;
} | 3.68 |
framework_VTabsheet_selectTab | /**
* Selects the indicated tab, deselects the previously selected tab, and
* updates the style names, tabulator indices, and the
* {@code aria-selected} roles to match. Also recalculates the tab
* caption widths in case the addition or removal of the selection style
* changed them, and schedules a scroll for moving the newly selected
* tab into view (at the end of the event loop to allow for layouting).
* If the previously selected item is the same as the new one, nothing
* is done.
*
* @param index
* the index of the tab to select
*
* @see Tab#setTabulatorIndex(int)
*/
public void selectTab(int index) {
final Tab newSelected = getTab(index);
final Tab oldSelected = selected;
if (oldSelected == newSelected) {
return;
}
newSelected.setStyleNames(true, isFirstVisibleTabClient(index),
true);
newSelected.setTabulatorIndex(getTabsheet().tabulatorIndex);
Roles.getTabRole().setAriaSelectedState(newSelected.getElement(),
SelectedValue.TRUE);
if (oldSelected != null) {
oldSelected.setStyleNames(false,
isFirstVisibleTabClient(getWidgetIndex(oldSelected)));
oldSelected.setTabulatorIndex(-1);
Roles.getTabRole().setAriaSelectedState(
oldSelected.getElement(), SelectedValue.FALSE);
// The unselected tab might need less (or more) space
oldSelected.recalculateCaptionWidth();
}
// Update the field holding the currently selected tab
selected = newSelected;
// The selected tab might need more (or less) space
newSelected.recalculateCaptionWidth();
// Scroll the tab into view if it is not already, after layout
Scheduler.get().scheduleFinally(() -> getTabsheet()
.scrollIntoView(getTab(tabsheet.activeTabIndex)));
} | 3.68 |
hudi_HoodieTableMetadataUtil_createIndexInitTimestamp | /**
* Create the timestamp for an index initialization operation on the metadata table.
* <p>
* Since many MDT partitions can be initialized one after another, the offset parameter is used
* to generate a unique timestamp for each.
*/
public static String createIndexInitTimestamp(String timestamp, int offset) {
return String.format("%s%03d", timestamp, PARTITION_INITIALIZATION_TIME_SUFFIX + offset);
} | 3.68 |
flink_MasterHooks_reset | /**
* Resets the master hooks.
*
* @param hooks The hooks to reset
* @throws FlinkException Thrown, if the hooks throw an exception.
*/
public static void reset(
final Collection<MasterTriggerRestoreHook<?>> hooks,
@SuppressWarnings("unused") final Logger log)
throws FlinkException {
for (MasterTriggerRestoreHook<?> hook : hooks) {
final String id = hook.getIdentifier();
try {
hook.reset();
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
throw new FlinkException(
"Error while resetting checkpoint master hook '" + id + '\'', t);
}
}
} | 3.68 |
flink_TimestampData_fromEpochMillis | /**
* Creates an instance of {@link TimestampData} from milliseconds and a nanos-of-millisecond.
*
* @param milliseconds the number of milliseconds since {@code 1970-01-01 00:00:00}; a negative
* number is the number of milliseconds before {@code 1970-01-01 00:00:00}
* @param nanosOfMillisecond the nanoseconds within the millisecond, from 0 to 999,999
*/
public static TimestampData fromEpochMillis(long milliseconds, int nanosOfMillisecond) {
return new TimestampData(milliseconds, nanosOfMillisecond);
} | 3.68 |
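When the timestamp originates from a java.time.Instant, the two arguments can be derived with the JDK alone. The sketch below is hypothetical helper code, not part of Flink; only the commented-out final call refers to the factory method documented above.

```java
import java.time.Instant;

// Hypothetical sketch: deriving (milliseconds, nanosOfMillisecond) from an Instant.
public class TimestampArgsSketch {
    public static void main(String[] args) {
        Instant instant = Instant.parse("2024-01-01T00:00:00.123456789Z");
        long milliseconds = instant.toEpochMilli();               // millis since epoch (ends in ...123)
        int nanosOfMillisecond = instant.getNano() % 1_000_000;   // 456789, always in 0..999999
        System.out.println(milliseconds + " ms + " + nanosOfMillisecond + " ns");
        // TimestampData ts = TimestampData.fromEpochMillis(milliseconds, nanosOfMillisecond);
    }
}
```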
querydsl_SQLMergeClause_executeWithKeys | /**
* Execute the clause and return the generated keys as a ResultSet
*
* @return result set with generated keys
*/
public ResultSet executeWithKeys() {
context = startContext(connection(), metadata, entity);
try {
if (configuration.getTemplates().isNativeMerge()) {
PreparedStatement stmt = null;
if (batches.isEmpty()) {
stmt = createStatement(true);
listeners.notifyMerge(entity, metadata, keys, columns, values, subQuery);
listeners.preExecute(context);
stmt.executeUpdate();
listeners.executed(context);
} else {
Collection<PreparedStatement> stmts = createStatements(true);
if (stmts != null && stmts.size() > 1) {
throw new IllegalStateException("executeWithKeys called with batch statement and multiple SQL strings");
}
stmt = stmts.iterator().next();
listeners.notifyMerges(entity, metadata, batches);
listeners.preExecute(context);
stmt.executeBatch();
listeners.executed(context);
}
final Statement stmt2 = stmt;
ResultSet rs = stmt.getGeneratedKeys();
return new ResultSetAdapter(rs) {
@Override
public void close() throws SQLException {
try {
super.close();
} finally {
stmt2.close();
reset();
endContext(context);
}
}
};
} else {
if (hasRow()) {
// update
SQLUpdateClause update = new SQLUpdateClause(connection(), configuration, entity);
update.addListener(listeners);
populate(update);
addKeyConditions(update);
reset();
endContext(context);
return EmptyResultSet.DEFAULT;
} else {
// insert
SQLInsertClause insert = new SQLInsertClause(connection(), configuration, entity);
insert.addListener(listeners);
populate(insert);
return insert.executeWithKeys();
}
}
} catch (SQLException e) {
onException(context,e);
reset();
endContext(context);
throw configuration.translate(queryString, constants, e);
}
} | 3.68 |
hadoop_NamenodeStatusReport_getScheduledReplicationBlocks | /**
* Blocks scheduled for replication.
*
* @return - num of blocks scheduled for replication
*/
public long getScheduledReplicationBlocks() {
return this.scheduledReplicationBlocks;
} | 3.68 |
dubbo_AbstractConfigManager_isNeedValidation | /**
* The component configuration that does not affect the main process does not need to be verified.
*
* @param config
* @param <T>
* @return
*/
protected <T extends AbstractConfig> boolean isNeedValidation(T config) {
if (config instanceof MetadataReportConfig) {
return false;
}
return true;
} | 3.68 |
hbase_ByteBufferUtils_putShort | /**
* Put a short value out to the given ByteBuffer's current position in big-endian format. This
* also advances the position in buffer by short size.
* @param buffer the ByteBuffer to write to
* @param val short to write out
*/
public static void putShort(ByteBuffer buffer, short val) {
ConverterHolder.BEST_CONVERTER.putShort(buffer, val);
} | 3.68 |
hadoop_DockerClientConfigHandler_getCredentialsFromTokensByteBuffer | /**
* Convert the Token ByteBuffer to the appropriate Credentials object.
*
* @param tokens the Tokens from the ContainerLaunchContext.
* @return the Credentials object populated from the Tokens.
* @throws IOException if an I/O error occurs.
*/
public static Credentials getCredentialsFromTokensByteBuffer(
ByteBuffer tokens) throws IOException {
Credentials credentials = new Credentials();
DataInputByteBuffer dibb = new DataInputByteBuffer();
tokens.rewind();
dibb.reset(tokens);
credentials.readTokenStorageStream(dibb);
tokens.rewind();
if (LOG.isDebugEnabled()) {
for (Token token : credentials.getAllTokens()) {
LOG.debug("Token read from token storage: {}", token);
}
}
return credentials;
} | 3.68 |
rocketmq-connect_ColumnDefinition_isCurrency | /**
* Indicates whether the column is a cash value.
*
* @return <code>true</code> if so; <code>false</code> otherwise
*/
public boolean isCurrency() {
return currency;
} | 3.68 |
hudi_OptionsResolver_isConsistentHashingBucketIndexType | /**
* Returns whether the table index is consistent bucket index.
*/
public static boolean isConsistentHashingBucketIndexType(Configuration conf) {
return isBucketIndexType(conf) && getBucketEngineType(conf).equals(HoodieIndex.BucketIndexEngineType.CONSISTENT_HASHING);
} | 3.68 |
framework_TypeDataStore_getProperties | /**
* @param type
* @return
* @throws NoDataException
*
* @deprecated As of 7.0.1, use {@link #getPropertiesAsArray(Type)} instead
* for improved performance
*/
@Deprecated
public static Collection<Property> getProperties(Type type)
throws NoDataException {
JsArrayObject<Property> propertiesArray = getPropertiesAsArray(type);
int size = propertiesArray.size();
List<Property> properties = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
properties.add(propertiesArray.get(i));
}
return properties;
} | 3.68 |
hbase_HMaster_executeRegionPlansWithThrottling | /**
* Execute region plans with throttling
* @param plans to execute
* @return succeeded plans
*/
public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
List<RegionPlan> successRegionPlans = new ArrayList<>();
int maxRegionsInTransition = getMaxRegionsInTransition();
long balanceStartTime = EnvironmentEdgeManager.currentTime();
long cutoffTime = balanceStartTime + this.maxBalancingTime;
int rpCount = 0; // number of RegionPlans balanced so far
if (plans != null && !plans.isEmpty()) {
int balanceInterval = this.maxBalancingTime / plans.size();
LOG.info(
"Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval
+ " ms, and the max number regions in transition is " + maxRegionsInTransition);
for (RegionPlan plan : plans) {
LOG.info("balance " + plan);
// TODO: bulk assign
try {
this.assignmentManager.balance(plan);
} catch (HBaseIOException hioe) {
// should ignore failed plans here, avoiding the whole balance plans be aborted
// later calls of balance() can fetch up the failed and skipped plans
LOG.warn("Failed balance plan {}, skipping...", plan, hioe);
}
// rpCount records balance plans processed, does not care if a plan succeeds
rpCount++;
successRegionPlans.add(plan);
if (this.maxBalancingTime > 0) {
balanceThrottling(balanceStartTime + rpCount * balanceInterval, maxRegionsInTransition,
cutoffTime);
}
// if performing next balance exceeds cutoff time, exit the loop
if (
this.maxBalancingTime > 0 && rpCount < plans.size()
&& EnvironmentEdgeManager.currentTime() > cutoffTime
) {
// TODO: After balance, there should not be a cutoff time (keeping it as
// a security net for now)
LOG.debug(
"No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime);
break;
}
}
}
LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration()
.getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD));
return successRegionPlans;
} | 3.68 |
flink_MapView_getMap | /** Returns the entire view's content as an instance of {@link Map}. */
public Map<K, V> getMap() {
return map;
} | 3.68 |
hadoop_SQLDelegationTokenSecretManager_incrementCurrentKeyId | /**
* Obtains the next available delegation key id that can be allocated to a DelegationKey.
* Delegation key ids need to be reserved using the shared delegationKeyIdCounter,
* which handles keyId allocation concurrently with other secret managers.
* @return Next available delegation key id.
*/
@Override
public int incrementCurrentKeyId() {
try {
return incrementKeyId(1) + 1;
} catch (SQLException e) {
throw new RuntimeException(
"Failed to increment delegation key id in SQL secret manager", e);
}
} | 3.68 |
pulsar_MessageDeduplication_replayCursor | /**
* Read all the entries published from the cursor position until the most recent and update the highest sequence id
* from each producer.
*
* @param future future to trigger when the replay is complete
*/
private void replayCursor(CompletableFuture<Void> future) {
managedCursor.asyncReadEntries(100, new ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
for (Entry entry : entries) {
ByteBuf messageMetadataAndPayload = entry.getDataBuffer();
MessageMetadata md = Commands.parseMessageMetadata(messageMetadataAndPayload);
String producerName = md.getProducerName();
long sequenceId = Math.max(md.getHighestSequenceId(), md.getSequenceId());
highestSequencedPushed.put(producerName, sequenceId);
highestSequencedPersisted.put(producerName, sequenceId);
producerRemoved(producerName);
entry.release();
}
if (managedCursor.hasMoreEntries()) {
// Read next batch of entries
pulsar.getExecutor().execute(() -> replayCursor(future));
} else {
// Done replaying
future.complete(null);
}
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
future.completeExceptionally(exception);
}
}, null, PositionImpl.LATEST);
} | 3.68 |
hudi_BaseHoodieTableServiceClient_inlineScheduleCompaction | /**
* Schedules compaction inline.
*
* @param extraMetadata extra metadata to be used.
* @return compaction instant if scheduled.
*/
protected Option<String> inlineScheduleCompaction(Option<Map<String, String>> extraMetadata) {
return scheduleCompaction(extraMetadata);
} | 3.68 |
hadoop_LongLong_shiftRight | /** Shift right operation (>>). */
long shiftRight(int n) {
return (d1 << (BITS_PER_LONG - n)) + (d0 >>> n);
} | 3.68 |
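The two-word composition is easier to see on a concrete value. The sketch below is a hypothetical standalone class that assumes BITS_PER_LONG == 64 (as the surrounding class implies) and checks one case.

```java
// Hypothetical sketch: low 64 bits of the 128-bit value (high word d1, low word d0)
// shifted right by n, assuming 0 < n < 64.
public class LongLongShiftSketch {

    static long shiftRight(long d1, long d0, int n) {
        return (d1 << (64 - n)) + (d0 >>> n);
    }

    public static void main(String[] args) {
        // 2^64 (d1 = 1, d0 = 0) shifted right by 1 is 2^63: low word 0x8000000000000000.
        System.out.println(Long.toHexString(shiftRight(1L, 0L, 1))); // 8000000000000000
    }
}
```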
flink_AsyncSnapshotCallable_toAsyncSnapshotFutureTask | /**
* Creates a future task from this and registers it with the given {@link CloseableRegistry}.
* The task is unregistered again in {@link FutureTask#done()}.
*/
public AsyncSnapshotTask toAsyncSnapshotFutureTask(@Nonnull CloseableRegistry taskRegistry)
throws IOException {
return new AsyncSnapshotTask(taskRegistry);
} | 3.68 |
flink_Plan_getJobName | /**
* Gets the name of this job.
*
* @return The name of the job.
*/
public String getJobName() {
return this.jobName;
} | 3.68 |
hbase_Union3_decodeC | /**
* Read an instance of the third type parameter from buffer {@code src}.
*/
public C decodeC(PositionedByteRange src) {
return (C) decode(src);
} | 3.68 |
hbase_BloomFilterFactory_getErrorRate | /** Returns the Bloom filter error rate in the given configuration */
public static float getErrorRate(Configuration conf) {
return conf.getFloat(IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
} | 3.68 |
flink_ScalaCsvOutputFormat_setAllowNullValues | /**
* Configures the format to either allow null values (writing an empty field), or to throw an
* exception when encountering a null field.
*
* <p>By default, null values are allowed.
*
* @param allowNulls Flag to indicate whether the output format should accept null values.
*/
public void setAllowNullValues(boolean allowNulls) {
this.allowNullValues = allowNulls;
} | 3.68 |
hbase_RowFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof RowFilter)) {
return false;
}
return super.areSerializedFieldsEqual(o);
} | 3.68 |
hadoop_BlockMovementStatus_getStatusCode | /**
* @return the status code.
*/
int getStatusCode() {
return code;
} | 3.68 |
hbase_HRegion_replayFlushInStores | /**
* Replays the given flush descriptor by opening the flush files in stores and dropping the
* memstore snapshots if requested.
* @deprecated Since 3.0.0, will be removed in 4.0.0. Only kept for compatibility with the old
* region replica implementation.
*/
@Deprecated
private void replayFlushInStores(FlushDescriptor flush, PrepareFlushResult prepareFlushResult,
boolean dropMemstoreSnapshot) throws IOException {
for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) {
byte[] family = storeFlush.getFamilyName().toByteArray();
HStore store = getStore(family);
if (store == null) {
LOG.warn(getRegionInfo().getEncodedName() + " : "
+ "Received a flush commit marker from primary, but the family is not found."
+ "Ignoring StoreFlushDescriptor:" + storeFlush);
continue;
}
List<String> flushFiles = storeFlush.getFlushOutputList();
StoreFlushContext ctx = null;
long startTime = EnvironmentEdgeManager.currentTime();
if (prepareFlushResult == null || prepareFlushResult.storeFlushCtxs == null) {
ctx = store.createFlushContext(flush.getFlushSequenceNumber(), FlushLifeCycleTracker.DUMMY);
} else {
ctx = prepareFlushResult.storeFlushCtxs.get(family);
startTime = prepareFlushResult.startTime;
}
if (ctx == null) {
LOG.warn(getRegionInfo().getEncodedName() + " : "
+ "Unexpected: flush commit marker received from store " + Bytes.toString(family)
+ " but no associated flush context. Ignoring");
continue;
}
ctx.replayFlush(flushFiles, dropMemstoreSnapshot); // replay the flush
// Record latest flush time
this.lastStoreFlushTimeMap.put(store, startTime);
}
} | 3.68 |
hbase_ReplicationSourceManager_postLogRoll | // public because of we call it in TestReplicationEmptyWALRecovery
public void postLogRoll(Path newLog) throws IOException {
String logName = newLog.getName();
String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(logName);
// synchronized on latestPaths to avoid a newly opened source missing the new log
synchronized (this.latestPaths) {
// synchronized on walsById to avoid race with cleanOldLogs
synchronized (this.walsById) {
// Update walsById map
for (Map.Entry<ReplicationQueueId, Map<String, NavigableSet<String>>> entry : this.walsById
.entrySet()) {
ReplicationQueueId queueId = entry.getKey();
String peerId = queueId.getPeerId();
Map<String, NavigableSet<String>> walsByPrefix = entry.getValue();
boolean existingPrefix = false;
for (Map.Entry<String, NavigableSet<String>> walsEntry : walsByPrefix.entrySet()) {
SortedSet<String> wals = walsEntry.getValue();
if (this.sources.isEmpty()) {
// If there are no slaves, we don't need to keep the old wals since
// we only consider the last one when a new slave comes in
wals.clear();
}
if (logPrefix.equals(walsEntry.getKey())) {
wals.add(logName);
existingPrefix = true;
}
}
if (!existingPrefix) {
// The new log belongs to a new group, add it into this peer
LOG.debug("Start tracking logs for wal group {} for peer {}", logPrefix, peerId);
NavigableSet<String> wals = new TreeSet<>();
wals.add(logName);
walsByPrefix.put(logPrefix, wals);
}
}
}
// Add to latestPaths
latestPaths.put(logPrefix, newLog);
}
// This only updates the sources we own, not the recovered ones
for (ReplicationSourceInterface source : this.sources.values()) {
source.enqueueLog(newLog);
LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", newLog,
source.getQueueId());
}
} | 3.68 |
hbase_HbckTableInfo_handleRegionStartKeyNotEmpty | /**
* This is a special case hole -- when the first region of a table is missing from META, HBase
* doesn't acknowledge the existence of the table.
*/
@Override
public void handleRegionStartKeyNotEmpty(HbckRegionInfo next) throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
"First region should start with an empty key. Creating a new "
+ "region and regioninfo in HDFS to plug the hole.",
getTableInfo(), next);
TableDescriptor htd = getTableInfo().getTableDescriptor();
// from special EMPTY_START_ROW to next region's startKey
RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(next.getStartKey()).build();
// TODO test
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Table region start key was not empty. Created new empty region: " + newRegion + " "
+ region);
hbck.fixes++;
} | 3.68 |
shardingsphere-elasticjob_LeaderService_isLeaderUntilBlock | /**
* Judge current server is leader or not.
*
* <p>
* If a leader election is in progress, this method will block until the election succeeds.
* </p>
*
* @return current server is leader or not
*/
public boolean isLeaderUntilBlock() {
while (!hasLeader() && serverService.hasAvailableServers()) {
log.info("Leader is electing, waiting for {} ms", 100);
BlockUtils.waitingShortTime();
if (!JobRegistry.getInstance().isShutdown(jobName) && serverService.isAvailableServer(JobRegistry.getInstance().getJobInstance(jobName).getServerIp())) {
electLeader();
}
}
return isLeader();
} | 3.68 |
hbase_ProcedureEvent_wake | /**
* Wakes up the suspended procedures by pushing them back into scheduler queues and sets the event
* as ready. See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not
* synchronized.
*/
public void wake(AbstractProcedureScheduler procedureScheduler) {
procedureScheduler.wakeEvents(new ProcedureEvent[] { this });
} | 3.68 |
hbase_Get_familySet | /**
* Method for retrieving the keys in the familyMap
* @return keys in the current familyMap
*/
public Set<byte[]> familySet() {
return this.familyMap.keySet();
} | 3.68 |
hbase_ExplicitColumnTracker_done | /**
* Done when there are no more columns to match against.
*/
@Override
public boolean done() {
return this.index >= columns.length;
} | 3.68 |
hadoop_AbfsOutputStreamStatisticsImpl_writeCurrentBuffer | /**
* {@inheritDoc}
*
* Records the number of times AbfsOutputStream writes the buffer to the
* service via the AbfsClient and appends the buffer to the service.
*/
@Override
public void writeCurrentBuffer() {
writeCurrentBufferOps.incrementAndGet();
} | 3.68 |
hadoop_FederationApplicationHomeSubClusterStoreInputValidator_checkApplicationHomeSubCluster | /**
   * Validate whether the ApplicationHomeSubCluster info is present.
*
* @param applicationHomeSubCluster the information of the application to be
* verified
* @throws FederationStateStoreInvalidInputException if the SubCluster Info
   *           is invalid
*/
private static void checkApplicationHomeSubCluster(
ApplicationHomeSubCluster applicationHomeSubCluster)
throws FederationStateStoreInvalidInputException {
if (applicationHomeSubCluster == null) {
String message = "Missing ApplicationHomeSubCluster Info."
+ " Please try again by specifying"
+ " an ApplicationHomeSubCluster information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate application Id
checkApplicationId(applicationHomeSubCluster.getApplicationId());
// validate subcluster Id
FederationMembershipStateStoreInputValidator
.checkSubClusterId(applicationHomeSubCluster.getHomeSubCluster());
} | 3.68 |
druid_MySqlStatementParser_parseDeclare | /**
* parse declare statement
*/
public SQLStatement parseDeclare() {
Lexer.SavePoint savePoint = lexer.markOut();
lexer.nextToken();
if (lexer.token() == Token.CONTINUE) {
lexer.reset(savePoint);
return this.parseDeclareHandler();
}
lexer.nextToken();
if (lexer.token() == Token.CURSOR) {
lexer.reset(savePoint);
return this.parseCursorDeclare();
} else if (lexer.identifierEquals("HANDLER")) {
            // DECLARE exception handler [added by zhujun 2016-04-16]
lexer.reset(savePoint);
return this.parseDeclareHandler();
} else if (lexer.token() == Token.CONDITION) {
            // DECLARE condition (exception) [added by zhujun 2016-04-17]
lexer.reset(savePoint);
return this.parseDeclareCondition();
} else {
lexer.reset(savePoint);
}
MySqlDeclareStatement stmt = new MySqlDeclareStatement();
accept(Token.DECLARE);
// lexer.nextToken();
for (; ; ) {
SQLDeclareItem item = new SQLDeclareItem();
item.setName(exprParser.name());
stmt.addVar(item);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
stmt.setAfterSemi(true);
continue;
} else if (lexer.token() != Token.EOF) {
// var type
item.setDataType(exprParser.parseDataType());
if (lexer.token() == Token.DEFAULT) {
lexer.nextToken();
SQLExpr defaultValue = this.exprParser.primary();
item.setValue(defaultValue);
}
break;
} else {
throw new ParserException("TODO. " + lexer.info());
}
}
return stmt;
} | 3.68 |
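A hedged usage sketch; it assumes MySqlStatementParser exposes a single-argument String constructor and that a plain variable declaration parses into a MySqlDeclareStatement.

MySqlStatementParser parser = new MySqlStatementParser("DECLARE total INT DEFAULT 0"); // assumed constructor
SQLStatement stmt = parser.parseDeclare();
// stmt should be a MySqlDeclareStatement holding one SQLDeclareItem named "total",
// with data type INT and default value 0.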
flink_MethodlessRouter_anyMatched | /** Checks if there's any matching route. */
public boolean anyMatched(String[] requestPathTokens) {
Map<String, String> pathParams = new HashMap<>();
for (PathPattern pattern : routes.keySet()) {
if (pattern.match(requestPathTokens, pathParams)) {
return true;
}
// Reset for the next loop
pathParams.clear();
}
return false;
} | 3.68 |
framework_TableElement_getCell | /**
     * Finds a Table cell. Looking for a cell that is currently not
     * visible throws a NoSuchElementException.
*
* @param row
* 0 based row index
* @param column
* 0 based column index
* @return TestBenchElement containing wanted cell.
* @throws NoSuchElementException
* if the cell (row, column) is not found.
*/
public TestBenchElement getCell(int row, int column) {
TestBenchElement cell = wrapElement(
findElement(By.vaadin("#row[" + row + "]/col[" + column + "]")),
getCommandExecutor());
return cell;
} | 3.68 |
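A hedged TestBench usage sketch, assuming the code runs inside a TestBenchTestCase and the page contains a single Table.

TableElement table = $(TableElement.class).first(); // assumed lookup of the only Table
String cellText = table.getCell(0, 2).getText();    // first row, third column
// Asking for a cell that is scrolled out of view throws NoSuchElementException instead.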
hadoop_RouterQuotaUsage_verifyStoragespaceQuota | /**
* Verify if storage space quota is violated once quota is set. Relevant
* method {@link DirectoryWithQuotaFeature#verifyStoragespaceQuota}.
* @throws DSQuotaExceededException If the quota is exceeded.
*/
public void verifyStoragespaceQuota() throws DSQuotaExceededException {
long spaceQuota = getSpaceQuota();
long spaceConsumed = getSpaceConsumed();
if (Quota.isViolated(spaceQuota, spaceConsumed)) {
throw new DSQuotaExceededException(spaceQuota, spaceConsumed);
}
} | 3.68 |
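A short sketch of how a caller might react to the check; quotaUsage is a placeholder for an already-populated RouterQuotaUsage.

try {
    quotaUsage.verifyStoragespaceQuota();           // quotaUsage is hypothetical here
} catch (DSQuotaExceededException e) {
    // The mount point has consumed more storage space than its quota allows; reject the write.
}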
framework_AbsoluteLayout_getCSSString | /**
* Converts the internal values into a valid CSS string.
*
* @return A valid CSS string
*/
public String getCSSString() {
String s = "";
if (topValue != null) {
s += "top:" + topValue + topUnits.getSymbol() + ";";
}
if (rightValue != null) {
s += "right:" + rightValue + rightUnits.getSymbol() + ";";
}
if (bottomValue != null) {
s += "bottom:" + bottomValue + bottomUnits.getSymbol() + ";";
}
if (leftValue != null) {
s += "left:" + leftValue + leftUnits.getSymbol() + ";";
}
if (zIndex >= 0) {
s += "z-index:" + zIndex + ";";
}
return s;
} | 3.68 |
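A hedged round-trip sketch using Vaadin's AbsoluteLayout; the exact formatting of the returned string (for example the trailing ".0" on float values) is an assumption.

AbsoluteLayout layout = new AbsoluteLayout();
Button button = new Button("OK");
layout.addComponent(button, "top:10px;left:20px;z-index:5;");
String css = layout.getPosition(button).getCSSString();
// Expected to look something like "top:10.0px;left:20.0px;z-index:5;".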
flink_HiveCatalog_retrieveFlinkProperties | /** Filter out Hive-created properties, and return Flink-created properties. */
private static Map<String, String> retrieveFlinkProperties(
Map<String, String> hiveTableParams) {
return hiveTableParams.entrySet().stream()
.filter(e -> e.getKey().startsWith(FLINK_PROPERTY_PREFIX))
.collect(
Collectors.toMap(
e -> e.getKey().substring(FLINK_PROPERTY_PREFIX.length()),
e -> e.getValue()));
} | 3.68 |
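Because the method is private, the sketch below reproduces the same transformation standalone (imports: java.util.Map, java.util.stream.Collectors); the literal "flink." prefix is an assumed stand-in for FLINK_PROPERTY_PREFIX.

Map<String, String> hiveParams = Map.of("flink.connector", "hive", "transient_lastDdlTime", "0");
Map<String, String> flinkProps = hiveParams.entrySet().stream()
        .filter(e -> e.getKey().startsWith("flink."))  // assumed prefix value
        .collect(Collectors.toMap(e -> e.getKey().substring("flink.".length()), Map.Entry::getValue));
// flinkProps now holds only {"connector" -> "hive"}; the Hive-created key is dropped.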
hbase_TableRegionModel_setName | /**
* @param name region printable name
*/
public void setName(String name) {
    String[] split = name.split(",");
this.table = split[0];
this.startKey = Bytes.toBytes(split[1]);
String tail = split[2];
split = tail.split("\\.");
id = Long.parseLong(split[0]);
} | 3.68 |
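A worked example of the parsing; the region name layout "<table>,<startKey>,<id>.<encodedName>." and the no-argument constructor are assumptions.

TableRegionModel model = new TableRegionModel();     // assumed no-arg constructor
model.setName("users,row-100,1638316800000.abc123def456.");
// table -> "users", startKey -> Bytes.toBytes("row-100"), id -> 1638316800000L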
flink_IOUtils_readFully | /**
* Reads len bytes in a loop.
*
* @param in The InputStream to read from
* @param buf The buffer to fill
     * @param off offset into the buffer
* @param len the length of bytes to read
* @throws IOException if it could not read requested number of bytes for any reason (including
* EOF)
*/
public static void readFully(final InputStream in, final byte[] buf, int off, final int len)
throws IOException {
int toRead = len;
while (toRead > 0) {
final int ret = in.read(buf, off, toRead);
if (ret < 0) {
throw new IOException("Premature EOF from inputStream");
}
toRead -= ret;
off += ret;
}
} | 3.68 |
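A short usage sketch: read an exact 16-byte header or fail; in is a placeholder for an open InputStream.

byte[] header = new byte[16];
IOUtils.readFully(in, header, 0, header.length);     // throws IOException on premature EOF
// Unlike a single InputStream.read call, this loops until all 16 bytes have been read.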
flink_ListElement_list | /**
* Creates a list with blocks of text. For example:
*
* <pre>{@code
* .list(
* text("this is first element of list"),
* text("this is second element of list with a %s", link("https://link"))
* )
* }</pre>
*
* @param elements list of this list entries
* @return list representation
*/
public static ListElement list(InlineElement... elements) {
return new ListElement(Arrays.asList(elements));
} | 3.68 |