name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_AbstractS3ACommitter_getJobCommitThreadCount | /**
* Get the thread count for this job's commit operations.
* @param context the JobContext for this commit
* @return a possibly zero thread count.
*/
private int getJobCommitThreadCount(final JobContext context) {
return context.getConfiguration().getInt(
FS_S3A_COMMITTER_THREADS,
DEFAULT_COMMITTER_THREADS);
} | 3.68 |
flink_FileSystem_initialize | /**
* Initializes the shared file system settings.
*
* <p>The given configuration is passed to each file system factory to initialize the respective
* file systems. Because the configuration of file systems may be different subsequent to the
* call of this method, this method clears the file system instance cache.
*
* <p>This method also reads the default file system URI from the configuration key {@link
* CoreOptions#DEFAULT_FILESYSTEM_SCHEME}. All calls to {@link FileSystem#get(URI)} where the
* URI has no scheme will be interpreted as relative to that URI. As an example, assume the
* default file system URI is set to {@code 'hdfs://localhost:9000/'}. A file path of {@code
* '/user/USERNAME/in.txt'} is interpreted as {@code
* 'hdfs://localhost:9000/user/USERNAME/in.txt'}.
*
* @param config the configuration from where to fetch the parameter.
 * @param pluginManager optional plugin manager that is used to initialize filesystems provided
* as plugins.
*/
public static void initialize(Configuration config, @Nullable PluginManager pluginManager)
throws IllegalConfigurationException {
LOCK.lock();
try {
// make sure file systems are re-instantiated after re-configuration
CACHE.clear();
FS_FACTORIES.clear();
Collection<Supplier<Iterator<FileSystemFactory>>> factorySuppliers = new ArrayList<>(2);
factorySuppliers.add(() -> ServiceLoader.load(FileSystemFactory.class).iterator());
if (pluginManager != null) {
factorySuppliers.add(
() ->
Iterators.transform(
pluginManager.load(FileSystemFactory.class),
PluginFileSystemFactory::of));
}
final List<FileSystemFactory> fileSystemFactories =
loadFileSystemFactories(factorySuppliers);
// configure all file system factories
for (FileSystemFactory factory : fileSystemFactories) {
factory.configure(config);
String scheme = factory.getScheme();
FileSystemFactory fsf =
ConnectionLimitingFactory.decorateIfLimited(factory, scheme, config);
FS_FACTORIES.put(scheme, fsf);
}
// configure the default (fallback) factory
FALLBACK_FACTORY.configure(config);
// also read the default file system scheme
final String stringifiedUri =
config.getString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, null);
if (stringifiedUri == null) {
defaultScheme = null;
} else {
try {
defaultScheme = new URI(stringifiedUri);
} catch (URISyntaxException e) {
throw new IllegalConfigurationException(
"The default file system scheme ('"
+ CoreOptions.DEFAULT_FILESYSTEM_SCHEME
+ "') is invalid: "
+ stringifiedUri,
e);
}
}
ALLOWED_FALLBACK_FILESYSTEMS.clear();
final Iterable<String> allowedFallbackFilesystems =
Splitter.on(';')
.omitEmptyStrings()
.trimResults()
.split(config.getString(CoreOptions.ALLOWED_FALLBACK_FILESYSTEMS));
allowedFallbackFilesystems.forEach(ALLOWED_FALLBACK_FILESYSTEMS::add);
} finally {
LOCK.unlock();
}
} | 3.68 |
framework_AbstractSelect_addPropertySetChangeListener | /**
* Adds a new Property set change listener for this Container.
*
* @see Container.PropertySetChangeNotifier#addListener(Container.PropertySetChangeListener)
*/
@Override
public void addPropertySetChangeListener(
Container.PropertySetChangeListener listener) {
if (propertySetEventListeners == null) {
propertySetEventListeners = new LinkedHashSet<PropertySetChangeListener>();
}
propertySetEventListeners.add(listener);
} | 3.68 |
hadoop_FSTreeTraverser_traverseDir | /**
* Iterate through all files directly inside parent, and recurse down
* directories. The listing is done in batch, and can optionally start after
* a position. The iteration of the inode tree is done in a depth-first
 * fashion. But instead of holding all {@link INodeDirectory}s in memory
 * during the traversal, only the path components to the current inode are
 * held. This reduces memory consumption.
*
* @param parent
* The inode id of parent directory
* @param startId
* Id of the start inode.
* @param startAfter
* Full path of a file the traverse should start after.
* @param traverseInfo
 *          info which may be required for processing the children.
* @throws IOException
* @throws InterruptedException
*/
protected void traverseDir(final INodeDirectory parent, final long startId,
byte[] startAfter, final TraverseInfo traverseInfo)
throws IOException, InterruptedException {
List<byte[]> startAfters = new ArrayList<>();
if (parent == null) {
return;
}
INode curr = parent;
// construct startAfters all the way up to the zone inode.
startAfters.add(startAfter);
while (curr.getId() != startId) {
startAfters.add(0, curr.getLocalNameBytes());
curr = curr.getParent();
}
curr = traverseDirInt(startId, parent, startAfters, traverseInfo);
while (!startAfters.isEmpty()) {
if (curr == null) {
// lock was reacquired, re-resolve path.
curr = resolvePaths(startId, startAfters);
}
curr = traverseDirInt(startId, curr, startAfters, traverseInfo);
}
} | 3.68 |
flink_DateTimeUtils_subtractMonths | /**
* Finds the number of months between two dates, each represented as the number of days since
* the epoch.
*/
public static int subtractMonths(int date0, int date1) {
if (date0 < date1) {
return -subtractMonths(date1, date0);
}
// Start with an estimate.
// Since no month has more than 31 days, the estimate is <= the true value.
int m = (date0 - date1) / 31;
while (true) {
int date2 = addMonths(date1, m);
if (date2 >= date0) {
return m;
}
int date3 = addMonths(date1, m + 1);
if (date3 > date0) {
return m;
}
++m;
}
} | 3.68 |
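The estimate-and-refine loop above is easiest to see with concrete dates. Below is a minimal, self-contained sketch (not Flink code) that re-implements the same loop on epoch-day integers, using java.time's plusMonths as a stand-in for Flink's addMonths; the dates are purely illustrative.

```java
import java.time.LocalDate;

public class SubtractMonthsSketch {
    // Stand-in for addMonths: the epoch-day date shifted by m months.
    static int addMonths(int epochDay, int m) {
        return (int) LocalDate.ofEpochDay(epochDay).plusMonths(m).toEpochDay();
    }

    // Same estimate-and-refine loop as the snippet above.
    static int subtractMonths(int date0, int date1) {
        if (date0 < date1) {
            return -subtractMonths(date1, date0);
        }
        int m = (date0 - date1) / 31; // estimate, never larger than the true value
        while (true) {
            if (addMonths(date1, m) >= date0) {
                return m;
            }
            if (addMonths(date1, m + 1) > date0) {
                return m;
            }
            ++m;
        }
    }

    public static void main(String[] args) {
        int jan31 = (int) LocalDate.of(2024, 1, 31).toEpochDay();
        int mar1 = (int) LocalDate.of(2024, 3, 1).toEpochDay();
        // Only one whole month fits between Jan 31 and Mar 1, 2024.
        System.out.println(subtractMonths(mar1, jan31)); // prints 1
    }
}
```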
hbase_RestoreSnapshotProcedure_updateMETA | /**
* Apply changes to hbase:meta
**/
private void updateMETA(final MasterProcedureEnv env) throws IOException {
try {
Connection conn = env.getMasterServices().getConnection();
RegionStateStore regionStateStore = env.getAssignmentManager().getRegionStateStore();
int regionReplication = modifiedTableDescriptor.getRegionReplication();
// 1. Prepare to restore
getMonitorStatus().setStatus("Preparing to restore each region");
// 2. Applies changes to hbase:meta and in-memory states
// (2.1). Removes the current set of regions from META and in-memory states
//
// By removing also the regions to restore (the ones present both in the snapshot
// and in the current state) we ensure that no extra fields are present in META
// e.g. with a simple addRegionToMeta() the splitA and splitB attributes would
// not be overwritten/removed, so you would end up with stale information
// that is not correct after the restore.
if (regionsToRemove != null) {
regionStateStore.deleteRegions(regionsToRemove);
deleteRegionsFromInMemoryStates(regionsToRemove, env, regionReplication);
}
// (2.2). Add the new set of regions to META and in-memory states
//
// At this point the old regions are no longer present in META,
// and the set of regions present in the snapshot will be written to META.
// All the information in hbase:meta comes from the .regioninfo of each region present
// in the snapshot folder.
if (regionsToAdd != null) {
MetaTableAccessor.addRegionsToMeta(conn, regionsToAdd, regionReplication);
addRegionsToInMemoryStates(regionsToAdd, env, regionReplication);
}
if (regionsToRestore != null) {
regionStateStore.overwriteRegions(regionsToRestore, regionReplication);
deleteRegionsFromInMemoryStates(regionsToRestore, env, regionReplication);
addRegionsToInMemoryStates(regionsToRestore, env, regionReplication);
}
RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
new RestoreSnapshotHelper.RestoreMetaChanges(modifiedTableDescriptor,
parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(conn, regionsToAdd);
// At this point the restore is complete.
LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
+ " on table=" + getTableName() + " completed!");
} catch (IOException e) {
final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
+ " failed in meta update. Try re-running the restore command.";
LOG.error(msg, e);
monitorException
.receive(new ForeignException(env.getMasterServices().getServerName().toString(), e));
throw new IOException(msg, e);
}
monitorStatus.markComplete("Restore snapshot '" + snapshot.getName() + "'!");
MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
metricsSnapshot
.addSnapshotRestore(monitorStatus.getCompletionTimestamp() - monitorStatus.getStartTime());
} | 3.68 |
flink_Tuple14_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
* @param f13 The value for field 13
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
} | 3.68 |
flink_MathUtils_roundUpToPowerOfTwo | /**
* Round the given number to the next power of two.
*
* @param x number to round
* @return x rounded up to the next power of two
*/
public static int roundUpToPowerOfTwo(int x) {
x = x - 1;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return x + 1;
} | 3.68 |
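The bit-smearing above copies the highest set bit of x - 1 into every lower position, so adding one lands exactly on the next power of two. A quick standalone check (not part of Flink, values chosen for illustration):

```java
public class PowerOfTwoDemo {
    // Same bit-smearing trick as the method above.
    static int roundUpToPowerOfTwo(int x) {
        x = x - 1;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return x + 1;
    }

    public static void main(String[] args) {
        System.out.println(roundUpToPowerOfTwo(1));    // 1
        System.out.println(roundUpToPowerOfTwo(17));   // 32
        System.out.println(roundUpToPowerOfTwo(64));   // 64 (already a power of two)
        System.out.println(roundUpToPowerOfTwo(1000)); // 1024
    }
}
```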
flink_JoinOperator_withPartitioner | /**
* Sets a custom partitioner for this join. The partitioner will be called on the join keys to
* determine the partition a key should be assigned to. The partitioner is evaluated on both
* join inputs in the same way.
*
* <p>NOTE: A custom partitioner can only be used with single-field join keys, not with
* composite join keys.
*
* @param partitioner The custom partitioner to be used.
* @return This join operator, to allow for function chaining.
*/
public JoinOperator<I1, I2, OUT> withPartitioner(Partitioner<?> partitioner) {
if (partitioner != null) {
keys1.validateCustomPartitioner(partitioner, null);
keys2.validateCustomPartitioner(partitioner, null);
}
this.customPartitioner = getInput1().clean(partitioner);
return this;
} | 3.68 |
flink_SubtaskStateStats_getStateSize | /**
* Returns the size of the checkpointed state at this subtask.
*
* @return Checkpoint state size of the sub task.
*/
public long getStateSize() {
return stateSize;
} | 3.68 |
flink_DataStatistics_getBaseStatistics | /**
* Gets the base statistics for the input identified by the given identifier.
*
* @param inputIdentifier The identifier for the input.
* @return The statistics that were cached for this input.
*/
public BaseStatistics getBaseStatistics(String inputIdentifier) {
synchronized (this.baseStatisticsCache) {
return this.baseStatisticsCache.get(inputIdentifier);
}
} | 3.68 |
hadoop_ChainReducer_addMapper | /**
* Adds a {@link Mapper} class to the chain reducer.
*
* <p>
 * The key and values are passed from one element of the chain to the next, by
 * value. For the added Mapper, the configuration given for it,
 * <code>mapperConf</code>, has precedence over the job's Configuration. This
 * precedence is in effect when the task is running.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainMapper, this is done by the addMapper for the last mapper in the
* chain.
* </p>
*
* @param job
* The job.
* @param klass
* the Mapper class to add.
* @param inputKeyClass
* mapper input key class.
* @param inputValueClass
* mapper input value class.
* @param outputKeyClass
* mapper output key class.
* @param outputValueClass
* mapper output value class.
* @param mapperConf
* a configuration for the Mapper class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
public static void addMapper(Job job, Class<? extends Mapper> klass,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration mapperConf) throws IOException {
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Chain.addMapper(false, job, klass, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, mapperConf);
} | 3.68 |
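A hedged usage fragment, assuming a Job has already been created, the reducer has been set via ChainReducer.setReducer, and AMap/BMap are hypothetical user Mapper implementations; the key/value classes are illustrative only.

```java
// Illustrative chaining: reducer output flows through AMap, then BMap.
Configuration aConf = new Configuration(false);
ChainReducer.addMapper(job, AMap.class,
    LongWritable.class, Text.class,   // input key/value of AMap
    Text.class, Text.class,           // output key/value of AMap
    aConf);

Configuration bConf = new Configuration(false);
ChainReducer.addMapper(job, BMap.class,
    Text.class, Text.class,
    LongWritable.class, Text.class,   // the last mapper fixes the job's output types
    bConf);
```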
hadoop_JsonSerialization_toBytes | /**
* Convert JSON to bytes.
* @param instance instance to convert
* @return a byte array
* @throws IOException IO problems
*/
public byte[] toBytes(T instance) throws IOException {
return mapper.writeValueAsBytes(instance);
} | 3.68 |
flink_StateMap_isEmpty | /**
* Returns whether this {@link StateMap} is empty.
*
* @return {@code true} if this {@link StateMap} has no elements, {@code false} otherwise.
* @see #size()
*/
public boolean isEmpty() {
return size() == 0;
} | 3.68 |
hadoop_WebHdfs_createWebHdfsFileSystem | /**
* Returns a new {@link WebHdfsFileSystem}, with the given configuration.
*
* @param conf configuration
* @return new WebHdfsFileSystem
*/
private static WebHdfsFileSystem createWebHdfsFileSystem(Configuration conf) {
WebHdfsFileSystem fs = new WebHdfsFileSystem();
fs.setConf(conf);
return fs;
} | 3.68 |
open-banking-gateway_ProtocolSelector_selectProtocolFor | /**
* Selects protocol service into internal context
* @param ctx Facade context
* @param protocolAction Protocol action to execute
* @param actionBeans Available beans for this action.
* @param <REQUEST> Request being executed
* @param <ACTION> Action associated
* @return Internal context with protocol service to handle the request.
*/
@Transactional
public <REQUEST, ACTION> Optional<InternalContext<REQUEST, ACTION>> selectProtocolFor(
InternalContext<REQUEST, ACTION> ctx,
ProtocolAction protocolAction,
Map<String, ? extends ACTION> actionBeans) {
Optional<BankAction> bankAction;
if (null == ctx.getAuthSession()) {
bankAction = bankActionRepository.findByBankProfileUuidAndAction(
ctx.getServiceCtx().getBankProfileId(),
protocolAction
);
} else {
Long id = isForAuthorization(protocolAction) ? ctx.getServiceCtx().getAuthorizationBankProtocolId() : ctx.getServiceCtx().getServiceBankProtocolId();
bankAction = bankActionRepository.findById(id);
}
return bankAction
.map(action -> {
ACTION actionBean = findActionBean(action, actionBeans, protocolAction);
return ctx.toBuilder()
.serviceCtx(ctx.getServiceCtx().toBuilder().serviceBankProtocolId(action.getId()).build())
.action(actionBean)
.build();
});
} | 3.68 |
framework_VTree_sendSelectionToServer | /**
* Sends the selection to the server
*/
private void sendSelectionToServer() {
Command command = new Command() {
@Override
public void execute() {
/*
* we should send selection to server immediately in 2 cases: 1)
* 'immediate' property of Tree is true 2) clickEventPending is
* true
*/
client.updateVariable(paintableId, "selected",
selectedIds.toArray(new String[selectedIds.size()]),
clickEventPending || immediate);
clickEventPending = false;
selectionHasChanged = false;
}
};
/*
* Delaying the sending of the selection in webkit to ensure the
* selection is always sent when the tree has focus and after click
* events have been processed. This is due to the focusing
* implementation in FocusImplSafari which uses timeouts when focusing
* and blurring.
*/
if (BrowserInfo.get().isWebkit()) {
Scheduler.get().scheduleDeferred(command);
} else {
command.execute();
}
} | 3.68 |
morf_DataSetProducerAdapter_close | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#close()
*/
@Override
public void close() {
delegate.close();
} | 3.68 |
flink_JoinOperator_projectTuple10 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>
ProjectJoin<I1, I2, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>
projectTuple10() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> tType =
new TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fTypes);
return new ProjectJoin<I1, I2, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
framework_VAccordion_getHeight | /**
* Returns the offset height of this stack item.
*
* @return the height in pixels
*
* @deprecated This method is not called by the framework code anymore.
* Use {@link #getOffsetHeight()} instead.
*/
@Deprecated
public int getHeight() {
return getOffsetHeight();
} | 3.68 |
hadoop_S3ARemoteInputStream_getIOStatistics | /**
* Gets the internal IO statistics.
*
* @return the internal IO statistics.
*/
@Override
public IOStatistics getIOStatistics() {
return ioStatistics;
} | 3.68 |
hbase_RegionLocator_getStartEndKeys | /**
* Gets the starting and ending row keys for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
* @return Pair of arrays of region starting and ending row keys
* @throws IOException if a remote or network exception occurs
*/
default Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
List<HRegionLocation> regions = getAllRegionLocations().stream()
.filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion()))
.collect(Collectors.toList());
byte[][] startKeys = new byte[regions.size()][];
byte[][] endKeys = new byte[regions.size()][];
for (int i = 0, n = regions.size(); i < n; i++) {
RegionInfo region = regions.get(i).getRegion();
startKeys[i] = region.getStartKey();
endKeys[i] = region.getEndKey();
}
return Pair.newPair(startKeys, endKeys);
} | 3.68 |
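A small usage sketch for the default method above, assuming the standard HBase client API (HBaseConfiguration, ConnectionFactory, Bytes, Pair); the table name is a placeholder.

```java
Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     RegionLocator locator = connection.getRegionLocator(TableName.valueOf("my_table"))) {
    Pair<byte[][], byte[][]> keys = locator.getStartEndKeys();
    byte[][] startKeys = keys.getFirst();
    byte[][] endKeys = keys.getSecond();
    for (int i = 0; i < startKeys.length; i++) {
        // An empty byte[] marks the first start key and the last end key.
        System.out.println(Bytes.toStringBinary(startKeys[i])
            + " -> " + Bytes.toStringBinary(endKeys[i]));
    }
}
```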
flink_ColumnStats_getMaxValue | /**
 * Deprecated because the Number type does not adequately cover comparable max/min types such as {@link
 * java.util.Date} and {@link java.sql.Timestamp}.
*
* <p>Returns null if this instance is constructed by {@link ColumnStats.Builder}.
*/
@Deprecated
public Number getMaxValue() {
return maxValue;
} | 3.68 |
framework_VAbstractPopupCalendar_setDescriptionForAssistiveDevices | /**
* Set a description that explains the usage of the Widget for users of
* assistive devices.
*
* @param descriptionForAssistiveDevices
* String with the description
*/
public void setDescriptionForAssistiveDevices(
String descriptionForAssistiveDevices) {
descriptionForAssistiveDevicesElement
.setInnerText(descriptionForAssistiveDevices);
} | 3.68 |
framework_ViewChangeListener_afterViewChange | /**
* Invoked after the view is changed. If a <code>beforeViewChange</code>
* method blocked the view change, this method is not called. Be careful of
* unbounded recursion if you decide to change the view again in the
* listener.
* <p>
* By default it does nothing. Override it in your listener if you need this
* functionality.
*
* @param event
* view change event
*/
public default void afterViewChange(ViewChangeEvent event) {
} | 3.68 |
flink_SlotID_getDynamicSlotID | /** Get a SlotID without actual slot index for dynamic slot allocation. */
public static SlotID getDynamicSlotID(ResourceID resourceID) {
return new SlotID(resourceID);
} | 3.68 |
hadoop_JournalProtocolServerSideTranslatorPB_startLogSegment | /** @see JournalProtocol#startLogSegment */
@Override
public StartLogSegmentResponseProto startLogSegment(RpcController controller,
StartLogSegmentRequestProto req) throws ServiceException {
try {
impl.startLogSegment(PBHelper.convert(req.getJournalInfo()),
req.getEpoch(), req.getTxid());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_START_LOG_SEGMENT_RESPONSE;
} | 3.68 |
flink_MailboxMetricsController_startLatencyMeasurement | /**
* Starts mailbox latency measurement. This requires setup of latency measurement via {@link
* MailboxMetricsController#setupLatencyMeasurement(TimerService, MailboxExecutor)}. Latency is
* measured through execution of a mail that is triggered by default in the interval defined by
* {@link MailboxMetricsController#defaultLatencyMeasurementInterval}.
*
* <p>Note: For each instance, latency measurement can be started only once.
*/
public void startLatencyMeasurement() {
checkState(!isLatencyMeasurementStarted(), "latency measurement has already been started");
checkState(
isLatencyMeasurementSetup(),
"timer service and mailbox executor must be setup for latency measurement");
scheduleLatencyMeasurement();
started = true;
} | 3.68 |
hbase_ParseFilter_createComparator | /**
* Parses a comparator of the form comparatorType:comparatorValue form and returns a comparator
* <p>
* @param comparator the comparator in the form comparatorType:comparatorValue
* @return the parsed comparator
*/
public static ByteArrayComparable createComparator(byte[] comparator) {
if (comparator == null) throw new IllegalArgumentException("Incorrect Comparator");
byte[][] parsedComparator = ParseFilter.parseComparator(comparator);
byte[] comparatorType = parsedComparator[0];
byte[] comparatorValue = parsedComparator[1];
if (Bytes.equals(comparatorType, ParseConstants.binaryType))
return new BinaryComparator(comparatorValue);
else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType))
return new BinaryPrefixComparator(comparatorValue);
else if (Bytes.equals(comparatorType, ParseConstants.regexStringType))
return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8));
else if (Bytes.equals(comparatorType, ParseConstants.regexStringNoCaseType))
return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8),
Pattern.CASE_INSENSITIVE | Pattern.DOTALL);
else if (Bytes.equals(comparatorType, ParseConstants.substringType))
return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8));
else throw new IllegalArgumentException("Incorrect comparatorType");
} | 3.68 |
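For illustration, the "comparatorType:comparatorValue" strings below follow the HBase filter-language syntax that the method parses; treat the exact type names as examples to double-check against ParseConstants.

```java
// Each string is split on ':' into a comparator type and a value.
ByteArrayComparable binary    = ParseFilter.createComparator(Bytes.toBytes("binary:abc"));
ByteArrayComparable prefix    = ParseFilter.createComparator(Bytes.toBytes("binaryprefix:ab"));
ByteArrayComparable substring = ParseFilter.createComparator(Bytes.toBytes("substring:row"));
ByteArrayComparable regex     = ParseFilter.createComparator(Bytes.toBytes("regexstring:ab*c"));
```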
hudi_HoodieTable_getCompletedCommitTimeline | /**
* Get only the completed (no-inflights) commit timeline.
*/
public HoodieTimeline getCompletedCommitTimeline() {
return metaClient.getCommitTimeline().filterCompletedInstants();
} | 3.68 |
hbase_ClusterMetrics_getAverageLoad | /** Returns the average cluster load */
default double getAverageLoad() {
int serverSize = getLiveServerMetrics().size();
if (serverSize == 0) {
return 0;
}
return (double) getRegionCount() / (double) serverSize;
} | 3.68 |
framework_StringToFloatConverter_convertToModel | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object,
* java.util.Locale)
*/
@Override
public Float convertToModel(String value, Class<? extends Float> targetType,
Locale locale) throws ConversionException {
Number n = convertToNumber(value, targetType, locale);
return n == null ? null : n.floatValue();
} | 3.68 |
hbase_Compressor_uncompressIntoArray | /**
* Reads a compressed entry into an array. The output into the array ends up length-prefixed.
* @param to the array to write into
* @param offset array offset to start writing to
* @param in the DataInput to read from
* @param dict the dictionary to use for compression
* @return the length of the uncompressed data
*/
@Deprecated
static int uncompressIntoArray(byte[] to, int offset, DataInput in, Dictionary dict)
throws IOException {
byte status = in.readByte();
if (status == Dictionary.NOT_IN_DICTIONARY) {
// status byte indicating that data to be read is not in dictionary.
// if this isn't in the dictionary, we need to add to the dictionary.
int length = WritableUtils.readVInt(in);
in.readFully(to, offset, length);
dict.addEntry(to, offset, length);
return length;
} else {
// the status byte also acts as the higher order byte of the dictionary
// entry
short dictIdx = toShort(status, in.readByte());
byte[] entry;
try {
entry = dict.getEntry(dictIdx);
} catch (Exception ex) {
throw new IOException("Unable to uncompress the log entry", ex);
}
if (entry == null) {
throw new IOException("Missing dictionary entry for index " + dictIdx);
}
// now we write the uncompressed value.
Bytes.putBytes(to, offset, entry, 0, entry.length);
return entry.length;
}
} | 3.68 |
morf_Function_rightTrim | /**
* Helper method to create an instance of the "rightTrim" SQL function,
 * which will result in the argument having trailing spaces removed.
*
* @param expression the field to evaluate.
* @return an instance of the rightTrim function.
*/
public static Function rightTrim(AliasedField expression) {
return new Function(FunctionType.RIGHT_TRIM, expression);
} | 3.68 |
framework_LoginForm_getLoginButtonCaption | /**
* Gets the caption set with {@link #setLoginButtonCaption(String)}. Note
* that this method might not match what is shown to the user if
* {@link #createLoginButton()} has been overridden.
*
* @return the login button caption
*/
public String getLoginButtonCaption() {
return loginButtonCaption;
} | 3.68 |
framework_AbstractMultiSelect_addSelectionListener | /**
* Adds a selection listener that will be called when the selection is
* changed either by the user or programmatically.
*
* @param listener
* the value change listener, not {@code null}
* @return a registration for the listener
*/
@Override
public Registration addSelectionListener(
MultiSelectionListener<T> listener) {
return addListener(MultiSelectionEvent.class, listener,
MultiSelectionListener.SELECTION_CHANGE_METHOD);
} | 3.68 |
morf_DeleteStatementBuilder_getWhereCriterion | /**
* Gets the where criteria.
*
* @return the where criteria
*/
Criterion getWhereCriterion() {
return whereCriterion;
} | 3.68 |
hadoop_PlacementConstraints_targetIn | /**
* Creates a constraint that requires allocations to be placed on nodes that
* satisfy all target expressions within the given scope (e.g., node or rack).
*
* For example, {@code targetIn(RACK, allocationTag("hbase-m"))}, allows
* allocations on nodes that belong to a rack that has at least one tag with
* value "hbase-m".
*
* @param scope the scope within which the target expressions should be
* satisfied
* @param targetExpressions the expressions that need to be satisfied within
* the scope
* @return the resulting placement constraint
*/
public static AbstractConstraint targetIn(String scope,
TargetExpression... targetExpressions) {
return new SingleConstraint(scope, 1, Integer.MAX_VALUE, targetExpressions);
} | 3.68 |
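The example from the Javadoc, spelled out as a code fragment; it assumes the usual static imports from PlacementConstraints and its nested PlacementTargets class.

```java
// Static imports assumed:
//   org.apache.hadoop.yarn.api.resource.PlacementConstraints.{RACK, targetIn}
//   org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag
// Only place allocations on racks that already host at least one "hbase-m" container.
AbstractConstraint rackAffinityToHbaseMaster = targetIn(RACK, allocationTag("hbase-m"));
```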
dubbo_ApolloDynamicConfiguration_getInternalProperty | /**
* This method will be used by Configuration to get valid value at runtime.
* The group is expected to be 'app level', which can be fetched from the 'config.appnamespace' in url if necessary.
 * But Apollo's namespace inheritance feature should solve that problem.
*/
@Override
public String getInternalProperty(String key) {
return dubboConfig.getProperty(key, null);
} | 3.68 |
framework_VMenuBar_hideChildMenu | /**
* Hides the submenu of an item.
*
* @param item
*/
public void hideChildMenu(CustomMenuItem item) {
if (visibleChildMenu != null && visibleChildMenu != item.getSubMenu()) {
popup.hide();
}
} | 3.68 |
flink_MemorySize_getBytes | /** Gets the memory size in bytes. */
public long getBytes() {
return bytes;
} | 3.68 |
rocketmq-connect_Serdes_ByteBuffer | /**
* A serde for nullable {@code ByteBuffer} type.
*/
static public Serde<ByteBuffer> ByteBuffer() {
return new ByteBufferSerde();
} | 3.68 |
flink_DecimalData_copy | /** Returns a copy of this {@link DecimalData} object. */
public DecimalData copy() {
return new DecimalData(precision, scale, longVal, decimalVal);
} | 3.68 |
framework_LogSection_maybeScroll | /**
 * Schedules a scroll if scroll lock is not active.
*/
private void maybeScroll() {
if (scrollTimer != null) {
scrollTimer.cancel();
scrollTimer.schedule(SCROLL_DELAY);
}
} | 3.68 |
hadoop_DirectBufferPool_returnBuffer | /**
* Return a buffer into the pool. After being returned,
* the buffer may be recycled, so the user must not
* continue to use it in any way.
* @param buf the buffer to return
*/
public void returnBuffer(ByteBuffer buf) {
buf.clear(); // reset mark, limit, etc
int size = buf.capacity();
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
list = new ConcurrentLinkedQueue<WeakReference<ByteBuffer>>();
Queue<WeakReference<ByteBuffer>> prev = buffersBySize.putIfAbsent(size, list);
// someone else put a queue in the map before we did
if (prev != null) {
list = prev;
}
}
list.add(new WeakReference<ByteBuffer>(buf));
} | 3.68 |
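A hedged pairing with the pool's acquire-side getBuffer call from the same class; the buffer size is an arbitrary example.

```java
DirectBufferPool pool = new DirectBufferPool();
ByteBuffer buf = pool.getBuffer(4096); // allocates, or reuses, a pooled 4 KiB direct buffer
try {
    // ... fill and consume the buffer ...
} finally {
    // After this call the buffer may be handed to another caller; do not touch it again.
    pool.returnBuffer(buf);
}
```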
hbase_PersistentIOEngine_getFileSize | /**
 * Uses the Linux du command to get a file's real size.
 * @param filePath the file
 * @return file's real size
 * @throws IOException if something goes wrong, e.g. the file does not exist
*/
private static long getFileSize(String filePath) throws IOException {
DU.setExecCommand(filePath);
DU.execute();
String size = DU.getOutput().split("\t")[0];
return StringUtils.isEmpty(size.trim()) ? 0 : Long.parseLong(size);
} | 3.68 |
framework_StringToByteConverter_getFormat | /**
* Returns the format used by
* {@link #convertToPresentation(Byte, Class, Locale)} and
* {@link #convertToModel(String, Class, Locale)}.
*
* @param locale
* The locale to use
* @return A NumberFormat instance
*/
@Override
protected NumberFormat getFormat(Locale locale) {
if (locale == null) {
locale = Locale.getDefault();
}
return NumberFormat.getIntegerInstance(locale);
} | 3.68 |
framework_Upload_setButtonCaptionAsHtml | /**
* In addition to the actual file chooser, upload components have button
* that starts actual upload progress. This method is used to set whether
* the caption on that button is rendered as HTML.
* <p>
* If set to {@code true}, the caption is rendered in the browser as HTML
* and the developer is responsible for ensuring no harmful HTML is used. If
* set to {@code false}, the caption is rendered in the browser as plain
* text.
* <p>
* The default is {@code false}, i.e. to render the caption as plain text.
*
* @param buttonCaptionAsHtml
* {@code true} if the caption is rendered as HTML, {@code false}
* if rendered as plain text
* @since 8.11
*/
public void setButtonCaptionAsHtml(boolean buttonCaptionAsHtml) {
getState().buttonCaptionAsHtml = buttonCaptionAsHtml;
} | 3.68 |
flink_SkipListUtils_putValuePointer | /**
* Puts the value pointer to key space.
*
* @param memorySegment memory segment for key space.
* @param offset offset of key space in the memory segment.
* @param valuePointer the value pointer.
*/
public static void putValuePointer(MemorySegment memorySegment, int offset, long valuePointer) {
memorySegment.putLong(offset + VALUE_POINTER_OFFSET, valuePointer);
} | 3.68 |
hbase_CellCreator_getVisibilityExpressionResolver | /** Returns Visibility expression resolver */
public VisibilityExpressionResolver getVisibilityExpressionResolver() {
return this.visExpResolver;
} | 3.68 |
pulsar_MBeanStatsGenerator_createMetricsByDimension | /**
 * Creates an MBean dimension key for metrics.
*
* @param objectName
* @return
*/
private Metrics createMetricsByDimension(ObjectName objectName) {
Map<String, String> dimensionMap = new HashMap<>();
dimensionMap.put("MBean", objectName.toString());
// create with current version
return Metrics.create(dimensionMap);
} | 3.68 |
rocketmq-connect_Serdes_ByteArray | /**
* A serde for nullable {@code byte[]} type.
*/
static public Serde<byte[]> ByteArray() {
return new ByteArraySerde();
} | 3.68 |
framework_Margins_getHorizontal | /**
* Returns the combined width of the left and the right margins.
*
* @return the sum of the left and the right margins (in pixels)
*/
public int getHorizontal() {
return horizontal;
} | 3.68 |
framework_Window_setAssistivePostfix | /**
* Sets the accessibility postfix for the window caption.
*
* This postfix is read to assistive device users after the window caption,
* but not visible on the page.
*
* @param assistivePostfix
* String that is placed after the window caption
*/
public void setAssistivePostfix(String assistivePostfix) {
getState().assistivePostfix = assistivePostfix;
} | 3.68 |
flink_TypeExtractionUtils_getRawClass | /**
* Returns the raw class of both parameterized types and generic arrays. Returns
* java.lang.Object for all other types.
*/
public static Class<?> getRawClass(Type t) {
if (isClassType(t)) {
return typeToClass(t);
} else if (t instanceof GenericArrayType) {
Type component = ((GenericArrayType) t).getGenericComponentType();
return Array.newInstance(getRawClass(component), 0).getClass();
}
return Object.class;
} | 3.68 |
hadoop_Lz4Codec_getCompressorType | /**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
return Lz4Compressor.class;
} | 3.68 |
flink_DeduplicateFunctionHelper_processLastRowOnProcTime | /**
* Processes element to deduplicate on keys with process time semantic, sends current element as
* last row, retracts previous element if needed.
*
* @param currentRow latest row received by deduplicate function
* @param generateUpdateBefore whether need to send UPDATE_BEFORE message for updates
* @param state state of function, null if generateUpdateBefore is false
* @param out underlying collector
 * @param isStateTtlEnabled whether state TTL is enabled
* @param equaliser the record equaliser used to equal RowData.
*/
static void processLastRowOnProcTime(
RowData currentRow,
boolean generateUpdateBefore,
boolean generateInsert,
ValueState<RowData> state,
Collector<RowData> out,
boolean isStateTtlEnabled,
RecordEqualiser equaliser)
throws Exception {
checkInsertOnly(currentRow);
if (generateUpdateBefore || generateInsert) {
// use state to keep the previous row content if we need to generate UPDATE_BEFORE
// or use to distinguish the first row, if we need to generate INSERT
RowData preRow = state.value();
state.update(currentRow);
if (preRow == null) {
// the first row, send INSERT message
currentRow.setRowKind(RowKind.INSERT);
out.collect(currentRow);
} else {
if (!isStateTtlEnabled && equaliser.equals(preRow, currentRow)) {
// currentRow is the same as preRow and state cleaning is not enabled.
// We do not emit retraction and update message.
// If state cleaning is enabled, we have to emit messages to prevent too early
// state eviction of downstream operators.
return;
} else {
if (generateUpdateBefore) {
preRow.setRowKind(RowKind.UPDATE_BEFORE);
out.collect(preRow);
}
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
}
} else {
// always send UPDATE_AFTER if INSERT is not needed
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
} | 3.68 |
hbase_FileChangeWatcher_stop | /**
* Tells the background thread to stop. Does not wait for it to exit.
*/
public void stop() {
if (compareAndSetState(new State[] { State.RUNNING, State.STARTING }, State.STOPPING)) {
watcherThread.interrupt();
}
} | 3.68 |
hibernate-validator_ConstraintAnnotationVisitor_visitTypeAsEnum | /**
* <p>
* Checks whether the given annotations are correctly specified at the given
* enum type declaration. The following checks are performed:
* </p>
* <ul>
 * <li>
 * Constraint annotations may only be specified at types supported by the constraints.</li>
 * </ul>
*/
@Override
public Void visitTypeAsEnum(TypeElement e, List<AnnotationMirror> p) {
checkConstraints( e, p );
return null;
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_shardingItemParameters | /**
* Set mapper of sharding items and sharding parameters.
*
* <p>
 * A sharding item and its sharding parameter are separated by '='; multiple sharding item/parameter pairs are separated by commas, just like a map.
 * Sharding items start from zero and cannot be equal to or greater than the sharding total count.
* For example:
* 0=a,1=b,2=c
* </p>
*
* @param shardingItemParameters mapper of sharding items and sharding parameters
* @return job configuration builder
*/
public Builder shardingItemParameters(final String shardingItemParameters) {
if (null != shardingItemParameters) {
this.shardingItemParameters = shardingItemParameters;
}
return this;
} | 3.68 |
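A minimal builder sketch under the assumption of the ElasticJob 3.x API; the job name, cron expression and parameter values are placeholders.

```java
JobConfiguration jobConfig = JobConfiguration.newBuilder("demoShardingJob", 3)
        .cron("0/5 * * * * ?")
        // item 0 processes "Beijing", item 1 "Shanghai", item 2 "Guangzhou"
        .shardingItemParameters("0=Beijing,1=Shanghai,2=Guangzhou")
        .build();
```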
flink_ExecutionJobVertex_getAggregateJobVertexState | /**
* A utility function that computes an "aggregated" state for the vertex.
*
* <p>This state is not used anywhere in the coordination, but can be used for display in
 * dashboards as a summary of how the particular parallel operation represented by this
* ExecutionJobVertex is currently behaving.
*
* <p>For example, if at least one parallel task is failed, the aggregate state is failed. If
* not, and at least one parallel task is cancelling (or cancelled), the aggregate state is
* cancelling (or cancelled). If all tasks are finished, the aggregate state is finished, and so
* on.
*
* @param verticesPerState The number of vertices in each state (indexed by the ordinal of the
* ExecutionState values).
* @param parallelism The parallelism of the ExecutionJobVertex
* @return The aggregate state of this ExecutionJobVertex.
*/
public static ExecutionState getAggregateJobVertexState(
int[] verticesPerState, int parallelism) {
if (verticesPerState == null || verticesPerState.length != ExecutionState.values().length) {
throw new IllegalArgumentException(
"Must provide an array as large as there are execution states.");
}
if (verticesPerState[ExecutionState.FAILED.ordinal()] > 0) {
return ExecutionState.FAILED;
}
if (verticesPerState[ExecutionState.CANCELING.ordinal()] > 0) {
return ExecutionState.CANCELING;
} else if (verticesPerState[ExecutionState.CANCELED.ordinal()] > 0) {
return ExecutionState.CANCELED;
} else if (verticesPerState[ExecutionState.INITIALIZING.ordinal()] > 0) {
return ExecutionState.INITIALIZING;
} else if (verticesPerState[ExecutionState.RUNNING.ordinal()] > 0) {
return ExecutionState.RUNNING;
} else if (verticesPerState[ExecutionState.FINISHED.ordinal()] > 0) {
return verticesPerState[ExecutionState.FINISHED.ordinal()] == parallelism
? ExecutionState.FINISHED
: ExecutionState.RUNNING;
} else {
// all else collapses under created
return ExecutionState.CREATED;
}
} | 3.68 |
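A short, hedged call fragment showing the "FINISHED only counts when all subtasks finished" rule; the counts are made up for illustration.

```java
int parallelism = 4;
int[] verticesPerState = new int[ExecutionState.values().length];
verticesPerState[ExecutionState.FINISHED.ordinal()] = 3;
verticesPerState[ExecutionState.RUNNING.ordinal()] = 1;

// 3 finished + 1 running out of 4 -> the aggregate is RUNNING, not FINISHED.
ExecutionState aggregate =
        ExecutionJobVertex.getAggregateJobVertexState(verticesPerState, parallelism);
```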
hadoop_MutableQuantiles_setInterval | /**
* Set the rollover interval (in seconds) of the estimator.
*
 * @param pIntervalSecs the rollover interval of the estimator, in seconds.
*/
public synchronized void setInterval(int pIntervalSecs) {
this.intervalSecs = pIntervalSecs;
} | 3.68 |
hbase_HRegion_getRegionServicesForStores | /** Returns store services for this region, to access services required by store level needs */
public RegionServicesForStores getRegionServicesForStores() {
return regionServicesForStores;
} | 3.68 |
flink_SkipListUtils_compareSegmentAndNode | /**
* Compare the first skip list key in the given memory segment with the second skip list key in
* the given node.
*
* @param keySegment memory segment storing the first key.
* @param keyOffset offset of the first key in memory segment.
* @param targetNode the node storing the second key.
* @param spaceAllocator the space allocator.
* @return Returns a negative integer, zero, or a positive integer as the first key is less
* than, equal to, or greater than the second.
*/
static int compareSegmentAndNode(
MemorySegment keySegment,
int keyOffset,
long targetNode,
@Nonnull Allocator spaceAllocator) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(targetNode));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(targetNode);
MemorySegment targetKeySegment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
int level = getLevel(targetKeySegment, offsetInByteBuffer);
int targetKeyOffset = offsetInByteBuffer + getKeyDataOffset(level);
return SkipListKeyComparator.compareTo(
keySegment, keyOffset, targetKeySegment, targetKeyOffset);
} | 3.68 |
hbase_MasterObserver_postBalanceRSGroup | /**
 * Called after a region server group is balanced
* @param ctx the environment to interact with the framework and master
* @param groupName group name
* @param request the request sent to the balancer
* @param response the response returned by the balancer
*/
default void postBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, BalanceRequest request, BalanceResponse response) throws IOException {
} | 3.68 |
flink_DefaultDelegationTokenManager_stop | /** Stops re-occurring token obtain task. */
@Override
public void stop() {
LOG.info("Stopping credential renewal");
stopTokensUpdate();
LOG.info("Stopped credential renewal");
} | 3.68 |
hbase_QuotaState_isBypass | /** Returns true if there is no quota information associated to this object */
public synchronized boolean isBypass() {
return globalLimiter == NoopQuotaLimiter.get();
} | 3.68 |
flink_Broker_get | /** Blocking retrieval and removal of the object to share. */
public V get(String key) {
try {
BlockingQueue<V> queue = retrieveSharedQueue(key);
V objToShare = queue.take();
if (!queue.offer(objToShare)) {
throw new RuntimeException(
"Error: Concurrent modification of the broker slot for key '" + key + "'.");
}
return objToShare;
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} | 3.68 |
hbase_TableState_getState | /** Returns table state */
public State getState() {
return state;
} | 3.68 |
zxing_PDF417Common_getCodeword | /**
* @param symbol encoded symbol to translate to a codeword
* @return the codeword corresponding to the symbol.
*/
public static int getCodeword(int symbol) {
int i = Arrays.binarySearch(SYMBOL_TABLE, symbol & 0x3FFFF);
if (i < 0) {
return -1;
}
return (CODEWORD_TABLE[i] - 1) % NUMBER_OF_CODEWORDS;
} | 3.68 |
pulsar_BatchMessageIdImpl_toMessageIdImpl | // MessageIdImpl is widely used as the key of a hash map; in that case, we should convert the batch message id to
// have the correct hash code.
@Deprecated
public MessageIdImpl toMessageIdImpl() {
return (MessageIdImpl) MessageIdAdvUtils.discardBatch(this);
} | 3.68 |
hadoop_NameCache_initialized | /**
* Mark the name cache as initialized. The use count is no longer tracked
* and the transient map used for initializing the cache is discarded to
* save heap space.
*/
void initialized() {
LOG.info("initialized with " + size() + " entries " + lookups + " lookups");
this.initialized = true;
transientMap.clear();
transientMap = null;
} | 3.68 |
morf_AbstractSqlDialectTest_testDateToYyyymmddHHmmss | /**
 * Test that dateToYyyyMMddHHmmss functionality behaves as expected.
*/
@Test
public void testDateToYyyymmddHHmmss() {
String result = testDialect.getSqlFrom(dateToYyyyMMddHHmmss(field("testField")));
assertEquals(expectedDateToYyyymmddHHmmss(), result);
} | 3.68 |
flink_FutureUtils_handleCompletedFuture | /**
* Method which increments the atomic completion counter and completes or fails the
* WaitingFutureImpl.
*/
private void handleCompletedFuture(Object ignored, Throwable throwable) {
if (throwable == null) {
if (numTotal == numCompleted.incrementAndGet()) {
complete(null);
}
} else {
completeExceptionally(throwable);
}
} | 3.68 |
hadoop_ServerCommand_getAction | /**
* Get server command action.
* @return action code.
*/
public int getAction() {
return this.action;
} | 3.68 |
pulsar_LoadSimulationController_getResourceQuotas | // Recursively acquire all resource quotas by getting the ZK children of the given path and calling this function
// on the children if there are any, or getting the data from this ZNode otherwise.
private void getResourceQuotas(final String path, final ZooKeeper zkClient,
final Map<String, ResourceQuota>[] threadLocalMaps) throws Exception {
final List<String> children = zkClient.getChildren(path, false);
if (children.isEmpty()) {
threadLocalMaps[random.nextInt(clients.length)].put(path, ObjectMapperFactory.getMapper().getObjectMapper()
.readValue(zkClient.getData(path, false, null), ResourceQuota.class));
} else {
for (final String child : children) {
getResourceQuotas(String.format("%s/%s", path, child), zkClient, threadLocalMaps);
}
}
} | 3.68 |
hadoop_StepType_getName | /**
* Returns step type name.
*
* @return String step type name
*/
public String getName() {
return name;
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_generateFavoredNodes | /*
* Generate favored nodes for a set of regions when we know where they are currently hosted.
*/
private Map<RegionInfo, List<ServerName>>
generateFavoredNodes(Map<RegionInfo, ServerName> primaryRSMap) {
Map<RegionInfo, List<ServerName>> generatedFavNodes = new HashMap<>();
Map<RegionInfo, ServerName[]> secondaryAndTertiaryRSMap =
placeSecondaryAndTertiaryRS(primaryRSMap);
for (Entry<RegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
List<ServerName> favoredNodesForRegion = new ArrayList<>(FAVORED_NODES_NUM);
RegionInfo region = entry.getKey();
ServerName primarySN = entry.getValue();
favoredNodesForRegion
.add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), NON_STARTCODE));
ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region);
if (secondaryAndTertiaryNodes != null) {
favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(),
secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE));
favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(),
secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE));
}
generatedFavNodes.put(region, favoredNodesForRegion);
}
return generatedFavNodes;
} | 3.68 |
streampipes_ElasticsearchApiCallBridge_createRequestIndex | /**
* Creates an RequestIndexer instance.
*
* @param bulkProcessor The instance of BulkProcessor
* @param flushOnCheckpoint If true, the producer will wait until all outstanding action requests have been
* sent to Elasticsearch.
* @param numPendingRequests Number of pending action requests not yet acknowledged by Elasticsearch.
* @return The created RequestIndexer.
*/
public RequestIndexer createRequestIndex(
BulkProcessor bulkProcessor,
boolean flushOnCheckpoint,
AtomicLong numPendingRequests) {
return new BulkProcessorIndexer(bulkProcessor, flushOnCheckpoint, numPendingRequests);
} | 3.68 |
hudi_ClusteringPlanStrategy_checkAndGetClusteringPlanStrategy | /**
* Check if the given class is deprecated.
 * If it is, then try to convert it to a suitable one and update the write config accordingly.
* @param config write config
* @return class name of clustering plan strategy
*/
public static String checkAndGetClusteringPlanStrategy(HoodieWriteConfig config) {
String className = config.getClusteringPlanStrategyClass();
String sparkSizeBasedClassName = HoodieClusteringConfig.SPARK_SIZED_BASED_CLUSTERING_PLAN_STRATEGY;
String sparkSelectedPartitionsClassName = "org.apache.hudi.client.clustering.plan.strategy.SparkSelectedPartitionsClusteringPlanStrategy";
String sparkRecentDaysClassName = "org.apache.hudi.client.clustering.plan.strategy.SparkRecentDaysClusteringPlanStrategy";
String javaSelectedPartitionClassName = "org.apache.hudi.client.clustering.plan.strategy.JavaRecentDaysClusteringPlanStrategy";
String javaSizeBasedClassName = HoodieClusteringConfig.JAVA_SIZED_BASED_CLUSTERING_PLAN_STRATEGY;
String logStr = "The clustering plan '%s' is deprecated. Please set the plan as '%s' and set '%s' as '%s' to achieve the same behaviour";
if (sparkRecentDaysClassName.equals(className)) {
config.setValue(HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME, ClusteringPlanPartitionFilterMode.RECENT_DAYS.name());
LOG.warn(String.format(logStr, className, sparkSizeBasedClassName, HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME.key(), ClusteringPlanPartitionFilterMode.RECENT_DAYS.name()));
return sparkSizeBasedClassName;
} else if (sparkSelectedPartitionsClassName.equals(className)) {
config.setValue(HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME, ClusteringPlanPartitionFilterMode.SELECTED_PARTITIONS.name());
LOG.warn(String.format(logStr, className, sparkSizeBasedClassName, HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME.key(), ClusteringPlanPartitionFilterMode.SELECTED_PARTITIONS.name()));
return sparkSizeBasedClassName;
} else if (javaSelectedPartitionClassName.equals(className)) {
config.setValue(HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME, ClusteringPlanPartitionFilterMode.RECENT_DAYS.name());
LOG.warn(String.format(logStr, className, javaSizeBasedClassName, HoodieClusteringConfig.PLAN_PARTITION_FILTER_MODE_NAME.key(), ClusteringPlanPartitionFilterMode.SELECTED_PARTITIONS.name()));
return javaSizeBasedClassName;
}
return className;
} | 3.68 |
hadoop_Chain_getCurrentKey | /**
* Get the current key.
*
* @return the current key object or null if there isn't one
* @throws IOException
* @throws InterruptedException
*/
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return this.key;
} | 3.68 |
dubbo_HttpHeaderUtil_appendPrefixToAttachRealHeader | /**
 * Appends a prefix to a rest header to distinguish it from a normal header.
*
* @param header
* @return
*/
public static String appendPrefixToAttachRealHeader(String header) {
return RestHeaderEnum.REST_HEADER_PREFIX.getHeader() + header;
} | 3.68 |
hbase_RegionStates_getAssignedRegions | // ============================================================================================
// TODO:
// ============================================================================================
public List<RegionInfo> getAssignedRegions() {
final List<RegionInfo> result = new ArrayList<RegionInfo>();
for (RegionStateNode node : regionsMap.values()) {
if (!node.isInTransition()) {
result.add(node.getRegionInfo());
}
}
return result;
} | 3.68 |
hudi_SparkHoodieHBaseIndex_addShutDownHook | /**
* Since we are sharing the HBaseConnection across tasks in a JVM, make sure the HBaseConnection is closed when JVM
* exits.
*/
private void addShutDownHook() {
if (null == shutdownThread) {
shutdownThread = new Thread(() -> {
try {
hbaseConnection.close();
} catch (Exception e) {
// fail silently for any sort of exception
}
});
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
} | 3.68 |
flink_SharedBuffer_isEmpty | /**
 * Checks if there are no elements in the buffer.
 *
 * @return true if there are no elements in the buffer
* @throws Exception Thrown if the system cannot access the state.
*/
public boolean isEmpty() throws Exception {
return Iterables.isEmpty(eventsBufferCache.asMap().keySet())
&& Iterables.isEmpty(eventsBuffer.keys());
} | 3.68 |
hbase_Append_addColumn | /**
* Add the specified column and value to this Append operation.
* @param family family name
* @param qualifier column qualifier
* @param value value to append to specified column
*/
public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value);
return add(kv);
} | 3.68 |
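A usage sketch against a standard HBase Table instance (assumed to exist already); the row, family and qualifier names are placeholders.

```java
Append append = new Append(Bytes.toBytes("row-1"));
append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(",next-entry"));
// Appends the value to whatever is already stored in cf:log for row-1.
Result result = table.append(append);
```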
querydsl_ExpressionUtils_in | /**
* Create a {@code left in right} expression
*
* @param <D> element type
* @param left lhs of expression
* @param right rhs of expression
* @return left in right
*/
public static <D> Predicate in(Expression<D> left, Collection<? extends D> right) {
if (right.size() == 1) {
return eqConst(left, right.iterator().next());
} else {
return predicate(Ops.IN, left, ConstantImpl.create(right));
}
} | 3.68 |
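A hedged sketch of the single-element optimization described above; "name" is a hypothetical string path created only for illustration.

```java
StringPath name = Expressions.stringPath("name");

// One element: collapses to an equality predicate (name = alice).
Predicate single = ExpressionUtils.in(name, Collections.singletonList("alice"));

// Several elements: becomes a real IN predicate (name in [alice, bob]).
Predicate multi = ExpressionUtils.in(name, Arrays.asList("alice", "bob"));
```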
flink_ShadeOptionalChecker_isCommonCompileDependency | /**
* These are compile dependencies that are set up in the root pom. We do not require modules to
* mark these as optional because all modules depend on them anyway; whether they leak through
* or not is therefore irrelevant.
*/
private static boolean isCommonCompileDependency(Dependency dependency) {
return "flink-shaded-force-shading".equals(dependency.getArtifactId())
|| "jsr305".equals(dependency.getArtifactId())
|| "slf4j-api".equals(dependency.getArtifactId());
} | 3.68 |
hudi_CleanPlanner_getPartitionPathsForFullCleaning | /**
* Scan and list all partitions for cleaning.
* @return all partitions paths for the dataset.
*/
private List<String> getPartitionPathsForFullCleaning() {
// Go to brute force mode of scanning all partitions
return FSUtils.getAllPartitionPaths(context, config.getMetadataConfig(), config.getBasePath());
} | 3.68 |
MagicPlugin_UndoRegistry_removeDamage | /**
* Subtract some amount of damage
* @param block The block to remove damage from.
* @return The amount of damage remaining, or null if no damage was removed.
*/
@Nullable
public Double removeDamage(BlockData block) {
double amount = block.getDamage();
if (amount <= 0) return null;
Double currentAmount = breaking.get(block.getId());
if (currentAmount == null) return null;
currentAmount -= amount;
if (currentAmount <= 0) {
removeBreaking(block);
return 0.0;
} else {
breaking.put(block.getId(), currentAmount);
}
return currentAmount;
} | 3.68 |
hbase_StripeStoreFileManager_getStripeCopy | /**
* @param index Index of the stripe we need.
* @return A lazy stripe copy from current stripes.
*/
private final ArrayList<HStoreFile> getStripeCopy(int index) {
List<HStoreFile> stripeCopy = this.stripeFiles.get(index);
ArrayList<HStoreFile> result = null;
if (stripeCopy instanceof ImmutableList<?>) {
result = new ArrayList<>(stripeCopy);
this.stripeFiles.set(index, result);
} else {
result = (ArrayList<HStoreFile>) stripeCopy;
}
return result;
} | 3.68 |
flink_JobGraphGenerator_compileJobGraph | /**
* Translates a {@link org.apache.flink.optimizer.plan.OptimizedPlan} into a {@link
* org.apache.flink.runtime.jobgraph.JobGraph}.
*
* @param program Optimized plan that is translated into a JobGraph.
* @return JobGraph generated from the plan.
*/
public JobGraph compileJobGraph(OptimizedPlan program) {
return compileJobGraph(program, null);
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getLocations | /**
* Returns all the Paths where this input-split resides.
*/
@Override
public String[] getLocations() throws IOException {
return inputSplitShim.getLocations();
} | 3.68 |
hadoop_Paths_resetTempFolderCache | /**
* Reset the temp folder cache; useful in tests.
*/
@VisibleForTesting
public static void resetTempFolderCache() {
tempFolders.invalidateAll();
} | 3.68 |
hudi_HoodieHeartbeatClient_stopHeartbeatTimers | /**
* Stops all timers of heartbeats started via this instance of the client.
*
* @throws HoodieException
*/
public void stopHeartbeatTimers() throws HoodieException {
instantToHeartbeatMap.values().stream().filter(this::isHeartbeatStarted).forEach(this::stopHeartbeatTimer);
} | 3.68 |
hbase_QuotaObserverChore_getInitialDelay | /**
* Extracts the initial delay for the chore from the configuration.
* @param conf The configuration object.
* @return The configured chore initial delay or the default value in the given timeunit.
* @see #getTimeUnit(Configuration)
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(QUOTA_OBSERVER_CHORE_DELAY_KEY, QUOTA_OBSERVER_CHORE_DELAY_DEFAULT);
} | 3.68 |
flink_MemoryManager_release | /**
* Tries to release many memory segments together.
*
* <p>The segment is only freed and made eligible for reclamation by the GC. Each segment will
* be returned to the memory pool, increasing its available limit for the later allocations.
*
* @param segments The segments to be released.
*/
public void release(Collection<MemorySegment> segments) {
if (segments == null) {
return;
}
Preconditions.checkState(!isShutDown, "Memory manager has been shut down.");
// since concurrent modifications to the collection
// can disturb the release, we need to try potentially multiple times
boolean successfullyReleased = false;
do {
// We could just pre-sort the segments by owner and release them in a loop by owner.
// It would simplify the code but require this additional step and memory for the sorted
// map of segments by owner.
// Current approach is more complicated but it traverses the input segments only once
// w/o any additional buffer.
// Later, we can check whether the simpler approach actually leads to any performance
// penalty and
// if not, we can change it to the simpler approach for the better readability.
Iterator<MemorySegment> segmentsIterator = segments.iterator();
try {
MemorySegment segment = null;
while (segment == null && segmentsIterator.hasNext()) {
segment = segmentsIterator.next();
}
while (segment != null) {
segment = releaseSegmentsForOwnerUntilNextOwner(segment, segmentsIterator);
}
segments.clear();
// the only way to exit the loop
successfullyReleased = true;
} catch (ConcurrentModificationException | NoSuchElementException e) {
// this may happen in the case where an asynchronous
// call releases the memory. fall through the loop and try again
}
} while (!successfullyReleased);
} | 3.68 |
querydsl_DateTimeExpression_month | /**
* Create a month expression (range 1-12 / JAN-DEC)
*
* @return month
*/
public NumberExpression<Integer> month() {
if (month == null) {
month = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.MONTH, mixin);
}
return month;
} | 3.68 |
framework_ApplicationConnection_getActiveConnector | /**
* Gets the active connector for the focused element in the browser.
*
* @return the connector for the focused element or <code>null</code> if
* none found or no element is focused.
*/
private ComponentConnector getActiveConnector() {
Element focusedElement = WidgetUtil.getFocusedElement();
if (focusedElement == null) {
return null;
}
return Util.getConnectorForElement(this, RootPanel.get(),
focusedElement);
} | 3.68 |
hbase_MultiResponse_getException | /** Returns the exception for the region, if any. Null otherwise. */
public Throwable getException(byte[] regionName) {
return exceptions.get(regionName);
} | 3.68 |