name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
framework_Calendar_getEndOfDay | /**
* Calculates the end time of the day using the given calendar and date
*
* @param calendar
the calendar instance to be used in the calculation. The given
instance is unchanged in this operation.
* @param date
the date for which to calculate the end of the day
* @return the given date, with time set to the end of the day
*/
private static Date getEndOfDay(java.util.Calendar calendar, Date date) {
java.util.Calendar calendarClone = (java.util.Calendar) calendar
.clone();
calendarClone.setTime(date);
calendarClone.set(java.util.Calendar.MILLISECOND,
calendarClone.getActualMaximum(java.util.Calendar.MILLISECOND));
calendarClone.set(java.util.Calendar.SECOND,
calendarClone.getActualMaximum(java.util.Calendar.SECOND));
calendarClone.set(java.util.Calendar.MINUTE,
calendarClone.getActualMaximum(java.util.Calendar.MINUTE));
calendarClone.set(java.util.Calendar.HOUR,
calendarClone.getActualMaximum(java.util.Calendar.HOUR));
calendarClone.set(java.util.Calendar.HOUR_OF_DAY,
calendarClone.getActualMaximum(java.util.Calendar.HOUR_OF_DAY));
return calendarClone.getTime();
} | 3.68 |
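
The getEndOfDay snippet above works on a clone so the caller's Calendar is never mutated, and it pushes every time field to its actual maximum. The standalone sketch below mirrors that logic for illustration; the class and main method are hypothetical, since the original method is private to the Vaadin Calendar component.

```java
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;

public class EndOfDayDemo {

    // Mirrors the snippet's approach: clone, then max out every time field.
    // HOUR_OF_DAY alone is enough to set the hour; the original also sets HOUR.
    static Date getEndOfDay(Calendar calendar, Date date) {
        Calendar clone = (Calendar) calendar.clone(); // caller's instance stays untouched
        clone.setTime(date);
        clone.set(Calendar.MILLISECOND, clone.getActualMaximum(Calendar.MILLISECOND));
        clone.set(Calendar.SECOND, clone.getActualMaximum(Calendar.SECOND));
        clone.set(Calendar.MINUTE, clone.getActualMaximum(Calendar.MINUTE));
        clone.set(Calendar.HOUR_OF_DAY, clone.getActualMaximum(Calendar.HOUR_OF_DAY));
        return clone.getTime();
    }

    public static void main(String[] args) {
        Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
        Date now = new Date();
        System.out.println("now:        " + now);
        System.out.println("end of day: " + getEndOfDay(cal, now)); // 23:59:59.999 of the same day
    }
}
```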
framework_Table_setRowGenerator | /**
* Assigns a row generator to the table. The row generator will be able to
* replace rows in the table when it is rendered.
*
* @param generator
* the new row generator
*/
public void setRowGenerator(RowGenerator generator) {
rowGenerator = generator;
refreshRowCache();
} | 3.68 |
morf_SqlScriptExecutor_withParameterMetadata | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.QueryBuilder#withParameterMetadata(java.lang.Iterable)
*/
@Override
public QueryBuilder withParameterMetadata(Iterable<SqlParameter> parameterMetadata) {
this.parameterMetadata = parameterMetadata;
return this;
} | 3.68 |
hbase_SingleColumnValueFilter_getFamily | /** Returns the family */
public byte[] getFamily() {
return columnFamily;
} | 3.68 |
hbase_CommonFSUtils_getTableName | /**
* Returns the {@link org.apache.hadoop.hbase.TableName} object representing the table directory
* under path rootdir
* @param tablePath path of table
* @return the {@link org.apache.hadoop.hbase.TableName} for the table
*/
public static TableName getTableName(Path tablePath) {
return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
} | 3.68 |
open-banking-gateway_ProtocolFacingConsentImpl_getConsentCache | /**
* Returns cached data (i.e. transaction list) related to the consent.
*/
@Override
public String getConsentCache() {
return consent.getCache(encryptionService);
} | 3.68 |
hbase_Scan_setAsyncPrefetch | /**
* @deprecated Since 3.0.0, will be removed in 4.0.0. After building sync client upon async
* client, the implementation is always 'async prefetch', so this flag is useless now.
*/
@Deprecated
public Scan setAsyncPrefetch(boolean asyncPrefetch) {
this.asyncPrefetch = asyncPrefetch;
return this;
} | 3.68 |
hadoop_NMTokenCache_getNMToken | /**
* Returns NMToken, null if absent. Only the singleton obtained from
* {@link #getSingleton()} is looked at for the tokens. If you are using your
* own NMTokenCache that is different from the singleton, use
* {@link #getToken(String) }
*
* @param nodeAddr the address of the node manager
* @return {@link Token} NMToken required for communicating with node manager
*/
@Public
public static Token getNMToken(String nodeAddr) {
return NM_TOKEN_CACHE.getToken(nodeAddr);
} | 3.68 |
hbase_FirstKeyOnlyFilter_setFoundKV | /**
* Sets or clears the flag indicating whether the first KV has been found.
* @param value the new value for the {@link #foundKV} flag.
*/
protected void setFoundKV(boolean value) {
this.foundKV = value;
} | 3.68 |
framework_VContextMenu_ensureHidden | /**
* Hides the context menu if it is currently shown by the given action owner.
*
* @param actionOwner
*/
public void ensureHidden(ActionOwner actionOwner) {
if (this.actionOwner == actionOwner) {
hide();
}
} | 3.68 |
hadoop_TaskAttemptContextImpl_progress | /**
* Report progress.
*/
@Override
public void progress() {
reporter.progress();
} | 3.68 |
hbase_HFileBlockIndex_blockWritten | /**
* Called after an inline block has been written so that we can add an entry referring to that
* block to the parent-level index.
*/
@Override
public void blockWritten(long offset, int onDiskSize, int uncompressedSize) {
// Add leaf index block size
totalBlockOnDiskSize += onDiskSize;
totalBlockUncompressedSize += uncompressedSize;
if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
if (firstKey == null) {
throw new IllegalStateException(
"Trying to add second-level index " + "entry with offset=" + offset + " and onDiskSize="
+ onDiskSize + "but the first key was not set in writeInlineBlock");
}
if (rootChunk.getNumEntries() == 0) {
// We are writing the first leaf block, so increase index level.
expectNumLevels(1);
numLevels = 2;
}
// Add another entry to the second-level index. Include the number of
// entries in all previous leaf-level chunks for mid-key calculation.
rootChunk.add(firstKey, offset, onDiskSize, totalNumEntries);
firstKey = null;
} | 3.68 |
hadoop_TypedBytesOutput_get | /**
* Get a thread-local typed bytes output for the supplied {@link DataOutput}.
*
* @param out data output object
* @return typed bytes output corresponding to the supplied
* {@link DataOutput}.
*/
public static TypedBytesOutput get(DataOutput out) {
TypedBytesOutput bout = TB_OUT.get();
bout.setDataOutput(out);
return bout;
} | 3.68 |
flink_KeyGroupsStateHandle_getGroupRangeOffsets | /** @return the internal key-group range to offsets metadata */
public KeyGroupRangeOffsets getGroupRangeOffsets() {
return groupRangeOffsets;
} | 3.68 |
incubator-hugegraph-toolchain_FileLineFetcher_readHeader | /**
* Read the first line of the first non-empty file as a header
*/
@Override
public String[] readHeader(List<Readable> readables) {
String[] header = null;
for (Readable readable : readables) {
this.openReader(readable);
assert this.reader != null;
try {
String line = this.reader.readLine();
if (!StringUtils.isEmpty(line)) {
header = this.parser.split(line);
break;
}
} catch (IOException e) {
throw new LoadException("Failed to read header from '%s'",
e, readable);
} finally {
try {
this.closeReader();
} catch (IOException e) {
LOG.warn("Failed to close reader of '{}'", readable);
}
}
}
return header;
} | 3.68 |
flink_FlinkContainers_withTestcontainersSettings | /**
* Allows optionally providing Testcontainers settings. Default {@link TestcontainersSettings}
* will be used otherwise.
*
* @param testcontainersSettings The Testcontainers settings.
* @return A reference to this Builder.
*/
public Builder withTestcontainersSettings(TestcontainersSettings testcontainersSettings) {
this.testcontainersSettings = testcontainersSettings;
return this;
} | 3.68 |
hbase_ColumnCount_getLength | /** Returns the length */
public int getLength() {
return this.length;
} | 3.68 |
hudi_WriteProfile_smallFilesProfile | /**
* Returns a list of small files in the given partition path from the latest filesystem view.
*/
protected List<SmallFile> smallFilesProfile(String partitionPath) {
// smallFiles only for partitionPath
List<SmallFile> smallFileLocations = new ArrayList<>();
HoodieTimeline commitTimeline = metaClient.getCommitsTimeline().filterCompletedInstants();
if (!commitTimeline.empty()) { // if we have some commits
HoodieInstant latestCommitTime = commitTimeline.lastInstant().get();
List<HoodieBaseFile> allFiles = fsView
.getLatestBaseFilesBeforeOrOn(partitionPath, latestCommitTime.getTimestamp()).collect(Collectors.toList());
for (HoodieBaseFile file : allFiles) {
// filter out the corrupted files.
if (file.getFileSize() < config.getParquetSmallFileLimit() && file.getFileSize() > 0) {
SmallFile sf = new SmallFile();
sf.location = new HoodieRecordLocation(file.getCommitTime(), file.getFileId());
sf.sizeBytes = file.getFileSize();
smallFileLocations.add(sf);
}
}
}
return smallFileLocations;
} | 3.68 |
hbase_MergeTableRegionsProcedure_getMergedRegion | /** Returns the merged region. May be null if called too early or if we failed. */
RegionInfo getMergedRegion() {
return this.mergedRegion;
} | 3.68 |
flink_JsonSerdeUtil_hasJsonCreatorAnnotation | /** Returns true if any of the given class's constructors has the @JsonCreator annotation, else false. */
public static boolean hasJsonCreatorAnnotation(Class<?> clazz) {
for (Constructor<?> constructor : clazz.getDeclaredConstructors()) {
for (Annotation annotation : constructor.getAnnotations()) {
if (annotation instanceof JsonCreator) {
return true;
}
}
}
return false;
} | 3.68 |
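
The hasJsonCreatorAnnotation snippet is a plain reflective scan over declared constructors. Below is a minimal self-contained version of the same check, assuming Jackson's annotations are on the classpath; the Point class is purely illustrative.

```java
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.lang.annotation.Annotation;
import java.lang.reflect.Constructor;

public class JsonCreatorCheck {

    // Same idea as the snippet: true if any declared constructor carries @JsonCreator.
    static boolean hasJsonCreatorAnnotation(Class<?> clazz) {
        for (Constructor<?> constructor : clazz.getDeclaredConstructors()) {
            for (Annotation annotation : constructor.getAnnotations()) {
                if (annotation instanceof JsonCreator) {
                    return true;
                }
            }
        }
        return false;
    }

    // Illustrative value class with an annotated constructor.
    static final class Point {
        final int x;
        final int y;

        @JsonCreator
        Point(@JsonProperty("x") int x, @JsonProperty("y") int y) {
            this.x = x;
            this.y = y;
        }
    }

    public static void main(String[] args) {
        System.out.println(hasJsonCreatorAnnotation(Point.class));  // true
        System.out.println(hasJsonCreatorAnnotation(String.class)); // false
    }
}
```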
flink_ApplicationDispatcherBootstrap_runApplicationEntryPoint | /**
* Runs the user program entrypoint and completes the given {@code jobIdsFuture} with the {@link
* JobID JobIDs} of the submitted jobs.
*
* <p>This should be executed in a separate thread (or task).
*/
private void runApplicationEntryPoint(
final CompletableFuture<List<JobID>> jobIdsFuture,
final Set<JobID> tolerateMissingResult,
final DispatcherGateway dispatcherGateway,
final ScheduledExecutor scheduledExecutor,
final boolean enforceSingleJobExecution,
final boolean submitFailedJobOnApplicationError) {
if (submitFailedJobOnApplicationError && !enforceSingleJobExecution) {
jobIdsFuture.completeExceptionally(
new ApplicationExecutionException(
String.format(
"Submission of failed job in case of an application error ('%s') is not supported in non-HA setups.",
DeploymentOptions.SUBMIT_FAILED_JOB_ON_APPLICATION_ERROR
.key())));
return;
}
final List<JobID> applicationJobIds = new ArrayList<>(recoveredJobIds);
try {
final PipelineExecutorServiceLoader executorServiceLoader =
new EmbeddedExecutorServiceLoader(
applicationJobIds, dispatcherGateway, scheduledExecutor);
ClientUtils.executeProgram(
executorServiceLoader,
configuration,
application,
enforceSingleJobExecution,
true /* suppress sysout */);
if (applicationJobIds.isEmpty()) {
jobIdsFuture.completeExceptionally(
new ApplicationExecutionException(
"The application contains no execute() calls."));
} else {
jobIdsFuture.complete(applicationJobIds);
}
} catch (Throwable t) {
// If we're running in a single job execution mode, it's safe to consider re-submission
// of an already finished job a success.
final Optional<DuplicateJobSubmissionException> maybeDuplicate =
ExceptionUtils.findThrowable(t, DuplicateJobSubmissionException.class);
if (enforceSingleJobExecution
&& maybeDuplicate.isPresent()
&& maybeDuplicate.get().isGloballyTerminated()) {
final JobID jobId = maybeDuplicate.get().getJobID();
tolerateMissingResult.add(jobId);
jobIdsFuture.complete(Collections.singletonList(jobId));
} else if (submitFailedJobOnApplicationError && applicationJobIds.isEmpty()) {
final JobID failedJobId =
JobID.fromHexString(
configuration.get(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID));
dispatcherGateway
.submitFailedJob(failedJobId, FAILED_JOB_NAME, t)
.thenAccept(
ignored ->
jobIdsFuture.complete(
Collections.singletonList(failedJobId)));
} else {
jobIdsFuture.completeExceptionally(
new ApplicationExecutionException("Could not execute application.", t));
}
}
} | 3.68 |
flink_GSCommitRecoverableSerializer_serializeCommitRecoverable | /**
* Writes a commit recoverable to a data output stream.
*
* @param recoverable The commit recoverable
* @param dataOutputStream The data output stream
* @throws IOException On underlying failure
*/
static void serializeCommitRecoverable(
GSCommitRecoverable recoverable, DataOutputStream dataOutputStream) throws IOException {
// finalBlobIdentifier
dataOutputStream.writeUTF(recoverable.finalBlobIdentifier.bucketName);
dataOutputStream.writeUTF(recoverable.finalBlobIdentifier.objectName);
// componentObjectIds
dataOutputStream.writeInt(recoverable.componentObjectIds.size());
for (UUID componentObjectId : recoverable.componentObjectIds) {
dataOutputStream.writeLong(componentObjectId.getMostSignificantBits());
dataOutputStream.writeLong(componentObjectId.getLeastSignificantBits());
}
} | 3.68 |
framework_VDebugWindow_writeStoredState | /**
* Writes the persistent state to localStorage.
*/
private void writeStoredState() {
if (isClosed()) {
return;
}
Storage storage = Storage.getLocalStorageIfSupported();
if (storage == null) {
return;
}
writeState(storage, STORAGE_FULL_X, fullX);
writeState(storage, STORAGE_FULL_Y, fullY);
writeState(storage, STORAGE_FULL_W, fullW);
writeState(storage, STORAGE_FULL_H, fullH);
writeState(storage, STORAGE_MIN_X, minX);
writeState(storage, STORAGE_MIN_Y, minY);
writeState(storage, STORAGE_FONT_SIZE, fontSize);
int activeIdx = getActiveSection();
if (activeIdx >= 0) {
writeState(storage, STORAGE_ACTIVE_SECTION, activeIdx);
}
writeState(storage, STORAGE_IS_MINIMIZED, minimized);
} | 3.68 |
flink_SkipListUtils_getKeyMetaLen | /**
* Returns the length of key meta with the given level.
*
* @param level level of the key.
*/
public static int getKeyMetaLen(int level) {
Preconditions.checkArgument(
level >= 0 && level < KEY_META_LEN_BY_LEVEL_ARRAY.length,
"level " + level + " out of range [0, " + KEY_META_LEN_BY_LEVEL_ARRAY.length + ")");
return KEY_META_LEN_BY_LEVEL_ARRAY[level];
} | 3.68 |
hibernate-validator_ValueContext_setTypeParameter | /**
* Sets the container element information.
*
* @param containerClass the class of the container
* @param typeParameterIndex the index of the actual type parameter
*
* @see TypeVariables#getContainerClass(TypeVariable)
* @see TypeVariables#getActualTypeParameter(TypeVariable)
* @see AnnotatedObject
* @see ArrayElement
*/
public final void setTypeParameter(Class<?> containerClass, Integer typeParameterIndex) {
if ( containerClass == null ) {
return;
}
propertyPath.setLeafNodeTypeParameter( containerClass, typeParameterIndex );
} | 3.68 |
flink_AvroParquetRecordFormat_getProducedType | /**
* Gets the type produced by this format. This type will be the type produced by the file source
* as a whole.
*/
@Override
public TypeInformation<E> getProducedType() {
return type;
} | 3.68 |
flink_NFACompiler_createTimesState | /**
* Creates a "complex" state consisting of given number of states with same {@link
* IterativeCondition}.
*
* @param sinkState the state that the created state should point to
* @param proceedState state that the state being converted should proceed to
* @param times number of times the state should be copied
* @return the first state of the "complex" state, next state should point to it
*/
@SuppressWarnings("unchecked")
private State<T> createTimesState(
final State<T> sinkState, final State<T> proceedState, Times times) {
State<T> lastSink = sinkState;
setCurrentGroupPatternFirstOfLoop(false);
final IterativeCondition<T> untilCondition =
(IterativeCondition<T>) currentPattern.getUntilCondition();
final IterativeCondition<T> innerIgnoreCondition =
extendWithUntilCondition(
getInnerIgnoreCondition(currentPattern), untilCondition, false);
final IterativeCondition<T> takeCondition =
extendWithUntilCondition(
getTakeCondition(currentPattern), untilCondition, true);
if (currentPattern.getQuantifier().hasProperty(Quantifier.QuantifierProperty.GREEDY)
&& times.getFrom() != times.getTo()) {
if (untilCondition != null) {
State<T> sinkStateCopy = copy(sinkState);
originalStateMap.put(sinkState.getName(), sinkStateCopy);
}
updateWithGreedyCondition(sinkState, takeCondition);
}
for (int i = times.getFrom(); i < times.getTo(); i++) {
lastSink =
createSingletonState(
lastSink, proceedState, takeCondition, innerIgnoreCondition, true);
addStopStateToLooping(lastSink);
}
for (int i = 0; i < times.getFrom() - 1; i++) {
lastSink =
createSingletonState(
lastSink, null, takeCondition, innerIgnoreCondition, false);
addStopStateToLooping(lastSink);
}
// we created the intermediate states in the loop, now we create the start of the loop.
setCurrentGroupPatternFirstOfLoop(true);
return createSingletonState(
lastSink,
proceedState,
takeCondition,
getIgnoreCondition(currentPattern),
isPatternOptional(currentPattern));
} | 3.68 |
hadoop_IOStatisticsContext_enabled | /**
* Static probe to check if thread-level IO statistics are enabled.
*
* @return whether thread-level IO statistics are enabled.
*/
static boolean enabled() {
return IOStatisticsContextIntegration.isIOStatisticsThreadLevelEnabled();
} | 3.68 |
hbase_ProcedureCoordinator_close | /**
* Shutdown the thread pools and release rpc resources
*/
public void close() throws IOException {
// have to use shutdown now to break any latch waiting
pool.shutdownNow();
rpcs.close();
} | 3.68 |
framework_TableQuery_removeRowIdChangeListener | /**
* Removes the given RowIdChangeListener from this query.
*/
@Override
public void removeRowIdChangeListener(RowIdChangeListener listener) {
if (rowIdChangeListeners != null) {
rowIdChangeListeners.remove(listener);
}
} | 3.68 |
flink_BinaryHashBucketArea_startLookup | /** Starts the probe-side lookup of matching build-side rows for the given hash code. */
void startLookup(int hashCode) {
final int posHashCode = findBucket(hashCode);
// get the bucket for the given hash code
final int bucketArrayPos = posHashCode >> table.bucketsPerSegmentBits;
final int bucketInSegmentOffset =
(posHashCode & table.bucketsPerSegmentMask) << BUCKET_SIZE_BITS;
final MemorySegment bucket = this.buckets[bucketArrayPos];
table.bucketIterator.set(
bucket, overflowSegments, partition, hashCode, bucketInSegmentOffset);
} | 3.68 |
Activiti_DelegateExpressionExecutionListener_getExpressionText | /**
* Returns the expression text for this execution listener. Comes in handy if you want to check which listeners you already have.
*/
public String getExpressionText() {
return expression.getExpressionText();
} | 3.68 |
flink_PathPattern_pattern | /** Returns the pattern given at the constructor, without slashes at both ends. */
public String pattern() {
return pattern;
} | 3.68 |
flink_CheckpointStatsTracker_createSnapshot | /**
* Creates a new snapshot of the available stats.
*
* @return The latest statistics snapshot.
*/
public CheckpointStatsSnapshot createSnapshot() {
CheckpointStatsSnapshot snapshot = latestSnapshot;
// Only create a new snapshot if dirty and no update in progress,
// because we don't want to block the coordinator.
if (dirty && statsReadWriteLock.tryLock()) {
try {
// Create a new snapshot
snapshot =
new CheckpointStatsSnapshot(
counts.createSnapshot(),
summary.createSnapshot(),
history.createSnapshot(),
latestRestoredCheckpoint);
latestSnapshot = snapshot;
dirty = false;
} finally {
statsReadWriteLock.unlock();
}
}
return snapshot;
} | 3.68 |
flink_TwoPhaseCommitSinkFunction_finishProcessing | /**
* This method is called at the end of data processing.
*
* <p>The method is expected to flush all remaining buffered data. Exceptions will cause the
* pipeline to be recognized as failed, because the last data items are not processed properly.
* You may use this method to flush remaining buffered elements in the state into the current
* transaction which will be committed in the last checkpoint.
*/
protected void finishProcessing(@Nullable TXN transaction) {}
// ------ entry points for above methods implementing {@CheckPointedFunction} and
// {@CheckpointListener} | 3.68 |
flink_DataStream_windowAll | /**
* Windows this data stream to a {@code AllWindowedStream}, which evaluates windows over a non
* key grouped stream. Elements are put into windows by a {@link
* org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}. The grouping of elements
* is done by window.
*
* <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
* specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code
* Trigger} that is used if a {@code Trigger} is not specified.
*
* <p>Note: This operation is inherently non-parallel since all elements have to pass through
* the same operator instance.
*
* @param assigner The {@code WindowAssigner} that assigns elements to windows.
* @return The trigger windows data stream.
*/
@PublicEvolving
public <W extends Window> AllWindowedStream<T, W> windowAll(
WindowAssigner<? super T, W> assigner) {
return new AllWindowedStream<>(this, assigner);
} | 3.68 |
pulsar_SchemaUtils_jsonifyKeyValueSchemaInfo | /**
* Jsonify the key/value schema info.
*
* @param kvSchemaInfo the key/value schema info
* @return the jsonified schema info
*/
public static String jsonifyKeyValueSchemaInfo(KeyValue<SchemaInfo, SchemaInfo> kvSchemaInfo) {
GsonBuilder gsonBuilder = new GsonBuilder()
.registerTypeHierarchyAdapter(SchemaInfo.class, SCHEMAINFO_ADAPTER)
.registerTypeHierarchyAdapter(Map.class, SCHEMA_PROPERTIES_SERIALIZER);
return gsonBuilder.create().toJson(kvSchemaInfo);
} | 3.68 |
flink_ResourceManagerPartitionTrackerImpl_setHostedDataSetsAndCheckCorruption | /**
* Updates the data sets for which the given task executor is hosting partitions and returns
* data sets that were corrupted due to a loss of partitions.
*
* @param taskExecutorId ID of the hosting TaskExecutor
* @param reportEntries IDs of data sets for which partitions are hosted
* @return corrupted data sets
*/
private Set<IntermediateDataSetID> setHostedDataSetsAndCheckCorruption(
ResourceID taskExecutorId,
Collection<ClusterPartitionReport.ClusterPartitionReportEntry> reportEntries) {
final Set<IntermediateDataSetID> currentlyHostedDatasets =
reportEntries.stream()
.map(ClusterPartitionReport.ClusterPartitionReportEntry::getDataSetId)
.collect(Collectors.toSet());
final Set<IntermediateDataSetID> previouslyHostedDataSets =
taskExecutorToDataSets.put(taskExecutorId, currentlyHostedDatasets);
// previously tracked data sets may be corrupted since we may be tracking less partitions
// than before
final Set<IntermediateDataSetID> potentiallyCorruptedDataSets =
Optional.ofNullable(previouslyHostedDataSets).orElse(new HashSet<>(0));
// update data set -> task executor mapping and find datasets for which lost a partition
reportEntries.forEach(
hostedPartition -> {
final Map<ResourceID, Set<ResultPartitionID>> taskExecutorHosts =
dataSetToTaskExecutors.computeIfAbsent(
hostedPartition.getDataSetId(), ignored -> new HashMap<>());
final Set<ResultPartitionID> previouslyHostedPartitions =
taskExecutorHosts.put(
taskExecutorId, hostedPartition.getHostedPartitions());
final boolean noPartitionLost =
previouslyHostedPartitions == null
|| hostedPartition
.getHostedPartitions()
.containsAll(previouslyHostedPartitions);
if (noPartitionLost) {
potentiallyCorruptedDataSets.remove(hostedPartition.getDataSetId());
}
});
// now only contains data sets for which a partition is no longer tracked
return potentiallyCorruptedDataSets;
} | 3.68 |
framework_BeanItemContainer_addItem | /**
* Adds the bean to the Container.
*
* The bean is used both as the item contents and as the item identifier.
*
* @see com.vaadin.v7.data.Container#addItem(Object) Container#addItem(Object)
*/
@Override
@SuppressWarnings("unchecked")
public BeanItem<BEANTYPE> addItem(Object itemId) {
return super.addBean((BEANTYPE) itemId);
} | 3.68 |
hbase_ReplicationPeerConfigBuilder_putAllPeerData | /**
* Sets all of the provided serialized peer configuration data.
* @return {@code this}
*/
@InterfaceAudience.Private
default ReplicationPeerConfigBuilder putAllPeerData(Map<byte[], byte[]> peerData) {
peerData.forEach(this::putPeerData);
return this;
} | 3.68 |
framework_VaadinService_isSessionActive | /**
* Returns whether the given session is active or whether it can be closed.
* <p>
* A session is active if and only if its {@link VaadinSession#getState()}
* returns {@link State#OPEN} and
* {@link #getUidlRequestTimeout(VaadinSession) getUidlRequestTimeout} is
* negative or has not yet expired.
*
* @param session
* The session whose status to check
*
* @return true if the session is active, false if it could be closed.
*/
private boolean isSessionActive(VaadinSession session) {
if (session.getState() != State.OPEN || session.getSession() == null) {
return false;
} else {
long now = System.currentTimeMillis();
int timeout = 1000 * getUidlRequestTimeout(session);
return timeout < 0
|| now - session.getLastRequestTimestamp() < timeout;
}
} | 3.68 |
hadoop_RollingFileSystemSink_findCurrentDirectory | /**
* Use the given time to determine the current directory. The current
* directory will be based on the {@link #rollIntervalMinutes}.
*
* @param now the current time
* @return the current directory
*/
private Path findCurrentDirectory(Date now) {
long offset = ((now.getTime() - nextFlush.getTimeInMillis())
/ rollIntervalMillis) * rollIntervalMillis;
String currentDir =
DATE_FORMAT.format(new Date(nextFlush.getTimeInMillis() + offset));
return new Path(basePath, currentDir);
} | 3.68 |
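
The findCurrentDirectory snippet snaps the current time onto a grid of rollIntervalMillis anchored at nextFlush, then formats that instant as a directory name. The sketch below reproduces only that arithmetic with illustrative constants; the format pattern and interval are assumptions, not Hadoop's configuration.

```java
import java.text.SimpleDateFormat;
import java.util.Date;

public class RollDirDemo {

    // Illustrative values; the real sink derives these from its configuration.
    static final long ROLL_INTERVAL_MILLIS = 60L * 60L * 1000L; // one hour
    static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyyMMdd-HHmm");

    // Same arithmetic as the snippet: snap 'now' onto the interval grid anchored
    // at 'nextFlushMillis' and format that instant as the directory name.
    static String currentDirectory(long nowMillis, long nextFlushMillis) {
        long offset = ((nowMillis - nextFlushMillis) / ROLL_INTERVAL_MILLIS) * ROLL_INTERVAL_MILLIS;
        return DATE_FORMAT.format(new Date(nextFlushMillis + offset));
    }

    public static void main(String[] args) {
        long nextFlush = System.currentTimeMillis();
        // Times inside the same interval map to the same directory name.
        System.out.println(currentDirectory(nextFlush, nextFlush));
        System.out.println(currentDirectory(nextFlush + ROLL_INTERVAL_MILLIS / 2, nextFlush));
        // Crossing the interval boundary yields the next directory.
        System.out.println(currentDirectory(nextFlush + 2 * ROLL_INTERVAL_MILLIS, nextFlush));
    }
}
```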
hudi_HoodieTable_finalizeWrite | /**
* Finalize the written data onto storage. Perform any final cleanups.
*
* @param context HoodieEngineContext
* @param stats List of HoodieWriteStats
* @throws HoodieIOException if some paths can't be finalized on storage
*/
public void finalizeWrite(HoodieEngineContext context, String instantTs, List<HoodieWriteStat> stats) throws HoodieIOException {
reconcileAgainstMarkers(context, instantTs, stats, config.getConsistencyGuardConfig().isConsistencyCheckEnabled());
} | 3.68 |
hudi_HoodieMergedLogRecordReader_newBuilder | /**
* Returns the builder for {@code HoodieMergedLogRecordReader}.
*/
public static Builder newBuilder() {
return new Builder();
} | 3.68 |
dubbo_GenericBeanPostProcessorAdapter_doPostProcessAfterInitialization | /**
* Adapts the BeanPostProcessor#postProcessAfterInitialization(Object, String) method; sub-types
* could override this method.
*
* @param bean Bean Object
* @param beanName Bean Name
* @return Bean Object
* @see BeanPostProcessor#postProcessAfterInitialization(Object, String)
*/
protected T doPostProcessAfterInitialization(T bean, String beanName) throws BeansException {
processAfterInitialization(bean, beanName);
return bean;
} | 3.68 |
hbase_HFileBlockIndex_getTotalUncompressedSize | /**
* The total uncompressed size of the root index block, intermediate-level index blocks, and
* leaf-level index blocks.
* @return the total uncompressed size of all index blocks
*/
public long getTotalUncompressedSize() {
return totalBlockUncompressedSize;
} | 3.68 |
flink_Pattern_timesOrMore | /**
* Specifies that this pattern can occur at least the specified number of times, with an interval
* corresponding to the maximum time gap between the previous and current event for each
* occurrence. This means at least the specified number of events and at most an infinite number
* of events can be matched to this pattern.
*
* @param times number of times at least matching event must appear
* @param windowTime time of the matching window between times
* @return The same pattern with a {@link Quantifier#looping(ConsumingStrategy)} quantifier
* applied.
* @throws MalformedPatternException if the quantifier is not applicable to this pattern.
*/
public Pattern<T, F> timesOrMore(int times, @Nullable Time windowTime) {
checkIfNoNotPattern();
checkIfQuantifierApplied();
this.quantifier = Quantifier.looping(quantifier.getConsumingStrategy());
this.times = Times.of(times, windowTime);
return this;
} | 3.68 |
framework_TwinColSelectElement_deselectByText | /**
* Deselects the option with the given option text, i.e. removes it from the
* right side column.
*
* @param text
* the text of the option to deselect
*/
public void deselectByText(String text) {
if (isReadOnly()) {
throw new ReadOnlyException();
}
selectedOptions.deselectAll();
selectedOptions.selectByVisibleText(text);
deselButton.click();
} | 3.68 |
flink_AbstractFileStateBackend_getCheckpointPath | /**
* Gets the checkpoint base directory. Jobs will create job-specific subdirectories for
* checkpoints within this directory. May be null, if not configured.
*
* @return The checkpoint base directory
*/
@Nullable
public Path getCheckpointPath() {
return baseCheckpointPath;
} | 3.68 |
morf_SqlUtils_asDecimal | /**
* Returns a SQL DSL expression to return the field CASTed to
* a decimal of the specified length
*
* @param length length of the decimal cast
* @param scale scale of the decimal cast
* @return {@link Cast} as decimal of given length
*/
public Cast asDecimal(int length, int scale) {
return asType(DataType.DECIMAL, length, scale);
} | 3.68 |
querydsl_MetaDataExporter_setBeanSerializerClass | /**
* Set the Bean serializer class to create bean types as well
*
* @param beanSerializerClass serializer for JavaBeans (default: null)
*/
public void setBeanSerializerClass(Class<? extends Serializer> beanSerializerClass) {
module.bind(SQLCodegenModule.BEAN_SERIALIZER, beanSerializerClass);
} | 3.68 |
streampipes_BoilerpipeHTMLContentHandler_characters | // @Override
public void characters(char[] ch, int start, int length) throws SAXException {
textElementIdx++;
if (flush) {
flushBlock();
flush = false;
}
if (inIgnorableElement != 0) {
return;
}
char c;
boolean startWhitespace = false;
boolean endWhitespace = false;
if (length == 0) {
return;
}
final int end = start + length;
for (int i = start; i < end; i++) {
if (Character.isWhitespace(ch[i])) {
ch[i] = ' ';
}
}
while (start < end) {
c = ch[start];
if (c == ' ') {
startWhitespace = true;
start++;
length--;
} else {
break;
}
}
while (length > 0) {
c = ch[start + length - 1];
if (c == ' ') {
endWhitespace = true;
length--;
} else {
break;
}
}
if (length == 0) {
if (startWhitespace || endWhitespace) {
if (!sbLastWasWhitespace) {
textBuffer.append(' ');
tokenBuffer.append(' ');
}
sbLastWasWhitespace = true;
} else {
sbLastWasWhitespace = false;
}
lastEvent = Event.WHITESPACE;
return;
}
if (startWhitespace) {
if (!sbLastWasWhitespace) {
textBuffer.append(' ');
tokenBuffer.append(' ');
}
}
if (blockTagLevel == -1) {
blockTagLevel = tagLevel;
}
textBuffer.append(ch, start, length);
tokenBuffer.append(ch, start, length);
if (endWhitespace) {
textBuffer.append(' ');
tokenBuffer.append(' ');
}
sbLastWasWhitespace = endWhitespace;
lastEvent = Event.CHARACTERS;
currentContainedTextElements.set(textElementIdx);
} | 3.68 |
hbase_WALEntryBatch_getLastWalPosition | /** Returns the position in the last WAL that was read. */
public long getLastWalPosition() {
return lastWalPosition;
} | 3.68 |
framework_SimpleDayCell_setHeightPX | /*
* Events and whole cell content are drawn by this method. By the
* clear-argument, you can choose to clear all old content. Notice that
* clearing will also remove all element's event handlers.
*/
public void setHeightPX(int px, boolean clear) {
// measure from DOM if needed
if (px < 0) {
intHeight = getOffsetHeight() - BORDERPADDINGSIZE;
} else {
intHeight = px - BORDERPADDINGSIZE;
}
// Couldn't measure height or it ended up negative. Don't bother
// continuing
if (intHeight == -1) {
return;
}
if (clear) {
while (getWidgetCount() > 1) {
remove(1);
}
}
// How many events can be shown in UI
int slots = 0;
if (scrollable) {
for (int i = 0; i < events.length; i++) {
if (events[i] != null) {
slots = i + 1;
}
}
setHeight(intHeight + "px"); // Fixed height
} else {
// Dynamic height by the content
DOM.removeElementAttribute(getElement(), "height");
slots = (intHeight - caption.getOffsetHeight() - bottomSpacerHeight)
/ eventHeight;
if (slots > 10) {
slots = 10;
}
}
updateEvents(slots, clear);
} | 3.68 |
pulsar_LedgerOffloader_scanLedgers | /**
* Scans all the ManagedLedgers stored on this Offloader (usually a Bucket).
* The callback should not modify/delete the ledgers.
* @param consumer receives the metadata of each offloaded ledger
* @param offloadDriverMetadata additional metadata
* @throws ManagedLedgerException
*/
default void scanLedgers(OffloadedLedgerMetadataConsumer consumer,
Map<String, String> offloadDriverMetadata) throws ManagedLedgerException {
throw ManagedLedgerException.getManagedLedgerException(new UnsupportedOperationException());
} | 3.68 |
framework_LayoutManager_registerDependency | /**
* Registers that a ManagedLayout is depending on the size of an Element.
* This causes this layout manager to measure the element in the beginning
* of every layout phase and call the appropriate layout method of the
* managed layout if the size of the element has changed.
*
* @param owner
* the ManagedLayout that depends on an element
* @param element
* the Element that should be measured
*/
public void registerDependency(ManagedLayout owner, Element element) {
MeasuredSize measuredSize = ensureMeasured(element);
setNeedsLayout(owner);
measuredSize.addDependent(owner.getConnectorId());
} | 3.68 |
zxing_PDF417ResultMetadata_getChecksum | /**
* 16-bit CRC checksum using CCITT-16
*
* @return crc checksum, -1 if not set
*/
public int getChecksum() {
return checksum;
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_incrementStreamWindow | /**
* Increments the flow control window for this stream by the given delta and returns the new value.
*/
int incrementStreamWindow(int delta) throws Http2Exception {
if (delta > 0 && Integer.MAX_VALUE - delta < window) {
throw streamError(stream.id(), FLOW_CONTROL_ERROR,
"Window size overflow for stream: %d", stream.id());
}
window += delta;
streamByteDistributor.updateStreamableBytes(this);
return window;
} | 3.68 |
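
The guard in incrementStreamWindow rejects a positive delta whenever window + delta would exceed Integer.MAX_VALUE, without ever performing the overflowing addition. A self-contained sketch of the same check follows; the class name and exception message are illustrative.

```java
public class FlowWindowDemo {

    private int window;

    FlowWindowDemo(int initialWindow) {
        this.window = initialWindow;
    }

    // Same guard as the snippet: adding 'delta' must not overflow the int window.
    int incrementStreamWindow(int delta) {
        if (delta > 0 && Integer.MAX_VALUE - delta < window) {
            throw new IllegalStateException("Window size overflow");
        }
        window += delta;
        return window;
    }

    public static void main(String[] args) {
        FlowWindowDemo w = new FlowWindowDemo(65_535);
        System.out.println(w.incrementStreamWindow(1_000));     // 66535
        try {
            w.incrementStreamWindow(Integer.MAX_VALUE);         // would overflow
        } catch (IllegalStateException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```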
morf_H2Dialect_connectionTestStatement | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#connectionTestStatement()
*/
@Override
public String connectionTestStatement() {
return "select 1";
} | 3.68 |
flink_ExecutionConfig_setAutoWatermarkInterval | /**
* Sets the interval of the automatic watermark emission. Watermarks are used throughout the
* streaming system to keep track of the progress of time. They are used, for example, for time
* based windowing.
*
* <p>Setting an interval of {@code 0} will disable periodic watermark emission.
*
* @param interval The interval between watermarks in milliseconds.
*/
@PublicEvolving
public ExecutionConfig setAutoWatermarkInterval(long interval) {
Preconditions.checkArgument(interval >= 0, "Auto watermark interval must not be negative.");
return setAutoWatermarkInterval(Duration.ofMillis(interval));
} | 3.68 |
flink_AbstractPagedOutputView_seekOutput | /**
* Sets the internal state to the given memory segment and the given position within the
* segment.
*
* @param seg The memory segment to write the next bytes to.
* @param position The position to start writing the next bytes to.
*/
protected void seekOutput(MemorySegment seg, int position) {
this.currentSegment = seg;
this.positionInSegment = position;
} | 3.68 |
hadoop_VersionInfoMojo_determineSCM | /**
* Determines which SCM is in use (git or none) and captures
* output of the SCM command for later parsing.
*
* @return SCM in use for this build
* @throws Exception if any error occurs attempting to determine SCM
*/
private SCM determineSCM() throws Exception {
Exec exec = new Exec(this);
SCM scm = SCM.NONE;
scmOut = new ArrayList<String>();
int ret;
ret = exec.run(Arrays.asList(gitCommand, "branch"), scmOut);
if (ret == 0) {
ret = exec.run(Arrays.asList(gitCommand, "remote", "-v"), scmOut);
if (ret != 0) {
scm = SCM.NONE;
scmOut = null;
} else {
ret = exec.run(Arrays.asList(gitCommand, "log", "-n", "1"), scmOut);
if (ret != 0) {
scm = SCM.NONE;
scmOut = null;
} else {
scm = SCM.GIT;
}
}
}
if (scmOut != null) {
getLog().debug(scmOut.toString());
}
getLog().info("SCM: " + scm);
return scm;
} | 3.68 |
flink_CreditBasedPartitionRequestClientHandler_writeAndFlushNextMessageIfPossible | /**
* Tries to write&flush unannounced credits for the next input channel in queue.
*
* <p>This method may be called by the first input channel enqueuing, or the complete future's
* callback in previous input channel, or the channel writability changed event.
*/
private void writeAndFlushNextMessageIfPossible(Channel channel) {
if (channelError.get() != null || !channel.isWritable()) {
return;
}
while (true) {
ClientOutboundMessage outboundMessage = clientOutboundMessages.poll();
// The input channel may be null because of the write callbacks
// that are executed after each write.
if (outboundMessage == null) {
return;
}
// It is no need to notify credit or resume data consumption for the released channel.
if (!outboundMessage.inputChannel.isReleased()) {
Object msg = outboundMessage.buildMessage();
if (msg == null) {
continue;
}
// Write and flush and wait until this is done before
// trying to continue with the next input channel.
channel.writeAndFlush(msg).addListener(writeListener);
return;
}
}
} | 3.68 |
hadoop_SinglePendingCommit_getFilename | /**
* This is the filename of the pending file itself.
* Used during processing; its persistent value, if any, is ignored.
* @return filename
*/
public String getFilename() {
return filename;
} | 3.68 |
hadoop_MetricsCache_getTag | /**
* Lookup a tag value
* @param key name of the tag
* @return the tag value
*/
public String getTag(String key) {
return tags.get(key);
} | 3.68 |
framework_Calendar_setEventCaptionAsHtml | /**
* Sets whether the event captions are rendered as HTML.
* <p>
* If set to true, the captions are rendered in the browser as HTML and the
* developer is responsible for ensuring no harmful HTML is used. If set to
* false, the caption is rendered in the browser as plain text.
* <p>
* The default is false, i.e. to render that caption as plain text.
*
* @param eventCaptionAsHtml
* {@code true} if the captions are rendered as HTML,
* {@code false} if rendered as plain text
*/
public void setEventCaptionAsHtml(boolean eventCaptionAsHtml) {
getState().eventCaptionAsHtml = eventCaptionAsHtml;
} | 3.68 |
framework_VAbstractOrderedLayout_updateExpandedSizes | /**
* Assigns relative sizes to the children that should expand based on their
* expand ratios.
*/
@SuppressWarnings("deprecation")
public void updateExpandedSizes() {
// Ensure the expand wrapper is in place
if (expandWrapper == null) {
expandWrapper = DOM.createDiv();
expandWrapper.setClassName("v-expand");
// Detach all widgets before modifying DOM
for (Widget widget : getChildren()) {
orphan(widget);
}
while (getElement().getChildCount() > 0) {
Node el = getElement().getChild(0);
expandWrapper.appendChild(el);
}
getElement().appendChild(expandWrapper);
// Attach all widgets again
for (Widget widget : getChildren()) {
adopt(widget);
}
}
// Sum up expand ratios to get the denominator
double total = 0;
for (Slot slot : widgetToSlot.values()) {
// FIXME expandRatio might be <0
total += slot.getExpandRatio();
}
// Give each expanded child its own share
for (Slot slot : widgetToSlot.values()) {
Element slotElement = slot.getElement();
slotElement.removeAttribute("aria-hidden");
Style slotStyle = slotElement.getStyle();
slotStyle.clearVisibility();
slotStyle.clearMarginLeft();
slotStyle.clearMarginTop();
if (slot.getExpandRatio() != 0) {
// FIXME expandRatio might be <0
double size = 100 * (slot.getExpandRatio() / total);
if (vertical) {
slot.setHeight(size + "%");
if (slot.hasRelativeHeight()) {
Util.notifyParentOfSizeChange(this, true);
}
} else {
slot.setWidth(size + "%");
if (slot.hasRelativeWidth()) {
Util.notifyParentOfSizeChange(this, true);
}
}
} else if (slot.isRelativeInDirection(vertical)) {
// Relative child without expansion gets no space at all
if (vertical) {
slot.setHeight("0");
} else {
slot.setWidth("0");
}
slotStyle.setVisibility(Visibility.HIDDEN);
slotElement.setAttribute("aria-hidden", "true");
} else {
// Non-relative child without expansion should be unconstrained
if (vertical) {
slotStyle.clearHeight();
} else {
slotStyle.clearWidth();
}
}
}
} | 3.68 |
hadoop_OBSCommonUtils_innerListStatus | /**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param owner the owner OBSFileSystem instance
* @param f given path
* @param recursive flag indicating if list is recursive
* @return the statuses of the files/directories in the given patch
* @throws FileNotFoundException when the path does not exist;
* @throws IOException due to an IO problem.
* @throws ObsException on failures inside the OBS SDK
*/
static FileStatus[] innerListStatus(final OBSFileSystem owner, final Path f,
final boolean recursive)
throws FileNotFoundException, IOException, ObsException {
Path path = qualify(owner, f);
String key = pathToKey(owner, path);
List<FileStatus> result;
final FileStatus fileStatus = owner.getFileStatus(path);
if (fileStatus.isDirectory()) {
key = maybeAddTrailingSlash(key);
String delimiter = recursive ? null : "/";
ListObjectsRequest request = createListObjectsRequest(owner, key,
delimiter);
LOG.debug(
"listStatus: doing listObjects for directory {} - recursive {}",
f, recursive);
OBSListing.FileStatusListingIterator files = owner.getObsListing()
.createFileStatusListingIterator(
path, request, OBSListing.ACCEPT_ALL,
new OBSListing.AcceptAllButSelfAndS3nDirs(path));
result = new ArrayList<>(files.getBatchSize());
while (files.hasNext()) {
result.add(files.next());
}
return result.toArray(new FileStatus[0]);
} else {
LOG.debug("Adding: rd (not a dir): {}", path);
FileStatus[] stats = new FileStatus[1];
stats[0] = fileStatus;
return stats;
}
} | 3.68 |
flink_UpsertKeyUtil_getSmallestKey | /**
* Returns the smallest key of the given upsert keys. The rule of 'small' is: an upsert key
* represented by {@link ImmutableBitSet} is smaller if it has a smaller cardinality, or a
* smaller leading element when the cardinalities are equal. E.g., '{0,1}' is smaller than
* '{0,1,2}' and '{0,1}' is smaller than '{0,2}'.
*
* @param upsertKeys input upsert keys
* @return the smallest key
*/
@Nonnull
public static int[] getSmallestKey(@Nullable Set<ImmutableBitSet> upsertKeys) {
if (null == upsertKeys || upsertKeys.isEmpty()) {
return new int[0];
}
return upsertKeys.stream()
.map(ImmutableBitSet::toArray)
.reduce(
(k1, k2) -> {
if (k1.length < k2.length) {
return k1;
}
if (k1.length == k2.length) {
for (int index = 0; index < k1.length; index++) {
if (k1[index] < k2[index]) {
return k1;
}
}
}
return k2;
})
.get();
} | 3.68 |
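
getSmallestKey reduces the key set with "fewer elements wins; on equal size, the lexicographically smaller key wins". The sketch below applies the documented rule to plain sorted int[] keys; it is an illustration of the ordering, not the Flink utility itself.

```java
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class SmallestKeyDemo {

    // Same ordering rule as the snippet, expressed over sorted int[] keys:
    // the shorter key wins; on equal length, the key with the first smaller element wins.
    static int[] smallestKey(Set<int[]> keys) {
        if (keys == null || keys.isEmpty()) {
            return new int[0];
        }
        return keys.stream()
                .reduce((k1, k2) -> {
                    if (k1.length != k2.length) {
                        return k1.length < k2.length ? k1 : k2;
                    }
                    for (int i = 0; i < k1.length; i++) {
                        if (k1[i] != k2[i]) {
                            return k1[i] < k2[i] ? k1 : k2;
                        }
                    }
                    return k2;
                })
                .get();
    }

    public static void main(String[] args) {
        Set<int[]> keys = new LinkedHashSet<>(Arrays.asList(
                new int[]{0, 1, 2}, new int[]{0, 2}, new int[]{0, 1}));
        System.out.println(Arrays.toString(smallestKey(keys))); // [0, 1]
    }
}
```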
hadoop_AzureFileSystemInstrumentation_updateBytesReadInLastSecond | /**
* Sets the current gauge value for how many bytes were read in the last
* second.
* @param currentBytesRead The number of bytes.
*/
public void updateBytesReadInLastSecond(long currentBytesRead) {
bytesReadInLastSecond.set(currentBytesRead);
} | 3.68 |
hudi_RepairUtils_getBaseAndLogFilePathsFromTimeline | /**
* Gets the base and log file paths written for a given instant from the timeline.
* This reads the details of the instant metadata.
*
* @param timeline {@link HoodieTimeline} instance, can be active or archived timeline.
* @param instant Instant for lookup.
* @return A {@link Option} of {@link Set} of relative file paths to base path
* if the instant action is supported; empty {@link Option} otherwise.
* @throws IOException if reading instant details fail.
*/
public static Option<Set<String>> getBaseAndLogFilePathsFromTimeline(
HoodieTimeline timeline, HoodieInstant instant) throws IOException {
if (!instant.isCompleted()) {
throw new HoodieException("Cannot get base and log file paths from "
+ "instant not completed: " + instant.getTimestamp());
}
switch (instant.getAction()) {
case COMMIT_ACTION:
case DELTA_COMMIT_ACTION:
final HoodieCommitMetadata commitMetadata =
HoodieCommitMetadata.fromBytes(
timeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
return Option.of(commitMetadata.getPartitionToWriteStats().values().stream().flatMap(List::stream)
.map(HoodieWriteStat::getPath).collect(Collectors.toSet()));
case REPLACE_COMMIT_ACTION:
final HoodieReplaceCommitMetadata replaceCommitMetadata =
HoodieReplaceCommitMetadata.fromBytes(
timeline.getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class);
return Option.of(replaceCommitMetadata.getPartitionToWriteStats().values().stream().flatMap(List::stream)
.map(HoodieWriteStat::getPath).collect(Collectors.toSet()));
default:
return Option.empty();
}
} | 3.68 |
cron-utils_CronParserField_parse | /**
* Parses a String cron expression.
*
* @param expression - cron expression
* @return parse result as CronFieldParseResult instance - never null. May throw a RuntimeException if cron expression is bad.
*/
public CronField parse(final String expression) {
String newExpression = expression;
if (getField().equals(CronFieldName.DAY_OF_WEEK) && newExpression.endsWith("L")) {
Integer value = constraints.getStringMappingValue(newExpression.substring(0, newExpression.length() - 1));
if (value != null) {
newExpression = value + "L";
}
}
return new CronField(field, parser.parse(newExpression), constraints);
} | 3.68 |
hbase_ProcedureStoreTracker_isDeleted | /**
* If {@link #partial} is false, returns state from the bitmap. If no state is found for
* {@code procId}, returns YES. If partial is true, tracker doesn't have complete view of system
* state, so it returns MAYBE if there is no update for the procedure or if it doesn't have a
* state in bitmap. Otherwise, returns state from the bitmap.
*/
public DeleteState isDeleted(long procId) {
Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
if (entry != null && entry.getValue().contains(procId)) {
BitSetNode node = entry.getValue();
DeleteState state = node.isDeleted(procId);
return partial && !node.isModified(procId) ? DeleteState.MAYBE : state;
}
return partial ? DeleteState.MAYBE : DeleteState.YES;
} | 3.68 |
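
isDeleted relies on TreeMap.floorEntry to locate the single BitSetNode whose range could cover procId, then asks that node for the bit. The standalone sketch below shows the same floor-entry lookup pattern with a plain BitSet standing in for BitSetNode; the node size, names, and the "deleted by default" fallback are illustrative.

```java
import java.util.BitSet;
import java.util.Map;
import java.util.TreeMap;

public class FloorEntryLookupDemo {

    // Each node covers a contiguous range of ids starting at its map key, so
    // floorEntry() finds the only node that could contain a given id.
    static final int NODE_SIZE = 64;

    static boolean isDeleted(TreeMap<Long, BitSet> map, long procId) {
        Map.Entry<Long, BitSet> entry = map.floorEntry(procId);
        if (entry == null || procId >= entry.getKey() + NODE_SIZE) {
            return true; // no node tracks this id -> treat as deleted (YES in the snippet)
        }
        return entry.getValue().get((int) (procId - entry.getKey()));
    }

    public static void main(String[] args) {
        TreeMap<Long, BitSet> map = new TreeMap<>();
        BitSet node = new BitSet(NODE_SIZE);
        node.set(5); // mark id 105 (offset 5 from start 100) as deleted
        map.put(100L, node);
        System.out.println(isDeleted(map, 105L)); // true
        System.out.println(isDeleted(map, 110L)); // false
        System.out.println(isDeleted(map, 10L));  // true (untracked id)
    }
}
```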
hadoop_ExecutingStoreOperation_executeOnlyOnce | /**
* Check that the operation has not been invoked twice.
* This is an atomic check.
* After the check: activates the span.
* @throws IllegalStateException on a second invocation.
*/
protected void executeOnlyOnce() {
Preconditions.checkState(
!executed.getAndSet(true),
"Operation attempted twice");
activateAuditSpan();
} | 3.68 |
flink_LogicalTypeChecks_hasNested | /** Checks whether a (possibly nested) logical type fulfills the given predicate. */
public static boolean hasNested(LogicalType logicalType, Predicate<LogicalType> predicate) {
final NestedTypeSearcher typeSearcher = new NestedTypeSearcher(predicate);
return logicalType.accept(typeSearcher).isPresent();
} | 3.68 |
dubbo_RegistryBuilder_parameter | /**
* @param name the parameter name
* @param value the parameter value
* @return {@link RegistryBuilder}
* @since 2.7.8
*/
public RegistryBuilder parameter(String name, String value) {
return appendParameter(name, value);
} | 3.68 |
hbase_VisibilityClient_addLabel | /**
* Utility method for adding label to the system.
*/
public static VisibilityLabelsResponse addLabel(Connection connection, final String label)
throws Throwable {
return addLabels(connection, new String[] { label });
} | 3.68 |
shardingsphere-elasticjob_TriggerNode_getLocalTriggerPath | /**
* Get local trigger path.
*
* @return local trigger path
*/
public String getLocalTriggerPath() {
return getTriggerPath(JobRegistry.getInstance().getJobInstance(jobName).getJobInstanceId());
} | 3.68 |
framework_StateChangeEvent_isInitialStateChange | /**
* Checks if the state change event is the first one for the given
* connector.
*
* @since 7.1
* @return true if this is the first state change event for the connector,
* false otherwise
*/
public boolean isInitialStateChange() {
return initialStateChange;
} | 3.68 |
hadoop_SQLDelegationTokenSecretManager_updateToken | /**
* Updates the TokenInformation of an existing TokenIdentifier in
* the SQL database.
* @param ident Existing TokenIdentifier in the SQL database.
* @param tokenInfo Updated DelegationTokenInformation associated with the TokenIdentifier.
*/
@Override
protected void updateToken(TokenIdent ident,
DelegationTokenInformation tokenInfo) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
try (DataOutputStream dos = new DataOutputStream(bos)) {
tokenInfo.write(dos);
// Update token in SQL database
updateToken(ident.getSequenceNumber(), ident.getBytes(), bos.toByteArray());
// Update token in local cache
super.updateToken(ident, tokenInfo);
}
} catch (SQLException e) {
throw new IOException("Failed to update token in SQL secret manager", e);
}
} | 3.68 |
hudi_BoundedInMemoryQueue_insertRecord | /**
* Inserts record into queue after applying transformation.
*
* @param t Item to be queued
*/
@Override
public void insertRecord(I t) throws Exception {
// If already closed, throw exception
if (isWriteDone.get()) {
throw new IllegalStateException("Queue closed for enqueueing new entries");
}
// We need to stop queueing if queue-reader has failed and exited.
throwExceptionIfFailed();
rateLimiter.acquire();
// We are retrieving insert value in the record queueing thread to offload computation
// around schema validation
// and record creation to it.
final O payload = transformFunction.apply(t);
adjustBufferSizeIfNeeded(payload);
queue.put(Option.of(payload));
} | 3.68 |
morf_OracleDialect_getSqlForLastDayOfMonth | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForLastDayOfMonth
*/
@Override
protected String getSqlForLastDayOfMonth(AliasedField date) {
return "LAST_DAY(" + getSqlFrom(date) + ")";
} | 3.68 |
morf_ExistingViewStateLoader_isEmpty | /**
* @return true if there are no views to deploy or drop.
*/
public boolean isEmpty() {
return viewsToDrop.isEmpty() && viewsToDeploy.isEmpty();
} | 3.68 |
hadoop_CachedDNSToSwitchMapping_isSingleSwitch | /**
* Delegate the switch topology query to the raw mapping, via
* {@link AbstractDNSToSwitchMapping#isMappingSingleSwitch(DNSToSwitchMapping)}
* @return true iff the raw mapper is considered single-switch.
*/
@Override
public boolean isSingleSwitch() {
return isMappingSingleSwitch(rawMapping);
} | 3.68 |
pulsar_MetadataStore_sync | /**
* Ensure that the next value read from the local client will be up-to-date with the latest version of the value
* as it can be seen by all the other clients.
* @param path
* @return a handle to the operation
*/
default CompletableFuture<Void> sync(String path) {
return CompletableFuture.completedFuture(null);
} | 3.68 |
pulsar_BrokerService_getTopicPolicies | /**
* Get {@link TopicPolicies} for the parameterized topic.
* @param topicName
* @return TopicPolicies, if they exist. Otherwise, the value will not be present.
*/
public Optional<TopicPolicies> getTopicPolicies(TopicName topicName) {
if (!pulsar().getConfig().isTopicLevelPoliciesEnabled()) {
return Optional.empty();
}
return Optional.ofNullable(pulsar.getTopicPoliciesService()
.getTopicPoliciesIfExists(topicName));
} | 3.68 |
hudi_HoodieRepairTool_restoreFiles | /**
* Restores dangling files from backup path to table base path.
*
* @param relativeFilePaths A {@link List} of relative file paths for restoring.
* @return {@code true} if all successful; {@code false} otherwise.
*/
boolean restoreFiles(List<String> relativeFilePaths) {
return copyFiles(context, relativeFilePaths, cfg.backupPath, cfg.basePath);
} | 3.68 |
hudi_LSMTimelineWriter_newFileName | /**
* Returns a new file name.
*/
private static String newFileName(String minInstant, String maxInstant, int layer) {
return String.format("%s_%s_%d%s", minInstant, maxInstant, layer, HoodieFileFormat.PARQUET.getFileExtension());
} | 3.68 |
hbase_HFileBlock_readBlockData | /**
* Reads a version 2 block (version 1 blocks not supported and not expected). Tries to do as
* little memory allocation as possible, using the provided on-disk size.
* @param offset the offset in the stream to read at
* @param onDiskSizeWithHeaderL the on-disk size of the block, including the header, or -1 if
* unknown; i.e. when iterating over blocks reading in the file
* metadata info.
* @param pread whether to use a positional read
* @param updateMetrics whether to update the metrics
* @param intoHeap allocate ByteBuff of block from heap or off-heap.
* @see FSReader#readBlockData(long, long, boolean, boolean, boolean) for more details about the
* useHeap.
*/
@Override
public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean pread,
boolean updateMetrics, boolean intoHeap) throws IOException {
// Get a copy of the current state of whether to validate
// hbase checksums or not for this read call. This is not
// thread-safe but the one constraint is that if we decide
// to skip hbase checksum verification then we are
// guaranteed to use hdfs checksum verification.
boolean doVerificationThruHBaseChecksum = streamWrapper.shouldUseHBaseChecksum();
FSDataInputStream is = streamWrapper.getStream(doVerificationThruHBaseChecksum);
final Context context = Context.current().with(CONTEXT_KEY,
new HFileContextAttributesBuilderConsumer(fileContext)
.setSkipChecksum(doVerificationThruHBaseChecksum)
.setReadType(pread ? ReadType.POSITIONAL_READ : ReadType.SEEK_PLUS_READ));
try (Scope ignored = context.makeCurrent()) {
HFileBlock blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread,
doVerificationThruHBaseChecksum, updateMetrics, intoHeap);
if (blk == null) {
HFile.LOG.warn("HBase checksum verification failed for file {} at offset {} filesize {}."
+ " Retrying read with HDFS checksums turned on...", pathName, offset, fileSize);
if (!doVerificationThruHBaseChecksum) {
String msg = "HBase checksum verification failed for file " + pathName + " at offset "
+ offset + " filesize " + fileSize + " but this cannot happen because doVerify is "
+ doVerificationThruHBaseChecksum;
HFile.LOG.warn(msg);
throw new IOException(msg); // cannot happen case here
}
HFile.CHECKSUM_FAILURES.increment(); // update metrics
// If we have a checksum failure, we fall back into a mode where
// the next few reads use HDFS level checksums. We aim to make the
// next CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD reads avoid
// hbase checksum verification, but since this value is set without
// holding any locks, it can so happen that we might actually do
// a few more than precisely this number.
is = this.streamWrapper.fallbackToFsChecksum(CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD);
doVerificationThruHBaseChecksum = false;
blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread,
doVerificationThruHBaseChecksum, updateMetrics, intoHeap);
if (blk != null) {
HFile.LOG.warn(
"HDFS checksum verification succeeded for file {} at offset {} filesize" + " {}",
pathName, offset, fileSize);
}
}
if (blk == null && !doVerificationThruHBaseChecksum) {
String msg =
"readBlockData failed, possibly due to " + "checksum verification failed for file "
+ pathName + " at offset " + offset + " filesize " + fileSize;
HFile.LOG.warn(msg);
throw new IOException(msg);
}
// If there is a checksum mismatch earlier, then retry with
// HBase checksums switched off and use HDFS checksum verification.
// This triggers HDFS to detect and fix corrupt replicas. The
// next checksumOffCount read requests will use HDFS checksums.
// The decrementing of this.checksumOffCount is not thread-safe,
// but it is harmless because eventually checksumOffCount will be
// a negative number.
streamWrapper.checksumOk();
return blk;
}
} | 3.68 |
hadoop_DynamicIOStatistics_addMeanStatisticFunction | /**
* Adds a mapping of a key to a meanStatistic function.
* @param key the key
* @param eval the evaluator
*/
void addMeanStatisticFunction(String key,
Function<String, MeanStatistic> eval) {
meanStatistics.addFunction(key, eval);
} | 3.68 |
hudi_HoodieFlinkClusteringJob_start | /**
* Main method to start clustering service.
*/
public void start(boolean serviceMode) throws Exception {
if (serviceMode) {
clusteringScheduleService.start(null);
try {
clusteringScheduleService.waitForShutdown();
} catch (Exception e) {
throw new HoodieException(e.getMessage(), e);
} finally {
LOG.info("Shut down hoodie flink clustering");
}
} else {
LOG.info("Hoodie Flink Clustering running only single round");
try {
clusteringScheduleService.cluster();
} catch (ApplicationExecutionException aee) {
if (aee.getMessage().contains(NO_EXECUTE_KEYWORD)) {
LOG.info("Clustering is not performed");
} else {
LOG.error("Got error trying to perform clustering. Shutting down", aee);
throw aee;
}
} catch (Exception e) {
LOG.error("Got error running delta sync once. Shutting down", e);
throw e;
} finally {
LOG.info("Shut down hoodie flink clustering");
}
}
} | 3.68 |
framework_AbstractLayout_writeMargin | /**
* Writes margin attributes from a MarginInfo object to a design. This
* helper method should be called from the
* {@link #writeDesign(Element, DesignContext) writeDesign} method of layouts
* that implement {@link MarginHandler}.
*
*
* @since 7.5
*
* @param design
* the design to write to
* @param margin
* the margin state to write
* @param defMargin
* the default margin state to compare against
* @param context
* the DesignContext instance used for parsing the design
*/
protected void writeMargin(Element design, MarginInfo margin,
MarginInfo defMargin, DesignContext context) {
if (defMargin.getBitMask() == margin.getBitMask()) {
// Default, no need to write
} else if (margin.hasNone()) {
// Write "margin='false'"
DesignAttributeHandler.writeAttribute("margin", design.attributes(),
false, true, boolean.class, context);
} else if (margin.hasAll()) {
// Write "margin"
DesignAttributeHandler.writeAttribute("margin", design.attributes(),
true, false, boolean.class, context);
} else {
DesignAttributeHandler.writeAttribute("margin-left",
design.attributes(), margin.hasLeft(), defMargin.hasLeft(),
boolean.class, context);
DesignAttributeHandler.writeAttribute("margin-right",
design.attributes(), margin.hasRight(),
defMargin.hasRight(), boolean.class, context);
DesignAttributeHandler.writeAttribute("margin-top",
design.attributes(), margin.hasTop(), defMargin.hasTop(),
boolean.class, context);
DesignAttributeHandler.writeAttribute("margin-bottom",
design.attributes(), margin.hasBottom(),
defMargin.hasBottom(), boolean.class, context);
}
} | 3.68 |
flink_CountWindow_getId | /** Gets the id (0-based) of the window. */
public long getId() {
return id;
} | 3.68 |
morf_XmlDataSetProducer_records | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#records(java.lang.String)
*/
@Override
public Iterable<Record> records(final String tableName) {
return new Iterable<>() {
@Override
public Iterator<Record> iterator() {
final InputStream inputStream = xmlStreamProvider.openInputStreamForTable(tableName);
XMLStreamReader xmlStreamReader = openPullParser(inputStream);
return new PullProcessorRecordIterator(xmlStreamReader) {
@Override
public boolean hasNext() {
boolean result = super.hasNext();
if (!result) {
try {
inputStream.close();
} catch (IOException e) {
throw new RuntimeException("Error closing input stream", e);
}
}
return result;
}
};
}
};
} | 3.68 |
hudi_MarkerHandler_getAllMarkers | /**
* @param markerDir marker directory path
* @return all marker paths in the marker directory
*/
public Set<String> getAllMarkers(String markerDir) {
MarkerDirState markerDirState = getMarkerDirState(markerDir);
return markerDirState.getAllMarkers();
} | 3.68 |
querydsl_StringExpression_max | /**
* Create a {@code max(this)} expression
*
* <p>Get the maximum value of this expression (aggregation)</p>
*
* @return max(this)
*/
public StringExpression max() {
if (max == null) {
max = Expressions.stringOperation(Ops.AggOps.MAX_AGG, mixin);
}
return max;
} | 3.68 |
AreaShop_RegionSign_getLocation | /**
* Get the location of this sign.
* @return The location of this sign
*/
public Location getLocation() {
return Utils.configToLocation(getRegion().getConfig().getConfigurationSection("general.signs." + key + ".location"));
} | 3.68 |
framework_MenuBar_getLastItem | /**
* Gets the last item from the menu, or null if there are no items.
*
* @since 7.2.6
* @return the last item from the menu or null if no items.
*/
public MenuItem getLastItem() {
return items != null && !items.isEmpty() ? items.get(items.size() - 1)
: null;
} | 3.68 |
hudi_HoodieAppendHandle_isUpdateRecord | /**
* Returns whether the hoodie record is an UPDATE.
*/
protected boolean isUpdateRecord(HoodieRecord<T> hoodieRecord) {
// If currentLocation is present, then this is an update
return hoodieRecord.getCurrentLocation() != null;
} | 3.68 |
morf_AbstractSelectStatement_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder();
if (fields.isEmpty()) {
result.append("*");
} else {
result.append(fields);
}
if (table != null) {
result.append(" FROM [").append(table).append("]");
}
if (!fromSelects.isEmpty()) {
result.append(" FROM ").append(fromSelects);
}
if (!joins.isEmpty()) result.append(" ");
result.append(StringUtils.join(joins, " "));
if (whereCriterion != null) result.append(" WHERE [").append(whereCriterion).append("]");
if (!orderBys.isEmpty()) result.append(" ORDER BY ").append(orderBys);
return result.toString();
} | 3.68 |