name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, range 3.26-3.68) |
---|---|---|
flink_InternalServiceDecorator_getInternalServiceName_rdh | /**
* Generate name of the internal Service.
*/
public static String getInternalServiceName(String clusterId) {
return clusterId;
} | 3.26 |
flink_CliUtils_createFile_rdh | /**
* Create the file as well as the parent directory.
*/
public static boolean createFile(final Path filePath) {
try {
final Path parent = filePath.getParent();
if (parent == null) {
return false;
}
if (Files.notExists(parent)) {
Files.createDirectories(parent);
}
if (Files.notExists(filePath))
{
Files.createFile(filePath);
}
return true;
} catch (final Exception e) {
return false;
}
} | 3.26 |
flink_CliUtils_getSessionTimeZone_rdh | /**
* Get time zone from the given session config.
 */
public static ZoneId getSessionTimeZone(ReadableConfig sessionConfig) {
final String zone = sessionConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
return TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone) ? ZoneId.systemDefault() : ZoneId.of(zone);
} | 3.26 |
flink_FunctionDefinition_getRequirements_rdh | /**
* Returns the set of requirements this definition demands.
*/
default Set<FunctionRequirement> getRequirements() {
return Collections.emptySet();
} | 3.26 |
flink_StreamOperatorWrapper_finish_rdh | /**
* Finishes the wrapped operator and propagates the finish operation to the next wrapper that
* the {@link #next} points to.
*
 * <p>Note that this method must be called in the task thread, because we need to call {@link MailboxExecutor#yield()} to take the mails of the closing operator and running timers and run
* them.
*/
public void finish(StreamTaskActionExecutor actionExecutor, StopMode stopMode) throws Exception {
if ((!isHead) && (stopMode == StopMode.DRAIN)) {
// NOTE: This is only done when the operator is a one-input operator. At present,
// any non-head operator in the operator chain is a one-input operator.
actionExecutor.runThrowing(() -> endOperatorInput(1));
}
quiesceTimeServiceAndFinishOperator(actionExecutor, stopMode);
// propagate the close operation to the next wrapper
if (next != null) {
next.finish(actionExecutor, stopMode);
}
} | 3.26 |
flink_StreamOperatorWrapper_close_rdh | /**
* Close the operator.
*/
public void close() throws Exception {
closed = true;
wrapped.close();
} | 3.26 |
flink_StreamOperatorWrapper_m0_rdh | /**
* Checks if the wrapped operator has been closed.
*
* <p>Note that this method must be called in the task thread.
 */
public boolean m0() {
return closed;
} | 3.26 |
flink_StreamOperatorWrapper_endOperatorInput_rdh | /**
* Ends an input of the operator contained by this wrapper.
*
* @param inputId
 * the input ID, starting from 1, which indicates the first input.
*/
public void endOperatorInput(int inputId) throws Exception {
if (wrapped instanceof BoundedOneInput) {
((BoundedOneInput) (wrapped)).endInput();
} else if (wrapped instanceof BoundedMultiInput) {
((BoundedMultiInput) (wrapped)).endInput(inputId);
}
} | 3.26 |
flink_MetricStore_isRepresentativeAttempt_rdh | // Returns whether the attempt is the representative one. It's also true if the current
// execution attempt number for the subtask is not present in the currentExecutionAttempts,
// which means there should be only one execution
private boolean isRepresentativeAttempt(String jobID, String vertexID, int subtaskIndex, int attemptNumber) {
return Optional.of(representativeAttempts).map(m -> m.get(jobID)).map(m -> m.get(vertexID)).map(m -> m.get(subtaskIndex)).orElse(attemptNumber) == attemptNumber;
} | 3.26 |
flink_MetricStore_getJobManager_rdh | /**
*
* @deprecated Use semantically equivalent {@link #getJobManagerMetricStore()}.
*/
@Deprecated
public synchronized ComponentMetricStore getJobManager() {
return ComponentMetricStore.unmodifiable(jobManager);
} | 3.26 |
flink_MetricStore_retainTaskManagers_rdh | /**
* Remove inactive task managers.
*
* @param activeTaskManagers
* to retain.
*/
synchronized void retainTaskManagers(List<String> activeTaskManagers) {
taskManagers.keySet().retainAll(activeTaskManagers);
} | 3.26 |
flink_MetricStore_getJobManagerMetricStore_rdh | // -----------------------------------------------------------------------------------------------------------------
// Accessors for sub MetricStores
// -----------------------------------------------------------------------------------------------------------------
/**
* Returns the {@link ComponentMetricStore} for the JobManager.
*
* @return ComponentMetricStore for the JobManager
*/
public synchronized ComponentMetricStore getJobManagerMetricStore() {
return ComponentMetricStore.unmodifiable(jobManager);
}
/**
* Returns the {@link TaskManagerMetricStore} | 3.26 |
flink_MetricStore_addAll_rdh | /**
* Add metric dumps to the store.
*
* @param metricDumps
* to add.
*/
synchronized void addAll(List<MetricDump> metricDumps) {
for (MetricDump metric : metricDumps) {
add(metric);
}
} | 3.26 |
flink_MetricStore_retainJobs_rdh | /**
 * Remove inactive jobs.
*
* @param activeJobs
* to retain.
*/
synchronized void retainJobs(List<String> activeJobs) {
jobs.keySet().retainAll(activeJobs);
representativeAttempts.keySet().retainAll(activeJobs);
} | 3.26 |
flink_SqlLikeChainChecker_checkBegin_rdh | /**
* Matches the beginning of each string to a pattern.
*/
private static boolean checkBegin(BinaryStringData pattern, MemorySegment[] segments, int start, int len) {
int lenSub = pattern.getSizeInBytes();
return (len >= lenSub) && SegmentsUtil.equals(pattern.getSegments(), 0, segments, start, lenSub);
} | 3.26 |
flink_SqlLikeChainChecker_checkEnd_rdh | /**
* Matches the ending of each string to its pattern.
*/
private static boolean checkEnd(BinaryStringData pattern, MemorySegment[] segments, int start, int len) {
int lenSub = pattern.getSizeInBytes();
return (len >= lenSub) && SegmentsUtil.equals(pattern.getSegments(), 0, segments, (start + len) - lenSub, lenSub);
} | 3.26 |
flink_SqlLikeChainChecker_indexMiddle_rdh | /**
* Matches the middle of each string to its pattern.
*
 * @return the absolute offset of the match.
*/
private static int indexMiddle(BinaryStringData pattern, MemorySegment[] segments, int start, int len) {
return SegmentsUtil.find(segments, start, len, pattern.getSegments(), pattern.getOffset(), pattern.getSizeInBytes());
} | 3.26 |
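The three helpers above together implement chained matching for SQL LIKE patterns of the form 'begin%middle%end': the begin literal is anchored at the start, the end literal at the end, and each middle literal must occur, in order, in between. Below is a self-contained plain-String sketch of the same idea (conceptual only; the real code operates on binary MemorySegments, and the class and method names here are made up):

public final class LikeChainSketch {

    // Conceptual illustration of chained LIKE matching for a pattern such as "abc%mid%xyz":
    // the begin literal must anchor at the start, the end literal at the end, and every
    // middle literal must be found, in order, strictly between them. Plain Strings stand
    // in for the MemorySegment-based checkBegin/indexMiddle/checkEnd helpers above.
    static boolean matchesChain(String value, String begin, String[] middles, String end) {
        if (value.length() < begin.length() + end.length()) {
            return false;
        }
        if (!value.startsWith(begin) || !value.endsWith(end)) {
            return false;
        }
        int from = begin.length();
        int limit = value.length() - end.length();
        for (String middle : middles) {
            int idx = value.indexOf(middle, from);
            if (idx < 0 || idx + middle.length() > limit) {
                return false;
            }
            from = idx + middle.length();
        }
        return true;
    }

    public static void main(String[] args) {
        // pattern: abc%mid%xyz
        System.out.println(matchesChain("abc--mid--xyz", "abc", new String[] {"mid"}, "xyz")); // true
        System.out.println(matchesChain("abc--xyz", "abc", new String[] {"mid"}, "xyz"));      // false
    }
}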
flink_PendingCheckpoint_toString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return String.format("Pending Checkpoint %d @ %d - confirmed=%d, pending=%d", checkpointId, checkpointTimestamp, getNumberOfAcknowledgedTasks(), getNumberOfNonAcknowledgedTasks());
} | 3.26 |
flink_PendingCheckpoint_getJobId_rdh | // --------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public JobID getJobId() {
return jobId;
} | 3.26 |
flink_PendingCheckpoint_abort_rdh | // ------------------------------------------------------------------------
// Cancellation
// ------------------------------------------------------------------------
/**
* Aborts a checkpoint with reason and cause.
*/
public void abort(CheckpointFailureReason reason, @Nullable Throwable cause, CheckpointsCleaner checkpointsCleaner, Runnable postCleanup, Executor executor, CheckpointStatsTracker statsTracker) {
try {
failureCause = new CheckpointException(reason, cause);
f1.completeExceptionally(failureCause);
masterTriggerCompletionPromise.completeExceptionally(failureCause);
assertAbortSubsumedForced(reason);
} finally {
dispose(true, checkpointsCleaner, postCleanup, executor);
}
} | 3.26 |
flink_PendingCheckpoint_canBeSubsumed_rdh | /**
* Checks whether this checkpoint can be subsumed or whether it should always continue,
* regardless of newer checkpoints in progress.
*
* @return True if the checkpoint can be subsumed, false otherwise.
*/
public boolean canBeSubsumed() {
// If the checkpoint is forced, it cannot be subsumed.
return !props.isSavepoint();
} | 3.26 |
flink_PendingCheckpoint_acknowledgeTask_rdh | /**
* Acknowledges the task with the given execution attempt id and the given subtask state.
*
* @param executionAttemptId
* of the acknowledged task
* @param operatorSubtaskStates
* of the acknowledged task
* @param metrics
* Checkpoint metrics for the stats
* @return TaskAcknowledgeResult of the operation
*/
public TaskAcknowledgeResult acknowledgeTask(ExecutionAttemptID executionAttemptId, TaskStateSnapshot operatorSubtaskStates, CheckpointMetrics metrics) {
synchronized(lock) {
if (disposed) {
return TaskAcknowledgeResult.DISCARDED;
}
final ExecutionVertex vertex = notYetAcknowledgedTasks.remove(executionAttemptId);
if (vertex == null) {
if (acknowledgedTasks.contains(executionAttemptId)) {
return TaskAcknowledgeResult.DUPLICATE;
} else {
return TaskAcknowledgeResult.UNKNOWN;
}
} else {
acknowledgedTasks.add(executionAttemptId);
}
long ackTimestamp = System.currentTimeMillis();
if ((operatorSubtaskStates != null) && operatorSubtaskStates.isTaskDeployedAsFinished()) {
checkpointPlan.reportTaskFinishedOnRestore(vertex);
} else {
List<OperatorIDPair> operatorIDs = vertex.getJobVertex().getOperatorIDs();
for (OperatorIDPair operatorID : operatorIDs) {
updateOperatorState(vertex, operatorSubtaskStates, operatorID);
}
if ((operatorSubtaskStates != null) && operatorSubtaskStates.isTaskFinished()) {
checkpointPlan.reportTaskHasFinishedOperators(vertex);
}
}
++numAcknowledgedTasks;
// publish the checkpoint statistics
// to prevent null-pointers from concurrent modification, copy reference onto stack
if (pendingCheckpointStats != null) {
// Do this in millis because the web frontend works with them
long alignmentDurationMillis = metrics.getAlignmentDurationNanos() / 1000000;
long checkpointStartDelayMillis = metrics.getCheckpointStartDelayNanos() / 1000000;
SubtaskStateStats subtaskStateStats = new SubtaskStateStats(vertex.getParallelSubtaskIndex(), ackTimestamp, metrics.getBytesPersistedOfThisCheckpoint(), metrics.getTotalBytesPersisted(), metrics.getSyncDurationMillis(), metrics.getAsyncDurationMillis(), metrics.getBytesProcessedDuringAlignment(), metrics.getBytesPersistedDuringAlignment(), alignmentDurationMillis, checkpointStartDelayMillis, metrics.getUnalignedCheckpoint(), true);
f0.trace("Checkpoint {} stats for {}: size={}Kb, duration={}ms, sync part={}ms, async part={}ms", checkpointId, vertex.getTaskNameWithSubtaskIndex(), subtaskStateStats.getStateSize() == 0 ? 0 : subtaskStateStats.getStateSize() / 1024, subtaskStateStats.getEndToEndDuration(pendingCheckpointStats.getTriggerTimestamp()), subtaskStateStats.getSyncCheckpointDuration(), subtaskStateStats.getAsyncCheckpointDuration());
pendingCheckpointStats.reportSubtaskStats(vertex.getJobvertexId(), subtaskStateStats);
}
return TaskAcknowledgeResult.SUCCESS;
}
} | 3.26 |
flink_PendingCheckpoint_discard_rdh | /**
* Discard state. Must be called after {@link #dispose(boolean, CheckpointsCleaner,
* Runnable, Executor) dispose}.
*/
@Override
public void discard() {
synchronized(lock) {
if (discarded) {
Preconditions.checkState(disposed, "Checkpoint should be disposed before being discarded");
return;
} else {
discarded = true;
}
}
// discard the private states.
// unregistered shared states are still considered private at this point.
try {
StateUtil.bestEffortDiscardAllStateObjects(operatorStates.values());
if (targetLocation != null) {
targetLocation.disposeOnFailure();
}
} catch (Throwable t) {
f0.warn("Could not properly dispose the private states in the pending checkpoint {} of job {}.", checkpointId, jobId, t);
} finally {
operatorStates.clear();
}
} | 3.26 |
flink_PendingCheckpoint_m1_rdh | /**
* Sets the handle for the canceller to this pending checkpoint. This method fails with an
* exception if a handle has already been set.
*
 * @return true if the handle was set, false if the checkpoint is already disposed
*/
public boolean m1(ScheduledFuture<?> cancellerHandle) {
synchronized(lock) {
if (this.f2 == null) {
if (!disposed) {
this.f2 = cancellerHandle;
return true;
} else {
return false;
}
} else {
throw new IllegalStateException("A canceller handle was already set");
}
}
} | 3.26 |
flink_PendingCheckpoint_acknowledgeMasterState_rdh | /**
* Acknowledges a master state (state generated on the checkpoint coordinator) to the pending
* checkpoint.
*
* @param identifier
* The identifier of the master state
* @param state
* The state to acknowledge
*/
public void acknowledgeMasterState(String identifier, @Nullable MasterState state) {
synchronized(lock) {
if (!disposed) {
if (notYetAcknowledgedMasterStates.remove(identifier) && (state != null)) {
masterStates.add(state);
}
}
}
} | 3.26 |
flink_PendingCheckpoint_getCompletionFuture_rdh | // ------------------------------------------------------------------------
// Progress and Completion
// ------------------------------------------------------------------------
/**
* Returns the completion future.
*
* @return A future to the completed checkpoint
*/
public CompletableFuture<CompletedCheckpoint> getCompletionFuture() {
return f1;
} | 3.26 |
flink_BloomFilter_estimateFalsePositiveProbability_rdh | /**
 * Computes the false positive probability for the given number of input entries and bit size. Note: this
 * is just the mathematical expected value; the actual false positive rate observed in practice is not
 * guaranteed to stay below the returned value.
 *
 * @param inputEntries
 * number of expected entries
 * @param bitSize
 * size of the bit set in bits
 * @return the estimated false positive probability
 */
public static double estimateFalsePositiveProbability(long inputEntries, int bitSize) {
int numFunction = optimalNumOfHashFunctions(inputEntries, bitSize);
double p = Math.pow(Math.E, ((-((double) (numFunction))) * inputEntries) / bitSize);
double estimatedFPP = Math.pow(1 - p, numFunction);
return estimatedFPP;
} | 3.26 |
flink_BloomFilter_toBytes_rdh | /**
 * Serializes to bytes; note that only heap memory is currently supported.
*/
public static byte[] toBytes(BloomFilter filter) {
byte[] data = filter.bitSet.toBytes();
int byteSize = data.length;
byte[] bytes = new byte[8 + byteSize];
UNSAFE.putInt(bytes, BYTE_ARRAY_BASE_OFFSET, filter.numHashFunctions);
UNSAFE.putInt(bytes, BYTE_ARRAY_BASE_OFFSET + 4, byteSize);
UNSAFE.copyMemory(data, BYTE_ARRAY_BASE_OFFSET, bytes, BYTE_ARRAY_BASE_OFFSET + 8, byteSize);
return bytes;
} | 3.26 |
flink_BloomFilter_fromBytes_rdh | /**
* Deserializing bytes array to BloomFilter. Currently, only heap memory is supported.
*/
public static BloomFilter fromBytes(byte[] bytes) {
int numHashFunctions = UNSAFE.getInt(bytes, BYTE_ARRAY_BASE_OFFSET);
int byteSize = UNSAFE.getInt(bytes, BYTE_ARRAY_BASE_OFFSET + 4);
byte[] data = new byte[byteSize];
UNSAFE.copyMemory(bytes, BYTE_ARRAY_BASE_OFFSET + 8, data, BYTE_ARRAY_BASE_OFFSET, byteSize);
BitSet bitSet = new BitSet(byteSize);
bitSet.setMemorySegment(MemorySegmentFactory.wrap(data), 0);
return new BloomFilter(bitSet, numHashFunctions);
} | 3.26 |
flink_BloomFilter_optimalNumOfHashFunctions_rdh | /**
 * Computes the optimal number of hash functions for the given number of input entries and bit size,
 * which minimizes the false positive probability.
 *
 * @param expectEntries
 * expected number of entries
 * @param bitSize
 * size of the bit set in bits
 * @return the optimal number of hash functions
 */
static int optimalNumOfHashFunctions(long expectEntries, long bitSize) {
return Math.max(1, ((int) (Math.round((((double) (bitSize)) / expectEntries) * Math.log(2)))));
} | 3.26 |
flink_BloomFilter_mergeSerializedBloomFilters_rdh | /**
 * Merges the bf2 bytes into bf1. After the merge completes, the contents of bf1 will have been changed.
*/
private static byte[] mergeSerializedBloomFilters(byte[] bf1Bytes, int bf1Start, int bf1Length, byte[] bf2Bytes, int bf2Start, int bf2Length) {
if (bf1Length != bf2Length) {
throw new IllegalArgumentException(String.format("bf1Length %s does not match bf2Length %s when merging", bf1Length, bf2Length));
}
// Validation on hash functions
if (UNSAFE.getByte(bf1Bytes, BYTE_ARRAY_BASE_OFFSET + bf1Start) != UNSAFE.getByte(bf2Bytes, BYTE_ARRAY_BASE_OFFSET + bf2Start)) {
throw new IllegalArgumentException("bf1 numHashFunctions does not match bf2 when merging");
}
for (int idx = 8 + BYTE_ARRAY_BASE_OFFSET; idx < (bf1Length + BYTE_ARRAY_BASE_OFFSET); idx += 1) {
byte l1 = UNSAFE.getByte(bf1Bytes, bf1Start + idx);
byte l2 = UNSAFE.getByte(bf2Bytes, bf2Start + idx);
UNSAFE.putByte(bf1Bytes, bf1Start + idx, ((byte) (l1 | l2)));
}
return bf1Bytes;
} | 3.26 |
flink_BloomFilter_m0_rdh | /**
 * Computes the optimal number of bits for the given number of input entries and expected false
 * positive probability.
 *
 * @param inputEntries
 * expected number of entries
 * @param fpp
 * expected false positive probability
 * @return optimal number of bits
 */
public static int m0(long inputEntries, double fpp) {
int numBits = ((int) (((-inputEntries) * Math.log(fpp)) / (Math.log(2) * Math.log(2))));
return numBits;
} | 3.26 |
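The three formulas above are the standard Bloom filter sizing equations: m = -n * ln(p) / (ln 2)^2 bits, k = round(m/n * ln 2) hash functions, and an expected false positive probability of (1 - e^(-k*n/m))^k. A small self-contained worked example (the input numbers are illustrative only, not Flink defaults):

public final class BloomFilterSizingSketch {

    // Worked example of the sizing math shown above (standard Bloom filter formulas).
    public static void main(String[] args) {
        long expectedEntries = 1_000_000L;
        double targetFpp = 0.03;

        // optimal number of bits: m = -n * ln(p) / (ln 2)^2  ->  about 7.3 million bits (~0.87 MiB)
        int bitSize = (int) ((-expectedEntries * Math.log(targetFpp)) / (Math.log(2) * Math.log(2)));

        // optimal number of hash functions: k = max(1, round(m / n * ln 2))  ->  5
        int numHashFunctions = Math.max(1, (int) Math.round(((double) bitSize) / expectedEntries * Math.log(2)));

        // estimated false positive probability: (1 - e^(-k * n / m))^k  ->  roughly 0.03 again
        double p = Math.exp((-((double) numHashFunctions) * expectedEntries) / bitSize);
        double estimatedFpp = Math.pow(1 - p, numHashFunctions);

        System.out.printf("bits=%d, hashFunctions=%d, estimatedFpp=%.4f%n", bitSize, numHashFunctions, estimatedFpp);
    }
}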
flink_SingleInputPlanNode_getComparator_rdh | /**
* Gets the specified comparator from this PlanNode.
*
* @param id
* The ID of the requested comparator.
* @return The specified comparator.
*/
public TypeComparatorFactory<?> getComparator(int id) {
return comparators[id];
} | 3.26 |
flink_SingleInputPlanNode_getTrueArray_rdh | // --------------------------------------------------------------------------------------------
protected static boolean[] getTrueArray(int length) {
final boolean[] a = new boolean[length];
for (int i = 0; i < length; i++) {
a[i] = true;
}
return a;
} | 3.26 |
flink_SingleInputPlanNode_getKeys_rdh | /**
* Gets the key field indexes for the specified driver comparator.
*
* @param id
* The id of the driver comparator for which the key field indexes are requested.
* @return The key field indexes of the specified driver comparator.
*/
public FieldList getKeys(int id) {
return this.driverKeys[id];
} | 3.26 |
flink_SingleInputPlanNode_setDriverKeyInfo_rdh | /**
* Sets the key field information for the specified driver comparator.
*
* @param keys
* The key field indexes for the specified driver comparator.
* @param sortOrder
* The key sort order for the specified driver comparator.
* @param id
* The ID of the driver comparator.
 */
public void setDriverKeyInfo(FieldList keys, boolean[] sortOrder, int id) {
if ((id < 0) || (id >= driverKeys.length)) {
throw new CompilerException(("Invalid id for driver key information. DriverStrategy requires only " + super.getDriverStrategy().getNumRequiredComparators()) + " comparators.");
}
this.driverKeys[id] = keys;
this.driverSortOrders[id] = sortOrder;
} | 3.26 |
flink_SingleInputPlanNode_getPredecessor_rdh | /**
* Gets the predecessor of this node, i.e. the source of the input channel.
*
* @return The predecessor of this node.
*/
public PlanNode getPredecessor() {
return this.input.getSource();
} | 3.26 |
flink_SingleInputPlanNode_setComparator_rdh | /**
* Sets the specified comparator for this PlanNode.
*
* @param comparator
* The comparator to set.
* @param id
* The ID of the comparator to set.
*/
public void setComparator(TypeComparatorFactory<?> comparator, int id) {
this.comparators[id] = comparator;
} | 3.26 |
flink_SingleInputPlanNode_accept_rdh | // --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<PlanNode> visitor) {
if (visitor.preVisit(this)) {
this.input.getSource().accept(visitor);
for (Channel broadcastInput : getBroadcastInputs()) {
broadcastInput.getSource().accept(visitor);
}
visitor.postVisit(this);
}
} | 3.26 |
flink_SingleInputPlanNode_getSortOrders_rdh | /**
* Gets the sort order for the specified driver comparator.
*
* @param id
* The id of the driver comparator for which the sort order is requested.
* @return The sort order of the specified driver comparator.
 */
public boolean[] getSortOrders(int id) {
return driverSortOrders[id];
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_setCleanUpInterval_rdh | /**
* Sets {@code cleanUpInterval}.
*
* @param cleanUpInterval
* Clean up interval for completed stats.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setCleanUpInterval(Duration cleanUpInterval) {
this.cleanUpInterval = cleanUpInterval;
return this;
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_setJobVertexStatsCache_rdh | /**
* Sets {@code jobVertexStatsCache}. This is currently only used for testing.
*
* @param jobVertexStatsCache
* The Cache instance to use for caching statistics. Will use the
* default defined in {@link VertexThreadInfoTrackerBuilder#defaultCache()} if not set.
* @return Builder.
*/
@VisibleForTesting
VertexThreadInfoTrackerBuilder setJobVertexStatsCache(Cache<VertexThreadInfoTracker.JobVertexKey, VertexThreadInfoStats> jobVertexStatsCache) {
this.jobVertexStatsCache = jobVertexStatsCache;
return this;
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_newBuilder_rdh | /**
* Create a new {@link VertexThreadInfoTrackerBuilder}.
*
* @return Builder.
*/
public static VertexThreadInfoTrackerBuilder newBuilder(GatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever, ScheduledExecutorService executor, Time restTimeout) {
return new VertexThreadInfoTrackerBuilder(resourceManagerGatewayRetriever, executor, restTimeout);
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_build_rdh | /**
* Constructs a new {@link VertexThreadInfoTracker}.
*
* @return a new {@link VertexThreadInfoTracker} instance.
*/
public VertexThreadInfoTracker build() {
if (jobVertexStatsCache == null) {
jobVertexStatsCache = defaultCache();
}
if (executionVertexStatsCache == null) {
executionVertexStatsCache = defaultCache();
}
return new VertexThreadInfoTracker(coordinator, resourceManagerGatewayRetriever, executor, cleanUpInterval, numSamples, statsRefreshInterval, delayBetweenSamples, maxThreadInfoDepth, restTimeout, jobVertexStatsCache, executionVertexStatsCache);
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_setMaxThreadInfoDepth_rdh | /**
 * Sets {@code maxThreadInfoDepth}.
*
* @param maxThreadInfoDepth
* Limit for the depth of the stack traces included when sampling
* threads.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setMaxThreadInfoDepth(int maxThreadInfoDepth) {
this.maxThreadInfoDepth = maxThreadInfoDepth;
return this;
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_setExecutionVertexStatsCache_rdh | /**
* Sets {@code executionVertexStatsCache}. This is currently only used for testing.
*
* @param executionVertexStatsCache
* The Cache instance to use for caching statistics. Will use
* the default defined in {@link VertexThreadInfoTrackerBuilder#defaultCache()} if not set.
* @return Builder.
*/
@VisibleForTesting
VertexThreadInfoTrackerBuilder setExecutionVertexStatsCache(Cache<VertexThreadInfoTracker.ExecutionVertexKey, VertexThreadInfoStats> executionVertexStatsCache) {
this.executionVertexStatsCache = executionVertexStatsCache;
return this;
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_setStatsRefreshInterval_rdh | /**
* Sets {@code statsRefreshInterval}.
*
* @param statsRefreshInterval
* Time interval after which the available thread info stats are
* deprecated and need to be refreshed.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setStatsRefreshInterval(Duration statsRefreshInterval) {
this.statsRefreshInterval = statsRefreshInterval;
return this;
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_setDelayBetweenSamples_rdh | /**
* Sets {@code delayBetweenSamples}.
*
* @param delayBetweenSamples
* Delay between individual samples per task.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setDelayBetweenSamples(Duration delayBetweenSamples) {
this.delayBetweenSamples = delayBetweenSamples;
return this;
} | 3.26 |
flink_VertexThreadInfoTrackerBuilder_setNumSamples_rdh | /**
* Sets {@code numSamples}.
*
* @param numSamples
* Number of thread info samples to collect for each subtask.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setNumSamples(int numSamples) {
this.numSamples = numSamples;
return this;
} | 3.26 |
flink_AsyncDataStream_unorderedWaitWithRetry_rdh | /**
* Adds an AsyncWaitOperator with an AsyncRetryStrategy to support retry of AsyncFunction. The
* order of output stream records may be reordered.
*
* @param in
* Input {@link DataStream}
* @param func
* {@link AsyncFunction}
* @param timeout
* from first invoke to final completion of asynchronous operation, may include
* multiple retries, and will be reset in case of restart
* @param timeUnit
* of the given timeout
* @param capacity
* The max number of async i/o operation that can be triggered
* @param asyncRetryStrategy
* The strategy of reattempt async i/o operation that can be triggered
* @param <IN>
* Type of input record
* @param <OUT>
* Type of output record
* @return A new {@link SingleOutputStreamOperator}.
*/
public static <IN, OUT> SingleOutputStreamOperator<OUT> unorderedWaitWithRetry(DataStream<IN> in, AsyncFunction<IN, OUT> func, long timeout, TimeUnit timeUnit, int capacity, AsyncRetryStrategy<OUT> asyncRetryStrategy) {
return addOperator(in, func, timeUnit.toMillis(timeout), capacity, OutputMode.UNORDERED, asyncRetryStrategy);
} | 3.26 |
flink_AsyncDataStream_orderedWaitWithRetry_rdh | /**
* Adds an AsyncWaitOperator with an AsyncRetryStrategy to support retry of AsyncFunction. The
 * order to process input records is guaranteed to be the same as the input ones.
*
* @param in
* Input {@link DataStream}
* @param func
* {@link AsyncFunction}
* @param timeout
* from first invoke to final completion of asynchronous operation, may include
* multiple retries, and will be reset in case of restart
* @param timeUnit
* of the given timeout
* @param asyncRetryStrategy
* The strategy of reattempt async i/o operation that can be triggered
* @param <IN>
* Type of input record
* @param <OUT>
* Type of output record
* @return A new {@link SingleOutputStreamOperator}.
*/
public static <IN, OUT> SingleOutputStreamOperator<OUT> orderedWaitWithRetry(DataStream<IN> in, AsyncFunction<IN, OUT> func, long timeout, TimeUnit timeUnit, AsyncRetryStrategy<OUT> asyncRetryStrategy) {
return addOperator(in, func, timeUnit.toMillis(timeout), DEFAULT_QUEUE_CAPACITY, OutputMode.ORDERED, asyncRetryStrategy);
}
/**
* Adds an AsyncWaitOperator with an AsyncRetryStrategy to support retry of AsyncFunction. The
 * order to process input records is guaranteed to be the same as the input ones.
*
* @param in
* Input {@link DataStream}
* @param func
* {@link AsyncFunction}
* @param timeout
* from first invoke to final completion of asynchronous operation, may include
* multiple retries, and will be reset in case of restart
* @param timeUnit
* of the given timeout
* @param capacity
* The max number of async i/o operation that can be triggered
* @param asyncRetryStrategy
* The strategy of reattempt async i/o operation that can be triggered
* @param <IN>
* Type of input record
* @param <OUT>
* Type of output record
* @return A new {@link SingleOutputStreamOperator} | 3.26 |
flink_AsyncDataStream_orderedWait_rdh | /**
* Adds an AsyncWaitOperator. The order to process input records is guaranteed to be the same as
* input ones.
*
* @param in
* Input {@link DataStream}
* @param func
* {@link AsyncFunction}
* @param timeout
* for the asynchronous operation to complete
* @param timeUnit
* of the given timeout
* @param <IN>
* Type of input record
* @param <OUT>
* Type of output record
* @return A new {@link SingleOutputStreamOperator}.
*/
public static <IN, OUT> SingleOutputStreamOperator<OUT> orderedWait(DataStream<IN> in, AsyncFunction<IN, OUT> func, long timeout, TimeUnit timeUnit) {
return addOperator(in, func, timeUnit.toMillis(timeout), DEFAULT_QUEUE_CAPACITY, OutputMode.ORDERED, NO_RETRY_STRATEGY);
} | 3.26 |
flink_AsyncDataStream_m0_rdh | /**
* Adds an AsyncWaitOperator. The order of output stream records may be reordered.
*
* @param in
* Input {@link DataStream}
* @param func
* {@link AsyncFunction}
* @param timeout
* for the asynchronous operation to complete
* @param timeUnit
* of the given timeout
* @param capacity
* The max number of async i/o operation that can be triggered
* @param <IN>
* Type of input record
* @param <OUT>
* Type of output record
* @return A new {@link SingleOutputStreamOperator}.
 */
public static <IN, OUT> SingleOutputStreamOperator<OUT> m0(DataStream<IN> in, AsyncFunction<IN, OUT> func, long timeout, TimeUnit timeUnit, int capacity) {
return addOperator(in, func, timeUnit.toMillis(timeout), capacity, OutputMode.UNORDERED, NO_RETRY_STRATEGY);
} | 3.26 |
flink_AsyncDataStream_unorderedWait_rdh | /**
* Adds an AsyncWaitOperator. The order of output stream records may be reordered.
*
* @param in
* Input {@link DataStream}
* @param func
* {@link AsyncFunction}
* @param timeout
* for the asynchronous operation to complete
* @param timeUnit
* of the given timeout
* @param <IN>
* Type of input record
* @param <OUT>
* Type of output record
* @return A new {@link SingleOutputStreamOperator}.
*/
public static <IN, OUT> SingleOutputStreamOperator<OUT> unorderedWait(DataStream<IN> in, AsyncFunction<IN, OUT> func, long timeout, TimeUnit timeUnit) {
return addOperator(in, func, timeUnit.toMillis(timeout), DEFAULT_QUEUE_CAPACITY, OutputMode.UNORDERED, NO_RETRY_STRATEGY);
} | 3.26 |
flink_StreamConfig_setManagedMemoryFractionOperatorOfUseCase_rdh | /**
* Fraction of managed memory reserved for the given use case that this operator should use.
*/
public void setManagedMemoryFractionOperatorOfUseCase(ManagedMemoryUseCase managedMemoryUseCase, double fraction) {
final ConfigOption<Double> configOption = getManagedMemoryFractionConfigOption(managedMemoryUseCase);
checkArgument((fraction >= 0.0) && (fraction <= 1.0), String.format("%s should be in range [0.0, 1.0], but was: %s", configOption.key(), fraction));
config.setDouble(configOption, fraction);
} | 3.26 |
flink_StreamConfig_setCheckpointingEnabled_rdh | // --------------------- checkpointing -----------------------
public void setCheckpointingEnabled(boolean enabled) {
config.setBoolean(CHECKPOINTING_ENABLED, enabled);
} | 3.26 |
flink_StreamConfig_clearInitialConfigs_rdh | /**
 * In general, we don't clear any configuration. However, the {@link #SERIALIZED_UDF} may be
 * very large when the operator includes some large objects; it is used to create a
 * StreamOperator and usually only needs to be read once. {@link #CHAINED_TASK_CONFIG} may be
 * large as well, because the StreamConfig of all non-head operators in the OperatorChain is
 * serialized and stored in CHAINED_TASK_CONFIG. Both can be cleared to reduce memory after the
 * StreamTask is initialized, so the TM has more memory available while running. See FLINK-33315
 * and FLINK-33317 for more information.
*/
public void clearInitialConfigs() {
removedKeys.add(SERIALIZED_UDF);
config.removeKey(SERIALIZED_UDF);
removedKeys.add(CHAINED_TASK_CONFIG);
config.removeKey(CHAINED_TASK_CONFIG);
} | 3.26 |
flink_StreamConfig_setChainStart_rdh | // ------------------------------------------------------------------------
// Miscellaneous
// ------------------------------------------------------------------------
public void setChainStart() {
config.setBoolean(IS_CHAINED_VERTEX, true);
} | 3.26 |
flink_StreamConfig_setVertexNonChainedOutputs_rdh | /**
 * Sets the job vertex level non-chained outputs. The given output list must have the same order
 * as {@link JobVertex#getProducedDataSets()}.
*/
public void setVertexNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
f2.put(VERTEX_NONCHAINED_OUTPUTS, nonChainedOutputs);
} | 3.26 |
flink_StreamConfig_serializeAllConfigs_rdh | /**
 * Serializes all object configs synchronously. Only used for operators that need to reconstruct
 * the StreamConfig internally, or for tests.
*/
public void serializeAllConfigs() {
f2.forEach((key, object) -> {
try {
InstantiationUtil.writeObjectToConfig(object, this.config, key);
} catch (IOException e) {
throw new StreamTaskException(String.format("Could not serialize object for key %s.", key), e);
}
});
} | 3.26 |
flink_StreamConfig_getManagedMemoryFractionOperatorUseCaseOfSlot_rdh | /**
* Fraction of total managed memory in the slot that this operator should use for the given use
* case.
*/
public double getManagedMemoryFractionOperatorUseCaseOfSlot(ManagedMemoryUseCase managedMemoryUseCase, Configuration taskManagerConfig, ClassLoader cl) {
return ManagedMemoryUtils.convertToFractionOfSlot(managedMemoryUseCase, config.getDouble(getManagedMemoryFractionConfigOption(managedMemoryUseCase)), getAllManagedMemoryUseCases(), taskManagerConfig,
config.getOptional(STATE_BACKEND_USE_MANAGED_MEMORY), cl);
} | 3.26 |
flink_StreamConfig_setStateBackend_rdh | // ------------------------------------------------------------------------
// State backend
// ------------------------------------------------------------------------
public void setStateBackend(StateBackend backend) {
if (backend != null) {
f2.put(STATE_BACKEND, backend);
setStateBackendUsesManagedMemory(backend.useManagedMemory());
}
} | 3.26 |
flink_StreamConfig_triggerSerializationAndReturnFuture_rdh | /**
* Trigger the object config serialization and return the completable future.
*/
public CompletableFuture<StreamConfig> triggerSerializationAndReturnFuture(Executor ioExecutor) {
FutureUtils.combineAll(f3.values()).thenAcceptAsync(chainedConfigs -> {
try {
// Serialize all the objects to config.
serializeAllConfigs();
InstantiationUtil.writeObjectToConfig(chainedConfigs.stream().collect(Collectors.toMap(StreamConfig::getVertexID, Function.identity())), this.config, CHAINED_TASK_CONFIG);
serializationFuture.complete(this);
} catch (Throwable throwable) {
serializationFuture.completeExceptionally(throwable);
}
}, ioExecutor);
return serializationFuture;
} | 3.26 |
flink_StreamConfig_setOperatorNonChainedOutputs_rdh | /**
* Sets the operator level non-chained outputs.
*/
public void setOperatorNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
f2.put(OP_NONCHAINED_OUTPUTS, nonChainedOutputs);
} | 3.26 |
flink_OperatorSubtaskState_getManagedOperatorState_rdh | // --------------------------------------------------------------------------------------------
public StateObjectCollection<OperatorStateHandle> getManagedOperatorState() {
return managedOperatorState;
} | 3.26 |
flink_OperatorSubtaskState_equals_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
OperatorSubtaskState that = ((OperatorSubtaskState) (o));
if (getStateSize() != that.getStateSize()) {
return false;
}
if (!getManagedOperatorState().equals(that.getManagedOperatorState())) {
return false;
}
if (!m0().equals(that.m0())) {
return false;
}
if (!getManagedKeyedState().equals(that.getManagedKeyedState())) {
return false;
}
if (!getInputChannelState().equals(that.getInputChannelState())) {
return false;
}
if (!getResultSubpartitionState().equals(that.getResultSubpartitionState())) {
return false;
}
if (!getInputRescalingDescriptor().equals(that.getInputRescalingDescriptor())) {
return false;
}
if (!getOutputRescalingDescriptor().equals(that.getOutputRescalingDescriptor())) {
return false;
}
return m1().equals(that.m1());
} | 3.26 |
flink_SharedStateRegistryKey_forStreamStateHandle_rdh | /**
* Create a unique key based on physical id.
*/
public static SharedStateRegistryKey forStreamStateHandle(StreamStateHandle handle) {
String keyString = handle.getStreamStateHandleID().getKeyString();
// key strings tend to be longer, so we use the MD5 of the key string to save memory
return new SharedStateRegistryKey(UUID.nameUUIDFromBytes(keyString.getBytes(StandardCharsets.UTF_8)).toString());
} | 3.26 |
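For reference, {@code UUID.nameUUIDFromBytes} is the JDK's deterministic, MD5-based (type 3) UUID, so equal key strings always map to the same registry key while taking constant space. A tiny standalone sketch (the key string below is hypothetical):

import java.nio.charset.StandardCharsets;
import java.util.UUID;

public final class NameUuidSketch {
    public static void main(String[] args) {
        // Hypothetical shared-state key string; real keys come from StreamStateHandle#getStreamStateHandleID().
        String keyString = "s3://bucket/checkpoints/shared/example-state-handle";
        UUID first = UUID.nameUUIDFromBytes(keyString.getBytes(StandardCharsets.UTF_8));
        UUID second = UUID.nameUUIDFromBytes(keyString.getBytes(StandardCharsets.UTF_8));
        // Deterministic MD5-based (type 3) UUID: the same key string always maps to the same
        // fixed-length value, which is why it can replace arbitrarily long key strings.
        System.out.println(first + " == " + second + " ? " + first.equals(second));
    }
}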
flink_SlotSharingGroup_setTaskHeapMemoryMB_rdh | /**
* Set the task heap memory for this SlotSharingGroup in MB.
*/
public Builder setTaskHeapMemoryMB(int taskHeapMemoryMB) {
checkArgument(taskHeapMemoryMB > 0, "The task heap memory should be positive.");
this.taskHeapMemory = MemorySize.ofMebiBytes(taskHeapMemoryMB);
return this;
} | 3.26 |
flink_SlotSharingGroup_setTaskHeapMemory_rdh | /**
* Set the task heap memory for this SlotSharingGroup.
*/
public Builder setTaskHeapMemory(MemorySize taskHeapMemory) {
checkArgument(taskHeapMemory.compareTo(MemorySize.ZERO) > 0, "The task heap memory should be positive.");
this.taskHeapMemory = taskHeapMemory;
return this;
} | 3.26 |
flink_SlotSharingGroup_build_rdh | /**
* Build the SlotSharingGroup.
*/
public SlotSharingGroup build() {
if ((cpuCores != null) && (taskHeapMemory != null)) {
taskOffHeapMemory = Optional.ofNullable(taskOffHeapMemory).orElse(MemorySize.ZERO);
managedMemory = Optional.ofNullable(managedMemory).orElse(MemorySize.ZERO);
return new SlotSharingGroup(name, cpuCores, taskHeapMemory, taskOffHeapMemory, managedMemory, externalResources);
} else if (((((cpuCores != null) || (taskHeapMemory != null)) || (taskOffHeapMemory != null)) || (managedMemory != null)) || (!externalResources.isEmpty())) {
throw new IllegalArgumentException("The cpu cores and task heap memory are required when specifying the resource of a slot sharing group. " + "You need to explicitly configure them with positive value.");
} else {
return new SlotSharingGroup(name);
}
} | 3.26 |
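A usage sketch of the builder rows above. The snippets use obfuscated names (m0 sets the CPU cores, m1 the task off-heap memory); the sketch assumes the conventional factory SlotSharingGroup.newBuilder(name) in org.apache.flink.api.common.operators and the de-obfuscated setter name setCpuCores, both of which are assumptions for illustration:

import org.apache.flink.api.common.operators.SlotSharingGroup;

public final class SlotSharingGroupSketch {
    public static void main(String[] args) {
        // build() requires CPU cores and task heap memory to be set together once any resource is given;
        // the other memory components default to zero (see the build() row above).
        SlotSharingGroup group = SlotSharingGroup.newBuilder("enrichment-group")
                .setCpuCores(1.0)           // shown as m0(double) in the obfuscated rows
                .setTaskHeapMemoryMB(256)
                .setTaskOffHeapMemoryMB(64) // optional; defaults to zero if omitted
                .build();
        System.out.println(group);
    }
}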
flink_SlotSharingGroup_m0_rdh | /**
* Set the CPU cores for this SlotSharingGroup.
*/
public Builder m0(double cpuCores) {
checkArgument(cpuCores > 0, "The cpu cores should be positive.");
this.cpuCores = new CPUResource(cpuCores);
return this;
} | 3.26 |
flink_SlotSharingGroup_m1_rdh | /**
* Set the task off-heap memory for this SlotSharingGroup.
*/
public Builder m1(MemorySize taskOffHeapMemory) {
this.taskOffHeapMemory = taskOffHeapMemory;
return this;
} | 3.26 |
flink_SlotSharingGroup_setTaskOffHeapMemoryMB_rdh | /**
* Set the task off-heap memory for this SlotSharingGroup in MB.
*/
public Builder setTaskOffHeapMemoryMB(int taskOffHeapMemoryMB) {
this.taskOffHeapMemory = MemorySize.ofMebiBytes(taskOffHeapMemoryMB);
return this;
} | 3.26 |
flink_BlobServer_globalCleanupAsync_rdh | /**
* Removes all BLOBs from local and HA store belonging to the given {@link JobID}.
*
* @param jobId
* ID of the job this blob belongs to
 */
@Override
public CompletableFuture<Void> globalCleanupAsync(JobID jobId, Executor executor) {
checkNotNull(jobId);
return runAsyncWithWriteLock(() -> {
IOException exception = null;
try {
internalLocalCleanup(jobId);
} catch (IOException e) {
exception = e;
}
if (!blobStore.deleteAll(jobId)) {
exception = ExceptionUtils.firstOrSuppressed(new IOException("Error while cleaning up the BlobStore for job " + jobId), exception);
}
if (exception != null) {
throw new IOException(exception);
}
}, executor);
} | 3.26 |
flink_BlobServer_getReadWriteLock_rdh | /**
* Returns the lock used to guard file accesses.
*/
ReadWriteLock getReadWriteLock() {
return readWriteLock;
} | 3.26 |
flink_BlobServer_moveTempFileToStore_rdh | /**
* Moves the temporary <tt>incomingFile</tt> to its permanent location where it is available for
* use.
*
* @param incomingFile
* temporary file created during transfer
* @param jobId
* ID of the job this blob belongs to or <tt>null</tt> if job-unrelated
* @param digest
* BLOB content digest, i.e. hash
* @param blobType
* whether this file is a permanent or transient BLOB
* @return unique BLOB key that identifies the BLOB on the server
* @throws IOException
* thrown if an I/O error occurs while moving the file or uploading it to
* the HA store
*/
BlobKey moveTempFileToStore(File incomingFile, @Nullable JobID jobId, byte[] digest, BlobKey.BlobType blobType) throws IOException {
int retries = 10;
int attempt = 0;
while (true) {
// add unique component independent of the BLOB content
BlobKey blobKey = BlobKey.createKey(blobType, digest);
File storageFile = BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey);
// try again until the key is unique (put the existence check into the lock!)
readWriteLock.writeLock().lock();
try {
if (!storageFile.exists()) {
BlobUtils.moveTempFileToStore(incomingFile, jobId, blobKey, storageFile, LOG, blobKey instanceof PermanentBlobKey ? blobStore : null);
// add TTL for transient BLOBs:
if (blobKey instanceof TransientBlobKey) {
// must be inside read or write lock to add a TTL
blobExpiryTimes.put(Tuple2.of(jobId, ((TransientBlobKey) (blobKey))), System.currentTimeMillis() + cleanupInterval);
}
return blobKey;
}
} finally {
readWriteLock.writeLock().unlock();
}
++attempt;
if (attempt >= retries) {
String message = ((("Failed to find a unique key for BLOB of job " + jobId) + " (last tried ") + storageFile.getAbsolutePath()) + ".";
LOG.error(message + " No retries left.");
throw new IOException(message);
} else if (LOG.isDebugEnabled()) {
LOG.debug("Trying to find a unique key for BLOB of job {} (retry {}, last tried {})", jobId, attempt, storageFile.getAbsolutePath());
}
}
} | 3.26 |
flink_BlobServer_getStorageDir_rdh | // --------------------------------------------------------------------------------------------
// Path Accessors
// --------------------------------------------------------------------------------------------
public File getStorageDir() {
return storageDir.deref();
} | 3.26 |
flink_BlobServer_createTemporaryFilename_rdh | /**
* Returns a temporary file inside the BLOB server's incoming directory.
*
* @return a temporary file inside the BLOB server's incoming directory
* @throws IOException
* if creating the directory fails
*/
File createTemporaryFilename() throws IOException {
return new File(BlobUtils.getIncomingDirectory(storageDir.deref()), String.format("temp-%08d", tempFileCounter.getAndIncrement()));
} | 3.26 |
flink_BlobServer_getCurrentActiveConnections_rdh | /**
* Returns all the current active connections in the BlobServer.
*
 * @return the list of all active connections in the current BlobServer
 */
List<BlobServerConnection> getCurrentActiveConnections() {
synchronized(activeConnections) {
return new ArrayList<>(activeConnections);
}
} | 3.26 |
flink_BlobServer_getBlobExpiryTimes_rdh | /**
* Returns the blob expiry times - for testing purposes only!
*
* @return blob expiry times (internal state!)
*/
@VisibleForTesting
ConcurrentMap<Tuple2<JobID, TransientBlobKey>, Long> getBlobExpiryTimes() {
return blobExpiryTimes;
} | 3.26 |
flink_BlobServer_putBuffer_rdh | /**
* Uploads the data of the given byte array for the given job to the BLOB server.
*
* @param jobId
* the ID of the job the BLOB belongs to
* @param value
* the buffer to upload
* @param blobType
* whether to make the data permanent or transient
* @return the computed BLOB key identifying the BLOB on the server
* @throws IOException
* thrown if an I/O error occurs while writing it to a local file, or
* uploading it to the HA store
*/
private BlobKey putBuffer(@Nullable
JobID jobId, byte[] value, BlobKey.BlobType blobType) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Received PUT call for BLOB of job {}.", jobId);
}
File incomingFile = createTemporaryFilename();
MessageDigest md = BlobUtils.createMessageDigest();
BlobKey blobKey = null;
try (FileOutputStream fos = new FileOutputStream(incomingFile)) {
md.update(value);
fos.write(value);
} catch (IOException ioe) {
// delete incomingFile from a failed download
if ((!incomingFile.delete()) && incomingFile.exists()) {
LOG.warn("Could not delete the staging file {} for job {}.", incomingFile, jobId);
}
throw ioe;
}
try {
// persist file
blobKey = moveTempFileToStore(incomingFile, jobId, md.digest(), blobType);
return blobKey;
} finally {
// delete incomingFile from a failed download
if ((!incomingFile.delete()) && incomingFile.exists()) {
LOG.warn("Could not delete the staging file {} for blob key {} and job {}.", incomingFile, blobKey, jobId);
}
}
} | 3.26 |
flink_BlobServer_getFile_rdh | /**
* Returns the path to a local copy of the file associated with the provided job ID and blob
* key.
*
* <p>We will first attempt to serve the BLOB from the local storage. If the BLOB is not in
* there, we will try to download it from the HA store.
*
* @param jobId
* ID of the job this blob belongs to
* @param key
* blob key associated with the requested file
* @return The path to the file.
* @throws java.io.FileNotFoundException
* if the BLOB does not exist;
* @throws IOException
* if any other error occurs when retrieving the file
*/
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
checkNotNull(jobId);
return getFileInternalWithReadLock(jobId, key);
} | 3.26 |
flink_BlobServer_getStorageLocation_rdh | /**
* Returns a file handle to the file associated with the given blob key on the blob server.
*
* <p><strong>This is only called from {@link BlobServerConnection} or unit tests.</strong>
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param key
* identifying the file
* @return file handle to the file
* @throws IOException
* if creating the directory fails
*/
@VisibleForTesting
public File getStorageLocation(@Nullable JobID jobId, BlobKey key) throws IOException {
return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key);
} | 3.26 |
flink_BlobServer_getFileInternalWithReadLock_rdh | /**
* Retrieves the local path of a file associated with a job and a blob key.
*
* <p>The blob server looks the blob key up in its local storage. If the file exists, it is
* returned. If the file does not exist, it is retrieved from the HA blob store (if available)
* or a {@link FileNotFoundException} is thrown.
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey
* blob key associated with the requested file
* @return file referring to the local storage location of the BLOB
* @throws IOException
* Thrown if the file retrieval failed.
*/
private File getFileInternalWithReadLock(@Nullable JobID jobId, BlobKey blobKey) throws IOException {
checkArgument(blobKey != null, "BLOB key cannot be null.");
readWriteLock.readLock().lock();
try {
return getFileInternal(jobId, blobKey);
} finally {
readWriteLock.readLock().unlock();
}
} | 3.26 |
flink_BlobServer_getPort_rdh | /**
* Returns the port on which the server is listening.
*
* @return port on which the server is listening
*/
@Override public int getPort() {
return this.serverSocket.getLocalPort();
} | 3.26 |
flink_BlobServer_getServerSocket_rdh | /**
* Access to the server socket, for testing.
*/
ServerSocket getServerSocket() {
return this.serverSocket;
} | 3.26 |
flink_BlobServer_putInputStream_rdh | /**
* Uploads the data from the given input stream for the given job to the BLOB server.
*
* @param jobId
* the ID of the job the BLOB belongs to
* @param inputStream
* the input stream to read the data from
* @param blobType
* whether to make the data permanent or transient
* @return the computed BLOB key identifying the BLOB on the server
* @throws IOException
* thrown if an I/O error occurs while reading the data from the input
* stream, writing it to a local file, or uploading it to the HA store
*/
private BlobKey putInputStream(@Nullable JobID jobId, InputStream inputStream, BlobKey.BlobType blobType) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Received PUT call for BLOB of job {}.", jobId);
}
File incomingFile = createTemporaryFilename();
BlobKey blobKey = null;
try {
MessageDigest md = writeStreamToFileAndCreateDigest(inputStream, incomingFile);
// persist file
blobKey = moveTempFileToStore(incomingFile, jobId, md.digest(), blobType);
return blobKey;
} finally {
// delete incomingFile from a failed download
if ((!incomingFile.delete()) && incomingFile.exists()) {
LOG.warn("Could not delete the staging file {} for blob key {} and job {}.", incomingFile, blobKey, jobId);
}
}
} | 3.26 |
flink_BlobServer_getFileInternal_rdh | /**
* Helper to retrieve the local path of a file associated with a job and a blob key.
*
* <p>The blob server looks the blob key up in its local storage. If the file exists, it is
* returned. If the file does not exist, it is retrieved from the HA blob store (if available)
* or a {@link FileNotFoundException} is thrown.
*
* <p><strong>Assumes the read lock has already been acquired.</strong>
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey
* blob key associated with the requested file
* @throws IOException
* Thrown if the file retrieval failed.
* @return the retrieved local blob file
*/
File getFileInternal(@Nullable JobID jobId, BlobKey blobKey) throws IOException {
// assume readWriteLock.readLock() was already locked (cannot really check that)
final File localFile = BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey);
if (localFile.exists()) {
// update TTL for transient BLOBs:
if (blobKey instanceof TransientBlobKey) {
// regarding concurrent operations, it is not really important which timestamp makes
// it into the map as they are close to each other anyway, also we can simply
// overwrite old values as long as we are in the read (or write) lock
blobExpiryTimes.put(Tuple2.of(jobId, ((TransientBlobKey) (blobKey))), System.currentTimeMillis() + cleanupInterval);
}
return localFile;
} else if (blobKey instanceof PermanentBlobKey) {
// Try the HA blob store
// first we have to release the read lock in order to acquire the write lock
readWriteLock.readLock().unlock();
// use a temporary file (thread-safe without locking)
File incomingFile = null;
try {
incomingFile = createTemporaryFilename();
blobStore.get(jobId, blobKey, incomingFile);
readWriteLock.writeLock().lock();
try {
BlobUtils.moveTempFileToStore(incomingFile, jobId, blobKey, localFile, LOG, null);
} finally {
readWriteLock.writeLock().unlock();
}
return localFile;
} finally {
// delete incomingFile from a failed download
if (((incomingFile != null) && (!incomingFile.delete())) && incomingFile.exists()) {
LOG.warn("Could not delete the staging file {} for blob key {} and job {}.", incomingFile, blobKey, jobId);
}
// re-acquire lock so that it can be unlocked again outside
readWriteLock.readLock().lock();
}
}
throw new FileNotFoundException((("Local file " + localFile) + " does not exist ") + "and failed to copy from blob store.");
} | 3.26 |
flink_BlobServer_close_rdh | /**
* Shuts down the BLOB server.
*/
@Override
public void close() throws IOException {
cleanupTimer.cancel();
if (shutdownRequested.compareAndSet(false, true)) {
Exception exception = null;
try {
this.serverSocket.close();
} catch (IOException ioe) {
exception = ioe;
}
// wake the thread up, in case it is waiting on some operation
interrupt();
try {
join();
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOG.debug("Error while waiting for this thread to die.", ie);
}
synchronized(activeConnections) {
if (!activeConnections.isEmpty()) {
for (BlobServerConnection conn : activeConnections) {
LOG.debug("Shutting down connection {}.", conn.getName());
conn.close();
}
activeConnections.clear();
}
}
// Clean up the storage directory if it is owned
try {
storageDir.owned().ifPresent(FunctionUtils.uncheckedConsumer(FileUtils::deleteDirectory));
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
// Remove shutdown hook to prevent resource leaks
ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
if (LOG.isInfoEnabled()) {
LOG.info("Stopped BLOB server at {}:{}", serverSocket.getInetAddress().getHostAddress(), getPort());
}
ExceptionUtils.tryRethrowIOException(exception);
}
} | 3.26 |
flink_CopyOnWriteStateMapSnapshot_getSnapshotVersion_rdh | /**
* Returns the internal version of the {@link CopyOnWriteStateMap} when this snapshot was
* created. This value must be used to tell the {@link CopyOnWriteStateMap} when to release this
* snapshot.
*/
int getSnapshotVersion() {
return snapshotVersion;
} | 3.26 |
flink_CopyOnWriteStateMapSnapshot_moveChainsToBackOfArray_rdh | /**
* Move the chains in snapshotData to the back of the array, and return the index of the
* first chain from the front.
*/
int moveChainsToBackOfArray() {
int index = snapshotData.length - 1;
// find the first null chain from the back
while (index >= 0) {
if (snapshotData[index] == null) {
break;
}
index--;
}
int lastNullIndex = index;
index--;
// move the chains to the back
while (index >= 0) {
CopyOnWriteStateMap.StateMapEntry<K, N, S> entry = snapshotData[index];
if (entry != null) {
snapshotData[lastNullIndex] = entry;
snapshotData[index] = null;
lastNullIndex--;
}
index--;
}
// return the index of the first chain from the front
return lastNullIndex + 1;
} | 3.26 |
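To make the compaction above concrete, here is a self-contained recreation of the same two-pointer sweep on a plain String array (the class name and element values are made up; the real code moves StateMapEntry chains):

import java.util.Arrays;

public final class MoveToBackSketch {

    // Conceptual recreation of the compaction step: move all non-null entries to the back of
    // the array (keeping their relative order) and return the index of the first non-null entry.
    static int moveToBack(String[] data) {
        int index = data.length - 1;
        // find the first null slot from the back
        while (index >= 0 && data[index] != null) {
            index--;
        }
        int lastNullIndex = index;
        index--;
        // walk towards the front, moving every non-null entry into the last null slot
        while (index >= 0) {
            if (data[index] != null) {
                data[lastNullIndex] = data[index];
                data[index] = null;
                lastNullIndex--;
            }
            index--;
        }
        return lastNullIndex + 1;
    }

    public static void main(String[] args) {
        String[] data = {"a", null, "b", null, "c", null};
        int first = moveToBack(data);
        // prints: 3 [null, null, null, a, b, c]
        System.out.println(first + " " + Arrays.toString(data));
    }
}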
flink_VoidNamespace_hashCode_rdh | // ------------------------------------------------------------------------
// Standard Utilities
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return 99;
} | 3.26 |
flink_VoidNamespace_readResolve_rdh | // make sure that we preserve the singleton properly on serialization
private Object readResolve() throws ObjectStreamException {
return INSTANCE;
} | 3.26 |
flink_VoidNamespace_get_rdh | /**
* Getter for the singleton instance.
*/
public static VoidNamespace get() {
return INSTANCE;
} | 3.26 |
flink_ApiSpecGeneratorUtils_findAdditionalFieldType_rdh | /**
* Find whether the class contains dynamic fields that need to be documented.
*
* @param clazz
* class to check
* @return optional that is non-empty if the class is annotated with {@link FlinkJsonSchema.AdditionalFields}
 */
public static Optional<Class<?>> findAdditionalFieldType(Class<?> clazz) {
final FlinkJsonSchema.AdditionalFields annotation = clazz.getAnnotation(AdditionalFields.class);
return Optional.ofNullable(annotation).map(FlinkJsonSchema.AdditionalFields::type);
} | 3.26 |
flink_ApiSpecGeneratorUtils_shouldBeDocumented_rdh | /**
* Checks whether the given endpoint should be documented.
*
* @param spec
* endpoint to check
* @return true if the endpoint should be documented
*/
public static boolean shouldBeDocumented(MessageHeaders<? extends RequestBody, ? extends ResponseBody, ? extends MessageParameters> spec) {
return spec.getClass().getAnnotation(ExcludeFromDocumentation.class) == null;
} | 3.26 |
flink_CompletedOperationCache_registerOngoingOperation_rdh | /**
* Registers an ongoing operation with the cache.
*
* @param operationResultFuture
* A future containing the operation result.
* @throws IllegalStateException
* if the cache is already shutting down
*/
public void registerOngoingOperation(final K operationKey, final CompletableFuture<R> operationResultFuture) {
final ResultAccessTracker<R> inProgress = ResultAccessTracker.inProgress();
synchronized(lock) {
checkState(isRunning(), "The CompletedOperationCache has already been closed.");
registeredOperationTriggers.put(operationKey, inProgress);
}
operationResultFuture.whenComplete((result, error) -> {
if (error == null) {
completedOperations.put(operationKey, inProgress.finishOperation(OperationResult.success(result)));
} else {
completedOperations.put(operationKey, inProgress.finishOperation(OperationResult.failure(error)));
}
registeredOperationTriggers.remove(operationKey);
});
} | 3.26 |
flink_CompletedOperationCache_accessOperationResultOrError_rdh | /**
* Returns the {@link OperationResult} of the asynchronous operation. If the operation is
* finished, marks the result as accessed.
*/
public OperationResult<R> accessOperationResultOrError() {
if (operationResult.isFinished()) {
markAccessed();
}
return operationResult;
} | 3.26 |
flink_CompletedOperationCache_get_rdh | /**
* Returns an optional containing the {@link OperationResult} for the specified key, or an empty
* optional if no operation is registered under the key.
*/
public Optional<OperationResult<R>> get(final K operationKey) {
ResultAccessTracker<R> resultAccessTracker;
if (((resultAccessTracker = registeredOperationTriggers.get(operationKey)) == null) && ((resultAccessTracker = completedOperations.getIfPresent(operationKey)) == null)) {
return Optional.empty();
}
return Optional.of(resultAccessTracker.accessOperationResultOrError());
} | 3.26 |
flink_StructuredOptionsSplitter_splitEscaped_rdh | /**
* Splits the given string on the given delimiter. It supports quoting parts of the string with
* either single (') or double quotes ("). Quotes can be escaped by doubling the quotes.
*
* <p>Examples:
*
* <ul>
* <li>'A;B';C => [A;B], [C]
* <li>"AB'D";B;C => [AB'D], [B], [C]
* <li>"AB'""D;B";C => [AB'\"D;B], [C]
* </ul>
*
* <p>For more examples check the tests.
*
* @param string
* a string to split
* @param delimiter
* delimiter to split on
* @return a list of splits
*/
static List<String> splitEscaped(String string, char delimiter) {
List<Token> tokens = tokenize(checkNotNull(string), delimiter);
return processTokens(tokens);
} | 3.26 |
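A self-contained sketch that reproduces the documented quoting rules on plain Strings, useful for checking the examples above (conceptual only; it is not Flink's tokenizer and skips validation of malformed input):

import java.util.ArrayList;
import java.util.List;

public final class EscapedSplitSketch {

    // Split on the delimiter; parts may be quoted with ' or ", and a doubled quote inside a
    // quoted part stands for a literal quote character.
    static List<String> split(String s, char delimiter) {
        List<String> out = new ArrayList<>();
        StringBuilder cur = new StringBuilder();
        int i = 0;
        while (i < s.length()) {
            char c = s.charAt(i);
            if (c == '\'' || c == '"') {
                char quote = c;
                i++; // skip opening quote
                while (i < s.length()) {
                    if (s.charAt(i) == quote) {
                        if (i + 1 < s.length() && s.charAt(i + 1) == quote) {
                            cur.append(quote); // doubled quote -> literal quote
                            i += 2;
                        } else {
                            i++; // closing quote
                            break;
                        }
                    } else {
                        cur.append(s.charAt(i++));
                    }
                }
            } else if (c == delimiter) {
                out.add(cur.toString());
                cur.setLength(0);
                i++;
            } else {
                cur.append(c);
                i++;
            }
        }
        out.add(cur.toString());
        return out;
    }

    public static void main(String[] args) {
        System.out.println(split("'A;B';C", ';'));          // [A;B, C]
        System.out.println(split("\"AB'D\";B;C", ';'));     // [AB'D, B, C]
        System.out.println(split("\"AB'\"\"D;B\";C", ';')); // [AB'"D;B, C]
    }
}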