name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
flink_InternalTimersSnapshotReaderWriters_getReaderForVersion_rdh | // -------------------------------------------------------------------------------
// Readers
// - pre-versioned: Flink 1.4.0
// - v1: Flink 1.4.1
// -------------------------------------------------------------------------------
public static <K, N> InternalTimersSnapshotReader<K, N> getReaderForVersion(int version, ClassLoader userCodeClassLoader) {
switch (version) {
case NO_VERSION :
case 1 :
throw new UnsupportedOperationException("Since Flink 1.17 not versioned (<= Flink 1.4.0) and version 1 (< Flink 1.8.0) of " + "InternalTimersSnapshotReader is no longer supported.");
case InternalTimerServiceSerializationProxy.VERSION :
return new InternalTimersSnapshotReaderV2<>(userCodeClassLoader);
default :
// guard for future
throw new IllegalStateException("Unrecognized internal timers snapshot writer version: " + version);
}
} | 3.26 |
flink_MemoryUtils_allocateUnsafe_rdh | /**
* Allocates unsafe native memory.
*
* @param size
* size of the unsafe memory to allocate.
* @return address of the allocated unsafe memory
*/
static long allocateUnsafe(long size) {
return UNSAFE.allocateMemory(Math.max(1L, size));
} | 3.26 |
flink_MemoryUtils_getByteBufferAddress_rdh | /**
* Get native memory address wrapped by the given {@link ByteBuffer}.
*
* @param buffer
* {@link ByteBuffer} which wraps the native memory address to get
* @return native memory address wrapped by the given {@link ByteBuffer}
*/
static long getByteBufferAddress(ByteBuffer buffer) {
Preconditions.checkNotNull(buffer, "buffer is null");
Preconditions.checkArgument(buffer.isDirect(), "Can't get address of a non-direct ByteBuffer.");
long offHeapAddress;
try {
offHeapAddress = UNSAFE.getLong(buffer, BUFFER_ADDRESS_FIELD_OFFSET);
} catch (Throwable t) {
throw new Error("Could not access direct byte buffer address field.", t);
}
Preconditions.checkState(offHeapAddress > 0, "negative pointer or size");
Preconditions.checkState(offHeapAddress < (Long.MAX_VALUE - Integer.MAX_VALUE), (("Segment initialized with too large address: " + offHeapAddress) + " ; Max allowed address is ") + ((Long.MAX_VALUE - Integer.MAX_VALUE) - 1));
return offHeapAddress;
} | 3.26 |
flink_MemoryUtils_createMemoryCleaner_rdh | /**
* Creates a cleaner to release the unsafe memory.
*
* @param address
* address of the unsafe memory to release
 * @param customCleanup
 * 		A custom cleanup action to run after the unsafe memory has been released
* @return action to run to release the unsafe memory manually
*/
static Runnable createMemoryCleaner(long address, Runnable customCleanup) {
return () -> {
releaseUnsafe(address);
customCleanup.run();
};
} | 3.26 |
flink_MemoryUtils_wrapUnsafeMemoryWithByteBuffer_rdh | /**
* Wraps the unsafe native memory with a {@link ByteBuffer}.
*
* @param address
* address of the unsafe memory to wrap
* @param size
* size of the unsafe memory to wrap
* @return a {@link ByteBuffer} which is a view of the given unsafe memory
*/
static ByteBuffer wrapUnsafeMemoryWithByteBuffer(long address, int size) {
// noinspection OverlyBroadCatchBlock
try {
ByteBuffer buffer = ((ByteBuffer) (UNSAFE.allocateInstance(DIRECT_BYTE_BUFFER_CLASS)));
UNSAFE.putLong(buffer, BUFFER_ADDRESS_FIELD_OFFSET, address);
UNSAFE.putInt(buffer, BUFFER_CAPACITY_FIELD_OFFSET, size);
buffer.clear();
return buffer;
} catch (Throwable t) {
throw new Error("Failed to wrap unsafe off-heap memory with ByteBuffer", t);
}
} | 3.26 |
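A hedged sketch combining the MemoryUtils helpers above (allocate, wrap, clean up). The methods are package-private, so this would have to live in the same package; the buffer size is illustrative and imports are omitted.

```java
// Sketch only: allocate unsafe memory, view it through a ByteBuffer, then release it.
long size = 1024L;
long address = MemoryUtils.allocateUnsafe(size);
ByteBuffer view = MemoryUtils.wrapUnsafeMemoryWithByteBuffer(address, (int) size);
Runnable cleaner = MemoryUtils.createMemoryCleaner(address, () -> { /* extra cleanup, if any */ });
// ... use the buffer view ...
cleaner.run(); // releases the unsafe memory and then runs the custom cleanup
```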
flink_TimestampsAndWatermarksTransformation_getWatermarkStrategy_rdh | /**
* Returns the {@code WatermarkStrategy} to use.
*/
public WatermarkStrategy<IN> getWatermarkStrategy() {
return watermarkStrategy;
} | 3.26 |
flink_TimestampsAndWatermarksTransformation_getInputType_rdh | /**
* Returns the {@code TypeInformation} for the elements of the input.
*/
public TypeInformation<IN> getInputType() {
return input.getOutputType();
} | 3.26 |
flink_ChannelStatePersister_parseEvent_rdh | /**
* Parses the buffer as an event and returns the {@link CheckpointBarrier} if the event is
* indeed a barrier or returns null in all other cases.
*/
@Nullable
protected AbstractEvent parseEvent(Buffer buffer) throws IOException {
if (buffer.isBuffer()) {
return null;
} else {
AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
// reset the buffer because it would be deserialized again in SingleInputGate while
// getting next buffer.
// we can further improve to avoid double deserialization in the future.
buffer.setReaderIndex(0);
return event;
}
} | 3.26 |
flink_SavepointRestoreSettings_toConfiguration_rdh | // -------------------------- Parsing to and from a configuration object
// ------------------------------------
public static void toConfiguration(final SavepointRestoreSettings savepointRestoreSettings, final Configuration configuration) {
configuration.set(SavepointConfigOptions.SAVEPOINT_IGNORE_UNCLAIMED_STATE, savepointRestoreSettings.allowNonRestoredState());
configuration.set(SavepointConfigOptions.RESTORE_MODE, savepointRestoreSettings.getRestoreMode());
final String savepointPath = savepointRestoreSettings.getRestorePath();
if (savepointPath != null) {
configuration.setString(SavepointConfigOptions.SAVEPOINT_PATH, savepointPath);
}
} | 3.26 |
flink_SavepointRestoreSettings_none_rdh | // ------------------------------------------------------------------------
public static SavepointRestoreSettings none() {
return NONE;
} | 3.26 |
flink_SavepointRestoreSettings_getRestoreMode_rdh | /**
* Tells how to restore from the given savepoint.
*/
public RestoreMode getRestoreMode() {
return restoreMode;
} | 3.26 |
flink_InternalSourceReaderMetricGroup_watermarkEmitted_rdh | /**
* Called when a watermark was emitted.
*
* <p>Note this function should be called before the actual watermark is emitted such that
* chained processing does not influence the statistics.
*/
public void watermarkEmitted(long watermark) {
if (watermark == MAX_WATERMARK_TIMESTAMP) {
return;
}
lastWatermark = watermark;
if (firstWatermark) {
parentMetricGroup.gauge(MetricNames.WATERMARK_LAG, this::getWatermarkLag);
firstWatermark = false;
}
} | 3.26 |
flink_InternalSourceReaderMetricGroup_getLastEmitTime_rdh | /**
* This is a rough approximation. If the source is busy, we assume that <code>
* emit time == now()
* </code>. If it's idling, we just take the time it started idling as the last emit time.
*/
private long getLastEmitTime() {
return isIdling() ? idleStartTime : clock.absoluteTimeMillis();
} | 3.26 |
flink_InternalSourceReaderMetricGroup_recordEmitted_rdh | /**
 * Called when a new record was emitted with the given timestamp. {@link TimestampAssigner#NO_TIMESTAMP} indicates that the record did not have a timestamp.
*
* <p>Note this function should be called before the actual record is emitted such that chained
* processing does not influence the statistics.
*/
public void recordEmitted(long timestamp) {
idleStartTime = ACTIVE;
lastEventTime = timestamp;
} | 3.26 |
flink_ParquetRowDataWriter_write_rdh | /**
* It writes a record to Parquet.
*
* @param record
* Contains the record that is going to be written.
*/
public void write(final RowData record) {
recordConsumer.startMessage();
rowWriter.write(record);
recordConsumer.endMessage();
} | 3.26 |
flink_ProcessingTimeServiceUtil_getProcessingTimeDelay_rdh | /**
* Returns the remaining delay of the processing time specified by {@code processingTimestamp}.
* This delay guarantees that the timer will be fired at least 1ms after the time it's
* registered for.
*
* @param processingTimestamp
* the processing time in milliseconds
* @param currentTimestamp
* the current processing timestamp; it usually uses {@link ProcessingTimeService#getCurrentProcessingTime()} to get
* @return the remaining delay of the processing time
*/
public static long getProcessingTimeDelay(long processingTimestamp, long currentTimestamp) {
// Two cases of timers here:
// (1) future/now timers(processingTimestamp >= currentTimestamp): delay the firing of the
// timer by 1 ms to align the semantics with watermark. A watermark T says we
// won't see elements in the future with a timestamp smaller or equal to T. Without this
// 1ms delay, if we had fired the timer for T at the timestamp T, it would be possible
// that we would process another record for timestamp == T in the same millisecond, but
// after the timer for the timestamp T has already been fired.
// (2) past timers(processingTimestamp < currentTimestamp): do not need to delay the firing
// because currentTimestamp is larger than processingTimestamp plus the 1ms offset.
// TODO. The processing timers' performance can be further improved.
// see FLINK-23690 and https://github.com/apache/flink/pull/16744
if (processingTimestamp >= currentTimestamp) {
return (processingTimestamp - currentTimestamp) + 1;
} else {
return 0;
}
} | 3.26 |
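A minimal sketch of the two branches above, with illustrative timestamps (the class name is taken from the snippet name):

```java
long now = 1_000L;
// future/now timer: fired 1 ms after the requested processing time
long futureDelay = ProcessingTimeServiceUtil.getProcessingTimeDelay(1_500L, now); // == 501
// past timer: fired immediately
long pastDelay = ProcessingTimeServiceUtil.getProcessingTimeDelay(900L, now);     // == 0
```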
flink_InputProperty_keepInputAsIsDistribution_rdh | /**
 * A special distribution which indicates that the data distribution is the same as its input.
*
* @param inputDistribution
* the input distribution
* @param strict
* whether the input distribution is strictly guaranteed
*/
public static KeepInputAsIsDistribution keepInputAsIsDistribution(RequiredDistribution inputDistribution, boolean strict) {
return new KeepInputAsIsDistribution(inputDistribution, strict);
} | 3.26 |
flink_InputProperty_hashDistribution_rdh | /**
* The input will read the records whose keys hash to a particular hash value.
*
* @param keys
* hash keys
*/
public static HashDistribution hashDistribution(int[] keys) {
return new HashDistribution(keys);
} | 3.26 |
flink_SignalHandler_handle_rdh | /**
* Handle an incoming signal.
*
* @param signal
* The incoming signal
*/
@Override
public void handle(Signal signal) {
LOG.info("RECEIVED SIGNAL {}: SIG{}. Shutting down as requested.",
signal.getNumber(), signal.getName());
prevHandler.handle(signal);
} | 3.26 |
flink_SignalHandler_register_rdh | /**
* Register some signal handlers.
*
* @param LOG
* The slf4j logger
*/
public static void register(final Logger LOG) {
synchronized(SignalHandler.class) {
if (registered) {
return;
}
registered = true;
final String[] SIGNALS = OperatingSystem.isWindows()
? new String[]{ "TERM", "INT" }
: new String[]{ "TERM", "HUP", "INT" };
StringBuilder bld = new StringBuilder();
bld.append("Registered UNIX signal handlers for [");
String separator = "";
for (String signalName : SIGNALS) {
try {
new Handler(signalName, LOG);
bld.append(separator);
bld.append(signalName);
separator = ", ";
} catch (Exception e) {
LOG.info("Error while registering signal handler", e);
}
}
bld.append("]");
LOG.info(bld.toString());
}
} | 3.26 |
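A hedged usage sketch of the registration above; LOG stands for the caller's slf4j Logger and is an assumption, not part of this excerpt.

```java
// Register the process-wide signal handlers once at startup (the guard above makes this idempotent).
SignalHandler.register(LOG);
```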
flink_HiveParserJoinTypeCheckCtx_getInputRRList_rdh | /**
*
* @return the inputRR List
*/
public List<HiveParserRowResolver> getInputRRList() {
return inputRRLst;
} | 3.26 |
flink_VertexThreadInfoStats_getEndTime_rdh | /**
 * Returns the timestamp when all samples were collected.
 *
 * @return Timestamp when all samples were collected
*/
@Override
public long getEndTime() {
return endTime;
} | 3.26 |
flink_VertexThreadInfoStats_getSamplesBySubtask_rdh | /**
 * Returns a map of thread info samples by subtask (execution ID).
*
* @return Map of thread info samples by task (execution ID)
*/
public Map<ExecutionAttemptID, Collection<ThreadInfoSample>> getSamplesBySubtask() {
return samplesBySubtask;
} | 3.26 |
flink_UpsertTestSinkBuilder_setValueSerializationSchema_rdh | /**
* Sets the value {@link SerializationSchema} that transforms incoming records to byte[].
*
* @param valueSerializationSchema
* @return {@link UpsertTestSinkBuilder}
*/
public UpsertTestSinkBuilder<IN> setValueSerializationSchema(SerializationSchema<IN> valueSerializationSchema) {
this.valueSerializationSchema = checkNotNull(valueSerializationSchema);
return this;
} | 3.26 |
flink_UpsertTestSinkBuilder_build_rdh | /**
* Constructs the {@link UpsertTestSink} with the configured properties.
*
* @return {@link UpsertTestSink}
*/
public UpsertTestSink<IN> build() {
checkNotNull(outputFile);
checkNotNull(keySerializationSchema);
checkNotNull(valueSerializationSchema);
return new UpsertTestSink<>(outputFile, keySerializationSchema, valueSerializationSchema);
} | 3.26 |
flink_UpsertTestSinkBuilder_setKeySerializationSchema_rdh | /**
* Sets the key {@link SerializationSchema} that transforms incoming records to byte[].
*
* @param keySerializationSchema
* @return {@link UpsertTestSinkBuilder}
*/
public UpsertTestSinkBuilder<IN> setKeySerializationSchema(SerializationSchema<IN> keySerializationSchema) {
this.keySerializationSchema = checkNotNull(keySerializationSchema);
return this;
} | 3.26 |
flink_UpsertTestSinkBuilder_setOutputFile_rdh | /**
* Sets the output {@link File} to write to.
*
* @param outputFile
* @return {@link UpsertTestSinkBuilder}
*/
public UpsertTestSinkBuilder<IN> setOutputFile(File outputFile) {
this.outputFile = checkNotNull(outputFile);
return this;
} | 3.26 |
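A hedged sketch chaining the three setters and build() shown above. How the builder instance is obtained, the MyRecord type, and the two SerializationSchema instances are assumptions, not part of this excerpt.

```java
UpsertTestSink<MyRecord> sink = builder
        .setOutputFile(new File("/tmp/upsert-test-output"))
        .setKeySerializationSchema(keySchema)     // SerializationSchema<MyRecord>, assumed
        .setValueSerializationSchema(valueSchema) // SerializationSchema<MyRecord>, assumed
        .build();
```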
flink_SymbolArgumentTypeStrategy_equals_rdh | // ---------------------------------------------------------------------------------------------
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if ((other == null) || (getClass() != other.getClass())) {
return false;
}
SymbolArgumentTypeStrategy that = ((SymbolArgumentTypeStrategy) (other));
return Objects.equals(symbolClass, that.symbolClass);
} | 3.26 |
flink_SimpleStreamFormat_isSplittable_rdh | // ------------------------------------------------------------------------
// pre-defined methods from Stream Format
// ------------------------------------------------------------------------
/**
 * This format is never splittable.
*/
@Override
public final boolean isSplittable() {
return false;
} | 3.26 |
flink_PartitionedFileWriter_m1_rdh | /**
* Writes a list of {@link Buffer}s to this {@link PartitionedFile}. It guarantees that after
* the return of this method, the target buffers can be released. In a data region, all data of
* the same subpartition must be written together.
*
* <p>Note: The caller is responsible for recycling the target buffers and releasing the failed
* {@link PartitionedFile} if any exception occurs.
*/
public void m1(List<BufferWithChannel> bufferWithChannels) throws IOException {
checkState(!isFinished, "File writer is already finished.");
checkState(!f1, "File writer is already closed.");
if (bufferWithChannels.isEmpty()) {
return;
}
numBuffers += bufferWithChannels.size();
long expectedBytes;
ByteBuffer[] bufferWithHeaders = new ByteBuffer[2 * bufferWithChannels.size()];
if (isBroadcastRegion) {
expectedBytes = collectBroadcastBuffers(bufferWithChannels, bufferWithHeaders);
} else {
expectedBytes = collectUnicastBuffers(bufferWithChannels, bufferWithHeaders);
}
totalBytesWritten += expectedBytes;
BufferReaderWriterUtil.writeBuffers(dataFileChannel, expectedBytes, bufferWithHeaders);
} | 3.26 |
flink_PartitionedFileWriter_releaseQuietly_rdh | /**
* Used to close and delete the failed {@link PartitionedFile} when any exception occurs.
*/
public void releaseQuietly() {
IOUtils.closeQuietly(this);
IOUtils.deleteFileQuietly(dataFilePath);
IOUtils.deleteFileQuietly(indexFilePath);
} | 3.26 |
flink_PartitionedFileWriter_m0_rdh | /**
* Persists the region index of the current data region and starts a new region to write.
*
* <p>Note: The caller is responsible for releasing the failed {@link PartitionedFile} if any
* exception occurs.
*
* @param isBroadcastRegion
* Whether it's a broadcast region. See {@link #isBroadcastRegion}.
*/
public void m0(boolean isBroadcastRegion) throws IOException {
checkState(!isFinished, "File writer is already finished.");
checkState(!f1, "File writer is already closed.");
writeRegionIndex();
this.isBroadcastRegion = isBroadcastRegion;
} | 3.26 |
flink_PartitionedFileWriter_finish_rdh | /**
* Finishes writing the {@link PartitionedFile} which closes the file channel and returns the
* corresponding {@link PartitionedFile}.
*
* <p>Note: The caller is responsible for releasing the failed {@link PartitionedFile} if any
* exception occurs.
*/
public PartitionedFile finish() throws IOException {
checkState(!isFinished, "File writer is already finished.");
checkState(!f1, "File writer is already closed.");
isFinished = true;
writeRegionIndex();
flushIndexBuffer();
indexBuffer.rewind();
long dataFileSize = dataFileChannel.size();
long indexFileSize = f0.size();
close();
ByteBuffer indexEntryCache = null;
if (allIndexEntriesCached) {
indexEntryCache = indexBuffer;
}
indexBuffer = null;
return new PartitionedFile(numRegions, numSubpartitions, dataFilePath, indexFilePath, dataFileSize, indexFileSize, numBuffers, indexEntryCache);
} | 3.26 |
flink_SolutionSetBroker_instance_rdh | /**
* Retrieve the singleton instance.
*/
public static Broker<Object> instance() {
return INSTANCE;
} | 3.26 |
flink_ExecutableOperationUtils_createDynamicTableSink_rdh | /**
* Creates a {@link DynamicTableSink} from a {@link CatalogTable}.
*
 * <p>It first tries to create the table sink from the given {@param catalog}, then from the {@param sinkFactorySupplier}. Otherwise, an attempt is made to discover a matching
 * factory using Java SPI (see {@link Factory} for details).
*/
public static DynamicTableSink createDynamicTableSink(@Nullable Catalog catalog, Supplier<Optional<DynamicTableSinkFactory>> sinkFactorySupplier, ObjectIdentifier objectIdentifier, ResolvedCatalogTable catalogTable, Map<String, String> enrichmentOptions, ReadableConfig configuration, ClassLoader classLoader, boolean isTemporary) {
DynamicTableSinkFactory dynamicTableSinkFactory = null;
if (((catalog != null) && catalog.getFactory().isPresent()) && (catalog.getFactory().get() instanceof DynamicTableSinkFactory)) {
// try get from catalog
dynamicTableSinkFactory = ((DynamicTableSinkFactory) (catalog.getFactory().get()));
}
if (dynamicTableSinkFactory == null) {
dynamicTableSinkFactory = sinkFactorySupplier.get().orElse(null);
}
return FactoryUtil.createDynamicTableSink(dynamicTableSinkFactory, objectIdentifier, catalogTable, enrichmentOptions, configuration, classLoader, isTemporary);
} | 3.26 |
flink_SqlTimeTypeInfo_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return Objects.hash(clazz, f0, f1);
} | 3.26 |
flink_SqlTimeTypeInfo_instantiateComparator_rdh | // --------------------------------------------------------------------------------------------
private static <X> TypeComparator<X> instantiateComparator(Class<? extends TypeComparator<X>> comparatorClass, boolean ascendingOrder) {
try {
Constructor<? extends TypeComparator<X>> constructor = comparatorClass.getConstructor(boolean.class);
return constructor.newInstance(ascendingOrder);
} catch (Exception e) {
throw new RuntimeException("Could not initialize comparator " + comparatorClass.getName(), e);
}
} | 3.26 |
flink_DefaultCompletedCheckpointStore_addCheckpointAndSubsumeOldestOne_rdh | /**
* Synchronously writes the new checkpoints to state handle store and asynchronously removes
* older ones.
*
* @param checkpoint
* Completed checkpoint to add.
* @throws PossibleInconsistentStateException
* if adding the checkpoint failed and leaving the
* system in a possibly inconsistent state, i.e. it's uncertain whether the checkpoint
* metadata was fully written to the underlying systems or not.
 */
@Override
public CompletedCheckpoint addCheckpointAndSubsumeOldestOne(final CompletedCheckpoint checkpoint, CheckpointsCleaner checkpointsCleaner, Runnable postCleanup) throws Exception {
Preconditions.checkState(running.get(), "Checkpoint store has already been shutdown.");
checkNotNull(checkpoint, "Checkpoint");
final String path = completedCheckpointStoreUtil.checkpointIDToName(checkpoint.getCheckpointID());
// Now add the new one. If it fails, we don't want to lose existing data.
checkpointStateHandleStore.addAndLock(path, checkpoint);
completedCheckpoints.addLast(checkpoint);
// Remove completed checkpoint from queue and checkpointStateHandleStore, not discard.
Optional<CompletedCheckpoint> subsume = CheckpointSubsumeHelper.subsume(completedCheckpoints, maxNumberOfCheckpointsToRetain, completedCheckpoint -> {
tryRemove(completedCheckpoint.getCheckpointID());
checkpointsCleaner.addSubsumedCheckpoint(completedCheckpoint);
});
findLowest(completedCheckpoints).ifPresent(id -> checkpointsCleaner.cleanSubsumedCheckpoints(id, getSharedStateRegistry().unregisterUnusedState(id), postCleanup, ioExecutor));
return subsume.orElse(null);
} | 3.26 |
flink_DefaultCompletedCheckpointStore_tryRemove_rdh | /**
* Tries to remove the checkpoint identified by the given checkpoint id.
*
* @param checkpointId
* identifying the checkpoint to remove
* @return true if the checkpoint could be removed
*/
private boolean tryRemove(long checkpointId) throws Exception {
return checkpointStateHandleStore.releaseAndTryRemove(completedCheckpointStoreUtil.checkpointIDToName(checkpointId));
} | 3.26 |
flink_DefaultCompletedCheckpointStore_tryRemoveCompletedCheckpoint_rdh | // ---------------------------------------------------------------------------------------------------------
// Private methods
// ---------------------------------------------------------------------------------------------------------
private boolean tryRemoveCompletedCheckpoint(CompletedCheckpoint completedCheckpoint, boolean shouldDiscard, CheckpointsCleaner checkpointsCleaner, Runnable postCleanup) throws Exception {
if (tryRemove(completedCheckpoint.getCheckpointID())) {
checkpointsCleaner.cleanCheckpoint(completedCheckpoint, shouldDiscard, postCleanup, ioExecutor);
return shouldDiscard;
}
return shouldDiscard;
} | 3.26 |
flink_StandardDeCompressors_getCommonSuffixes_rdh | // ------------------------------------------------------------------------
/**
* Gets all common file extensions of supported file compression formats.
*/
public static Collection<String> getCommonSuffixes() {
return COMMON_SUFFIXES;
} | 3.26 |
flink_StandardDeCompressors_m0_rdh | // ------------------------------------------------------------------------
private static Map<String, InflaterInputStreamFactory<?>> m0(final InflaterInputStreamFactory<?>... decompressors) {
final LinkedHashMap<String, InflaterInputStreamFactory<?>> map = new LinkedHashMap<>(decompressors.length);
for (InflaterInputStreamFactory<?> decompressor : decompressors) {
for (String suffix : decompressor.getCommonFileExtensions()) {
map.put(suffix, decompressor);
}
}
return map;
} | 3.26 |
flink_StandardDeCompressors_getDecompressorForExtension_rdh | /**
* Gets the decompressor for a file extension. Returns null if there is no decompressor for this
* file extension.
*/
@Nullable
public static InflaterInputStreamFactory<?> getDecompressorForExtension(String extension) {
return DECOMPRESSORS.get(extension);
} | 3.26 |
flink_StandardDeCompressors_getDecompressorForFileName_rdh | /**
* Gets the decompressor for a file name. This checks the file against all known and supported
* file extensions. Returns null if there is no decompressor for this file name.
*/
@Nullable
public static InflaterInputStreamFactory<?> getDecompressorForFileName(String fileName) {
for (final Map.Entry<String, InflaterInputStreamFactory<?>> entry : DECOMPRESSORS.entrySet()) {
if (fileName.endsWith(entry.getKey())) {
return entry.getValue();
}
}
return null;
} | 3.26 |
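A hedged sketch of the two lookups above; whether a suffix such as "gz" is actually registered depends on the decompressors wired up elsewhere in this class.

```java
InflaterInputStreamFactory<?> byExtension = StandardDeCompressors.getDecompressorForExtension("gz");
InflaterInputStreamFactory<?> byFileName = StandardDeCompressors.getDecompressorForFileName("data.csv.gz");
if (byFileName == null) {
    // no known compression suffix: read the file uncompressed
}
```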
flink_SourceStreamTask_triggerCheckpointAsync_rdh | // ------------------------------------------------------------------------
// Checkpointing
// ------------------------------------------------------------------------
@Override
public CompletableFuture<Boolean> triggerCheckpointAsync(CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions) {
if (!externallyInducedCheckpoints) {
if (isSynchronousSavepoint(checkpointOptions.getCheckpointType())) {
return triggerStopWithSavepointAsync(checkpointMetaData, checkpointOptions);
} else {
return super.triggerCheckpointAsync(checkpointMetaData, checkpointOptions);
}
} else if (checkpointOptions.getCheckpointType().equals(CheckpointType.FULL_CHECKPOINT)) {
// see FLINK-25256
throw new IllegalStateException(("Using externally induced sources, we can not enforce taking a full checkpoint." + "If you are restoring from a snapshot in NO_CLAIM mode, please use") + " either CLAIM or LEGACY mode.");
} else {
// we do not trigger checkpoints here, we simply state whether we can trigger them
synchronized(lock) {
return CompletableFuture.completedFuture(isRunning());
}
}
} | 3.26 |
flink_SavepointReader_readBroadcastState_rdh | /**
* Read operator {@code BroadcastState} from a {@code Savepoint}.
*
* @param identifier
* The identifier of the operator.
* @param name
* The (unique) name for the state.
* @param keyTypeInfo
* The type information for the keys in the state.
* @param valueTypeInfo
* The type information for the values in the state.
* @param <K>
* The type of keys in state.
* @param <V>
* The type of values in state.
* @return A {@code DataStream} of key-value pairs from state.
* @throws IOException
* If the savepoint does not contain the specified uid.
*/
public <K, V> DataStream<Tuple2<K, V>> readBroadcastState(OperatorIdentifier identifier, String name, TypeInformation<K> keyTypeInfo, TypeInformation<V> valueTypeInfo) throws IOException {
return readBroadcastState(identifier, keyTypeInfo, valueTypeInfo, new MapStateDescriptor<>(name, keyTypeInfo, valueTypeInfo));
}
/**
*
* @deprecated use {@link #readBroadcastState(OperatorIdentifier, String, TypeInformation,
TypeInformation, TypeSerializer, TypeSerializer)} | 3.26 |
flink_SavepointReader_readKeyedState_rdh | /**
* Read keyed state from an operator in a {@code Savepoint}.
*
* @param identifier
* The identifier of the operator.
* @param function
* The {@link KeyedStateReaderFunction} that is called for each key in state.
* @param <K>
* The type of the key in state.
* @param <OUT>
* The output type of the transform function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException
* If the savepoint does not contain operator state with the given uid.
*/
public <K, OUT> DataStream<OUT> readKeyedState(OperatorIdentifier identifier, KeyedStateReaderFunction<K, OUT> function) throws IOException {
TypeInformation<K> keyTypeInfo;
TypeInformation<OUT> outType;
try {
keyTypeInfo = TypeExtractor.createTypeInfo(KeyedStateReaderFunction.class, function.getClass(), 0, null, null);
} catch (InvalidTypesException e) {
throw new InvalidProgramException("The key type of the KeyedStateReaderFunction could not be automatically determined. Please use " + "Savepoint#readKeyedState(String, KeyedStateReaderFunction, TypeInformation, TypeInformation) instead.", e);
}
try {
outType = TypeExtractor.getUnaryOperatorReturnType(function, KeyedStateReaderFunction.class, 0, 1, TypeExtractor.NO_INDEX, keyTypeInfo, Utils.getCallLocationName(), false);
} catch (InvalidTypesException e) {
throw new InvalidProgramException("The output type of the KeyedStateReaderFunction could not be automatically determined. Please use " + "Savepoint#readKeyedState(String, KeyedStateReaderFunction, TypeInformation, TypeInformation) instead.", e);
}
return readKeyedState(identifier, function, keyTypeInfo, outType);
}
/**
*
* @deprecated use {@link #readKeyedState(OperatorIdentifier, KeyedStateReaderFunction,
TypeInformation, TypeInformation)} | 3.26 |
flink_SavepointReader_readUnionState_rdh | /**
* Read operator {@code UnionState} from a {@code Savepoint} when a custom serializer was used;
* e.g., a different serializer than the one returned by {@code TypeInformation#createSerializer}.
*
* @param identifier
* The identifier of the operator.
* @param name
* The (unique) name for the state.
* @param typeInfo
* The type of the elements in the state.
* @param serializer
* The serializer used to write the elements into state.
* @param <T>
* The type of the values that are in the union state.
* @return A {@code DataStream} representing the elements in state.
* @throws IOException
* If the savepoint path is invalid or the uid does not exist.
*/
public <T> DataStream<T> readUnionState(OperatorIdentifier identifier, String name, TypeInformation<T> typeInfo, TypeSerializer<T> serializer) throws IOException {
return readUnionState(identifier, typeInfo, new ListStateDescriptor<>(name, serializer));
} | 3.26 |
flink_SavepointReader_read_rdh | /**
* Loads an existing savepoint. Useful if you want to query the state of an existing
* application.
*
* @param env
* The execution environment used to transform the savepoint.
* @param path
* The path to an existing savepoint on disk.
* @param stateBackend
* The state backend of the savepoint.
* @return A {@link SavepointReader}.
 */
public static SavepointReader read(StreamExecutionEnvironment env, String path, StateBackend stateBackend) throws IOException {
CheckpointMetadata metadata = SavepointLoader.loadSavepointMetadata(path);
int maxParallelism = metadata.getOperatorStates().stream().map(OperatorState::getMaxParallelism).max(Comparator.naturalOrder()).orElseThrow(() -> new RuntimeException("Savepoint must contain at least one operator state."));
SavepointMetadataV2 savepointMetadata = new SavepointMetadataV2(maxParallelism, metadata.getMasterStates(), metadata.getOperatorStates());
return new SavepointReader(env, savepointMetadata, stateBackend);
} | 3.26 |
flink_SavepointReader_readListState_rdh | /**
* Read operator {@code ListState} from a {@code Savepoint} when a custom serializer was used;
* e.g., a different serializer than the one returned by {@code TypeInformation#createSerializer}.
*
* @param identifier
* The identifier of the operator.
* @param name
* The (unique) name for the state.
* @param typeInfo
* The type of the elements in the state.
* @param serializer
* The serializer used to write the elements into state.
* @param <T>
* The type of the values that are in the list state.
* @return A {@code DataStream} representing the elements in state.
* @throws IOException
* If the savepoint path is invalid or the uid does not exist.
*/
public <T> DataStream<T> readListState(OperatorIdentifier identifier, String name, TypeInformation<T> typeInfo, TypeSerializer<T> serializer) throws IOException {
return readListState(identifier, typeInfo, new ListStateDescriptor<>(name, serializer));
} | 3.26 |
flink_SavepointReader_m0_rdh | /**
*
* @deprecated use {@link #readKeyedState(OperatorIdentifier, KeyedStateReaderFunction)}
*/
@Deprecated
public <K, OUT> DataStream<OUT> m0(String uid, KeyedStateReaderFunction<K, OUT> function) throws IOException {
return m0(OperatorIdentifier.forUid(uid), function);
} | 3.26 |
flink_SavepointReader_window_rdh | /**
* Read window state from an operator in a {@code Savepoint}. This method supports reading from
* any type of window.
*
* @param windowSerializer
* The serializer used for the window type.
* @return A {@link WindowSavepointReader}.
*/
public <W extends Window> WindowSavepointReader<W> window(TypeSerializer<W> windowSerializer) {
Preconditions.checkNotNull(windowSerializer, "The window serializer must not be null");
return new WindowSavepointReader<>(env, metadata, stateBackend, windowSerializer);
} | 3.26 |
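A hedged sketch tying read() and readKeyedState() together. The savepoint path, operator uid, the HashMapStateBackend choice, and the MyOutput/MyKeyedStateReaderFunction types are placeholders, not part of this excerpt.

```java
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
SavepointReader savepoint =
        SavepointReader.read(env, "file:///tmp/savepoints/savepoint-xxxx", new HashMapStateBackend());
DataStream<MyOutput> restored =
        savepoint.readKeyedState(OperatorIdentifier.forUid("my-operator-uid"), new MyKeyedStateReaderFunction());
```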
flink_FileSystem_loadFileSystemFactories_rdh | // ------------------------------------------------------------------------
/**
* Loads the factories for the file systems directly supported by Flink. Aside from the {@link LocalFileSystem}, these file systems are loaded via Java's service framework.
*
 * @return A list of the loaded file system factories.
 */
private static List<FileSystemFactory> loadFileSystemFactories(Collection<Supplier<Iterator<FileSystemFactory>>> factoryIteratorsSuppliers) {
final ArrayList<FileSystemFactory> list = new ArrayList<>();
// by default, we always have the local file system factory
list.add(new LocalFileSystemFactory());
LOG.debug("Loading extension file systems via services");
for (Supplier<Iterator<FileSystemFactory>> factoryIteratorsSupplier : factoryIteratorsSuppliers) {
try {
addAllFactoriesToList(factoryIteratorsSupplier.get(), list);
} catch (Throwable t) {
// catching Throwable here to handle various forms of class loading
// and initialization errors
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
LOG.error("Failed to load additional file systems via services",
t);
}}
return Collections.unmodifiableList(list);
} | 3.26 |
flink_FileSystem_m0_rdh | // ------------------------------------------------------------------------
// Initialization
// ------------------------------------------------------------------------
/**
* Initializes the shared file system settings.
*
* <p>The given configuration is passed to each file system factory to initialize the respective
* file systems. Because the configuration of file systems may be different subsequent to the
* call of this method, this method clears the file system instance cache.
*
* <p>This method also reads the default file system URI from the configuration key {@link CoreOptions#DEFAULT_FILESYSTEM_SCHEME}. All calls to {@link FileSystem#get(URI)} where the
* URI has no scheme will be interpreted as relative to that URI. As an example, assume the
* default file system URI is set to {@code 'hdfs://localhost:9000/'}. A file path of {@code '/user/USERNAME/in.txt'} is interpreted as {@code 'hdfs://localhost:9000/user/USERNAME/in.txt'}.
*
* @deprecated use {@link #initialize(Configuration, PluginManager)} instead.
* @param config
* the configuration from where to fetch the parameter.
*/
@Deprecated
public static void m0(Configuration config) throws IllegalConfigurationException {
initializeWithoutPlugins(config);
} | 3.26 |
flink_FileSystem_exists_rdh | /**
 * Checks whether the given path exists.
*
* @param f
* source file
*/
public boolean exists(final Path f) throws IOException {
try {
return getFileStatus(f) != null;
} catch (FileNotFoundException e) {
return false;
}
} | 3.26 |
flink_FileSystem_initOutPathLocalFS_rdh | // ------------------------------------------------------------------------
// output directory initialization
// ------------------------------------------------------------------------
/**
* Initializes output directories on local file systems according to the given write mode.
*
* <ul>
* <li>WriteMode.NO_OVERWRITE & parallel output:
* <ul>
* <li>A directory is created if the output path does not exist.
* <li>An existing directory is reused, files contained in the directory are NOT
* deleted.
* <li>An existing file raises an exception.
* </ul>
* <li>WriteMode.NO_OVERWRITE & NONE parallel output:
* <ul>
* <li>An existing file or directory raises an exception.
* </ul>
* <li>WriteMode.OVERWRITE & parallel output:
* <ul>
* <li>A directory is created if the output path does not exist.
* <li>An existing directory is reused, files contained in the directory are NOT
* deleted.
* <li>An existing file is deleted and replaced by a new directory.
* </ul>
* <li>WriteMode.OVERWRITE & NONE parallel output:
* <ul>
* <li>An existing file or directory (and all its content) is deleted
* </ul>
* </ul>
*
* <p>Files contained in an existing directory are not deleted, because multiple instances of a
* DataSinkTask might call this function at the same time and hence might perform concurrent
* delete operations on the file system (possibly deleting output files of concurrently running
* tasks). Since concurrent DataSinkTasks are not aware of each other, coordination of delete
* and create operations would be difficult.
*
* @param outPath
* Output path that should be prepared.
* @param writeMode
* Write mode to consider.
* @param createDirectory
* True, to initialize a directory at the given path, false to prepare
* space for a file.
* @return True, if the path was successfully prepared, false otherwise.
* @throws IOException
* Thrown, if any of the file system access operations failed.
*/
public boolean initOutPathLocalFS(Path outPath, WriteMode writeMode, boolean createDirectory) throws IOException {
if (isDistributedFS()) {
return false;
}
// NOTE: We actually need to lock here (process wide). Otherwise, multiple threads that
// concurrently work in this method (multiple output formats writing locally) might end
// up deleting each other's directories and leave non-retrievable files, without necessarily
// causing an exception. That results in very subtle issues, like output files looking as if
// they are not getting created.
// we acquire the lock interruptibly here, to make sure that concurrent threads waiting
// here can cancel faster
try {
OUTPUT_DIRECTORY_INIT_LOCK.lockInterruptibly();
} catch (InterruptedException e) {
// restore the interruption state
Thread.currentThread().interrupt();
// leave the method - we don't have the lock anyways
throw new IOException("The thread was interrupted while trying to initialize the output directory");
}
try {
FileStatus status;
try {
status = getFileStatus(outPath);
} catch (FileNotFoundException e) {
// okay, the file is not there
status = null;
}
// check if path exists
if (status != null) {
// path exists, check write mode
switch (writeMode) {
case NO_OVERWRITE :
if (status.isDir() && createDirectory) {
return true;
} else {
// file may not be overwritten
throw new IOException((((((("File or directory " + outPath) + " already exists. Existing files and directories ") + "are not overwritten in ") + WriteMode.NO_OVERWRITE.name()) + " mode. Use ") + WriteMode.OVERWRITE.name()) + " mode to overwrite existing files and directories.");
}
case OVERWRITE :
if (status.isDir()) {
if (createDirectory) {
// directory exists and does not need to be created
return true;
} else {
// we will write in a single file, delete directory
try {
delete(outPath, true);
} catch (IOException e) {
throw new IOException(("Could not remove existing directory '" + outPath) + "' to allow overwrite by result file", e);
}
}
} else {
// delete file
try {
delete(outPath, false);
} catch (IOException e) {
throw new IOException(("Could not remove existing file '" + outPath) + "' to allow overwrite by result file/directory", e);
}
}
break;
default :
throw new IllegalArgumentException("Invalid write mode: " + writeMode);
}
}
if (createDirectory) {
// Output directory needs to be created
if (!exists(outPath)) {
mkdirs(outPath);
}
// double check that the output directory exists
try {
return getFileStatus(outPath).isDir();
} catch (FileNotFoundException e) {
return false;
}
} else {
// check that the output path does not exist and an output file
// can be created by the output format.
return !exists(outPath);
}
} finally {
OUTPUT_DIRECTORY_INIT_LOCK.unlock();
}
} | 3.26 |
flink_FileSystem_getLocalFileSystem_rdh | // ------------------------------------------------------------------------
// Obtaining File System Instances
// ------------------------------------------------------------------------
/**
* Returns a reference to the {@link FileSystem} instance for accessing the local file system.
*
* @return a reference to the {@link FileSystem} instance for accessing the local file system.
*/
public static FileSystem getLocalFileSystem() {
return FileSystemSafetyNet.wrapWithSafetyNetWhenActivated(LocalFileSystem.getSharedInstance());
}
/**
* Returns a reference to the {@link FileSystem} instance for accessing the file system
* identified by the given {@link URI}.
*
* @param uri
* the {@link URI} identifying the file system
* @return a reference to the {@link FileSystem} instance for accessing the file system
identified by the given {@link URI} | 3.26 |
flink_FileSystem_createRecoverableWriter_rdh | /**
* Creates a new {@link RecoverableWriter}. A recoverable writer creates streams that can
* persist and recover their intermediate state. Persisting and recovering intermediate state is
* a core building block for writing to files that span multiple checkpoints.
*
* <p>The returned object can act as a shared factory to open and recover multiple streams.
*
* <p>This method is optional on file systems and various file system implementations may not
* support this method, throwing an {@code UnsupportedOperationException}.
*
* @return A RecoverableWriter for this file system.
* @throws IOException
* Thrown, if the recoverable writer cannot be instantiated.
*/
public RecoverableWriter createRecoverableWriter() throws IOException {
throw new UnsupportedOperationException("This file system does not support recoverable writers.");
} | 3.26 |
flink_FileSystem_getDefaultFsUri_rdh | /**
 * Gets the default file system URI that is used for paths and file systems that do not specify
 * an explicit scheme.
*
* <p>As an example, assume the default file system URI is set to {@code 'hdfs://someserver:9000/'}. A file path of {@code '/user/USERNAME/in.txt'} is interpreted as
* {@code 'hdfs://someserver:9000/user/USERNAME/in.txt'}.
*
* @return The default file system URI
*/
public static URI getDefaultFsUri() {
return defaultScheme != null ? defaultScheme : LocalFileSystem.getLocalFsURI();
} | 3.26 |
flink_FileSystem_initialize_rdh | /**
* Initializes the shared file system settings.
*
* <p>The given configuration is passed to each file system factory to initialize the respective
* file systems. Because the configuration of file systems may be different subsequent to the
* call of this method, this method clears the file system instance cache.
*
* <p>This method also reads the default file system URI from the configuration key {@link CoreOptions#DEFAULT_FILESYSTEM_SCHEME}. All calls to {@link FileSystem#get(URI)} where the
* URI has no scheme will be interpreted as relative to that URI. As an example, assume the
* default file system URI is set to {@code 'hdfs://localhost:9000/'}. A file path of {@code '/user/USERNAME/in.txt'} is interpreted as {@code 'hdfs://localhost:9000/user/USERNAME/in.txt'}.
*
* @param config
* the configuration from where to fetch the parameter.
* @param pluginManager
* optional plugin manager that is used to initialized filesystems provided
* as plugins.
*/
public static void initialize(Configuration config, @Nullable PluginManager pluginManager) throws IllegalConfigurationException {
LOCK.lock();
try {
// make sure file systems are re-instantiated after re-configuration
CACHE.clear();
FS_FACTORIES.clear();
Collection<Supplier<Iterator<FileSystemFactory>>> factorySuppliers = new ArrayList<>(2);
factorySuppliers.add(() -> ServiceLoader.load(FileSystemFactory.class).iterator());
if (pluginManager != null) {
factorySuppliers.add(() -> Iterators.transform(pluginManager.load(FileSystemFactory.class), PluginFileSystemFactory::of));
}
final List<FileSystemFactory> v1 = loadFileSystemFactories(factorySuppliers);
// configure all file system factories
for (FileSystemFactory factory : v1) {
factory.configure(config);
String scheme = factory.getScheme();
FileSystemFactory fsf = ConnectionLimitingFactory.decorateIfLimited(factory, scheme, config);
FS_FACTORIES.put(scheme, fsf);
}
// configure the default (fallback) factory
FALLBACK_FACTORY.configure(config);
// also read the default file system scheme
final String stringifiedUri = config.getString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, null);
if (stringifiedUri == null) {
defaultScheme = null;
} else {
try {
defaultScheme = new URI(stringifiedUri);
} catch (URISyntaxException e) {
throw new IllegalConfigurationException((("The default file system scheme ('" + CoreOptions.DEFAULT_FILESYSTEM_SCHEME) + "') is invalid: ") + stringifiedUri, e);
}
}
ALLOWED_FALLBACK_FILESYSTEMS.clear();
final Iterable<String> allowedFallbackFilesystems = Splitter.on(';').omitEmptyStrings().trimResults().split(config.getString(CoreOptions.ALLOWED_FALLBACK_FILESYSTEMS));
allowedFallbackFilesystems.forEach(ALLOWED_FALLBACK_FILESYSTEMS::add);
} finally {
LOCK.unlock();
}
} | 3.26 |
flink_FileSystem_initOutPathDistFS_rdh | /**
* Initializes output directories on distributed file systems according to the given write mode.
*
* <p>WriteMode.NO_OVERWRITE & parallel output: - A directory is created if the output path
* does not exist. - An existing file or directory raises an exception.
*
* <p>WriteMode.NO_OVERWRITE & NONE parallel output: - An existing file or directory raises
* an exception.
*
* <p>WriteMode.OVERWRITE & parallel output: - A directory is created if the output path
* does not exist. - An existing directory and its content is deleted and a new directory is
* created. - An existing file is deleted and replaced by a new directory.
*
* <p>WriteMode.OVERWRITE & NONE parallel output: - An existing file or directory is deleted
* and replaced by a new directory.
*
* @param outPath
* Output path that should be prepared.
* @param writeMode
* Write mode to consider.
* @param createDirectory
* True, to initialize a directory at the given path, false otherwise.
* @return True, if the path was successfully prepared, false otherwise.
* @throws IOException
* Thrown, if any of the file system access operations failed.
*/
public boolean initOutPathDistFS(Path outPath, WriteMode writeMode, boolean createDirectory) throws IOException {
if (!isDistributedFS()) {
return false;
}
// NOTE: We actually need to lock here (process wide). Otherwise, multiple threads that
// concurrently work in this method (multiple output formats writing locally) might end
// up deleting each other's directories and leave non-retrievable files, without necessarily
// causing an exception. That results in very subtle issues, like output files looking as if
// they are not getting created.
// we acquire the lock interruptibly here, to make sure that concurrent threads waiting
// here can cancel faster
try {
OUTPUT_DIRECTORY_INIT_LOCK.lockInterruptibly();
} catch (InterruptedException e) {
// restore the interruption state
Thread.currentThread().interrupt();
// leave the method - we don't have the lock anyways
throw new IOException("The thread was interrupted while trying to initialize the output directory");
}
try {
// check if path exists
if (exists(outPath)) {
// path exists, check write mode
switch (writeMode) {
case NO_OVERWRITE :
// file or directory may not be overwritten
throw new IOException(((("File or directory already exists. Existing files and directories are not overwritten in " + WriteMode.NO_OVERWRITE.name())
+ " mode. Use ") + WriteMode.OVERWRITE.name())
+ " mode to overwrite existing files and directories.");
case OVERWRITE :
// output path exists. We delete it and all contained files in case of a
// directory.
try {
delete(outPath, true);
} catch (IOException e) {
// Some other thread might already have deleted the path.
// If - for some other reason - the path could not be deleted,
// this will be handled later.
}
break;
default :
throw new IllegalArgumentException("Invalid write mode: " + writeMode);
}
}
if (createDirectory) {
// Output directory needs to be created
try {
if (!exists(outPath)) {
mkdirs(outPath);
}
} catch (IOException ioe) {
// Some other thread might already have created the directory.
// If - for some other reason - the directory could not be created
// and the path does not exist, this will be handled later.
}
// double check that the output directory exists
return exists(outPath) && getFileStatus(outPath).isDir();
} else {
// single file case: check that the output path does not exist and
// an output file can be created by the output format.
return !exists(outPath);
}
} finally {
OUTPUT_DIRECTORY_INIT_LOCK.unlock();
}
} | 3.26 |
flink_FileSystem_create_rdh | /**
* Opens an FSDataOutputStream at the indicated Path.
*
* @param f
* the file name to open
* @param overwrite
* if a file with this name already exists, then if true, the file will be
* overwritten, and if false an error will be thrown.
* @throws IOException
* Thrown, if the stream could not be opened because of an I/O, or because a
* file already exists at that path and the write mode indicates to not overwrite the file.
* @deprecated Use {@link #create(Path, WriteMode)} instead.
*/
@Deprecated
public FSDataOutputStream create(Path f, boolean overwrite) throws IOException {
return create(f, overwrite ? WriteMode.OVERWRITE : WriteMode.NO_OVERWRITE);
} | 3.26 |
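A hedged sketch using the WriteMode overload that the deprecated method above delegates to; the path is illustrative and imports are omitted.

```java
FileSystem fs = FileSystem.getLocalFileSystem();
Path outFile = new Path("/tmp/result.txt");
try (FSDataOutputStream out = fs.create(outFile, WriteMode.OVERWRITE)) {
    // write bytes to the stream ...
}
```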
flink_HsBufferContext_release_rdh | /**
* Mark buffer status to release.
*/
public void release() {
if (isReleased()) {
return;
}
released = true;
// decrease ref count when buffer is released from memory.
buffer.recycleBuffer();
} | 3.26 |
flink_InternalWindowProcessFunction_open_rdh | /**
* Initialization method for the function. It is called before the actual working methods.
*/
public void open(Context<K, W> ctx) throws Exception {
this.ctx = ctx;
this.windowAssigner.open(ctx);
} | 3.26 |
flink_InternalWindowProcessFunction_close_rdh | /**
* The tear-down method of the function. It is called after the last call to the main working
* methods.
*/
public void close() throws Exception {
} | 3.26 |
flink_InternalWindowProcessFunction_cleanupTime_rdh | /**
* Returns the cleanup time for a window, which is {@code window.maxTimestamp +
 * allowedLateness}. In case this leads to a value greater than {@link Long#MAX_VALUE} then a
* cleanup time of {@link Long#MAX_VALUE} is returned.
*
* @param window
* the window whose cleanup time we are computing.
*/
private long cleanupTime(W window) {
if (windowAssigner.isEventTime()) {
long cleanupTime = window.maxTimestamp() + f0;
return cleanupTime >= window.maxTimestamp() ? cleanupTime : Long.MAX_VALUE;
} else {
return window.maxTimestamp();
}
} | 3.26 |
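A small worked example of the overflow guard in cleanupTime above, with illustrative values:

```java
long maxTimestamp = Long.MAX_VALUE - 1_000L;
long allowedLateness = 5_000L;
long cleanup = maxTimestamp + allowedLateness;                 // overflows to a negative value
long effective = cleanup >= maxTimestamp ? cleanup : Long.MAX_VALUE;
// effective == Long.MAX_VALUE: cleanup is pushed to "never" instead of landing in the past
```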
flink_InternalWindowProcessFunction_isCleanupTime_rdh | /**
* Returns {@code true} if the given time is the cleanup time for the given window.
*/
protected final boolean isCleanupTime(W window, long time) {
return time == toEpochMillsForTimer(cleanupTime(window), ctx.getShiftTimeZone());
} | 3.26 |
flink_InternalWindowProcessFunction_isWindowLate_rdh | /**
* Returns {@code true} if the watermark is after the end timestamp plus the allowed lateness of
* the given window.
*/
protected boolean isWindowLate(W window) {
return windowAssigner.isEventTime() && (toEpochMillsForTimer(cleanupTime(window), ctx.getShiftTimeZone()) <= ctx.currentWatermark());
} | 3.26 |
flink_PythonShellParser_constructYarnOption_rdh | /**
 * Constructs yarn options. The Python shell option is prefixed with 'y' to align with the yarn
 * options of `flink run`.
*
* @param options
* Options that will be used in `flink run`.
* @param yarnOption
* Python shell yarn options.
* @param commandLine
* Parsed Python shell parser options.
*/
private static void constructYarnOption(List<String> options, Option yarnOption, CommandLine commandLine) {
if (commandLine.hasOption(yarnOption.getOpt())) {
options.add("-y" + yarnOption.getOpt());
options.add(commandLine.getOptionValue(yarnOption.getOpt()));
}
} | 3.26 |
flink_PythonShellParser_parseLocal_rdh | /**
* Parses Python shell options and transfer to options which will be used in `java` to exec a
* flink job in local mini cluster.
*
* @param args
* Python shell options.
* @return Options used in `java` run.
*/
static List<String> parseLocal(String[] args) {
String[] params = new String[args.length - 1];
System.arraycopy(args, 1, params, 0, params.length);
CommandLine commandLine = parse(LOCAL_OPTIONS, params);
if (commandLine.hasOption(OPTION_HELP.getOpt())) {
printLocalHelp();
System.exit(0);
}
List<String> options = new ArrayList<>();
options.add("local");
return options;
} | 3.26 |
flink_PythonShellParser_printError_rdh | /**
* Prints the error message and help for the client.
*
* @param msg
* error message
*/
private static void printError(String msg) {
System.err.println(msg);
System.err.println("Valid cluster type are \"local\", \"remote <hostname> <portnumber>\", \"yarn\".");
System.err.println();
System.err.println("Specify the help option (-h or --help) to get help on the command.");
} | 3.26 |
flink_PythonShellParser_parseRemote_rdh | /**
* Parses Python shell options and transfer to options which will be used in `flink run -m
* ${jobmanager_address}` to submit flink job in a remote jobmanager. The Python shell options
* "remote ${hostname} ${portnumber}" will be transferred to "-m ${hostname}:${portnumber}".
*
* @param args
* Python shell options.
* @return Options used in `flink run`.
*/
static List<String> parseRemote(String[] args) {
if (args.length < 3) {
System.err.println("Specifies the <hostname> <portnumber> in 'remote' mode");printRemoteHelp();
System.exit(0);
}
String[] params = new String[args.length - 3];
System.arraycopy(args, 3, params, 0, params.length);
CommandLine commandLine = parse(REMOTE_OPTIONS, params);
if (commandLine.hasOption(OPTION_HELP.getOpt())) {
printRemoteHelp();
System.exit(0);
}
String host = args[1];
String port = args[2];
List<String> options = new ArrayList<>();
options.add(args[0]);
options.add("-m");
options.add((host + ":") + port); return options;
} | 3.26 |
flink_PythonShellParser_printHelp_rdh | /**
* Prints the help for the client.
*/
private static void printHelp() {
System.out.print("Flink Python Shell\n");
System.out.print("Usage: pyflink-shell.sh [local|remote|yarn] [options] <args>...\n");
System.out.print('\n');
printLocalHelp();
printRemoteHelp();
printYarnHelp();
System.out.println("-h | --help");
System.out.println(" Prints this usage text");
System.exit(0);
} | 3.26 |
flink_BufferReaderWriterUtil_writeBuffer_rdh | // ------------------------------------------------------------------------
// ByteBuffer read / write
// ------------------------------------------------------------------------
static boolean writeBuffer(Buffer buffer, ByteBuffer memory) {
final int bufferSize = buffer.getSize();
if (memory.remaining() < (bufferSize + HEADER_LENGTH)) {
return false;
}
memory.putShort(buffer.isBuffer() ? HEADER_VALUE_IS_BUFFER : HEADER_VALUE_IS_EVENT);
memory.putShort(buffer.isCompressed() ? BUFFER_IS_COMPRESSED : BUFFER_IS_NOT_COMPRESSED);
memory.putInt(bufferSize);
memory.put(buffer.getNioBufferReadable());
return true;
} | 3.26 |
flink_BufferReaderWriterUtil_configureByteBuffer_rdh | // ------------------------------------------------------------------------
// Utils
// ------------------------------------------------------------------------
static void configureByteBuffer(ByteBuffer buffer) {
buffer.order(ByteOrder.nativeOrder());
} | 3.26 |
flink_BufferReaderWriterUtil_writeToByteChannel_rdh | // ------------------------------------------------------------------------
// ByteChannel read / write
// ------------------------------------------------------------------------
static long writeToByteChannel(FileChannel channel, Buffer buffer,
ByteBuffer[] arrayWithHeaderBuffer) throws IOException {
final ByteBuffer headerBuffer = arrayWithHeaderBuffer[0];
setByteChannelBufferHeader(buffer, headerBuffer);
final ByteBuffer dataBuffer = buffer.getNioBufferReadable();
arrayWithHeaderBuffer[1] = dataBuffer;
final long bytesExpected = HEADER_LENGTH + dataBuffer.remaining();
m1(channel, bytesExpected, arrayWithHeaderBuffer);
return bytesExpected;
} | 3.26 |
flink_BufferReaderWriterUtil_positionToNextBuffer_rdh | /**
* Skip one data buffer from the channel's current position by headerBuffer.
*/
public static void positionToNextBuffer(FileChannel channel, ByteBuffer headerBuffer) throws IOException {
headerBuffer.clear();
if (!tryReadByteBuffer(channel, headerBuffer)) {
throwCorruptDataException();
}
headerBuffer.flip();
try {
headerBuffer.getShort();
headerBuffer.getShort();
long bufferSize = headerBuffer.getInt();
channel.position(channel.position() + bufferSize);
} catch (BufferUnderflowException | IllegalArgumentException e) {
// buffer underflow if header buffer is undersized
// IllegalArgumentException if size is outside memory segment size
throwCorruptDataException();
}
} | 3.26 |
flink_DataSetFineGrainedRecoveryTestProgram_main_rdh | /**
* Program to test fine grained recovery.
*/
public class DataSetFineGrainedRecoveryTestProgram {
public static void main(String[] args) throws Exception {
final ParameterTool params = ParameterTool.fromArgs(args);
final String latchFilePath = params.getRequired("latchFilePath");
final String outputPath = params.getRequired("outputPath");
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.getConfig().setExecutionMode(ExecutionMode.BATCH_FORCED);
env.setParallelism(4);
env.generateSequence(0, 1000).map(new BlockingIncrementingMapFunction(latchFilePath)).writeAsText(outputPath, WriteMode.OVERWRITE).setParallelism(1);
env.execute();
}
} | 3.26 |
flink_AbstractRichFunction_open_rdh | // --------------------------------------------------------------------------------------------
// Default life cycle methods
// --------------------------------------------------------------------------------------------
@Override
public void open(Configuration parameters) throws Exception {
} | 3.26 |
flink_SerializedJobExecutionResult_getNetRuntime_rdh | /**
* Gets the net execution time of the job, i.e., the execution time in the parallel system,
* without the pre-flight steps like the optimizer, expressed in the desired time unit.
*
* @param desiredUnit
* the unit of the <tt>NetRuntime</tt>
* @return The net execution time in the desired unit.
*/
public long getNetRuntime(TimeUnit desiredUnit) {
return desiredUnit.convert(getNetRuntime(), TimeUnit.MILLISECONDS);
} | 3.26 |
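A brief usage sketch of the conversion above, assuming the job is run through a StreamExecutionEnvironment whose execute() returns a JobExecutionResult; the job name and printed output are illustrative.

import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RuntimeReport {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3).print();

        JobExecutionResult result = env.execute("runtime-report");
        // The stored millisecond runtime is converted to the requested unit.
        long seconds = result.getNetRuntime(TimeUnit.SECONDS);
        System.out.println("net runtime: " + seconds + "s");
    }
}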
flink_CompletedCheckpointStats_getExternalPath_rdh | // Completed checkpoint specific methods
// ------------------------------------------------------------------------
/**
* Returns the external pointer of this checkpoint.
*/
public String getExternalPath() {
return externalPointer;
} | 3.26 |
flink_SessionWithGapOnTime_as_rdh | /**
* Assigns an alias for this window that the following {@code groupBy()} and {@code select()}
* clauses can refer to. The {@code select()} statement can access window properties such as window
* start or end time.
*
* @param alias
* alias for this window
* @return this window
*/
public SessionWithGapOnTimeWithAlias as(String alias) {
return as(unresolvedRef(alias));
} | 3.26 |
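A minimal Table API sketch showing how the alias set above is referenced later in groupBy() and select(); it assumes a table with an event-time attribute named rowtime and columns user and amount, all of which are illustrative.

import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.lit;

import org.apache.flink.table.api.Session;
import org.apache.flink.table.api.Table;

public class SessionWindowSketch {
    public static Table aggregate(Table orders) {
        return orders
                // Alias the session window as "w" so later clauses can refer to it.
                .window(Session.withGap(lit(10).minutes()).on($("rowtime")).as("w"))
                .groupBy($("w"), $("user"))
                .select($("user"), $("w").start(), $("w").end(), $("amount").sum());
    }
}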
flink_LongZeroConvergence_m0_rdh | /**
* Returns true, if the aggregator value is zero, false otherwise.
*
* @param iteration
* The number of the iteration superstep. Ignored in this case.
* @param value
* The aggregator value, which is compared to zero.
* @return True, if the aggregator value is zero, false otherwise.
*/
@Override
public boolean m0(int iteration, LongValue value) {
return value.getValue() == 0;
} | 3.26 |
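For illustration, a zero-convergence check like the one above is normally registered on a DataSet iteration together with an aggregator. A hedged sketch, assuming the obfuscated m0 corresponds to ConvergenceCriterion.isConverged; the aggregator name and step function are made up.

import org.apache.flink.api.common.aggregators.ConvergenceCriterion;
import org.apache.flink.api.common.aggregators.LongSumAggregator;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.IterativeDataSet;
import org.apache.flink.types.LongValue;

public class ConvergenceSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        IterativeDataSet<Long> iteration = env.generateSequence(0, 100).iterate(50);

        // Stop early once the aggregated long value reaches zero, mirroring the check above.
        // In a real job the step function would update the "updates" aggregator via the
        // IterationRuntimeContext; this sketch only shows the registration.
        ConvergenceCriterion<LongValue> zeroCheck = (superstep, value) -> value.getValue() == 0;
        iteration.registerAggregationConvergenceCriterion("updates", new LongSumAggregator(), zeroCheck);

        DataSet<Long> step = iteration.map(x -> x / 2);
        iteration.closeWith(step).print();
    }
}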
flink_MapPartitionOperatorBase_m0_rdh | // --------------------------------------------------------------------------------------------
@Override
protected List<OUT> m0(List<IN> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
MapPartitionFunction<IN, OUT> function = this.userFunction.getUserCodeObject();
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
ArrayList<OUT> result = new ArrayList<OUT>(inputData.size() / 4);
TypeSerializer<IN> inSerializer = getOperatorInfo().getInputType().createSerializer(executionConfig);
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
CopyingIterator<IN> source = new CopyingIterator<IN>(inputData.iterator(), inSerializer);
CopyingListCollector<OUT> resultCollector = new CopyingListCollector<OUT>(result, outSerializer);
function.mapPartition(source, resultCollector);
result.trimToSize();
FunctionUtils.closeFunction(function);
return result;
} | 3.26 |
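As a user-facing counterpart to the collection-execution driver above, mapPartition is invoked on a DataSet with a MapPartitionFunction; a small hedged sketch that emits one count per partition (the sequence range and names are illustrative).

import org.apache.flink.api.common.functions.MapPartitionFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.util.Collector;

public class CountPerPartition {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.generateSequence(1, 1000)
                .mapPartition(new MapPartitionFunction<Long, Long>() {
                    @Override
                    public void mapPartition(Iterable<Long> values, Collector<Long> out) {
                        long count = 0;
                        for (Long ignored : values) {
                            count++;
                        }
                        // One output record per partition instead of one per input record.
                        out.collect(count);
                    }
                })
                .print();
    }
}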
flink_Dispatcher_getShutDownFuture_rdh | // ------------------------------------------------------
public CompletableFuture<ApplicationStatus> getShutDownFuture() {
return shutDownFuture;
} | 3.26 |
flink_Dispatcher_terminateRunningJobs_rdh | /**
* Terminate all currently running {@link JobManagerRunner}s.
*/
private void terminateRunningJobs() {
log.info("Stopping all currently running jobs of dispatcher {}.", getAddress());
final Set<JobID> jobsToRemove = jobManagerRunnerRegistry.getRunningJobIds();
for (JobID jobId : jobsToRemove) {
terminateJob(jobId);
}
} | 3.26 |
flink_Dispatcher_onStart_rdh | // ------------------------------------------------------
// Lifecycle methods
// ------------------------------------------------------
@Override
public void onStart() throws Exception {
try {
startDispatcherServices();
} catch (Throwable t) {
final DispatcherException exception = new DispatcherException(String.format("Could not start the Dispatcher %s", getAddress()), t);
onFatalError(exception);
throw exception;
}
startCleanupRetries();
startRecoveredJobs();
this.dispatcherBootstrap = this.dispatcherBootstrapFactory.create(getSelfGateway(DispatcherGateway.class), this.getRpcService().getScheduledExecutor(), this::onFatalError);
} | 3.26 |
flink_Dispatcher_submitJob_rdh | // ------------------------------------------------------
// RPCs
// ------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> submitJob(JobGraph jobGraph, Time timeout) {
final JobID jobID = jobGraph.getJobID();
log.info("Received JobGraph submission '{}' ({}).", jobGraph.getName(), jobID);return isInGloballyTerminalState(jobID).thenComposeAsync(isTerminated -> {
if (isTerminated) {
log.warn(("Ignoring JobGraph submission '{}' ({}) because the job already " + "reached a globally-terminal state (i.e. {}) in a ") + "previous execution.", jobGraph.getName(), jobID, Arrays.stream(JobStatus.values()).filter(JobStatus::isGloballyTerminalState).map(JobStatus::name).collect(Collectors.joining(", ")));
return FutureUtils.completedExceptionally(DuplicateJobSubmissionException.ofGloballyTerminated(jobID));
} else if (jobManagerRunnerRegistry.isRegistered(jobID) || submittedAndWaitingTerminationJobIDs.contains(jobID)) {
// job with the given jobID is not terminated, yet
return FutureUtils.completedExceptionally(DuplicateJobSubmissionException.of(jobID));
} else if (isPartialResourceConfigured(jobGraph)) {
return FutureUtils.completedExceptionally(new JobSubmissionException(jobID, ("Currently jobs is not supported if parts of the vertices " + "have resources configured. The limitation will be ") + "removed in future versions."));
} else {
return internalSubmitJob(jobGraph);
}
}, getMainThreadExecutor());
} | 3.26 |
flink_Dispatcher_getJobMasterGateway_rdh | /**
* Ensures that the JobMasterGateway is available.
*/
private CompletableFuture<JobMasterGateway> getJobMasterGateway(JobID jobId) {
if (!jobManagerRunnerRegistry.isRegistered(jobId)) {
return FutureUtils.completedExceptionally(new FlinkJobNotFoundException(jobId));
}
final JobManagerRunner job = jobManagerRunnerRegistry.get(jobId);
if (!job.isInitialized()) {
return FutureUtils.completedExceptionally(new UnavailableDispatcherOperationException("Unable to get JobMasterGateway for initializing job. " + "The requested operation is not available while the JobManager is initializing."));
}
return job.getJobMasterGateway();
} | 3.26 |
flink_Dispatcher_createDirtyJobResultEntryIfMissingAsync_rdh | /**
* Creates a dirty entry in the {@link #jobResultStore} if there's no entry at all for the given
* {@code executionGraph} in the {@code JobResultStore}.
*
* @param executionGraph
* The {@link AccessExecutionGraph} for which the {@link JobResult} shall
* be persisted.
* @param hasCleanJobResultEntry
* The decision the dirty entry check is based on.
* @return {@code CompletableFuture} that completes as soon as the entry exists.
*/
private CompletableFuture<Void> createDirtyJobResultEntryIfMissingAsync(AccessExecutionGraph executionGraph, boolean hasCleanJobResultEntry) {
final JobID jobId = executionGraph.getJobID();
if (hasCleanJobResultEntry) {
log.warn("Job {} is already marked as clean but clean up was triggered again.", jobId);
return FutureUtils.completedVoidFuture();
} else {
return jobResultStore.hasDirtyJobResultEntryAsync(jobId).thenCompose(hasDirtyJobResultEntry -> createDirtyJobResultEntryAsync(executionGraph, hasDirtyJobResultEntry));
}
}
/**
* Creates a dirty entry in the {@link #jobResultStore} based on the passed {@code hasDirtyJobResultEntry} flag.
*
* @param executionGraph
* The {@link AccessExecutionGraph} that is used to generate the entry.
* @param hasDirtyJobResultEntry
* The decision the entry creation is based on.
* @return {@code CompletableFuture} | 3.26 |
flink_TypeSerializerSingleton_m0_rdh | // --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSingleton<T> m0() {
return this;
} | 3.26 |
flink_GlobalWindows_m0_rdh | /**
* Creates a new {@code GlobalWindows} {@link WindowAssigner} that assigns all elements to the
* same {@link GlobalWindow}.
*
* @return The global window policy.
*/
public static GlobalWindows m0() {
return new GlobalWindows();
} | 3.26 |
flink_IterativeStream_closeWith_rdh | /**
* Closes the iteration. This method defines the end of the iterative program part that will
* be fed back to the start of the iteration as the second input in the {@link ConnectedStreams}.
*
* @param feedbackStream
* {@link DataStream} that will be used as second input to the
* iteration head.
* @return The feedback stream.
*/
public DataStream<F> closeWith(DataStream<F> feedbackStream) {
Collection<Transformation<?>> predecessors = feedbackStream.getTransformation().getTransitivePredecessors();
if (!predecessors.contains(this.coFeedbackTransformation)) {
throw new UnsupportedOperationException("Cannot close an iteration with a feedback DataStream that does not originate from said iteration.");
}
coFeedbackTransformation.addFeedbackEdge(feedbackStream.getTransformation());
return feedbackStream;
} | 3.26 |
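A hedged usage sketch of the iterate()/closeWith() pattern documented above: the feedback edge must be derived from the IterativeStream itself, otherwise the check in the snippet throws. Values and functions are illustrative.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.IterativeStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class IterationSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Long> input = env.fromElements(5L, 10L, 20L);

        IterativeStream<Long> iteration = input.iterate();
        DataStream<Long> step = iteration.map(value -> value - 1);

        // Values still greater than zero are fed back to the iteration head.
        iteration.closeWith(step.filter(value -> value > 0));
        // Values that reached zero leave the loop.
        step.filter(value -> value <= 0).print();

        env.execute("iteration-sketch");
    }
}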
flink_IterativeStream_withFeedbackType_rdh | /**
* Changes the feedback type of the iteration and allows the user to apply co-transformations on
* the input and feedback stream, as in a {@link ConnectedStreams}.
*
* <p>For type safety the user needs to define the feedback type
*
* @param feedbackType
* The type information of the feedback stream.
* @return A {@link ConnectedIterativeStreams}.
*/
public <F> ConnectedIterativeStreams<T, F> withFeedbackType(TypeInformation<F> feedbackType) {
return new ConnectedIterativeStreams<>(originalInput, feedbackType, maxWaitTime);
}
/**
* The {@link ConnectedIterativeStreams} represent a start of an iterative part of a streaming
* program, where the original input of the iteration and the feedback of the iteration are
* connected as in a {@link ConnectedStreams} | 3.26 |
flink_TextInputFormat_getCharsetName_rdh | // --------------------------------------------------------------------------------------------
public String getCharsetName() {
return charsetName;
} | 3.26 |
flink_TextInputFormat_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return (("TextInputFormat (" + Arrays.toString(getFilePaths())) + ") - ") + this.charsetName;
} | 3.26 |
flink_TextInputFormat_readRecord_rdh | // --------------------------------------------------------------------------------------------
@Override
public String readRecord(String reusable, byte[] bytes, int offset, int numBytes) throws IOException {
// Check if \n is used as delimiter and the end of this line is a \r, then remove \r from
// the line
if (((((this.getDelimiter() != null) && (this.getDelimiter().length == 1)) && (this.getDelimiter()[0] == NEW_LINE)) && ((offset + numBytes) >= 1)) && (bytes[(offset + numBytes) - 1] == CARRIAGE_RETURN)) {
numBytes -= 1;
}
return new String(bytes, offset, numBytes, this.charsetName);
} | 3.26 |
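The carriage-return handling above can be reproduced outside Flink; the following self-contained sketch mirrors the trimming logic for a record that was split on '\n' but still carries a trailing '\r' (class name and charset choice are illustrative).

import java.nio.charset.StandardCharsets;

public class TrimCarriageReturn {

    static String decodeLine(byte[] bytes, int offset, int numBytes) {
        // Drop a trailing '\r' left over from a Windows-style "\r\n" line ending.
        if (numBytes >= 1 && bytes[offset + numBytes - 1] == '\r') {
            numBytes -= 1;
        }
        return new String(bytes, offset, numBytes, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        byte[] record = "hello world\r".getBytes(StandardCharsets.UTF_8);
        System.out.println("[" + decodeLine(record, 0, record.length) + "]"); // prints [hello world]
    }
}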
flink_TextInputFormat_configure_rdh | // --------------------------------------------------------------------------------------------
@Override
public void configure(Configuration parameters) {
super.configure(parameters);
if ((charsetName == null) || (!Charset.isSupported(charsetName))) {
throw new RuntimeException("Unsupported charset: " + charsetName);
}
} | 3.26 |
flink_MultiStateKeyIterator_remove_rdh | /**
* Removes the current key from <b>ALL</b> known states in the state backend.
*/
@Override
public void remove() {
if (currentKey == null) {
return;
}
for (StateDescriptor<?, ?> descriptor : descriptors) {
try {
State state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, descriptor);
state.clear();
} catch (Exception e) {
throw new RuntimeException("Failed to drop partitioned state from state backend", e);}
}
} | 3.26 |
flink_ResultPartitionFactory_isOverdraftBufferNeeded_rdh | /**
* Returns whether this result partition needs the overdraft buffer.
*/
private static boolean isOverdraftBufferNeeded(ResultPartitionType resultPartitionType) {
// Only pipelined / pipelined-bounded partition needs overdraft buffer. More
// specifically, there is no reason to request more buffers for non-pipelined (i.e.
// batch) shuffle. The reasons are as follows:
// 1. For BoundedBlockingShuffle, each full buffer will be directly released.
// 2. For SortMergeShuffle, the maximum capacity of buffer pool is 4 * numSubpartitions. It
// is efficient enough to spill this part of memory to disk.
// 3. For Hybrid Shuffle, the buffer pool is unbounded. If it can't get a normal buffer, it
// also can't get an overdraft buffer.
return resultPartitionType.isPipelinedOrPipelinedBoundedResultPartition();
} | 3.26 |
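The decision above boils down to a predicate on the partition type. A standalone sketch with a hypothetical enum standing in for Flink's ResultPartitionType (the real class carries these flags itself):

public class OverdraftSketch {

    // Hypothetical stand-in for Flink's ResultPartitionType.
    enum PartitionType {
        PIPELINED, PIPELINED_BOUNDED, BLOCKING, HYBRID_FULL;

        boolean isPipelinedOrPipelinedBounded() {
            return this == PIPELINED || this == PIPELINED_BOUNDED;
        }
    }

    static boolean isOverdraftBufferNeeded(PartitionType type) {
        // Only pipelined(-bounded) partitions benefit from overdraft buffers; blocking and
        // hybrid shuffles manage their buffer demand differently, as explained above.
        return type.isPipelinedOrPipelinedBounded();
    }

    public static void main(String[] args) {
        for (PartitionType type : PartitionType.values()) {
            System.out.println(type + " -> " + isOverdraftBufferNeeded(type));
        }
    }
}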