name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
flink_ZooKeeperStateHandleStore_hasLock_rdh | // ---------------------------------------------------------------------------------------------------------
// Protected methods
// ---------------------------------------------------------------------------------------------------------
/**
* Checks whether a lock is created for this instance on the passed ZooKeeper node.
*
* @param rootPath
* The node that shall be checked.
* @return {@code true} if the lock exists; {@code false} otherwise.
 */
private boolean hasLock(String rootPath) throws Exception {
final String normalizedRootPath = normalizePath(rootPath);
try {
return client.checkExists().forPath(getInstanceLockPath(normalizedRootPath)) != null;
} catch (KeeperException.NoNodeException e) {
// this is the case if the node is marked for deletion or already deleted
return false;
}
} | 3.26 |
flink_ZooKeeperStateHandleStore_writeStoreHandleTransactionally_rdh | // this method is provided for the sole purpose of easier testing
@VisibleForTesting
void writeStoreHandleTransactionally(String path, byte[] serializedStoreHandle) throws Exception {
// Write state handle (not the actual state) to ZooKeeper. This is expected to be smaller
// than the state itself. This level of indirection makes sure that data in ZooKeeper is
// small, because ZooKeeper is designed for data in the KB range, but the state can be
// larger. Create the lock node in a transaction with the actual state node. That way we can
// prevent race conditions with a concurrent delete operation.
client.inTransaction()
        .create().withMode(CreateMode.PERSISTENT).forPath(path, serializedStoreHandle)
        .and()
        .create().withMode(CreateMode.PERSISTENT).forPath(getRootLockPath(path))
        .and()
        .create().withMode(CreateMode.EPHEMERAL).forPath(getInstanceLockPath(path))
        .and()
        .commit();
} | 3.26 |
flink_FsStateBackend_getBasePath_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
/**
* Gets the base directory where all the checkpoints are stored. The job-specific checkpoint
* directory is created inside this directory.
*
* @return The base directory for checkpoints.
* @deprecated Deprecated in favor of {@link #getCheckpointPath()}.
*/
@Deprecated
public Path getBasePath() {
return getCheckpointPath();
} | 3.26 |
flink_FsStateBackend_toString_rdh | // ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "File State Backend (" + "checkpoints: '" + getCheckpointPath() + "', savepoints: '" + getSavepointPath() + ", fileStateThreshold: " + fileStateThreshold + ")";
} | 3.26 |
flink_FsStateBackend_createCheckpointStorage_rdh | // ------------------------------------------------------------------------
// initialization and cleanup
// ------------------------------------------------------------------------
@Override
public CheckpointStorageAccess createCheckpointStorage(JobID jobId) throws IOException {
checkNotNull(jobId, "jobId");
return new FsCheckpointStorageAccess(getCheckpointPath(), getSavepointPath(), jobId, getMinFileSizeThreshold(), getWriteBufferSize());
} | 3.26 |
flink_FsStateBackend_createKeyedStateBackend_rdh | // ------------------------------------------------------------------------
// state holding structures
// ------------------------------------------------------------------------
@Override
public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(Environment env, JobID jobID, String operatorIdentifier, TypeSerializer<K> keySerializer, int numberOfKeyGroups, KeyGroupRange keyGroupRange, TaskKvStateRegistry kvStateRegistry, TtlTimeProvider ttlTimeProvider, MetricGroup metricGroup, @Nonnull Collection<KeyedStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry) throws BackendBuildingException {
TaskStateManager taskStateManager = env.getTaskStateManager();
LocalRecoveryConfig localRecoveryConfig = taskStateManager.createLocalRecoveryConfig();
HeapPriorityQueueSetFactory priorityQueueSetFactory = new HeapPriorityQueueSetFactory(keyGroupRange, numberOfKeyGroups, 128);
LatencyTrackingStateConfig v5 = latencyTrackingConfigBuilder.setMetricGroup(metricGroup).build();
return new HeapKeyedStateBackendBuilder<>(kvStateRegistry, keySerializer, env.getUserCodeClassLoader().asClassLoader(), numberOfKeyGroups, keyGroupRange, env.getExecutionConfig(), ttlTimeProvider, v5, stateHandles, AbstractStateBackend.getCompressionDecorator(env.getExecutionConfig()), localRecoveryConfig, priorityQueueSetFactory, m0(), cancelStreamRegistry).build();
} | 3.26 |
flink_FsStateBackend_getWriteBufferSize_rdh | /**
* Gets the write buffer size for created checkpoint stream.
*
* <p>If not explicitly configured, this is the default value of {@link CheckpointingOptions#FS_WRITE_BUFFER_SIZE}.
*
* @return The write buffer size, in bytes.
*/
public int getWriteBufferSize() {
return writeBufferSize >= 0 ? writeBufferSize : CheckpointingOptions.FS_WRITE_BUFFER_SIZE.defaultValue();
} | 3.26 |
flink_FsStateBackend_m0_rdh | /**
* Gets whether the key/value data structures are asynchronously snapshotted, which is always
* true for this state backend.
*/
public boolean m0() {
return true;
} | 3.26 |
flink_FsStateBackend_getCheckpointPath_rdh | /**
* Gets the base directory where all the checkpoints are stored. The job-specific checkpoint
* directory is created inside this directory.
*
* @return The base directory for checkpoints.
*/
@Nonnull
@Override
public Path getCheckpointPath() {
// we know that this can never be null by the way of constructor checks
// noinspection ConstantConditions
return super.getCheckpointPath();
} | 3.26 |
flink_FsStateBackend_getMinFileSizeThreshold_rdh | /**
* Gets the threshold below which state is stored as part of the metadata, rather than in files.
* This threshold ensures that the backend does not create a large amount of very small files,
* where potentially the file pointers are larger than the state itself.
*
* <p>If not explicitly configured, this is the default value of {@link CheckpointingOptions#FS_SMALL_FILE_THRESHOLD}.
*
* @return The file size threshold, in bytes.
*/
public int getMinFileSizeThreshold() {
return fileStateThreshold >= 0 ? fileStateThreshold : MathUtils.checkedDownCast(FS_SMALL_FILE_THRESHOLD.defaultValue().getBytes());
} | 3.26 |
flink_FsStateBackend_configure_rdh | // ------------------------------------------------------------------------
// Reconfiguration
// ------------------------------------------------------------------------
/**
* Creates a copy of this state backend that uses the values defined in the configuration for
 * fields that were not specified in this state backend.
*
* @param config
* the configuration
* @return The re-configured variant of the state backend
*/
@Override
public FsStateBackend configure(ReadableConfig config, ClassLoader classLoader) {
    return new FsStateBackend(this, config, classLoader);
} | 3.26 |
flink_CompiledPlan_printJsonString_rdh | /**
* Like {@link #asJsonString()}, but prints the result to {@link System#out}.
*/
default CompiledPlan printJsonString() {
System.out.println(this.asJsonString());
return this;
} | 3.26 |
flink_CompiledPlan_writeToFile_rdh | /**
* Writes this plan to a file using the JSON representation. This operation will fail if the
* file already exists, even if the content is different from this plan.
*
* @param file
* the target file
* @throws TableException
* if the file cannot be written.
*/
default void writeToFile(File file) {
writeToFile(file, false);
} | 3.26 |
flink_ThreadInfoSample_m0_rdh | /**
* Constructs a collection of {@link ThreadInfoSample}s from a collection of {@link ThreadInfo}
* samples.
*
* @param threadInfos
* the collection of {@link ThreadInfo}.
* @return the collection of the corresponding {@link ThreadInfoSample}s.
*/
public static Map<Long, ThreadInfoSample> m0(Collection<ThreadInfo> threadInfos) {
return threadInfos.stream().collect(Collectors.toMap(ThreadInfo::getThreadId, threadInfo -> new ThreadInfoSample(threadInfo.getThreadState(), threadInfo.getStackTrace())));
} | 3.26 |
flink_RecordWriter_close_rdh | /**
* This is used to broadcast streaming Watermarks in-band with records.
 */
public abstract void broadcastEmit(T record) throws IOException;
/**
* Closes the writer. This stops the flushing thread (if there is one).
*/
public void close() {
// make sure we terminate the thread in any case
if (outputFlusher != null) {
outputFlusher.terminate();
try {
outputFlusher.join();
} catch (InterruptedException e) {
// ignore on close
// restore interrupt flag to fast exit further blocking calls
Thread.currentThread().interrupt();
}
}
} | 3.26 |
flink_RecordWriter_notifyFlusherException_rdh | /**
* Notifies the writer that the output flusher thread encountered an exception.
*
* @param t
* The exception to report.
*/
private void notifyFlusherException(Throwable t) {
if (flusherException == null) {
LOG.error("An exception happened while flushing the outputs", t);
flusherException = t;
volatileFlusherException = t;
}
} | 3.26 |
flink_RecordWriter_setMetricGroup_rdh | /**
* Sets the metric group for this RecordWriter.
*/
public void setMetricGroup(TaskIOMetricGroup metrics) {
targetPartition.setMetricGroup(metrics);
} | 3.26 |
flink_RecordWriter_m2_rdh | /**
* This is used to send LatencyMarks to a random target channel.
*/
public void m2(T record) throws IOException {
checkErroneous();
int targetSubpartition = rng.nextInt(numberOfChannels);
emit(record, targetSubpartition);
} | 3.26 |
flink_ThrowingRunnable_unchecked_rdh | /**
* Converts a {@link ThrowingRunnable} into a {@link Runnable} which throws all checked
* exceptions as unchecked.
*
* @param throwingRunnable
* to convert into a {@link Runnable}
* @return {@link Runnable} which throws all checked exceptions as unchecked.
 */
static Runnable unchecked(ThrowingRunnable<?> throwingRunnable) {
return () -> {
try {
throwingRunnable.run();
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
};
} | 3.26 |
flink_RocksDBPriorityQueueConfig_getRocksDBPriorityQueueSetCacheSize_rdh | /**
* Gets the cache size of rocksDB priority queue set. It will fall back to the default value if
* it is not explicitly set.
*/
public int getRocksDBPriorityQueueSetCacheSize() {
return rocksDBPriorityQueueSetCacheSize == UNDEFINED_ROCKSDB_PRIORITY_QUEUE_SET_CACHE_SIZE ? ROCKSDB_TIMER_SERVICE_FACTORY_CACHE_SIZE.defaultValue() : rocksDBPriorityQueueSetCacheSize;
} | 3.26 |
flink_RocksDBPriorityQueueConfig_getPriorityQueueStateType_rdh | /**
* Gets the type of the priority queue state. It will fall back to the default value if it is
* not explicitly set.
*/
public PriorityQueueStateType getPriorityQueueStateType() {
return priorityQueueStateType == null ? TIMER_SERVICE_FACTORY.defaultValue() : priorityQueueStateType;
} | 3.26 |
flink_ProjectedRowData_from_rdh | /**
* Create an empty {@link ProjectedRowData} starting from a {@link Projection}.
*
* <p>Throws {@link IllegalStateException} if the provided {@code projection} array contains
* nested projections, which are not supported by {@link ProjectedRowData}.
*
* @see Projection
* @see ProjectedRowData
*/
public static ProjectedRowData from(Projection projection) {
return new ProjectedRowData(projection.toTopLevelIndexes());
} | 3.26 |
flink_ProjectedRowData_getArity_rdh | // ---------------------------------------------------------------------------------------------
@Override
public int getArity() {
return indexMapping.length;
} | 3.26 |
flink_WebMonitorUtils_loadWebSubmissionExtension_rdh | /**
* Loads the {@link WebMonitorExtension} which enables web submission.
*
* @param leaderRetriever
* to retrieve the leader
* @param timeout
* for asynchronous requests
* @param responseHeaders
* for the web submission handlers
* @param localAddressFuture
* of the underlying REST server endpoint
* @param uploadDir
 * where the web submission handlers store uploaded jars
* @param executor
* to run asynchronous operations
* @param configuration
* used to instantiate the web submission extension
* @return Web submission extension
* @throws FlinkException
* if the web submission extension could not be loaded
*/
public static WebMonitorExtension loadWebSubmissionExtension(GatewayRetriever<? extends DispatcherGateway> leaderRetriever, Time timeout, Map<String, String> responseHeaders, CompletableFuture<String> localAddressFuture, Path uploadDir, Executor executor, Configuration configuration) throws FlinkException {
if (isFlinkRuntimeWebInClassPath()) {
try {
final Constructor<?> webSubmissionExtensionConstructor = Class.forName("org.apache.flink.runtime.webmonitor.WebSubmissionExtension").getConstructor(Configuration.class, GatewayRetriever.class,
Map.class, CompletableFuture.class, Path.class, Executor.class, Time.class);
return ((WebMonitorExtension) (webSubmissionExtensionConstructor.newInstance(configuration, leaderRetriever, responseHeaders, localAddressFuture, uploadDir, executor,
timeout)));
} catch (ClassNotFoundException | NoSuchMethodException | InstantiationException | InvocationTargetException | IllegalAccessException e) {
throw new FlinkException("Could not load web submission extension.", e);
}
} else {
throw new FlinkException("The module flink-runtime-web could not be found in the class path. Please add " + "this jar in order to enable web based job submission.");
}
} | 3.26 |
flink_WebMonitorUtils_find_rdh | /**
* Finds the Flink log directory using log.file Java property that is set during startup.
*/
public static LogFileLocation find(Configuration config) {
final String logEnv = "log.file";
String logFilePath = System.getProperty(logEnv);
if (logFilePath == null) {
LOG.warn("Log file environment variable '{}' is not set.", logEnv);
logFilePath = config.getString(WebOptions.LOG_PATH);
}
// not configured, cannot serve log files
if ((logFilePath == null) || (logFilePath.length() < 4)) {
LOG.warn("JobManager log files are unavailable in the web dashboard. " + "Log file location not found in environment variable '{}' or configuration key '{}'.", logEnv, WebOptions.LOG_PATH.key());
return new LogFileLocation(null, null, null);
}
String outFilePath = logFilePath.substring(0, logFilePath.length() - 3).concat("out");
File logFile = resolveFileLocation(logFilePath);
File logDir = null;
if (logFile != null) {
logDir = resolveFileLocation(logFile.getParent());
}
LOG.info("Determined location of main cluster component log file: {}", logFilePath);
LOG.info("Determined location of main cluster component stdout file: {}", outFilePath);
return new LogFileLocation(logFile, resolveFileLocation(outFilePath), logDir);
} | 3.26 |
flink_WebMonitorUtils_resolveFileLocation_rdh | /**
* Verify log file location.
*
* @param logFilePath
* Path to log file
* @return File or null if not a valid log file
*/
private static File resolveFileLocation(String logFilePath) {
File logFile = new File(logFilePath);
return logFile.exists() && logFile.canRead() ? logFile : null;
} | 3.26 |
flink_WebMonitorUtils_isFlinkRuntimeWebInClassPath_rdh | /**
* Returns {@code true} if the optional dependency {@code flink-runtime-web} is in the
* classpath.
*/
private static boolean isFlinkRuntimeWebInClassPath() {
try {
Class.forName(WEB_FRONTEND_BOOTSTRAP_CLASS_FQN);
return true;
} catch (ClassNotFoundException e) {
// class not found means that there is no flink-runtime-web in the classpath
return false;
}
} | 3.26 |
flink_MemoryMappedBoundedData_finishWrite_rdh | /**
* Finishes the current region and prevents further writes. After calling this method, further
* calls to {@link #writeBuffer(Buffer)} will fail.
*/
@Override
public void finishWrite() throws IOException {
assert currentBuffer != null;
currentBuffer.flip();
fullBuffers.add(currentBuffer);
currentBuffer = null;// fail further writes fast
file.close();// won't map further regions from now on
} | 3.26 |
flink_MemoryMappedBoundedData_createWithRegionSize_rdh | /**
* Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path. Each
* mapped region (= ByteBuffer) will be of the given size.
*/
public static MemoryMappedBoundedData createWithRegionSize(Path memMappedFilePath, int regionSize) throws IOException {
final FileChannel fileChannel = FileChannel.open(memMappedFilePath, StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
return new MemoryMappedBoundedData(memMappedFilePath, fileChannel, regionSize);
} | 3.26 |
flink_MemoryMappedBoundedData_create_rdh | // ------------------------------------------------------------------------
// Factories
// ------------------------------------------------------------------------
/**
* Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path.
*/
public static MemoryMappedBoundedData create(Path memMappedFilePath) throws IOException {
return createWithRegionSize(memMappedFilePath, Integer.MAX_VALUE);
} | 3.26 |
flink_MemoryMappedBoundedData_alignSize_rdh | /**
* Rounds the size down to the next multiple of the {@link #PAGE_SIZE}. We need to round down
* here to not exceed the original maximum size value. Otherwise, values like INT_MAX would
* round up to overflow the valid maximum size of a memory mapping region in Java.
*/
private static int alignSize(int maxRegionSize) {
checkArgument(maxRegionSize >= PAGE_SIZE);
return maxRegionSize - (maxRegionSize % PAGE_SIZE);
} | 3.26 |
flink_MemoryMappedBoundedData_getSize_rdh | /**
* Gets the number of bytes of all written data (including the metadata in the buffer headers).
*/
@Override
public long getSize() {
long size = 0L;
for (ByteBuffer bb : fullBuffers) {
size += bb.remaining();
}
if (currentBuffer != null) {
size += currentBuffer.position();
}
return size;
} | 3.26 |
flink_MemoryMappedBoundedData_close_rdh | /**
* Unmaps the file from memory and deletes the file. After calling this method, access to any
* ByteBuffer obtained from this instance will cause a segmentation fault.
*/
public void close() throws IOException {
IOUtils.closeQuietly(file);// in case we dispose before finishing writes
for (ByteBuffer bb : fullBuffers) {
PlatformDependent.freeDirectBuffer(bb);
}
fullBuffers.clear();
if (currentBuffer != null) {
PlatformDependent.freeDirectBuffer(currentBuffer);
currentBuffer = null;
}
// To make this compatible with all versions of Windows, we must wait with
// deleting the file until it is unmapped.
// See also
// https://stackoverflow.com/questions/11099295/file-flag-delete-on-close-and-memory-mapped-files/51649618#51649618
Files.delete(filePath);
} | 3.26 |
flink_WindowedOperatorTransformation_reduce_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given reducer.
*
* @param reduceFunction
* The reduce function that is used for incremental aggregation.
* @param function
* The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@Internal
public <R> BootstrapTransformation<T> reduce(ReduceFunction<T> reduceFunction, ProcessWindowFunction<T, R, K, W> function) {
// clean the closures
function = input.clean(function);
reduceFunction = input.clean(reduceFunction);
WindowOperator<K, T, ?, R, W> operator = builder.reduce(reduceFunction, function);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new BootstrapTransformation<>(input, operatorMaxParallelism, timestamper, factory, keySelector, keyType);
} | 3.26 |
flink_WindowedOperatorTransformation_process_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function
* The window function.
* @return The data stream that is the result of applying the window function to the window.
 */
@PublicEvolving
public <R> BootstrapTransformation<T> process(ProcessWindowFunction<T, R, K, W> function) {
WindowOperator<K, T, ?, R, W> operator = builder.process(function);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new BootstrapTransformation<>(input, operatorMaxParallelism, timestamper, factory, keySelector, keyType);
} | 3.26 |
flink_WindowedOperatorTransformation_apply_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function
* The window function.
* @param resultType
* Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
*/
public <R> BootstrapTransformation<T> apply(WindowFunction<T, R, K, W> function, TypeInformation<R> resultType) {
function = input.clean(function);
WindowOperator<K, T, ?, R, W> operator = builder.apply(function);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new BootstrapTransformation<>(input, operatorMaxParallelism, timestamper, factory, keySelector, keyType);
} | 3.26 |
flink_WindowedOperatorTransformation_m0_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggFunction
* The aggregate function that is used for incremental aggregation.
* @param windowFunction
* The window function.
* @return The data stream that is the result of applying the window function to the window.
* @param <ACC>
* The type of the AggregateFunction's accumulator
* @param <V>
* The type of AggregateFunction's result, and the WindowFunction's input
* @param <R>
* The type of the elements in the resulting stream, equal to the WindowFunction's
* result type
*/
@PublicEvolving
public <ACC, V, R> BootstrapTransformation<T> m0(AggregateFunction<T, ACC, V> aggFunction, ProcessWindowFunction<V, R, K, W> windowFunction) {
checkNotNull(aggFunction, "aggFunction");
checkNotNull(windowFunction, "windowFunction");
TypeInformation<ACC> accumulatorType = TypeExtractor.getAggregateFunctionAccumulatorType(aggFunction, input.getType(), null, false);
return aggregate(aggFunction, windowFunction, accumulatorType);
} | 3.26 |
flink_WindowedOperatorTransformation_evictor_rdh | /**
* Sets the {@code Evictor} that should be used to evict elements from a window before emission.
*
 * <p>Note: When using an evictor, window performance will degrade significantly, since
* incremental aggregation of window results cannot be used.
*/
@PublicEvolving
public WindowedOperatorTransformation<T, K, W> evictor(Evictor<? super T, ? super W> evictor) {
builder.evictor(evictor);
return this;
} | 3.26 |
flink_WindowedOperatorTransformation_aggregate_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggregateFunction
* The aggregation function that is used for incremental aggregation.
* @param windowFunction
* The window function.
* @param accumulatorType
* Type information for the internal accumulator type of the aggregation
* function
* @return The data stream that is the result of applying the window function to the window.
* @param <ACC>
* The type of the AggregateFunction's accumulator
* @param <V>
* The type of AggregateFunction's result, and the WindowFunction's input
* @param <R>
* The type of the elements in the resulting stream, equal to the WindowFunction's
* result type
*/
@PublicEvolving
public <ACC, V, R> BootstrapTransformation<T> aggregate(AggregateFunction<T, ACC, V> aggregateFunction, ProcessWindowFunction<V, R, K, W> windowFunction, TypeInformation<ACC> accumulatorType) {
checkNotNull(aggregateFunction, "aggregateFunction");
checkNotNull(windowFunction, "windowFunction");
checkNotNull(accumulatorType, "accumulatorType");
if (aggregateFunction instanceof RichFunction) {
throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction.");
}
// clean the closures
windowFunction = input.clean(windowFunction);
aggregateFunction = input.clean(aggregateFunction);
WindowOperator<K, T, ?, R, W> operator = builder.aggregate(aggregateFunction, windowFunction, accumulatorType);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new BootstrapTransformation<>(input, operatorMaxParallelism, timestamper, factory, keySelector, keyType);
} | 3.26 |
flink_WindowedOperatorTransformation_trigger_rdh | /**
* Sets the {@code Trigger} that should be used to trigger window emission.
*/
@PublicEvolving
public WindowedOperatorTransformation<T, K, W> trigger(Trigger<? super T, ? super W> trigger) {
builder.trigger(trigger);
return this;
} | 3.26 |
flink_PartitioningProperty_isComputablyPartitioned_rdh | /**
* Checks, if this property represents a partitioning that is computable. A computable
* partitioning can be recreated through an algorithm. If two sets of data are to be
* co-partitioned, it is crucial, that the partitioning schemes are computable.
*
* <p>Examples for computable partitioning schemes are hash- or range-partitioning. An example
 * for a non-computable partitioning is the implicit partitioning that exists through a globally
* unique key.
*
* @return True, if this enum constant is a re-computable partitioning.
*/
public boolean isComputablyPartitioned() {
return ((this == HASH_PARTITIONED) || (this == RANGE_PARTITIONED)) || (this == f0);
} | 3.26 |
flink_PartitioningProperty_isPartitionedOnKey_rdh | /**
* Checks if this property presents a partitioning that is not random, but on a partitioning
* key.
*
* @return True, if the data is partitioned on a key.
*/
public boolean isPartitionedOnKey() {
return isPartitioned() && (this != RANDOM_PARTITIONED);
} | 3.26 |
flink_TableColumn_physical_rdh | /**
* Creates a regular table column that represents physical data.
*/
public static PhysicalColumn physical(String name, DataType type) {
Preconditions.checkNotNull(name, "Column name can not be null.");
Preconditions.checkNotNull(type, "Column type can not be null.");
return new PhysicalColumn(name, type);
} | 3.26 |
flink_TableColumn_getName_rdh | /**
* Returns the name of this column.
*/
public String getName() {
return name;
} | 3.26 |
flink_TableColumn_getType_rdh | /**
* Returns the data type of this column.
*/
public DataType getType() {
return this.type;
} | 3.26 |
flink_TableColumn_metadata_rdh | /**
* Creates a metadata column from metadata of the given column name or from metadata of the
* given alias (if not null).
*
* <p>Allows to specify whether the column is virtual or not.
 */
public static MetadataColumn metadata(String name, DataType type, @Nullable String metadataAlias, boolean isVirtual) {
Preconditions.checkNotNull(name, "Column name can not be null.");
Preconditions.checkNotNull(type, "Column type can not be null.");
return new MetadataColumn(name, type, metadataAlias, isVirtual);
} | 3.26 |
flink_TableColumn_asSummaryString_rdh | /**
* Returns a string that summarizes this column for printing to a console.
*/
public String asSummaryString() {
final StringBuilder sb = new StringBuilder();
sb.append(name);
sb.append(": ");
sb.append(type);
explainExtras().ifPresent(e -> {
sb.append(" ");
sb.append(e);
});
return sb.toString();
} | 3.26 |
flink_TableColumn_computed_rdh | /**
* Creates a computed column that is computed from the given SQL expression.
*/
public static ComputedColumn computed(String name, DataType type, String expression) {
Preconditions.checkNotNull(name, "Column name can not be null.");
Preconditions.checkNotNull(type, "Column type can not be null.");
Preconditions.checkNotNull(expression, "Column expression can not be null.");
return new ComputedColumn(name, type, expression);
} | 3.26 |
flink_TableColumn_of_rdh | /**
*
* @deprecated Use {@link #computed(String, DataType, String)} instead.
*/
@Deprecated
public static TableColumn of(String name, DataType type, String expression) {
return computed(name, type, expression);
} | 3.26 |
flink_TableColumn_m0_rdh | /**
* Creates a metadata column from metadata of the given column name.
*
* <p>Allows to specify whether the column is virtual or not.
*/
public static MetadataColumn m0(String name, DataType type, boolean isVirtual) {
return metadata(name, type, null, isVirtual);
} | 3.26 |
flink_DownTimeGauge_getValue_rdh | // ------------------------------------------------------------------------
@Override
public Long getValue() {
final JobStatus status = jobStatusProvider.getState();
// not running any more -> finished or not on leader
if (status.isTerminalState()) {
return NO_LONGER_RUNNING;
}
final long runningTimestamp = jobStatusProvider.getStatusTimestamp(JobStatus.RUNNING);
final long failingTimestamp = jobStatusProvider.getStatusTimestamp(JobStatus.FAILING);
if (failingTimestamp <= runningTimestamp) {
return f0;
} else {
// we use 'Math.max' here to avoid negative timestamps when clocks change
return Math.max(System.currentTimeMillis() - failingTimestamp, 0);
}
} | 3.26 |
flink_Plan_getDefaultParallelism_rdh | /**
* Gets the default parallelism for this job. That degree is always used when an operator is not
* explicitly given a parallelism.
*
* @return The default parallelism for the plan.
*/
public int getDefaultParallelism() {
return this.defaultParallelism;
} | 3.26 |
flink_Plan_getJobId_rdh | /**
* Gets the ID of the job that the dataflow plan belongs to. If this ID is not set, then the
* dataflow represents its own independent job.
*
* @return The ID of the job that the dataflow plan belongs to.
*/
public JobID getJobId() {
return jobId;
} | 3.26 |
flink_Plan_getPostPassClassName_rdh | /**
* Gets the optimizer post-pass class for this job. The post-pass typically creates utility
* classes for data types and is specific to a particular data model (record, tuple, Scala, ...)
*
* @return The name of the class implementing the optimizer post-pass.
*/
public String getPostPassClassName() {
return "org.apache.flink.optimizer.postpass.JavaApiPostPass";
} | 3.26 |
flink_Plan_registerCachedFile_rdh | /**
* Register cache files at program level.
*
* @param entry
* contains all relevant information
* @param name
* user defined name of that file
* @throws java.io.IOException
*/
public void registerCachedFile(String name, DistributedCacheEntry entry) throws IOException {
if (!this.cacheFile.containsKey(name)) {
this.cacheFile.put(name, entry);
} else {
throw new IOException("cache file " + name + " already exists!");
}
} | 3.26 |
flink_Plan_setJobId_rdh | /**
* Sets the ID of the job that the dataflow plan belongs to. If this ID is set to {@code null},
* then the dataflow represents its own independent job.
*
* @param jobId
* The ID of the job that the dataflow plan belongs to.
*/
public void setJobId(JobID jobId) {
this.jobId = jobId;
} | 3.26 |
flink_Plan_setJobName_rdh | /**
* Sets the jobName for this Plan.
*
* @param jobName
* The jobName to set.
*/
public void setJobName(String jobName) {
checkNotNull(jobName, "The job name must not be null.");
this.jobName = jobName;
} | 3.26 |
flink_Plan_getDataSinks_rdh | /**
* Gets all the data sinks of this job.
*
* @return All sinks of the program.
 */
public Collection<? extends GenericDataSinkBase<?>> getDataSinks() {
return this.sinks;
} | 3.26 |
flink_Plan_accept_rdh | // ------------------------------------------------------------------------
/**
* Traverses the job depth first from all data sinks on towards the sources.
*
* @see Visitable#accept(Visitor)
*/
@Override
public void accept(Visitor<Operator<?>> visitor) {
for (GenericDataSinkBase<?> sink : this.sinks) {
sink.accept(visitor);
}
} | 3.26 |
flink_Plan_getJobName_rdh | /**
* Gets the name of this job.
*
* @return The name of the job.
*/
public String getJobName() {
return this.jobName;
} | 3.26 |
flink_Plan_getCachedFiles_rdh | /**
* Return the registered cached files.
*
* @return Set of (name, filePath) pairs
*/
public Set<Entry<String, DistributedCacheEntry>> getCachedFiles() {
return this.cacheFile.entrySet();
} | 3.26 |
flink_Plan_setExecutionConfig_rdh | /**
* Sets the runtime config object defining execution parameters.
*
* @param executionConfig
* The execution config to use.
*/
public void setExecutionConfig(ExecutionConfig executionConfig) {
this.executionConfig = executionConfig;
} | 3.26 |
flink_Plan_setDefaultParallelism_rdh | /**
* Sets the default parallelism for this plan. That degree is always used when an operator is
* not explicitly given a parallelism.
*
* @param defaultParallelism
* The default parallelism for the plan.
*/
public void setDefaultParallelism(int defaultParallelism) {
checkArgument((defaultParallelism >= 1) || (defaultParallelism == ExecutionConfig.PARALLELISM_DEFAULT), "The default parallelism must be positive, or ExecutionConfig.PARALLELISM_DEFAULT if the system should use the globally configured default.");
this.defaultParallelism = defaultParallelism;
} | 3.26 |
flink_Plan_addDataSink_rdh | // ------------------------------------------------------------------------
/**
* Adds a data sink to the set of sinks in this program.
*
* @param sink
* The data sink to add.
 */
public void addDataSink(GenericDataSinkBase<?> sink) {
checkNotNull(sink, "The data sink must not be null.");
if (!this.sinks.contains(sink)) {
this.sinks.add(sink);
}
} | 3.26 |
flink_CachingLookupFunction_lookupByDelegate_rdh | // -------------------------------- Helper functions ------------------------------
private Collection<RowData> lookupByDelegate(RowData keyRow) throws IOException {
try {
Preconditions.checkState(delegate != null, "User's lookup function can't be null, if there are possible cache misses.");
long loadStart = System.currentTimeMillis();
Collection<RowData> lookupValues = delegate.lookup(keyRow);
updateLatestLoadTime(System.currentTimeMillis() - loadStart);
loadCounter.inc();
return lookupValues;
} catch (Exception e) {
// TODO: Should implement retry on failure logic as proposed in FLIP-234
numLoadFailuresCounter.inc();
throw new IOException(String.format("Failed to lookup with key '%s'", keyRow), e);
}
} | 3.26 |
flink_CachingLookupFunction_open_rdh | /**
* Open the {@link CachingLookupFunction}.
*
* <p>In order to reduce the memory usage of the cache, {@link LookupCacheManager} is used to
* provide a shared cache instance across subtasks of this function. Here we use {@link #functionIdentifier()} as the id of the cache, which is generated by MD5 of serialized bytes
 * of this function. As different subtasks of the function generate the same MD5, this
 * guarantees that they are served with the same cache instance.
*
* @see #functionIdentifier()
*/
@Override
public void open(FunctionContext context) throws Exception {
// Get the shared cache from manager
cacheIdentifier = functionIdentifier();
f0 = LookupCacheManager.getInstance().registerCacheIfAbsent(cacheIdentifier, f0);
// Register metrics
f1 = new InternalCacheMetricGroup(context.getMetricGroup(), LOOKUP_CACHE_METRIC_GROUP_NAME);
if (!(f0 instanceof LookupFullCache)) {
loadCounter = new SimpleCounter();
f1.loadCounter(loadCounter);
numLoadFailuresCounter = new SimpleCounter();
f1.numLoadFailuresCounter(numLoadFailuresCounter);
} else {
initializeFullCache(((LookupFullCache) (f0)), context);
}
// Initialize cache and the delegating function
f0.open(f1);
if (delegate != null) {
delegate.open(context);
}
} | 3.26 |
flink_StreamRecord_equals_rdh | // Utilities
// ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if ((o != null) && (getClass() == o.getClass())) {
StreamRecord<?> that = ((StreamRecord<?>) (o));
return ((this.hasTimestamp == that.hasTimestamp) && ((!this.hasTimestamp) || (this.timestamp == that.timestamp))) && (this.value == null ? that.value == null : this.value.equals(that.value));
} else {
return false;
}
} | 3.26 |
flink_StreamRecord_getTimestamp_rdh | /**
* Returns the timestamp associated with this stream value in milliseconds.
*/
public long getTimestamp() {
if (hasTimestamp) {
return timestamp;
} else {
return Long.MIN_VALUE;
// throw new IllegalStateException(
// "Record has no timestamp. Is the time characteristic set to 'ProcessingTime', or
// " +
// "did you forget to call 'DataStream.assignTimestampsAndWatermarks(...)'?");
}
} | 3.26 |
flink_StreamRecord_replace_rdh | /**
* Replace the currently stored value by the given new value and the currently stored timestamp
* with the new timestamp. This returns a StreamElement with the generic type parameter that
* matches the new value.
*
* @param value
* The new value to wrap in this StreamRecord
* @param timestamp
* The new timestamp in milliseconds
* @return Returns the StreamElement with replaced value
*/
@SuppressWarnings("unchecked")
public <X> StreamRecord<X> replace(X value, long timestamp) {
this.timestamp = timestamp;
this.value = ((T) (value));
this.hasTimestamp = true;
return ((StreamRecord<X>) (this));
} | 3.26 |
flink_StreamRecord_copy_rdh | // ------------------------------------------------------------------------
// Copying
// ------------------------------------------------------------------------
/**
* Creates a copy of this stream record. Uses the copied value as the value for the new record,
* i.e., only copies timestamp fields.
*/
public StreamRecord<T> copy(T valueCopy) {
StreamRecord<T> copy = new StreamRecord<>(valueCopy);
copy.timestamp = this.timestamp;
copy.hasTimestamp = this.hasTimestamp;
return copy;
} | 3.26 |
flink_StreamRecord_getValue_rdh | // ------------------------------------------------------------------------
// Accessors
// ------------------------------------------------------------------------
/**
* Returns the value wrapped in this stream value.
*/
public T getValue() {
return value;
} | 3.26 |
flink_ErrorInfo_getException_rdh | /**
* Returns the serialized form of the original exception.
*/
public SerializedThrowable getException() {
return exception;
} | 3.26 |
flink_ErrorInfo_handleMissingThrowable_rdh | /**
* Utility method to cover FLINK-21376.
*
* @param throwable
* The actual exception.
* @return a {@link FlinkException} if no exception was passed.
*/
public static Throwable handleMissingThrowable(@Nullable Throwable throwable) {
return throwable != null ? throwable : new FlinkException("Unknown cause for Execution failure (this might be caused by FLINK-21376).");
} | 3.26 |
flink_WindowSavepointReader_process_rdh | /**
* Reads window state generated without any preaggregation such as {@code WindowedStream#apply}
* and {@code WindowedStream#process}.
*
* @param uid
* The uid of the operator.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param stateType
* The type of records stored in state.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the records stored in state.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException
* If the savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataStream<OUT> process(String uid, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> stateType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, T, W, OUT> operator = WindowReaderOperator.process(readerFunction, keyType, windowSerializer, stateType);
return readWindowOperator(uid, outputType, operator);
} | 3.26 |
flink_WindowSavepointReader_evictor_rdh | /**
* Reads from a window that uses an evictor.
*/
public EvictingWindowSavepointReader<W> evictor() {
return new EvictingWindowSavepointReader<>(env, metadata, stateBackend, windowSerializer);
} | 3.26 |
flink_WindowSavepointReader_aggregate_rdh | /**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid
* The uid of the operator.
* @param aggregateFunction
* The aggregate function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param accType
* The type information of the accumulator function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the values that are aggregated.
* @param <ACC>
* The type of the accumulator (intermediate aggregate state).
* @param <R>
* The type of the aggregated result.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
 */
public <K, T, ACC, R, OUT> DataStream<OUT> aggregate(String uid, AggregateFunction<T, ACC, R> aggregateFunction, WindowReaderFunction<R, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<ACC> accType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, R, W, OUT> v1 = WindowReaderOperator.aggregate(aggregateFunction, readerFunction, keyType, windowSerializer, accType);
return readWindowOperator(uid, outputType, v1);
} | 3.26 |
flink_WindowSavepointReader_reduce_rdh | /**
* Reads window state generated using a {@link ReduceFunction}.
*
* @param uid
* The uid of the operator.
* @param function
* The reduce function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param reduceType
* The type information of the reduce function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the reduce function.
* @param <OUT>
* The output type of the reduce function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataStream<OUT> reduce(String uid, ReduceFunction<T> function, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> reduceType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, T, W, OUT> operator = WindowReaderOperator.reduce(function, readerFunction, keyType, windowSerializer, reduceType);
return readWindowOperator(uid, outputType, operator);
} | 3.26 |
flink_TestingSinkSettings_getCheckpointingMode_rdh | /**
* Checkpointing mode required for the sink.
*/
public CheckpointingMode getCheckpointingMode() {
return checkpointingMode;
} | 3.26 |
flink_CountWindow_getId_rdh | /**
* Gets the id (0-based) of the window.
*/
public long getId() {
return id;
} | 3.26 |
flink_CountWindow_snapshotConfiguration_rdh | // ------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<CountWindow> snapshotConfiguration() {
return new CountWindow.Serializer.CountWindowSerializerSnapshot();
} | 3.26 |
flink_CoGroupDriver_setup_rdh | // ------------------------------------------------------------------------
@Override
public void setup(TaskContext<CoGroupFunction<IT1, IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_getRocksDBOptions_rdh | /**
* Gets {@link org.rocksdb.Options} for the RocksDB instances.
*
* <p>The options created by the factory here are applied on top of the pre-defined options
* profile selected via {@link #setPredefinedOptions(PredefinedOptions)}. If the pre-defined
* options profile is the default ({@link PredefinedOptions#DEFAULT}), then the factory fully
* controls the RocksDB options.
*/
@Nullable
public RocksDBOptionsFactory getRocksDBOptions() {
return rocksDbOptionsFactory;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setDbStoragePaths_rdh | /**
* Sets the directories in which the local RocksDB database puts its files (like SST and
* metadata files). These directories do not need to be persistent, they can be ephemeral,
* meaning that they are lost on a machine failure, because state in RocksDB is persisted in
* checkpoints.
*
* <p>If nothing is configured, these directories default to the TaskManager's local temporary
* file directories.
*
* <p>Each distinct state will be stored in one path, but when the state backend creates
* multiple states, they will store their files on different paths.
*
* <p>Passing {@code null} to this function restores the default behavior, where the configured
* temp directories will be used.
*
* @param paths
* The paths across which the local RocksDB database files will be spread.
*/
public void setDbStoragePaths(String... paths) {
if (paths == null) {
localRocksDbDirectories = null;
} else if (paths.length == 0) {
throw new IllegalArgumentException("empty paths");
} else {
File[] pp = new File[paths.length];
for (int i = 0; i < paths.length; i++) {
final String rawPath = paths[i];
final String path;
if (rawPath == null) {
throw new IllegalArgumentException("null path");
} else {
// we need this for backwards compatibility, to allow URIs like 'file:///'...
URI uri = null;
try {
uri = new Path(rawPath).toUri();
} catch (Exception e) {
// cannot parse as a path
}
if ((uri != null) && (uri.getScheme() != null)) {
if ("file".equalsIgnoreCase(uri.getScheme())) {
path = uri.getPath();
} else {
throw new IllegalArgumentException(("Path " + rawPath) + " has a non-local scheme");
}
} else {
path = rawPath;
}
}
pp[i] = new File(path);
if (!pp[i].isAbsolute()) {
throw new IllegalArgumentException("Relative paths are not supported");
}
}
localRocksDbDirectories = pp;
}
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setPriorityQueueStateType_rdh | /**
* Sets the type of the priority queue state. It will fallback to the default value, if it is
* not explicitly set.
*/
public void setPriorityQueueStateType(PriorityQueueStateType priorityQueueStateType) {
this.priorityQueueConfig.setPriorityQueueStateType(priorityQueueStateType);
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setRocksDBMemoryFactory_rdh | /**
* Set RocksDBMemoryFactory.
*/
public void setRocksDBMemoryFactory(RocksDBMemoryFactory rocksDBMemoryFactory) {
this.rocksDBMemoryFactory = checkNotNull(rocksDBMemoryFactory);
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_mergeConfigurableOptions_rdh | // ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
private ReadableConfig mergeConfigurableOptions(ReadableConfig base, ReadableConfig onTop) {
if (base == null) {
base = new Configuration();
}
Configuration configuration = new Configuration();
for (ConfigOption<?> option : RocksDBConfigurableOptions.CANDIDATE_CONFIGS) {
Optional<?> baseValue = base.getOptional(option);
Optional<?> topValue = onTop.getOptional(option);
if (topValue.isPresent() || baseValue.isPresent()) {
Object validValue = (topValue.isPresent()) ? topValue.get() : baseValue.get();
RocksDBConfigurableOptions.checkArgumentValid(option, validValue);
configuration.setString(option.key(), validValue.toString());
}
}
return configuration;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setWriteBatchSize_rdh | /**
 * Sets the max batch size that will be used in {@link RocksDBWriteBatchWrapper}; a non-positive
 * value will disable the memory size controller and just use the item count controller.
*
* @param writeBatchSize
* The size will used to be used in {@link RocksDBWriteBatchWrapper}.
*/
public void setWriteBatchSize(long writeBatchSize) {
checkArgument(writeBatchSize >= 0, "Write batch size has to be non-negative.");
this.writeBatchSize = writeBatchSize;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setDbStoragePath_rdh | /**
* Sets the path where the RocksDB local database files should be stored on the local file
* system. Setting this path overrides the default behavior, where the files are stored across
* the configured temp directories.
*
* <p>Passing {@code null} to this function restores the default behavior, where the configured
* temp directories will be used.
*
* @param path
* The path where the local RocksDB database files are stored.
*/
public void setDbStoragePath(String path) {
setDbStoragePaths(path == null ? null : new String[]{ path });
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setRocksDBOptions_rdh | /**
* Sets {@link org.rocksdb.Options} for the RocksDB instances. Because the options are not
* serializable and hold native code references, they must be specified through a factory.
*
* <p>The options created by the factory here are applied on top of the pre-defined options
* profile selected via {@link #setPredefinedOptions(PredefinedOptions)} and user-configured
* options from configuration set by {@link #configure(ReadableConfig, ClassLoader)} with keys
* in {@link RocksDBConfigurableOptions}.
*
* @param optionsFactory
* The options factory that lazily creates the RocksDB options.
*/
public void setRocksDBOptions(RocksDBOptionsFactory optionsFactory) {
this.rocksDbOptionsFactory = optionsFactory;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_getNumberOfTransferThreads_rdh | /**
* Gets the number of threads used to transfer files while snapshotting/restoring.
*/
public int getNumberOfTransferThreads() {
return numberOfTransferThreads == UNDEFINED_NUMBER_OF_TRANSFER_THREADS ? CHECKPOINT_TRANSFER_THREAD_NUM.defaultValue() : numberOfTransferThreads;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_m0_rdh | /**
* Gets the configured local DB storage paths, or null, if none were configured.
*
* <p>Under these directories on the TaskManager, RocksDB stores its SST files and metadata
 * files. These directories do not need to be persistent, they can be ephemeral, meaning that
* they are lost on a machine failure, because state in RocksDB is persisted in checkpoints.
*
* <p>If nothing is configured, these directories default to the TaskManager's local temporary
* file directories.
*/
public String[] m0() {
if (localRocksDbDirectories == null) {
return null;
} else {
String[] paths = new String[localRocksDbDirectories.length];
for (int i = 0; i < paths.length; i++) {
paths[i] = localRocksDbDirectories[i].toString();
}
return paths;
}
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_configure_rdh | // ------------------------------------------------------------------------
// Reconfiguration
// ------------------------------------------------------------------------
/**
* Creates a copy of this state backend that uses the values defined in the configuration for
 * fields that were not yet specified in this state backend.
*
* @param config
* The configuration.
* @param classLoader
* The class loader.
* @return The re-configured variant of the state backend
*/
@Override
public EmbeddedRocksDBStateBackend configure(ReadableConfig config, ClassLoader classLoader) {
    return new EmbeddedRocksDBStateBackend(this, config, classLoader);
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_createKeyedStateBackend_rdh | // ------------------------------------------------------------------------
// State holding data structures
// ------------------------------------------------------------------------
@Override
public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(Environment env, JobID jobID, String operatorIdentifier, TypeSerializer<K> keySerializer, int numberOfKeyGroups, KeyGroupRange keyGroupRange, TaskKvStateRegistry kvStateRegistry, TtlTimeProvider ttlTimeProvider, MetricGroup metricGroup, @Nonnull Collection<KeyedStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry) throws IOException {
return createKeyedStateBackend(env, jobID, operatorIdentifier, keySerializer, numberOfKeyGroups, keyGroupRange, kvStateRegistry, ttlTimeProvider, metricGroup, stateHandles, cancelStreamRegistry, 1.0);
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_getMemoryConfiguration_rdh | // ------------------------------------------------------------------------
// Parameters
// ------------------------------------------------------------------------
/**
* Gets the memory configuration object, which offers settings to control RocksDB's memory
* usage.
 */
public RocksDBMemoryConfiguration getMemoryConfiguration() {
return memoryConfiguration;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setPredefinedOptions_rdh | // ------------------------------------------------------------------------
// Parametrize with RocksDB Options
// ------------------------------------------------------------------------
/**
* Sets the predefined options for RocksDB.
*
* <p>If user-configured options within {@link RocksDBConfigurableOptions} is set (through
* flink-conf.yaml) or a user-defined options factory is set (via {@link #setRocksDBOptions(RocksDBOptionsFactory)}), then the options from the factory are applied on
* top of the here specified predefined options and customized options.
*
* @param options
* The options to set (must not be null).
*/
public void setPredefinedOptions(@Nonnull PredefinedOptions options) {
predefinedOptions = checkNotNull(options);
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_ensureRocksDBIsLoaded_rdh | // ------------------------------------------------------------------------
// static library loading utilities
// ------------------------------------------------------------------------
@VisibleForTesting
static void ensureRocksDBIsLoaded(String tempDirectory) throws IOException {
ensureRocksDBIsLoaded(tempDirectory, NativeLibraryLoader::getInstance);
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_supportsNoClaimRestoreMode_rdh | // ------------------------------------------------------------------------
// State backend methods
// ------------------------------------------------------------------------
@Override
public boolean supportsNoClaimRestoreMode() {
// We are able to create CheckpointType#FULL_CHECKPOINT. (we might potentially reupload some
// shared files when taking incremental snapshots)
return true;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_isIncrementalCheckpointsEnabled_rdh | /**
* Gets whether incremental checkpoints are enabled for this state backend.
*/
public boolean isIncrementalCheckpointsEnabled() {
return enableIncrementalCheckpointing.getOrDefault(CheckpointingOptions.INCREMENTAL_CHECKPOINTS.defaultValue());
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_setNumberOfTransferThreads_rdh | /**
* Sets the number of threads used to transfer files while snapshotting/restoring.
*
* @param numberOfTransferThreads
* The number of threads used to transfer files while
* snapshotting/restoring.
*/
public void setNumberOfTransferThreads(int numberOfTransferThreads) {
Preconditions.checkArgument(numberOfTransferThreads > 0, "The number of threads used to transfer files in EmbeddedRocksDBStateBackend should be greater than zero.");
this.numberOfTransferThreads = numberOfTransferThreads;
} | 3.26 |
flink_EmbeddedRocksDBStateBackend_getPredefinedOptions_rdh | /**
* Gets the currently set predefined options for RocksDB. The default options (if nothing was
* set via {@link #setPredefinedOptions(PredefinedOptions)}) are {@link PredefinedOptions#DEFAULT}.
*
* <p>If user-configured options within {@link RocksDBConfigurableOptions} is set (through
 * flink-conf.yaml) or a user-defined options factory is set (via {@link #setRocksDBOptions(RocksDBOptionsFactory)}), then the options from the factory are applied on
* top of the predefined and customized options.
*
* @return The currently set predefined options for RocksDB.
*/
@VisibleForTesting
public PredefinedOptions getPredefinedOptions() {
if (predefinedOptions == null) {
predefinedOptions = PredefinedOptions.DEFAULT;
}
return predefinedOptions;
} | 3.26 |