name | code_snippet | score
---|---|---|
flink_StreamExecutionEnvironment_enableCheckpointing_rdh
|
/**
* Enables checkpointing for the streaming job. The distributed state of the streaming dataflow
* will be periodically snapshotted. In case of a failure, the streaming dataflow will be
* restarted from the latest completed checkpoint. This method selects {@link CheckpointingMode#EXACTLY_ONCE} guarantees.
*
* <p>The job draws checkpoints periodically, in the default interval. The state will be stored
* in the configured state backend.
*
* <p>NOTE: Checkpointing iterative streaming dataflows is not properly supported at the moment.
* For that reason, iterative jobs will not be started if used with enabled checkpointing. To
* override this mechanism, use the {@link #enableCheckpointing(long, CheckpointingMode,
* boolean)} method.
*
* @deprecated Use {@link #enableCheckpointing(long)} instead.
*/
@Deprecated
@PublicEvolving
public StreamExecutionEnvironment enableCheckpointing() {
checkpointCfg.setCheckpointInterval(500);
return this;
}
| 3.26 |
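A minimal usage sketch for the recommended replacement, enableCheckpointing(long), rather than the deprecated no-argument variant above; the 10-second interval and the min-pause value are illustrative assumptions, not values from the dataset.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CheckpointingSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Draw a checkpoint every 10 seconds instead of the deprecated default of 500 ms.
        env.enableCheckpointing(10_000L);
        // Optional tuning on the checkpoint configuration object.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5_000L);
    }
}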
flink_StreamExecutionEnvironment_fromSource_rdh
|
/**
* Adds a data {@link Source} to the environment to get a {@link DataStream}.
*
* <p>The result will be either a bounded data stream (that can be processed in a batch way) or
* an unbounded data stream (that must be processed in a streaming way), based on the
* boundedness property of the source, as defined by {@link Source#getBoundedness()}.
*
* <p>This method takes an explicit type information for the produced data stream, so that
* callers can define directly what type/serializer will be used for the produced stream. For
* sources that describe their produced type, the method {@link #fromSource(Source,
* WatermarkStrategy, String)} can be used to avoid specifying the produced type redundantly.
*
* @param source
* the user defined source
* @param sourceName
* Name of the data source
* @param <OUT>
* type of the returned stream
* @param typeInfo
* the user defined type information for the stream
* @return the data stream constructed
*/
@Experimental
public <OUT> DataStreamSource<OUT> fromSource(Source<OUT, ?, ?> source, WatermarkStrategy<OUT> timestampsAndWatermarks, String sourceName, TypeInformation<OUT> typeInfo) {
final TypeInformation<OUT> resolvedTypeInfo = getTypeInfo(source, sourceName, Source.class, typeInfo);
return new DataStreamSource<>(this, checkNotNull(source, "source"), checkNotNull(timestampsAndWatermarks, "timestampsAndWatermarks"), checkNotNull(resolvedTypeInfo), checkNotNull(sourceName));
}
| 3.26 |
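A hedged sketch of calling the fromSource overload above with an explicit TypeInformation. It assumes the NumberSequenceSource bundled with Flink is available; the source name and the sequence bounds are arbitrary.

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.connector.source.lib.NumberSequenceSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class FromSourceSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromSource(
                new NumberSequenceSource(1L, 100L),   // bounded source of Longs
                WatermarkStrategy.noWatermarks(),     // no event-time watermarks needed here
                "numbers",
                Types.LONG)                           // explicit type info instead of the source-reported one
           .print();
        env.execute("fromSource sketch");
    }
}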
flink_StreamExecutionEnvironment_socketTextStream_rdh
|
/**
* Creates a new data stream that contains the strings received infinitely from a socket.
* Received strings are decoded by the system's default character set, using "\n" as the delimiter.
* The reader is terminated immediately when the socket is down.
*
* @param hostname
* The host name which a server socket binds
* @param port
* The port number which a server socket binds. A port number of 0 means that the
* port number is automatically allocated.
* @return A data stream containing the strings received from the socket
*/
@PublicEvolving
public DataStreamSource<String> socketTextStream(String hostname, int port) {
return socketTextStream(hostname, port, "\n");
}
| 3.26 |
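A short sketch of the socket source above; the hostname and port are placeholders and it assumes a plain-text server (for example `nc -lk 9999`) is listening.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SocketSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Lines are split on "\n" and decoded with the default charset, as documented above.
        env.socketTextStream("localhost", 9999).print();
        env.execute("socket sketch");
    }
}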
flink_StreamExecutionEnvironment_clean_rdh
|
/**
* Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning is
* not disabled in the {@link org.apache.flink.api.common.ExecutionConfig}
*/
@Internal
public <F> F clean(F f) {
if (getConfig().isClosureCleanerEnabled()) {
ClosureCleaner.clean(f, getConfig().getClosureCleanerLevel(), true);
}
ClosureCleaner.ensureSerializable(f);
return f;
}
| 3.26 |
flink_StreamExecutionEnvironment_getConfiguration_rdh
|
/**
* Gives read-only access to the underlying configuration of this environment.
*
* <p>Note that the returned configuration might not be complete. It only contains options that
* were used to initialize the environment via {@link #StreamExecutionEnvironment(Configuration)} or
* options that are not represented in dedicated configuration classes such as {@link ExecutionConfig} or {@link CheckpointConfig}.
*
* <p>Use {@link #configure(ReadableConfig, ClassLoader)} to set options that are specific to
* this environment.
*/
@Internal
public ReadableConfig getConfiguration() {
// Note to implementers:
// In theory, you can cast the return value of this method to Configuration and perform
// mutations. In practice, this could cause side effects. A better approach is to implement
// the ReadableConfig interface and create a layered configuration.
// For example:
// TableConfig implements ReadableConfig {
// underlyingLayer ReadableConfig
// thisConfigLayer Configuration
//
// get(configOption) {
// return thisConfigLayer
// .getOptional(configOption)
// .orElseGet(underlyingLayer.get(configOption))
// }
// }
return configuration;
}
| 3.26 |
flink_StreamExecutionEnvironment_setMaxParallelism_rdh
|
/**
* Sets the maximum degree of parallelism defined for the program. The upper limit (inclusive)
* is Short.MAX_VALUE + 1.
*
* <p>The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
* defines the number of key groups used for partitioned state.
*
* @param maxParallelism
* Maximum degree of parallelism to be used for the program., with {@code 0 < maxParallelism <= 2^15}.
*/
public StreamExecutionEnvironment setMaxParallelism(int maxParallelism) {
Preconditions.checkArgument((maxParallelism > 0) && (maxParallelism <= KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM), (("maxParallelism is out of bounds 0 < maxParallelism <= " + KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM) + ". Found: ") + maxParallelism);
config.setMaxParallelism(maxParallelism);
return this;
}
| 3.26 |
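A small sketch showing how the maximum parallelism bounds later rescaling of the job above; the concrete values are illustrative assumptions.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class MaxParallelismSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setMaxParallelism(128); // number of key groups; upper limit for dynamic scaling
        env.setParallelism(4);      // current operator parallelism, must not exceed 128
    }
}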
flink_StreamExecutionEnvironment_addOperator_rdh
|
/**
* Adds an operator to the list of operators that should be executed when calling {@link #execute}.
*
* <p>When calling {@link #execute()} only the operators that were previously added to the list
* are executed.
*
* <p>This is not meant to be used by users. The API methods that create operators must call
* this method.
*/
@Internal
public void addOperator(Transformation<?> transformation) {
Preconditions.checkNotNull(transformation, "transformation must not be null.");
this.transformations.add(transformation);
}
| 3.26 |
flink_StreamExecutionEnvironment_m3_rdh
|
/**
* Creates a {@link RemoteStreamEnvironment}. The remote environment sends (parts of) the
* program to a cluster for execution. Note that all file paths used in the program must be
* accessible from the cluster. The execution will use the specified parallelism.
*
* @param host
* The host name or address of the master (JobManager), where the program should be
* executed.
* @param port
* The port of the master (JobManager), where the program should be executed.
* @param clientConfig
* The configuration used by the client that connects to the remote cluster.
* @param jarFiles
* The JAR files with code that needs to be shipped to the cluster. If the
* program uses user-defined functions, user-defined input formats, or any libraries, those
* must be provided in the JAR files.
* @return A remote environment that executes the program on a cluster.
*/
public static StreamExecutionEnvironment m3(String host, int port, Configuration clientConfig, String... jarFiles) {
return new RemoteStreamEnvironment(host, port, clientConfig, jarFiles);
}
| 3.26 |
flink_StreamExecutionEnvironment_getCachedFiles_rdh
|
/**
* Get the list of cached files that were registered for distribution among the task managers.
*/
public List<Tuple2<String, DistributedCache.DistributedCacheEntry>> getCachedFiles() {
return cacheFile;
}
| 3.26 |
flink_StreamExecutionEnvironment_setDefaultSavepointDirectory_rdh
|
/**
* Sets the default savepoint directory, where savepoints will be written to if none is
* explicitly provided when a savepoint is triggered.
*
* @return This StreamExecutionEnvironment itself, to allow chaining of function calls.
* @see #getDefaultSavepointDirectory()
*/
@PublicEvolving
public StreamExecutionEnvironment setDefaultSavepointDirectory(Path savepointDirectory) {
this.defaultSavepointDirectory = Preconditions.checkNotNull(savepointDirectory);
return this;
}
| 3.26 |
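A sketch of the setter above, assuming a local file system path; any file system URI supported by the deployment would work, and the directory shown is an arbitrary assumption.

import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SavepointDirSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Savepoints triggered without an explicit target directory are written here.
        env.setDefaultSavepointDirectory(new Path("file:///tmp/flink-savepoints"));
    }
}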
flink_StreamExecutionEnvironment_setDefaultLocalParallelism_rdh
|
/**
* Sets the default parallelism that will be used for the local execution environment created by
* {@link #createLocalEnvironment()}.
*
* @param parallelism
* The parallelism to use as the default local parallelism.
*/
@PublicEvolving
public static void setDefaultLocalParallelism(int parallelism) {
defaultLocalParallelism = parallelism;
}
| 3.26 |
flink_StreamExecutionEnvironment_fromData_rdh
|
/**
* Creates a new data stream that contains the given elements. The framework will determine the
* type according to the base type supplied by the user. The elements must be of that base type
* or a subclass of it. The sequence of elements must not be empty.
*
* <p>NOTE: This creates a non-parallel data stream source by default (parallelism of one).
* Adjustment of parallelism is supported via {@code setParallelism()} on the result.
*
* @param type
* The base class type of the elements in the collection.
* @param data
* The array of elements to create the data stream from.
* @param <OUT>
* The type of the returned data stream
* @return The data stream representing the given array of elements
*/
@SafeVarargs
public final <OUT> DataStreamSource<OUT> fromData(Class<OUT> type, OUT... data) {
if (data.length == 0) {
throw new IllegalArgumentException("fromElements needs at least one element as argument");
}
TypeInformation<OUT> typeInfo;
try {
typeInfo = TypeExtractor.getForClass(type);
} catch (Exception e) {
throw new RuntimeException((("Could not create TypeInformation for type " + type.getName()) + "; please specify the TypeInformation manually via ") + "StreamExecutionEnvironment#fromData(Collection, TypeInformation)", e);
}
return fromData(Arrays.asList(data), typeInfo);
}
/**
* Creates a new data stream that contains a sequence of numbers. This is a parallel source, if
* you manually set the parallelism to {@code 1} (using {@link org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator#setParallelism(int)})
* the generated sequence of elements is in order.
*
* @param from
* The number to start at (inclusive)
* @param to
* The number to stop at (inclusive)
* @return A data stream, containing all number in the [from, to] interval
* @deprecated Use {@link #fromSequence(long, long)} instead to create a new data stream that
contains {@link org.apache.flink.api.connector.source.lib.NumberSequenceSource}
| 3.26 |
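A sketch of the fromData variant above with an explicit base class; the element values and the choice of Number as base type are illustrative assumptions.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class FromDataSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Non-parallel source (parallelism 1); all elements are subclasses of the Number base type.
        env.fromData(Number.class, 1, 2L, 3.0).print();
        env.execute("fromData sketch");
    }
}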
flink_StreamExecutionEnvironment_createRemoteEnvironment_rdh
|
/**
* Creates a {@link RemoteStreamEnvironment}. The remote environment sends (parts of) the
* program to a cluster for execution. Note that all file paths used in the program must be
* accessible from the cluster. The execution will use the specified parallelism.
*
* @param host
* The host name or address of the master (JobManager), where the program should be
* executed.
* @param port
* The port of the master (JobManager), where the program should be executed.
* @param parallelism
* The parallelism to use during the execution.
* @param jarFiles
* The JAR files with code that needs to be shipped to the cluster. If the
* program uses user-defined functions, user-defined input formats, or any libraries, those
* must be provided in the JAR files.
* @return A remote environment that executes the program on a cluster.
*/
public static StreamExecutionEnvironment createRemoteEnvironment(String host, int port, int parallelism, String... jarFiles) {
RemoteStreamEnvironment env = new RemoteStreamEnvironment(host, port, jarFiles);
env.setParallelism(parallelism);
return env;
}
| 3.26 |
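A sketch of the remote environment factory above; the host, port, parallelism, and jar path are placeholders for an actual cluster setup.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RemoteEnvSketch {
    public static void main(String[] args) throws Exception {
        // The jar must contain the user code (UDFs, formats, libraries) referenced by the job.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
                "jobmanager.example.com", 8081, 4, "/path/to/user-code.jar");
        env.fromData(Integer.class, 1, 2, 3).print();
        env.execute("remote sketch");
    }
}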
flink_StreamExecutionEnvironment_setStateBackend_rdh
|
/**
* Sets the state backend that describes how to store operator state. It defines the data structures
* that hold state during execution (for example hash tables, RocksDB, or other data stores).
*
* <p>State managed by the state backend includes both keyed state that is accessible on {@link org.apache.flink.streaming.api.datastream.KeyedStream keyed streams}, as well as state
* maintained directly by the user code that implements {@link org.apache.flink.streaming.api.checkpoint.CheckpointedFunction CheckpointedFunction}.
*
* <p>The {@link org.apache.flink.runtime.state.hashmap.HashMapStateBackend} maintains state in
* heap memory, as objects. It is lightweight without extra dependencies, but is limited to JVM
* heap memory.
*
* <p>In contrast, the {@code EmbeddedRocksDBStateBackend} stores its state in an embedded
* {@code RocksDB} instance. This state backend can store very large state that exceeds memory
* and spills to local disk. All key/value state (including windows) is stored in the key/value
* index of RocksDB.
*
* <p>In both cases, fault tolerance is managed via the job's {@link org.apache.flink.runtime.state.CheckpointStorage} which configures how and where state
* backends persist during a checkpoint.
*
* @return This StreamExecutionEnvironment itself, to allow chaining of function calls.
* @see #getStateBackend()
* @see CheckpointConfig#setCheckpointStorage( org.apache.flink.runtime.state.CheckpointStorage)
*/
@PublicEvolving
public StreamExecutionEnvironment setStateBackend(StateBackend backend) {
this.defaultStateBackend = Preconditions.checkNotNull(backend);
return this;
}
| 3.26 |
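A sketch pairing the heap-based HashMapStateBackend with a checkpoint storage location, following the @see hint above; the checkpoint path is an assumption.

import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class StateBackendSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Heap-based keyed state; limited by JVM heap memory, as documented above.
        env.setStateBackend(new HashMapStateBackend());
        // Where checkpoints of that state are persisted is configured separately.
        env.getCheckpointConfig().setCheckpointStorage("file:///tmp/flink-checkpoints");
    }
}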
flink_StreamExecutionEnvironment_isUnalignedCheckpointsEnabled_rdh
|
/**
* Returns whether unaligned checkpoints are enabled.
*/
@PublicEvolving
public boolean isUnalignedCheckpointsEnabled() {
return checkpointCfg.isUnalignedCheckpointsEnabled();
}
| 3.26 |
flink_StreamExecutionEnvironment_isChainingEnabled_rdh
|
/**
* Returns whether operator chaining is enabled.
*
* @return {@code true} if chaining is enabled, false otherwise.
*/
@PublicEvolving
public boolean isChainingEnabled() {
return isChainingEnabled;
}
| 3.26 |
flink_StreamExecutionEnvironment_setBufferTimeout_rdh
|
/**
* Sets the maximum time frequency (milliseconds) for the flushing of the output buffers. By
* default the output buffers flush frequently to provide low latency and to aid smooth
* developer experience. Setting the parameter can result in three logical modes:
*
* <ul>
* <li>A positive value triggers flushing periodically at that interval (in milliseconds)
* <li>0 triggers flushing after every record, thus minimizing latency
* <li>-1 triggers flushing only when the output buffer is full, thus maximizing throughput
* </ul>
*
* @param timeoutMillis
* The maximum time between two output flushes.
*/
public StreamExecutionEnvironment setBufferTimeout(long timeoutMillis) {
if (timeoutMillis < ExecutionOptions.DISABLED_NETWORK_BUFFER_TIMEOUT) {
throw new IllegalArgumentException("Timeout of buffer must be non-negative or -1");
}
this.bufferTimeout = timeoutMillis;
return this;
}
| 3.26 |
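A sketch of the three buffer-timeout modes described above; the 100 ms value is arbitrary.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class BufferTimeoutSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setBufferTimeout(100); // flush at least every 100 ms (latency/throughput trade-off)
        // env.setBufferTimeout(0);  // flush after every record: lowest latency
        // env.setBufferTimeout(-1); // flush only when buffers are full: highest throughput
    }
}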
flink_StreamExecutionEnvironment_setParallelism_rdh
|
/**
* Sets the parallelism for operations executed through this environment. Setting a parallelism
* of x here will cause all operators (such as map, batchReduce) to run with x parallel
* instances. This method overrides the default parallelism for this environment. The {@link LocalStreamEnvironment} uses by default a value equal to the number of hardware contexts (CPU
* cores / threads). When executing the program via the command line client from a JAR file, the
* default degree of parallelism is the one configured for that setup.
*
* @param parallelism
* The parallelism
*/
public StreamExecutionEnvironment setParallelism(int parallelism) {
config.setParallelism(parallelism);
return this;
}
/**
* Sets the runtime execution mode for the application (see {@link RuntimeExecutionMode}). This
* is equivalent to setting the {@code execution.runtime-mode} in your application's
* configuration file.
*
* <p>We recommend users to NOT use this method but set the {@code execution.runtime-mode}
| 3.26 |
flink_StreamExecutionEnvironment_getStateBackend_rdh
|
/**
* Gets the state backend that defines how to store and checkpoint state.
*
* @see #setStateBackend(StateBackend)
*/
@PublicEvolving
public StateBackend getStateBackend() {
return defaultStateBackend;
}
| 3.26 |
flink_StreamExecutionEnvironment_close_rdh
|
/**
* Close and clean up the execution environment. All the cached intermediate results will be
* released physically.
*/
@Override
public void close() throws Exception {
for (AbstractID id : cachedTransformations.keySet()) {
invalidateClusterDataset(id);
}
}
| 3.26 |
flink_StreamExecutionEnvironment_registerType_rdh
|
/**
* Registers the given type with the serialization stack. If the type is eventually serialized
* as a POJO, then the type is registered with the POJO serializer. If the type ends up being
* serialized with Kryo, then it will be registered at Kryo to make sure that only tags are
* written.
*
* @param type
* The class of the type to register.
*/
public void registerType(Class<?> type) {
if (type == null) {
throw new NullPointerException("Cannot register null type class.");
}
TypeInformation<?> typeInfo = TypeExtractor.createTypeInfo(type);
if (typeInfo instanceof PojoTypeInfo) {
config.registerPojoType(type);
} else {
config.registerKryoType(type);
}
}
// --------------------------------------------------------------------------------------------
// Time characteristic
// --------------------------------------------------------------------------------------------
/**
* Sets the time characteristic for all streams created from this environment, e.g., processing
* time, event time, or ingestion time.
*
* <p>If you set the characteristic to IngestionTime or EventTime this will set a default
* watermark update interval of 200 ms. If this is not applicable for your application you
* should change it using {@link ExecutionConfig#setAutoWatermarkInterval(long)}.
*
* @param characteristic
* The time characteristic.
* @deprecated In Flink 1.12 the default stream time characteristic has been changed to {@link TimeCharacteristic#EventTime}, thus you don't need to call this method for enabling
event-time support anymore. Explicitly using processing-time windows and timers works in
event-time mode. If you need to disable watermarks, please use {@link ExecutionConfig#setAutoWatermarkInterval(long)}. If you are using {@link TimeCharacteristic#IngestionTime}, please manually set an appropriate {@link WatermarkStrategy}. If you are using generic "time window" operations (for example {@link org.apache.flink.streaming.api.datastream.KeyedStream#timeWindow(org.apache.flink.streaming.api.windowing.time.Time)}
| 3.26 |
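A sketch of registerType with a hypothetical POJO named Event (not from the dataset); whether registration goes to the POJO or the Kryo path depends on the type analysis described above.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RegisterTypeSketch {

    /** Hypothetical POJO: public class, public no-arg constructor, public fields. */
    public static class Event {
        public long id;
        public String payload;
        public Event() {}
    }

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Registered as a POJO type here; non-POJO classes would be registered with Kryo instead.
        env.registerType(Event.class);
    }
}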
flink_StreamExecutionEnvironment_registerTypeWithKryoSerializer_rdh
|
/**
* Registers the given Serializer via its class as a serializer for the given type at the
* KryoSerializer.
*
* @param type
* The class of the types serialized with the given serializer.
* @param serializerClass
* The class of the serializer to use.
*/
@SuppressWarnings("rawtypes")
public void registerTypeWithKryoSerializer(Class<?> type, Class<? extends Serializer> serializerClass) {
config.registerTypeWithKryoSerializer(type, serializerClass);
}
| 3.26 |
flink_StreamExecutionEnvironment_createLocalEnvironment_rdh
|
/**
* Creates a {@link LocalStreamEnvironment}. The local execution environment will run the
* program in a multi-threaded fashion in the same JVM as the environment was created in.
*
* @param configuration
* Pass a custom configuration into the cluster
* @return A local execution environment with the specified parallelism.
*/
public static LocalStreamEnvironment createLocalEnvironment(Configuration configuration) {
if (configuration.getOptional(CoreOptions.DEFAULT_PARALLELISM).isPresent()) {
return new LocalStreamEnvironment(configuration);
} else {
Configuration copyOfConfiguration = new Configuration();
copyOfConfiguration.addAll(configuration);
copyOfConfiguration.set(CoreOptions.DEFAULT_PARALLELISM, defaultLocalParallelism);
return new LocalStreamEnvironment(copyOfConfiguration);
}
}
| 3.26 |
flink_StreamExecutionEnvironment_fromParallelCollection_rdh
|
// private helper for passing different names
private <OUT> DataStreamSource<OUT> fromParallelCollection(SplittableIterator<OUT> iterator, TypeInformation<OUT> typeInfo, String operatorName) {
return addSource(new FromSplittableIteratorFunction<>(iterator), operatorName, typeInfo, Boundedness.BOUNDED);
}
/**
* Reads the given file line-by-line and creates a data stream that contains a string with the
* contents of each such line. The file will be read with the UTF-8 character set.
*
* <p><b>NOTES ON CHECKPOINTING: </b> The source monitors the path, creates the {@link org.apache.flink.core.fs.FileInputSplit FileInputSplits} to be processed, forwards them to
* the downstream readers to read the actual data, and exits, without waiting for the readers to
* finish reading. This implies that no more checkpoint barriers are going to be forwarded after
* the source exits, thus having no checkpoints after that point.
*
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @return The data stream that represents the data read from the given file as text lines
* @deprecated Use {@code FileSource#forRecordStreamFormat()/forBulkFileFormat()/forRecordFileFormat()} instead. An
example of reading a file using a simple {@code TextLineInputFormat}:
<pre>{@code FileSource<String> source =
FileSource.forRecordStreamFormat(
new TextLineInputFormat(), new Path("/foo/bar"))
.build();}
| 3.26 |
flink_StreamExecutionEnvironment_createLocalEnvironmentWithWebUI_rdh
|
/**
* Creates a {@link LocalStreamEnvironment} for local program execution that also starts the web
* monitoring UI.
*
* <p>The local execution environment will run the program in a multi-threaded fashion in the
* same JVM as the environment was created in. It will use the parallelism specified in the
* parameter.
*
* <p>If the configuration key 'rest.port' was set in the configuration, that particular port
* will be used for the web UI. Otherwise, the default port (8081) will be used.
*/
@PublicEvolving
public static StreamExecutionEnvironment createLocalEnvironmentWithWebUI(Configuration conf) {
checkNotNull(conf, "conf");
if (!conf.contains(RestOptions.PORT)) {
// explicitly set this option so that it's not set to 0 later
conf.setInteger(RestOptions.PORT, RestOptions.PORT.defaultValue());
}
return createLocalEnvironment(conf);
}
| 3.26 |
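A sketch of the web UI factory above; it assumes flink-runtime-web is on the classpath and picks a non-default port, which is an arbitrary choice.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class LocalWebUiSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInteger(RestOptions.PORT, 8082); // otherwise the default 8081 is used
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);
    }
}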
flink_StreamExecutionEnvironment_addDefaultKryoSerializer_rdh
|
/**
* Adds a new Kryo default serializer to the Runtime.
*
* @param type
* The class of the types serialized with the given serializer.
* @param serializerClass
* The class of the serializer to use.
*/
public void addDefaultKryoSerializer(Class<?> type, Class<? extends Serializer<?>> serializerClass) {
config.addDefaultKryoSerializer(type, serializerClass);
}
| 3.26 |
flink_StreamExecutionEnvironment_registerJobListener_rdh
|
/**
* Registers a {@link JobListener} in this environment. The {@link JobListener} will be notified
* on specific job status changes.
*/
@PublicEvolving
public void registerJobListener(JobListener jobListener) {
checkNotNull(jobListener, "JobListener cannot be null");
jobListeners.add(jobListener);
}
| 3.26 |
flink_StreamExecutionEnvironment_setRestartStrategy_rdh
|
/**
* Sets the restart strategy configuration. The configuration specifies which restart strategy
* will be used for the execution graph in case of a restart.
*
* @param restartStrategyConfiguration
* Restart strategy configuration to be set
*/
@PublicEvolving
public void setRestartStrategy(RestartStrategies.RestartStrategyConfiguration restartStrategyConfiguration) {
config.setRestartStrategy(restartStrategyConfiguration);
}
| 3.26 |
flink_StreamExecutionEnvironment_enableChangelogStateBackend_rdh
|
/**
* Enables the changelog for the current state backend. This changelog allows operators to
* persist state changes in a very fine-grained manner. Currently, the changelog only applies to
* keyed state, so non-keyed operator state and channel state are persisted as usual. The 'state'
* here refers to 'keyed state'. Details are as follows:
*
* <p>Stateful operators write the state changes to that log (logging the state), in addition to
* applying them to the state tables in RocksDB or the in-mem Hashtable.
*
* <p>An operator can acknowledge a checkpoint as soon as the changes in the log have reached
* the durable checkpoint storage.
*
* <p>The state tables are persisted periodically, independent of the checkpoints. We call this
* the materialization of the state on the checkpoint storage.
*
* <p>Once the state is materialized on checkpoint storage, the state changelog can be truncated
* to the corresponding point.
*
* <p>It establishes a way to drastically reduce the checkpoint interval for streaming
* applications across state backends. For more details, please check FLIP-158.
*
* <p>If this method is not called explicitly, no preference for enabling the changelog is
* expressed, and the changelog setting can be overridden at the different configuration levels
* (job/local/cluster).
*
* @param enabled
* true to explicitly enable the changelog for the state backend, false to explicitly
* disable it.
* @return This StreamExecutionEnvironment itself, to allow chaining of function calls.
* @see #isChangelogStateBackendEnabled()
*/
@PublicEvolving
public StreamExecutionEnvironment enableChangelogStateBackend(boolean enabled) {
this.changelogStateBackendEnabled = TernaryBoolean.fromBoolean(enabled);
return this;
}
/**
* Gets the enable status of change log for state backend.
*
* @return a {@link TernaryBoolean} for the enable status of change log for state backend. Could
be {@link TernaryBoolean#UNDEFINED} if user never specify this by calling {@link #enableChangelogStateBackend(boolean)}
| 3.26 |
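A sketch of explicitly enabling the changelog as described above; leaving the method uncalled keeps the preference undefined, per the javadoc.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChangelogSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Explicit job-level preference; cluster/local configuration is overridden accordingly.
        env.enableChangelogStateBackend(true);
    }
}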
flink_StreamExecutionEnvironment_execute_rdh
|
/**
* Triggers the program execution. The environment will execute all parts of the program that
* have resulted in a "sink" operation. Sink operations are for example printing results or
* forwarding them to a message queue.
*
* @param streamGraph
* the stream graph representing the transformations
* @return The result of the job execution, containing elapsed time and accumulators.
* @throws Exception
* which occurs during job execution.
*/
@Internal
public JobExecutionResult execute(StreamGraph streamGraph) throws Exception {
final JobClient jobClient = executeAsync(streamGraph);
try {
final JobExecutionResult jobExecutionResult;
if (configuration.getBoolean(DeploymentOptions.ATTACHED)) {
jobExecutionResult = jobClient.getJobExecutionResult().get();
} else {
jobExecutionResult = new DetachedJobExecutionResult(jobClient.getJobID());
}
jobListeners.forEach(jobListener -> jobListener.onJobExecuted(jobExecutionResult, null));
return jobExecutionResult;
} catch (Throwable t) {
// get() on the JobExecutionResult Future will throw an ExecutionException. This
// behaviour was largely not there in Flink versions before the PipelineExecutor
// refactoring so we should strip that exception.
Throwable strippedException = ExceptionUtils.stripExecutionException(t);
jobListeners.forEach(jobListener -> {
jobListener.onJobExecuted(null, strippedException);
});
ExceptionUtils.rethrowException(strippedException);
// never reached, only make javac happy
return null;
}
}
| 3.26 |
flink_StreamExecutionEnvironment_m0_rdh
|
/**
* Creates a data stream from the given non-empty collection. The type of the data stream is
* that of the elements in the collection.
*
* <p>The framework will try and determine the exact type from the collection elements. In case
* of generic elements, it may be necessary to manually supply the type information via {@link #fromCollection(java.util.Collection, org.apache.flink.api.common.typeinfo.TypeInformation)}.
*
* <p>Note that this operation will result in a non-parallel data stream source, i.e. a data
* stream source with parallelism one.
*
* @param data
* The collection of elements to create the data stream from.
* @param <OUT>
* The generic type of the returned data stream.
* @return The data stream representing the given collection
*/
public <OUT> DataStreamSource<OUT> m0(Collection<OUT> data) {
Preconditions.checkNotNull(data, "Collection must not be null");
if (data.isEmpty()) {
throw new IllegalArgumentException("Collection must not be empty");
}
OUT first = data.iterator().next();
if (first == null) {
throw new IllegalArgumentException("Collection must not contain null elements");
}
TypeInformation<OUT> typeInfo;
try {
typeInfo = TypeExtractor.getForObject(first);
} catch (Exception e) {
throw new RuntimeException((("Could not create TypeInformation for type " + first.getClass()) + "; please specify the TypeInformation manually via ") + "StreamExecutionEnvironment#fromElements(Collection, TypeInformation)", e);
}
return m0(data, typeInfo);
}
| 3.26 |
flink_StreamExecutionEnvironment_getStreamGraph_rdh
|
/**
* Getter of the {@link StreamGraph} of the streaming job with the option to clear previously
* registered {@link Transformation transformations}. Clearing the transformations allows, for
* example, to not re-execute the same operations when calling {@link #execute()} multiple
* times.
*
* @param clearTransformations
* Whether or not to clear previously registered transformations
* @return The stream graph representing the transformations
*/
@Internal
public StreamGraph getStreamGraph(boolean clearTransformations) {
final StreamGraph streamGraph = getStreamGraph(transformations);
if (clearTransformations) {
transformations.clear();
}
return streamGraph;
}
| 3.26 |
flink_StreamExecutionEnvironment_configure_rdh
|
/**
* Sets all relevant options contained in the {@link ReadableConfig} such as e.g. {@link StreamPipelineOptions#TIME_CHARACTERISTIC}. It will reconfigure {@link StreamExecutionEnvironment}, {@link ExecutionConfig} and {@link CheckpointConfig}.
*
* <p>It will change the value of a setting only if a corresponding option was set in the {@code configuration}. If a key is not present, the current value of a field will remain untouched.
*
* @param configuration
* a configuration to read the values from
* @param classLoader
* a class loader to use when loading classes
*/
@PublicEvolving
public void configure(ReadableConfig configuration, ClassLoader classLoader) {
configuration.getOptional(StreamPipelineOptions.TIME_CHARACTERISTIC).ifPresent(this::setStreamTimeCharacteristic);
configuration.getOptional(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG).ifPresent(this::enableChangelogStateBackend);
Optional.ofNullable(loadStateBackend(configuration, classLoader)).ifPresent(this::setStateBackend);
configuration.getOptional(PipelineOptions.OPERATOR_CHAINING).ifPresent(c -> this.isChainingEnabled = c);
configuration.getOptional(PipelineOptions.OPERATOR_CHAINING_CHAIN_OPERATORS_WITH_DIFFERENT_MAX_PARALLELISM).ifPresent(c -> this.isChainingOfOperatorsWithDifferentMaxParallelismEnabled = c);
configuration.getOptional(DeploymentOptions.JOB_LISTENERS).ifPresent(listeners -> registerCustomListeners(classLoader, listeners));
configuration.getOptional(PipelineOptions.CACHED_FILES).ifPresent(f -> {
this.cacheFile.clear();
this.cacheFile.addAll(DistributedCache.parseCachedFilesFromString(f));
});
configuration.getOptional(ExecutionOptions.RUNTIME_MODE).ifPresent(runtimeMode -> this.configuration.set(ExecutionOptions.RUNTIME_MODE, runtimeMode));
configuration.getOptional(ExecutionOptions.BATCH_SHUFFLE_MODE).ifPresent(shuffleMode -> this.configuration.set(ExecutionOptions.BATCH_SHUFFLE_MODE, shuffleMode));
configuration.getOptional(ExecutionOptions.SORT_INPUTS).ifPresent(sortInputs -> this.configuration.set(ExecutionOptions.SORT_INPUTS, sortInputs));
configuration.getOptional(ExecutionOptions.USE_BATCH_STATE_BACKEND).ifPresent(sortInputs -> this.configuration.set(ExecutionOptions.USE_BATCH_STATE_BACKEND, sortInputs));
configuration.getOptional(PipelineOptions.NAME).ifPresent(jobName -> this.configuration.set(PipelineOptions.NAME, jobName));
configuration.getOptional(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH).ifPresent(flag -> this.configuration.set(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH, flag));
configuration.getOptional(PipelineOptions.JARS).ifPresent(jars -> this.configuration.set(PipelineOptions.JARS, jars));
configuration.getOptional(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_ENABLED).ifPresent(flag -> this.configuration.set(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_ENABLED, flag));
configBufferTimeout(configuration);
config.configure(configuration, classLoader);
checkpointCfg.configure(configuration);
// here we should make sure the configured checkpoint storage will take effect
// this needs to happen after checkpointCfg#configure(...) to override the effect of
// checkpointCfg#setCheckpointStorage(checkpointDirectory)
configureCheckpointStorage(configuration, checkpointCfg);
}
| 3.26 |
flink_StreamExecutionEnvironment_getRestartStrategy_rdh
|
/**
* Returns the specified restart strategy configuration.
*
* @return The restart strategy configuration to be used
*/
@PublicEvolving
public RestartStrategyConfiguration getRestartStrategy() {
return config.getRestartStrategy();
}
/**
* Sets the number of times that failed tasks are re-executed. A value of zero effectively
* disables fault tolerance. A value of {@code -1} indicates that the system default value (as
* defined in the configuration) should be used.
*
* @param numberOfExecutionRetries
* The number of times the system will try to re-execute failed
* tasks.
* @deprecated This method will be replaced by {@link #setRestartStrategy}. The {@link RestartStrategies#fixedDelayRestart(int, Time)}
| 3.26 |
flink_StreamExecutionEnvironment_fromCollection_rdh
|
/**
* Creates a data stream from the given iterator.
*
* <p>Because the iterator will remain unmodified until the actual execution happens, the type
* of data returned by the iterator must be given explicitly in the form of the type
* information. This method is useful for cases where the type is generic. In that case, the
* type class (as given in {@link #fromCollection(java.util.Iterator, Class)}) does not supply
* all type information.
*
* <p>Note that this operation will result in a non-parallel data stream source, i.e., a data
* stream source with parallelism one.
*
* @param data
* The iterator of elements to create the data stream from
* @param typeInfo
* The TypeInformation for the produced data stream
* @param <OUT>
* The type of the returned data stream
* @return The data stream representing the elements in the iterator
*/
public <OUT> DataStreamSource<OUT> fromCollection(Iterator<OUT> data, TypeInformation<OUT> typeInfo) {
Preconditions.checkNotNull(data,
"The iterator must not be null");
SourceFunction<OUT> function = new FromIteratorFunction<>(data);
return addSource(function, "Collection Source", typeInfo, Boundedness.BOUNDED);
}
| 3.26 |
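A sketch of the iterator-based source above using Flink's serializable NumberSequenceIterator, since the iterator is shipped with the job; the bounds are arbitrary.

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.NumberSequenceIterator;

public class FromIteratorSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Explicit type info is required because the iterator itself carries no type information.
        env.fromCollection(new NumberSequenceIterator(1L, 10L), Types.LONG).print();
        env.execute("iterator sketch");
    }
}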
flink_PushCalcPastChangelogNormalizeRule_extractUsedInputFields_rdh
|
/**
* Extracts input fields which are used in the Calc node and the ChangelogNormalize node.
*/
private int[] extractUsedInputFields(StreamPhysicalCalc calc, Set<Integer> primaryKeyIndices) {
RexProgram program = calc.getProgram();
List<RexNode> projectsAndCondition = program.getProjectList().stream().map(program::expandLocalRef).collect(Collectors.toList());
if (program.getCondition() != null) {
projectsAndCondition.add(program.expandLocalRef(program.getCondition()));
}
Set<Integer> projectedFields = Arrays.stream(extractRefInputFields(projectsAndCondition)).boxed().collect(Collectors.toSet());
// we can't project primary keys
projectedFields.addAll(primaryKeyIndices);
return projectedFields.stream().sorted().mapToInt(Integer::intValue).toArray();
}
| 3.26 |
flink_PushCalcPastChangelogNormalizeRule_adjustInputRef_rdh
|
/**
* Adjusts the {@code expr} field indices according to the field index {@code mapping}.
*/
private RexNode adjustInputRef(RexNode expr, Map<Integer, Integer> mapping) {
return expr.accept(new RexShuttle() {
@Override
public RexNode visitInputRef(RexInputRef inputRef) {
Integer newIndex = mapping.get(inputRef.getIndex());
return new RexInputRef(newIndex, inputRef.getType());
}
});
}
| 3.26 |
flink_PushCalcPastChangelogNormalizeRule_partitionPrimaryKeyPredicates_rdh
|
/**
* Separates the given {@param predicates} into filters which affect only the primary key and
* anything else.
*/
private void partitionPrimaryKeyPredicates(List<RexNode> predicates, Set<Integer> primaryKeyIndices, List<RexNode> primaryKeyPredicates, List<RexNode> remainingPredicates) {
for (RexNode predicate : predicates) {
int[] inputRefs = extractRefInputFields(Collections.singletonList(predicate));
if (Arrays.stream(inputRefs).allMatch(primaryKeyIndices::contains)) {
primaryKeyPredicates.add(predicate);
} else {
remainingPredicates.add(predicate);
}
}
}
| 3.26 |
flink_PushCalcPastChangelogNormalizeRule_pushCalcThroughChangelogNormalize_rdh
|
/**
* Pushes {@code primaryKeyPredicates} and the used-fields projection into the {@link StreamPhysicalChangelogNormalize}.
*/
private StreamPhysicalChangelogNormalize pushCalcThroughChangelogNormalize(RelOptRuleCall call, List<RexNode> primaryKeyPredicates, int[] usedInputFields) {
final StreamPhysicalChangelogNormalize changelogNormalize = call.rel(1);
final StreamPhysicalExchange exchange = call.rel(2);
final Set<Integer> primaryKeyIndices = IntStream.of(changelogNormalize.uniqueKeys()).boxed().collect(Collectors.toSet());
if (primaryKeyPredicates.isEmpty() && (usedInputFields.length == changelogNormalize.getRowType().getFieldCount())) {
// There are no filters and no project which can be pushed, so just return the existing
// node.
return changelogNormalize;
}
final StreamPhysicalCalc pushedCalc = projectUsedFieldsWithConditions(call.builder(), exchange.getInput(), primaryKeyPredicates, usedInputFields);
// build input field reference from old field index to new field index
final Map<Integer, Integer> inputRefMapping = buildFieldsMapping(usedInputFields);
final List<Integer> newPrimaryKeyIndices = primaryKeyIndices.stream().map(inputRefMapping::get).collect(Collectors.toList());
final FlinkRelDistribution newDistribution = FlinkRelDistribution.hash(newPrimaryKeyIndices, true);
final RelTraitSet newTraitSet = exchange.getTraitSet().replace(newDistribution);
final StreamPhysicalExchange newExchange = exchange.copy(newTraitSet, pushedCalc,
newDistribution);
return ((StreamPhysicalChangelogNormalize) (changelogNormalize.copy(changelogNormalize.getTraitSet(), newExchange, newPrimaryKeyIndices.stream().mapToInt(Integer::intValue).toArray())));
}
| 3.26 |
flink_PushCalcPastChangelogNormalizeRule_buildFieldsMapping_rdh
|
/**
* Build field reference mapping from old field index to new field index after projection.
*/
private Map<Integer, Integer> buildFieldsMapping(int[] projectedInputRefs) {
final Map<Integer, Integer> fieldsOldToNewIndexMapping = new HashMap<>();
for (int i = 0; i < projectedInputRefs.length; i++) {
fieldsOldToNewIndexMapping.put(projectedInputRefs[i], i);
}
return fieldsOldToNewIndexMapping;
}
| 3.26 |
flink_PushCalcPastChangelogNormalizeRule_transformWithRemainingPredicates_rdh
|
/**
* Transforms the {@link RelOptRuleCall} to use {@code changelogNormalize} as the new input to
* a {@link StreamPhysicalCalc} which uses {@code predicates} for the condition.
*/
private void transformWithRemainingPredicates(RelOptRuleCall call, StreamPhysicalChangelogNormalize changelogNormalize, List<RexNode> predicates, int[] usedInputFields) {
final StreamPhysicalCalc calc = call.rel(0);
final RelBuilder relBuilder = call.builder();
final RexProgramBuilder programBuilder = new RexProgramBuilder(changelogNormalize.getRowType(), relBuilder.getRexBuilder());
final Map<Integer, Integer> inputRefMapping = buildFieldsMapping(usedInputFields);
// add projects
for (Pair<RexLocalRef, String> ref : calc.getProgram().getNamedProjects()) {
RexNode shiftedProject = adjustInputRef(calc.getProgram().expandLocalRef(ref.left), inputRefMapping);
programBuilder.addProject(shiftedProject, ref.right);
}
// add conditions
final List<RexNode> shiftedPredicates = predicates.stream().map(p -> adjustInputRef(p, inputRefMapping)).collect(Collectors.toList());
final RexNode v36 = relBuilder.and(shiftedPredicates);
if (!v36.isAlwaysTrue()) {
programBuilder.addCondition(v36);
}
final RexProgram newProgram = programBuilder.getProgram();
if (newProgram.isTrivial()) {
call.transformTo(changelogNormalize);
} else {
final StreamPhysicalCalc newProjectedCalc = new StreamPhysicalCalc(changelogNormalize.getCluster(), changelogNormalize.getTraitSet(), changelogNormalize, newProgram, newProgram.getOutputRowType());
call.transformTo(newProjectedCalc);
}
}
| 3.26 |
flink_PushCalcPastChangelogNormalizeRule_projectUsedFieldsWithConditions_rdh
|
/**
* Builds a new {@link StreamPhysicalCalc} on the input node with the given {@code conditions}
* and a projection of the used fields.
*/
private StreamPhysicalCalc projectUsedFieldsWithConditions(RelBuilder relBuilder, RelNode input, List<RexNode> conditions, int[] usedFields) {
final RelDataType inputRowType = input.getRowType();
final List<String> inputFieldNames = inputRowType.getFieldNames();
final RexProgramBuilder programBuilder = new RexProgramBuilder(inputRowType, relBuilder.getRexBuilder());
// add project
for (int v26 : usedFields) {
programBuilder.addProject(programBuilder.makeInputRef(v26), inputFieldNames.get(v26));
}
// add conditions
final RexNode condition = relBuilder.and(conditions);
if (!condition.isAlwaysTrue()) {
programBuilder.addCondition(condition);
}
final RexProgram newProgram = programBuilder.getProgram();
return new StreamPhysicalCalc(input.getCluster(), input.getTraitSet(), input, newProgram, newProgram.getOutputRowType());
}
| 3.26 |
flink_FlinkConfMountDecorator_getClusterSidePropertiesMap_rdh
|
/**
* Get properties map for the cluster-side after removal of some keys.
*/
private Map<String, String> getClusterSidePropertiesMap(Configuration flinkConfig) {
final Configuration clusterSideConfig = flinkConfig.clone();
// Remove some configuration options that should not be taken to cluster side.
clusterSideConfig.removeConfig(KubernetesConfigOptions.KUBE_CONFIG_FILE);
clusterSideConfig.removeConfig(DeploymentOptionsInternal.CONF_DIR);
clusterSideConfig.removeConfig(RestOptions.BIND_ADDRESS);
clusterSideConfig.removeConfig(JobManagerOptions.BIND_HOST);
clusterSideConfig.removeConfig(TaskManagerOptions.BIND_HOST);
clusterSideConfig.removeConfig(TaskManagerOptions.HOST);
return clusterSideConfig.toMap();
}
| 3.26 |
flink_TypeInformation_m0_rdh
|
/**
* Checks whether this type can be used as a key for sorting. The order produced by sorting this
* type must be meaningful.
*/
@PublicEvolving
public boolean m0() {
return isKeyType();
}
| 3.26 |
flink_TypeInformation_of_rdh
|
/**
* Creates a TypeInformation for a generic type via a utility "type hint". This method can be
* used as follows:
*
* <pre>{@code TypeInformation<Tuple2<String, Long>> info = TypeInformation.of(new TypeHint<Tuple2<String, Long>>(){});}</pre>
*
* @param typeHint
* The hint for the generic type.
* @param <T>
* The generic type.
* @return The TypeInformation object for the type described by the hint.
*/
public static <T> TypeInformation<T> of(TypeHint<T> typeHint) {
return typeHint.getTypeInfo();
}
| 3.26 |
flink_SinkJoinerPlanNode_setCosts_rdh
|
// --------------------------------------------------------------------------------------------
public void setCosts(Costs nodeCosts) {
// the plan enumeration logic works as for regular two-input-operators, which is important
// because of the branch handling logic. it does pick redistributing network channels
// between the sink and the sink joiner, because the sink joiner has a different parallelism
// than the sink.
// we discard any cost and simply use the sum of the costs from the two children.
Costs totalCosts = getInput1().getSource().getCumulativeCosts().clone();
totalCosts.addCosts(getInput2().getSource().getCumulativeCosts());
super.setCosts(totalCosts);
}
| 3.26 |
flink_SinkJoinerPlanNode_getDataSinks_rdh
|
// --------------------------------------------------------------------------------------------
public void getDataSinks(List<SinkPlanNode> sinks) {
final PlanNode in1 = this.input1.getSource();
final PlanNode in2 = this.input2.getSource();
if (in1 instanceof SinkPlanNode) {
sinks.add(((SinkPlanNode) (in1)));
} else if (in1 instanceof SinkJoinerPlanNode) {
((SinkJoinerPlanNode) (in1)).getDataSinks(sinks);
} else {
throw new CompilerException("Illegal child node for a sink joiner utility node: Neither Sink nor Sink Joiner");
}
if (in2 instanceof SinkPlanNode) {
sinks.add(((SinkPlanNode) (in2)));
} else if (in2 instanceof SinkJoinerPlanNode) {
((SinkJoinerPlanNode) (in2)).getDataSinks(sinks);
} else {
throw new CompilerException("Illegal child node for a sink joiner utility node: Neither Sink nor Sink Joiner");
}
}
| 3.26 |
flink_EnvironmentInformation_getJvmStartupOptionsArray_rdh
|
/**
* Gets the system parameters and environment parameters that were passed to the JVM on startup.
*
* @return The options passed to the JVM on startup.
*/
public static String[] getJvmStartupOptionsArray() {
try {
RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
List<String> options = bean.getInputArguments();
return options.toArray(new String[options.size()]);
} catch (Throwable t) {
return new String[0];
}
}
| 3.26 |
flink_EnvironmentInformation_getOpenFileHandlesLimit_rdh
|
/**
* Tries to retrieve the maximum number of open file handles. This method will only work on
* UNIX-based operating systems with Sun/Oracle Java versions.
*
* <p>If the number of max open file handles cannot be determined, this method returns {@code -1}.
*
* @return The limit of open file handles, or {@code -1}, if the limit could not be determined.
*/
public static long getOpenFileHandlesLimit() {
if (OperatingSystem.isWindows()) {
// getMaxFileDescriptorCount method is not available on Windows
return -1L;
}
Class<?> sunBeanClass;
try {
sunBeanClass = Class.forName("com.sun.management.UnixOperatingSystemMXBean");
} catch (ClassNotFoundException e) {
return -1L;
}
try {
Method fhLimitMethod = sunBeanClass.getMethod("getMaxFileDescriptorCount");
Object result = fhLimitMethod.invoke(ManagementFactory.getOperatingSystemMXBean());
return ((Long) (result));
} catch (Throwable t) {
LOG.warn("Unexpected error when accessing file handle limit", t);
return -1L;
}
}
| 3.26 |
flink_EnvironmentInformation_logEnvironmentInfo_rdh
|
/**
* Logs information about the environment, like code revision, current user, Java version, and
* JVM parameters.
*
* @param log
* The logger to log the information to.
* @param componentName
* The component name to mention in the log.
* @param commandLineArgs
* The arguments accompanying the starting the component.
*/
public static void logEnvironmentInfo(Logger log, String componentName, String[] commandLineArgs) {
if (log.isInfoEnabled()) {
RevisionInformation rev = getRevisionInformation();
String version = getVersion();
String scalaVersion = getScalaVersion();
String jvmVersion = getJvmVersion();
String[] options = getJvmStartupOptionsArray();
String javaHome = System.getenv("JAVA_HOME");
String inheritedLogs = System.getenv("FLINK_INHERITED_LOGS");
String arch = System.getProperty("os.arch");
long maxHeapMegabytes = getMaxJvmHeapMemory() >>> 20;
if (inheritedLogs != null) {
log.info("--------------------------------------------------------------------------------");
log.info(" Preconfiguration: ");
log.info(inheritedLogs);
}
log.info("--------------------------------------------------------------------------------");
log.info((((((((((((" Starting " + componentName) + " (Version: ") + version) + ", Scala: ") + scalaVersion) + ", ") + "Rev:") + rev.commitId) + ", ") + "Date:") + rev.commitDate) + ")");
log.info(" OS current user: " + System.getProperty("user.name"));
log.info(" Current Hadoop/Kerberos user: " + getHadoopUser());
log.info(" JVM: " +
jvmVersion);
log.info(" Arch: " + arch);
log.info((" Maximum heap size: " + maxHeapMegabytes) + " MiBytes");
log.info(" JAVA_HOME: " + (javaHome == null ? "(not set)" : javaHome));
String hadoopVersionString = getHadoopVersionString();
if (hadoopVersionString != null) {
log.info(" Hadoop version: " + hadoopVersionString);
} else {
log.info(" No Hadoop Dependency available");
}
if (options.length == 0) {
log.info(" JVM Options: (none)");
} else {
log.info(" JVM Options:");
for (String s : options) {
log.info(" " + s);
}
}
if ((commandLineArgs == null) || (commandLineArgs.length == 0)) {
log.info(" Program Arguments: (none)");
} else {
log.info(" Program Arguments:");
for (String s : commandLineArgs) {
if (GlobalConfiguration.isSensitive(s)) {
log.info((" " + GlobalConfiguration.HIDDEN_CONTENT) + " (sensitive information)");
} else {
log.info(" " + s);
}
}
}
log.info(" Classpath: " + System.getProperty("java.class.path"));
log.info("--------------------------------------------------------------------------------");
}
}
| 3.26 |
flink_EnvironmentInformation_getGitCommitId_rdh
|
/**
*
* @return The last known commit id of this version of the software.
*/
public static String getGitCommitId() {
return getVersionsInstance().gitCommitId;
}
| 3.26 |
flink_EnvironmentInformation_getMaxJvmHeapMemory_rdh
|
/**
* The maximum JVM heap size, in bytes.
*
* <p>This method uses the <i>-Xmx</i> value of the JVM, if set. If not set, it returns (as a
* heuristic) 1/4th of the physical memory size.
*
* @return The maximum JVM heap size, in bytes.
*/
public static long getMaxJvmHeapMemory() {
final long maxMemory = Runtime.getRuntime().maxMemory();
if (maxMemory != Long.MAX_VALUE) {
// we have the proper max memory
return maxMemory;
} else {
// max JVM heap size is not set - use the heuristic to use 1/4th of the physical memory
final long physicalMemory = Hardware.getSizeOfPhysicalMemory();
if (physicalMemory != (-1)) {
// got proper value for physical memory
return physicalMemory / 4;
} else {
throw new RuntimeException("Could not determine the amount of free memory.\n" + "Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes.");
}
}
}
| 3.26 |
flink_EnvironmentInformation_m2_rdh
|
/**
* Gets an estimate of the size of the free heap memory. The estimate may vary, depending on the
* current level of memory fragmentation and the number of dead objects. For a better (but more
* heavy-weight) estimate, use {@link #getSizeOfFreeHeapMemoryWithDefrag()}.
*
* @return An estimate of the size of the free heap memory, in bytes.
*/
public static long m2() {
Runtime r = Runtime.getRuntime();
return (getMaxJvmHeapMemory() - r.totalMemory()) + r.freeMemory();
}
| 3.26 |
flink_EnvironmentInformation_getJvmVersion_rdh
|
/**
* Gets the version of the JVM in the form "VM_Name - Vendor - Spec/Version".
*
* @return The JVM version.
*/
public static String getJvmVersion() {
try {
final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
return (((((bean.getVmName() + " - ") + bean.getVmVendor()) + " - ") + bean.getSpecVersion()) + '/') + bean.getVmVersion();
} catch (Throwable t) {
return UNKNOWN;
}
}
| 3.26 |
flink_EnvironmentInformation_getBuildTime_rdh
|
/**
*
* @return The Instant this version of the software was built.
*/
public static Instant getBuildTime() {
return getVersionsInstance().gitBuildTime;
}
| 3.26 |
flink_EnvironmentInformation_getGitCommitTime_rdh
|
/**
*
* @return The Instant of the last commit of this code.
*/
public static Instant getGitCommitTime() {
return getVersionsInstance().gitCommitTime;
}
| 3.26 |
flink_EnvironmentInformation_m1_rdh
|
/**
* Gets an estimate of the size of the free heap memory.
*
* <p>NOTE: This method is heavy-weight. It triggers a garbage collection to reduce
* fragmentation and get a better estimate at the size of free memory. It is typically more
* accurate than the plain version {@link #getSizeOfFreeHeapMemory()}.
*
* @return An estimate of the size of the free heap memory, in bytes.
*/
public static long m1() {
// trigger a garbage collection, to reduce fragmentation
System.gc();
return m2();
}
| 3.26 |
flink_EnvironmentInformation_getHadoopUser_rdh
|
/**
* Gets the name of the user that is running the JVM.
*
* @return The name of the user that is running the JVM.
*/
public static String getHadoopUser() {
try {
Class<?> ugiClass = Class.forName("org.apache.hadoop.security.UserGroupInformation", false, EnvironmentInformation.class.getClassLoader());
Method currentUserMethod = ugiClass.getMethod("getCurrentUser");
Method shortUserNameMethod = ugiClass.getMethod("getShortUserName");
Object ugi = currentUserMethod.invoke(null);
return ((String) (shortUserNameMethod.invoke(ugi)));
} catch (ClassNotFoundException e) {
return "<no hadoop dependency found>";
} catch (LinkageError e) {
// hadoop classes are not in the classpath
LOG.debug("Cannot determine user/group information using Hadoop utils. " + "Hadoop classes not loaded or compatible", e);
}
catch (Throwable t) {
// some other error occurred that we should log and make known
LOG.warn("Error while accessing user/group information via Hadoop utils.", t);
}
return UNKNOWN;
}
| 3.26 |
flink_EnvironmentInformation_getVersion_rdh
|
/**
* Returns the version of the code as String.
*
* @return The project version string.
*/
public static String getVersion() {
return getVersionsInstance().projectVersion;
}
| 3.26 |
flink_EnvironmentInformation_getGitCommitIdAbbrev_rdh
|
/**
*
* @return The last known abbreviated commit id of this version of the software.
*/
public static String getGitCommitIdAbbrev() {
return getVersionsInstance().gitCommitIdAbbrev;
}
| 3.26 |
flink_CombinedWatermarkStatus_setWatermark_rdh
|
/**
* Returns true if the watermark was advanced, that is if the new watermark is larger than
* the previous one.
*
* <p>Setting a watermark will clear the idleness flag.
*/
public boolean setWatermark(long watermark) {
this.idle = false;
final boolean v3 = watermark > this.watermark;
if (v3) {
this.onWatermarkUpdate.onWatermarkUpdate(watermark);
this.watermark = Math.max(watermark, this.watermark);
}
return v3;
}
| 3.26 |
flink_CombinedWatermarkStatus_m0_rdh
|
/**
* Checks whether we need to update the combined watermark.
*
* <p><b>NOTE:</b>It can update {@link #isIdle()} status.
*
* @return true, if the combined watermark changed
*/
public boolean m0() {
long minimumOverAllOutputs = Long.MAX_VALUE;
// if we don't have any outputs minimumOverAllOutputs is not valid, it's still
// at its initial Long.MAX_VALUE state and we must not emit that
if (partialWatermarks.isEmpty()) {
return false;
}
boolean allIdle = true;
for (PartialWatermark partialWatermark : partialWatermarks) {
if (!partialWatermark.isIdle()) {
minimumOverAllOutputs = Math.min(minimumOverAllOutputs, partialWatermark.getWatermark());
allIdle = false;
}
}
this.idle = allIdle;
if ((!allIdle) && (minimumOverAllOutputs > combinedWatermark)) {
combinedWatermark = minimumOverAllOutputs;
return true;
}
return false;
}
| 3.26 |
flink_CombinedWatermarkStatus_getWatermark_rdh
|
/**
* Returns the current watermark timestamp. This will throw {@link IllegalStateException} if
* the output is currently idle.
*/
private long getWatermark() {
checkState(!idle, "Output is idle.");
return watermark;
}
| 3.26 |
flink_AvroDeserializationSchema_forGeneric_rdh
|
/**
* Creates {@link AvroDeserializationSchema} that produces {@link GenericRecord} using provided
* schema.
*
* @param schema
* schema of produced records
* @param encoding
* Avro serialization approach to use for decoding
* @return a deserialization schema that produces records in the form of {@link GenericRecord}
*/
public static AvroDeserializationSchema<GenericRecord> forGeneric(Schema schema, AvroEncoding encoding) {
return new AvroDeserializationSchema<>(GenericRecord.class, schema, encoding);
}
| 3.26 |
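A sketch of building a GenericRecord deserializer with the factory above; the inline Avro schema is a made-up two-field record, and both the AvroEncoding.BINARY constant and its import path are assumptions based on the signature shown.

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.AvroDeserializationSchema;
import org.apache.flink.formats.avro.AvroEncoding;

public class AvroGenericSketch {
    public static void main(String[] args) {
        // Hypothetical record schema with two fields.
        Schema schema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                        + "{\"name\":\"name\",\"type\":\"string\"},"
                        + "{\"name\":\"age\",\"type\":\"int\"}]}");
        AvroDeserializationSchema<GenericRecord> deserializer =
                AvroDeserializationSchema.forGeneric(schema, AvroEncoding.BINARY);
    }
}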
flink_AvroDeserializationSchema_m0_rdh
|
/**
* Creates {@link AvroDeserializationSchema} that produces classes that were generated from avro
* schema.
*
* @param tClass
* class of record to be produced
* @return a deserialization schema for the given record class
*/
public static <T extends SpecificRecord> AvroDeserializationSchema<T> m0(Class<T> tClass) {
return forSpecific(tClass, AvroEncoding.BINARY);
}
| 3.26 |
flink_AvroDeserializationSchema_forSpecific_rdh
|
/**
* Creates {@link AvroDeserializationSchema} that produces classes that were generated from avro
* schema.
*
* @param tClass
* class of record to be produced
* @param encoding
* Avro serialization approach to use for decoding
* @return a deserialization schema for the given record class
*/
public static <T extends SpecificRecord> AvroDeserializationSchema<T> forSpecific(Class<T> tClass, AvroEncoding encoding) {
return new AvroDeserializationSchema<>(tClass, null, encoding);
}
| 3.26 |
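A brief usage sketch of the factory methods above, assuming the flink-avro format module is on the classpath; the import paths, the inline schema string, and the commented-out UserRecord class are illustrative assumptions, not taken from the source:

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.AvroDeserializationSchema;
import org.apache.flink.formats.avro.AvroEncoding;

public class AvroSchemaExample {
    public static void main(String[] args) {
        // Hypothetical record schema, used only for illustration.
        Schema schema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":"
                        + "[{\"name\":\"name\",\"type\":\"string\"}]}");

        // Produces GenericRecord instances, decoded with Avro's binary encoding.
        AvroDeserializationSchema<GenericRecord> genericSchema =
                AvroDeserializationSchema.forGeneric(schema, AvroEncoding.BINARY);

        // For classes generated from an Avro schema (UserRecord is a placeholder):
        // AvroDeserializationSchema<UserRecord> specificSchema =
        //         AvroDeserializationSchema.forSpecific(UserRecord.class, AvroEncoding.BINARY);
    }
}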
flink_CharValueComparator_supportsSerializationWithKeyNormalization_rdh
|
// --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
}
| 3.26 |
flink_WorksetNode_setCandidateProperties_rdh
|
// --------------------------------------------------------------------------------------------
public void setCandidateProperties(GlobalProperties gProps, LocalProperties lProps, Channel initialInput) {
if (this.cachedPlans != null) {
throw new IllegalStateException();
} else {
WorksetPlanNode wspn = new WorksetPlanNode(this, ("Workset (" + this.getOperator().getName()) + ")", gProps, lProps, initialInput);
this.cachedPlans = Collections.<PlanNode>singletonList(wspn);
}
}
| 3.26 |
flink_WorksetNode_getOperator_rdh
|
// --------------------------------------------------------------------------------------------
/**
     * Gets the contract object (operator) for this workset node.
*
* @return The contract.
*/
@Override
public WorksetPlaceHolder<?> getOperator() {
return ((WorksetPlaceHolder<?>) (super.getOperator()));
}
| 3.26 |
flink_DefaultDelegationTokenManager_obtainDelegationTokens_rdh
|
/**
* Obtains new tokens in a one-time fashion and leaves it up to the caller to distribute them.
*/
@Override
public void obtainDelegationTokens(DelegationTokenContainer container) throws Exception {
LOG.info("Obtaining delegation tokens");
        obtainDelegationTokensAndGetNextRenewal(container);
        LOG.info("Delegation tokens obtained successfully");
}
| 3.26 |
flink_DefaultDelegationTokenManager_start_rdh
|
/**
     * Creates a recurring task which obtains new tokens and automatically distributes them to
* task managers.
*/
@Override
    public void start(Listener listener) throws Exception {
checkNotNull(scheduledExecutor, "Scheduled executor must not be null");
checkNotNull(ioExecutor, "IO executor must not be null");
this.listener = checkNotNull(listener, "Listener must not be null");
synchronized(tokensUpdateFutureLock) {
checkState(tokensUpdateFuture == null, "Manager is already started");
}
startTokensUpdate();
}
| 3.26 |
flink_DefaultDelegationTokenManager_stop_rdh
|
/**
     * Stops the recurring token obtaining task.
*/
@Override
public void stop() {
LOG.info("Stopping credential renewal");
stopTokensUpdate();
LOG.info("Stopped credential renewal");
}
| 3.26 |
flink_GroupReduceOperatorBase_setCombinable_rdh
|
/**
* Marks the group reduce operation as combinable. Combinable operations may pre-reduce the data
* before the actual group reduce operations. Combinable user-defined functions must implement
* the interface {@link GroupCombineFunction}.
*
* @param combinable
* Flag to mark the group reduce operation as combinable.
*/
public void setCombinable(boolean combinable) {
// sanity check
        if (combinable && (!GroupCombineFunction.class.isAssignableFrom(this.userFunction.getUserCodeClass()))) {
            throw new IllegalArgumentException(
                    "Cannot set a UDF as combinable if it does not implement the interface "
                            + GroupCombineFunction.class.getName());
        } else {
            this.combinable = combinable;
        }
}
| 3.26 |
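A minimal sketch of a user-defined function that passes the check above: it implements both GroupReduceFunction and GroupCombineFunction, so the enclosing operation can be marked combinable. The class name and the summing logic are illustrative, not taken from the source:

import org.apache.flink.api.common.functions.GroupCombineFunction;
import org.apache.flink.api.common.functions.GroupReduceFunction;
import org.apache.flink.util.Collector;

// Sums integers per group; because addition is associative, emitting partial sums in
// combine() is safe, which is what makes the operation combinable.
public class SummingGroupReduce
        implements GroupReduceFunction<Integer, Integer>, GroupCombineFunction<Integer, Integer> {

    @Override
    public void reduce(Iterable<Integer> values, Collector<Integer> out) {
        int sum = 0;
        for (Integer value : values) {
            sum += value;
        }
        out.collect(sum);
    }

    @Override
    public void combine(Iterable<Integer> values, Collector<Integer> out) {
        // Pre-reduction on partial groups; the final reduce() then sees the partial sums.
        reduce(values, out);
    }
}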
flink_GroupReduceOperatorBase_setGroupOrder_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Sets the order of the elements within a reduce group.
*
* @param order
* The order for the elements in a reduce group.
*/
public void setGroupOrder(Ordering order) {
this.groupOrder = order;
}
| 3.26 |
flink_GroupReduceOperatorBase_executeOnCollections_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected List<OUT> executeOnCollections(List<IN> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
GroupReduceFunction<IN, OUT> function = this.userFunction.getUserCodeObject();
UnaryOperatorInformation<IN, OUT> operatorInfo = getOperatorInfo();
TypeInformation<IN> inputType = operatorInfo.getInputType();
int[] keyColumns = getKeyColumns(0);
int[] sortColumns = keyColumns;
boolean[] sortOrderings = new boolean[sortColumns.length];
if (groupOrder != null) {
sortColumns = ArrayUtils.addAll(sortColumns, groupOrder.getFieldPositions());
sortOrderings = ArrayUtils.addAll(sortOrderings, groupOrder.getFieldSortDirections());
}
if (sortColumns.length == 0) {
// => all reduce. No comparator
checkArgument(sortOrderings.length == 0);
} else {
final TypeComparator<IN> sortComparator = getTypeComparator(inputType, sortColumns, sortOrderings, executionConfig);
Collections.sort(inputData, new Comparator<IN>() {
@Override
public int compare(IN o1, IN o2) {
return sortComparator.compare(o1, o2);
}
});
}
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
        ArrayList<OUT> result = new ArrayList<OUT>();
if (inputData.size() > 0) {
final TypeSerializer<IN> inputSerializer = inputType.createSerializer(executionConfig);
if (keyColumns.length == 0) {
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
List<IN> inputDataCopy = new ArrayList<IN>(inputData.size());
for (IN in : inputData) {
inputDataCopy.add(inputSerializer.copy(in));
}
CopyingListCollector<OUT> collector = new CopyingListCollector<OUT>(result, outSerializer);
function.reduce(inputDataCopy, collector);
            } else {
                boolean[] keyOrderings = new boolean[keyColumns.length];
                final TypeComparator<IN> comparator = getTypeComparator(inputType, keyColumns, keyOrderings, executionConfig);
ListKeyGroupedIterator<IN> keyedIterator = new ListKeyGroupedIterator<IN>(inputData, inputSerializer, comparator);
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
CopyingListCollector<OUT> collector = new CopyingListCollector<OUT>(result, outSerializer);
while (keyedIterator.nextKey()) {
function.reduce(keyedIterator.getValues(), collector);
}
}
}
FunctionUtils.closeFunction(function);
return result;
}
| 3.26 |
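The method above follows a sort-then-group pattern: records are sorted by the key columns so that equal keys become contiguous, and the reduce function is then invoked once per key group. The self-contained sketch below illustrates that pattern with plain Java collections; it is not Flink's ListKeyGroupedIterator, just an illustration of the idea:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.function.Function;

public class SortThenGroupSketch {

    /** Sorts the input by key, then applies the reducer once per contiguous key group. */
    static <T, K extends Comparable<K>, R> List<R> sortAndReduceByKey(
            List<T> input, Function<T, K> keyExtractor, Function<List<T>, R> reducer) {
        List<T> sorted = new ArrayList<>(input);
        sorted.sort(Comparator.comparing(keyExtractor));

        List<R> results = new ArrayList<>();
        int start = 0;
        while (start < sorted.size()) {
            K key = keyExtractor.apply(sorted.get(start));
            int end = start;
            while (end < sorted.size() && keyExtractor.apply(sorted.get(end)).equals(key)) {
                end++;
            }
            // One reduce call per key group, mirroring function.reduce(keyedIterator.getValues(), collector).
            results.add(reducer.apply(sorted.subList(start, end)));
            start = end;
        }
        return results;
    }
}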
flink_StreamElementQueueEntry_completeExceptionally_rdh
|
/**
* Not supported. Exceptions must be handled in the AsyncWaitOperator.
*/
@Override
default void completeExceptionally(Throwable error) {
throw new UnsupportedOperationException("This result future should only be used to set completed results.");
}
| 3.26 |
flink_WorksetIterationPlanNode_setCosts_rdh
|
// --------------------------------------------------------------------------------------------
public void setCosts(Costs nodeCosts) {
// add the costs from the step function
nodeCosts.addCosts(this.solutionSetDeltaPlanNode.getCumulativeCostsShare());
nodeCosts.addCosts(this.nextWorkSetPlanNode.getCumulativeCostsShare());
super.setCosts(nodeCosts);
}
| 3.26 |
flink_WorksetIterationPlanNode_getSerializerForIterationChannel_rdh
|
// --------------------------------------------------------------------------------------------
public TypeSerializerFactory<?> getSerializerForIterationChannel() {
return serializerForIterationChannel;
}
| 3.26 |
flink_WorksetIterationPlanNode_getIterationNode_rdh
|
// --------------------------------------------------------------------------------------------
public WorksetIterationNode getIterationNode() {
if (this.template instanceof WorksetIterationNode) {
return ((WorksetIterationNode) (this.template));
} else {
throw new RuntimeException();
}
}
| 3.26 |
flink_WorksetIterationPlanNode_getWorksetSerializer_rdh
|
// --------------------------------------------------------------------------------------------
public TypeSerializerFactory<?> getWorksetSerializer() {
return worksetSerializer;
}
| 3.26 |
flink_WorksetIterationPlanNode_mergeBranchPlanMaps_rdh
|
/**
     * Merging can only take place after the solutionSetDelta and nextWorkset PlanNodes have been
     * set, because they may also contain some of the branching nodes.
*/
@Override
protected void mergeBranchPlanMaps(Map<OptimizerNode, PlanNode> branchPlan1, Map<OptimizerNode, PlanNode> branchPlan2) {
}
| 3.26 |
flink_PropertiesUtil_getLong_rdh
|
/**
     * Get a long from properties. This method only logs a warning if the value is not a valid long.
*
* @param config
* Properties
* @param key
* key in Properties
* @param defaultValue
* default value if value is not set
* @return default or value of key
*/
public static long getLong(Properties config, String key, long defaultValue, Logger logger) {
try {
return getLong(config, key, defaultValue);
        } catch (IllegalArgumentException iae) {
logger.warn(iae.getMessage());
return defaultValue;
}
}
/**
* Get boolean from properties. This method returns {@code true}
| 3.26 |
flink_PropertiesUtil_getInt_rdh
|
/**
     * Get an integer from properties. This method throws an exception if the value is not a valid integer.
*
* @param config
* Properties
* @param key
* key in Properties
* @param defaultValue
* default value if value is not set
* @return default or value of key
*/
public static int getInt(Properties config, String key, int defaultValue) {
String val = config.getProperty(key);
        if (val == null) {
            return defaultValue;
} else {
try {
return Integer.parseInt(val);
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException((((((("Value for configuration key='" + key) + "' is not set correctly. ") + "Entered value='") + val) + "'. Default value='") + defaultValue) + "'");
}
}
}
| 3.26 |
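A short usage sketch of the two helpers above; the property keys, values, and the org.apache.flink.util.PropertiesUtil import path are assumptions for illustration:

import java.util.Properties;

import org.apache.flink.util.PropertiesUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PropertiesUtilExample {
    private static final Logger LOG = LoggerFactory.getLogger(PropertiesUtilExample.class);

    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty("fetch.size", "1024");
        config.setProperty("poll.timeout", "not-a-number");

        int fetchSize = PropertiesUtil.getInt(config, "fetch.size", 512); // 1024
        int missing = PropertiesUtil.getInt(config, "unset.key", 512);    // falls back to 512

        // The Logger variant only logs a warning and returns the default on a malformed value.
        long timeout = PropertiesUtil.getLong(config, "poll.timeout", 5000L, LOG); // 5000
    }
}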
flink_GSChecksumWriteChannel_m0_rdh
|
/**
* Closes the channel and validates the checksum against the storage. Manually verifying
* checksums for streaming uploads is recommended by Google, see here:
* https://cloud.google.com/storage/docs/streaming
*
* @throws IOException
* On underlying failure or non-matching checksums
*/
    public void m0() throws IOException {
LOGGER.trace("Closing write channel to blob {}", blobIdentifier);
// close channel and get blob metadata
f1.close();
Optional<GSBlobStorage.BlobMetadata> blobMetadata = f0.getMetadata(blobIdentifier);
        if (!blobMetadata.isPresent()) {
            throw new IOException(String.format("Failed to read metadata for blob %s", blobIdentifier));
}
// make sure checksums match
String writeChecksum = ChecksumUtils.convertChecksumToString(hasher.hash().asInt());
String blobChecksum = blobMetadata.get().getChecksum();
if (!writeChecksum.equals(blobChecksum)) {
throw new IOException(String.format("Checksum mismatch writing blob %s: expected %s but found %s", blobIdentifier, writeChecksum, blobChecksum));
}
}
| 3.26 |
flink_GSChecksumWriteChannel_write_rdh
|
/**
* Writes bytes to the underlying channel and updates checksum.
*
* @param content
* The content to write
* @param start
* The start position
* @param length
* The number of bytes to write
* @return The number of bytes written
* @throws IOException
* On underlying failure
*/
public int write(byte[] content, int start, int length) throws IOException {
LOGGER.trace("Writing {} bytes to blob {}", length, blobIdentifier);
        Preconditions.checkNotNull(content);
        Preconditions.checkArgument(start >= 0);
Preconditions.checkArgument(length >= 0);
hasher.putBytes(content, start, length);
return f1.write(content, start, length);
}
| 3.26 |
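The channel above feeds every written chunk into a Guava hasher and compares the result with the blob's checksum on close. The sketch below shows the same accumulation with Guava's CRC32C hasher; the Base64/big-endian conversion mirrors how Google Cloud Storage reports CRC32C values and is an assumption about what ChecksumUtils.convertChecksumToString does, not taken from the source:

import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class ChecksumSketch {
    public static void main(String[] args) {
        Hasher hasher = Hashing.crc32c().newHasher();

        // Each write(content, start, length) call also updates the hasher.
        byte[] chunk1 = "hello ".getBytes(StandardCharsets.UTF_8);
        byte[] chunk2 = "world".getBytes(StandardCharsets.UTF_8);
        hasher.putBytes(chunk1, 0, chunk1.length);
        hasher.putBytes(chunk2, 0, chunk2.length);

        // Assumed conversion: big-endian int, Base64-encoded, as GCS reports CRC32C checksums.
        String writeChecksum = Base64.getEncoder()
                .encodeToString(ByteBuffer.allocate(4).putInt(hasher.hash().asInt()).array());
        System.out.println(writeChecksum);
    }
}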
flink_HiveParserQBExpr_containsQueryWithoutSourceTable_rdh
|
/**
 * Returns true if the query block contains any query or subquery without a source table, e.g.
 * {@code select current_user()} or {@code select current_database()}.
*
* @return true, if the query block contains any query without a source table
*/
public boolean containsQueryWithoutSourceTable() {
if (qb != null) {
return qb.containsQueryWithoutSourceTable();
} else {
return qbexpr1.containsQueryWithoutSourceTable() || qbexpr2.containsQueryWithoutSourceTable();
}
}
| 3.26 |
flink_TypeHint_hashCode_rdh
|
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return typeInfo.hashCode();
}
| 3.26 |
flink_TypeHint_getTypeInfo_rdh
|
// ------------------------------------------------------------------------
/**
* Gets the type information described by this TypeHint.
*
* @return The type information described by this TypeHint.
 */
public TypeInformation<T> getTypeInfo() {
    return typeInfo;
}
| 3.26 |
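A typical use of the hint above: an anonymous subclass captures the generic parameters so that full type information survives erasure. The import paths are the usual Flink ones and are assumed here:

import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;

public class TypeHintExample {
    public static void main(String[] args) {
        // The anonymous subclass preserves Tuple2<String, Integer> against type erasure.
        TypeInformation<Tuple2<String, Integer>> info =
                new TypeHint<Tuple2<String, Integer>>() {}.getTypeInfo();
        System.out.println(info);
    }
}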
flink_FlinkAssertions_chainOfCauses_rdh
|
/**
* You can use this method in combination with {@link AbstractThrowableAssert#extracting(Function, AssertFactory)} to perform assertions on a chain
* of causes. For example:
*
* <pre>{@code assertThat(throwable)
* .extracting(FlinkAssertions::chainOfCauses, FlinkAssertions.STREAM_THROWABLE)}</pre>
*
     * @return a stream of causes, ordered from the given {@link Throwable} up to the root cause
*/
public static Stream<Throwable> chainOfCauses(Throwable throwable) {
if (throwable == null) {
return Stream.empty();
}
        if (throwable.getCause() == null) {
            return Stream.of(throwable);
        }
        return Stream.concat(Stream.of(throwable), chainOfCauses(throwable.getCause()));
}
| 3.26 |
flink_FlinkAssertions_anyCauseMatches_rdh
|
/**
* Shorthand to assert the chain of causes includes a {@link Throwable} matching a specific
* {@link Class} and containing the provided message. Same as:
*
* <pre>{@code assertThatChainOfCauses(throwable)
* .anySatisfy(
* cause ->
* assertThat(cause)
* .hasMessageContaining(containsMessage));}</pre>
*/
public static ThrowingConsumer<? super Throwable> anyCauseMatches(String containsMessage) {
return t -> assertThatChainOfCauses(t).as("Any cause contains message '%s'", containsMessage).anySatisfy(t1 -> assertThat(t1).hasMessageContaining(containsMessage));
}
| 3.26 |
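A short usage sketch of the assertion helper above, assuming AssertJ's assertThat(Throwable).satisfies(...) and the usual FlinkAssertions import path; both are assumptions about the surrounding test setup:

import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches;
import static org.assertj.core.api.Assertions.assertThat;

public class AnyCauseMatchesExample {
    public static void main(String[] args) {
        Throwable root =
                new RuntimeException("outer failure", new IllegalStateException("checkpoint expired"));

        // Passes because some cause in the chain contains the given message.
        assertThat(root).satisfies(anyCauseMatches("checkpoint expired"));
    }
}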
flink_FlinkAssertions_m0_rdh
|
/**
* Shorthand to assert chain of causes. Same as:
*
* <pre>{@code assertThat(throwable)
* .extracting(FlinkAssertions::chainOfCauses, FlinkAssertions.STREAM_THROWABLE)}</pre>
*/
public static ListAssert<Throwable> m0(Throwable root) {
    return assertThat(root).extracting(FlinkAssertions::chainOfCauses, STREAM_THROWABLE);
}
| 3.26 |
flink_FlinkAssertions_assertThatFuture_rdh
|
/**
* Create assertion for {@link java.util.concurrent.CompletionStage}.
*
* @param actual
* the actual value.
* @param <T>
* the type of the value contained in the {@link java.util.concurrent.CompletionStage}.
* @return the created assertion object.
*/
public static <T> FlinkCompletableFutureAssert<T> assertThatFuture(CompletionStage<T> actual) {
return new FlinkCompletableFutureAssert<>(actual);
}
| 3.26 |
flink_BlobLibraryCacheManager_getNumberOfReferenceHolders_rdh
|
/**
* Gets the number of tasks holding {@link ClassLoader} references for the given job.
*
* @param jobId
* ID of a job
* @return number of reference holders
*/
int getNumberOfReferenceHolders(JobID jobId) {
synchronized(lockObject) {
LibraryCacheEntry entry = cacheEntries.get(jobId);
return entry == null ? 0 : entry.getReferenceCount();
}
}
| 3.26 |
flink_BlobLibraryCacheManager_m1_rdh
|
/**
* Release the class loader to ensure any file descriptors are closed and the cached
* libraries are deleted immediately.
*/
private void m1() {
runReleaseHooks();
if (!wrapsSystemClassLoader) {
try {
((Closeable) (classLoader)).close();
} catch (IOException e) {
LOG.warn("Failed to release user code class loader for " + Arrays.toString(libraries.toArray()));
}
}
// clear potential references to user-classes in the singleton cache
TypeFactory.defaultInstance().clearCache();
}
| 3.26 |
flink_BlobLibraryCacheManager_getNumberOfManagedJobs_rdh
|
/**
* Returns the number of registered jobs that this library cache manager handles.
*
* @return number of jobs (irrespective of the actual number of tasks per job)
*/
int getNumberOfManagedJobs() {
synchronized(lockObject) {
return cacheEntries.size();
}
}
| 3.26 |
flink_LogicalTypeDuplicator_instantiateStructuredBuilder_rdh
|
// --------------------------------------------------------------------------------------------
private Builder instantiateStructuredBuilder(StructuredType structuredType) {
final Optional<ObjectIdentifier> identifier = structuredType.getObjectIdentifier();
final Optional<Class<?>> implementationClass = structuredType.getImplementationClass();
        if (identifier.isPresent() && implementationClass.isPresent()) {
            return StructuredType.newBuilder(identifier.get(), implementationClass.get());
        } else if (identifier.isPresent()) {
            return StructuredType.newBuilder(identifier.get());
        } else if (implementationClass.isPresent()) {
            return StructuredType.newBuilder(implementationClass.get());
        } else {
throw new TableException("Invalid structured type.");
}
}
| 3.26 |
flink_StreamTableSinkFactory_createTableSink_rdh
|
/**
 * Only creates a stream table sink.
*/
@Override
default TableSink<T> createTableSink(Map<String, String> properties) {
StreamTableSink<T> sink = createStreamTableSink(properties);
if (sink == null) {
throw new ValidationException("Please override 'createTableSink(Context)' method.");
}
    return sink;
}
| 3.26 |
flink_FileInputSplit_hashCode_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return getSplitNumber() ^ (file == null ? 0 : file.hashCode());
}
| 3.26 |
flink_FileInputSplit_getPath_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Returns the path of the file containing this split's data.
*
* @return the path of the file containing this split's data.
*/
public Path getPath() {
return file;
}
| 3.26 |
flink_BufferAvailabilityListener_notifyPriorityEvent_rdh
|
/**
* Called when the first priority event is added to the head of the buffer queue.
*
* @param prioritySequenceNumber
* the sequence number that identifies the priority buffer.
*/
default void notifyPriorityEvent(int prioritySequenceNumber) {
}
| 3.26 |