name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_ExecutionEnvironment_registerType_rdh
|
/**
* Registers the given type with the serialization stack. If the type is eventually serialized
* as a POJO, then the type is registered with the POJO serializer. If the type ends up being
* serialized with Kryo, then it will be registered at Kryo to make sure that only tags are
* written.
*
* @param type
* The class of the type to register.
*/
public void registerType(Class<?> type) {
if (type == null) {
throw new NullPointerException("Cannot register null type class.");
}
TypeInformation<?> typeInfo = TypeExtractor.createTypeInfo(type);
if (typeInfo instanceof PojoTypeInfo) {
config.registerPojoType(type);
} else {
config.registerKryoType(type);
}
}
| 3.26 |
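A minimal usage sketch for `registerType` above; the `SensorReading` class is a hypothetical POJO introduced only for illustration. Whether it is handled by the POJO serializer or falls back to Kryo registration depends on whether it satisfies Flink's POJO rules.

```java
import org.apache.flink.api.java.ExecutionEnvironment;

public class RegisterTypeExample {

    // Hypothetical type used only for illustration. A public no-arg constructor and
    // public fields make it eligible for Flink's POJO serializer.
    public static class SensorReading {
        public String sensorId;
        public double value;

        public SensorReading() {}
    }

    public static void main(String[] args) {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // POJO types are registered with the POJO serializer; everything else is
        // registered with Kryo so that only tags are written.
        env.registerType(SensorReading.class);
    }
}
```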
flink_ExecutionEnvironment_clearJobListeners_rdh
|
/**
* Clear all registered {@link JobListener}s.
*/
@PublicEvolving
public void clearJobListeners() {
this.jobListeners.clear();
}
/**
* Triggers the program execution asynchronously. The environment will execute all parts of the
* program that have resulted in a "sink" operation. Sink operations are for example printing
* results ({@link DataSet#print()}, writing results (e.g. {@link DataSet#writeAsText(String)},
* {@link DataSet#write(org.apache.flink.api.common.io.FileOutputFormat, String)}, or other
* generic data sinks created with {@link DataSet#output(org.apache.flink.api.common.io.OutputFormat)}.
*
* <p>The program execution will be logged and displayed with a generated default name.
*
* @return A {@link JobClient}
| 3.26 |
flink_ExecutionEnvironment_registerCachedFile_rdh
|
/**
* Registers a file at the distributed cache under the given name. The file will be accessible
* from any user-defined function in the (distributed) runtime under a local path. Files may be
* local files (which will be distributed via BlobServer), or files in a distributed file
* system. The runtime will copy the files temporarily to a local cache, if needed.
*
* <p>The {@link org.apache.flink.api.common.functions.RuntimeContext} can be obtained inside
* UDFs via {@link org.apache.flink.api.common.functions.RichFunction#getRuntimeContext()} and
 * provides access to {@link org.apache.flink.api.common.cache.DistributedCache} via {@link org.apache.flink.api.common.functions.RuntimeContext#getDistributedCache()}.
*
* @param filePath
* The path of the file, as a URI (e.g. "file:///some/path" or
* "hdfs://host:port/and/path")
* @param name
* The name under which the file is registered.
* @param executable
* flag indicating whether the file should be executable
*/
public void registerCachedFile(String filePath, String name, boolean executable) {
this.f1.add(new Tuple2<>(name, new DistributedCacheEntry(filePath, executable)));
}
| 3.26 |
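A hedged usage sketch for `registerCachedFile`: the HDFS path, the cache name `countries`, and the surrounding job are placeholders. Inside a rich function, the registered file is retrieved through the `DistributedCache`, as described in the javadoc above.

```java
import java.io.File;

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class CachedFileExample {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Hypothetical file path and cache name, used only for illustration.
        env.registerCachedFile("hdfs://namenode:9000/lookup/countries.txt", "countries");

        DataSet<String> mapped =
                env.fromElements("DE", "US", "JP")
                        .map(new RichMapFunction<String, String>() {
                            @Override
                            public String map(String code) throws Exception {
                                // The runtime has copied the registered file to a local path.
                                // Usually resolved once in open(); done here to keep the sketch short.
                                File lookup =
                                        getRuntimeContext().getDistributedCache().getFile("countries");
                                // ... enrich 'code' with data loaded from 'lookup' ...
                                return code;
                            }
                        });

        mapped.print();
    }
}
```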
flink_ExecutionEnvironment_registerTypeWithKryoSerializer_rdh
|
/**
* Registers the given Serializer via its class as a serializer for the given type at the
* KryoSerializer.
*
* @param type
* The class of the types serialized with the given serializer.
* @param serializerClass
* The class of the serializer to use.
*/
public void registerTypeWithKryoSerializer(Class<?> type, Class<? extends Serializer<?>> serializerClass) {
config.registerTypeWithKryoSerializer(type, serializerClass);
}
| 3.26 |
flink_ExecutionEnvironment_getJobListeners_rdh
|
/**
* Gets the config JobListeners.
*/
protected List<JobListener> getJobListeners() {
return jobListeners;
}
/**
 * Gets the parallelism with which operations are executed by default. Operations can
 * individually override this value to use a specific parallelism via {@link Operator#setParallelism(int)}. Other operations may need to run with a different parallelism
 * - for example, calling {@link DataSet#reduce(org.apache.flink.api.common.functions.ReduceFunction)} over the entire set
 * will eventually insert an operation that runs non-parallel (parallelism of one).
*
* @return The parallelism used by operations, unless they override that value. This method
returns {@link ExecutionConfig#PARALLELISM_DEFAULT}
| 3.26 |
flink_ExecutionEnvironment_createLocalEnvironmentWithWebUI_rdh
|
/**
* Creates a {@link LocalEnvironment} for local program execution that also starts the web
* monitoring UI.
*
* <p>The local execution environment will run the program in a multi-threaded fashion in the
* same JVM as the environment was created in. It will use the parallelism specified in the
* parameter.
*
* <p>If the configuration key 'rest.port' was set in the configuration, that particular port
* will be used for the web UI. Otherwise, the default port (8081) will be used.
*/
@PublicEvolving
public static ExecutionEnvironment createLocalEnvironmentWithWebUI(Configuration conf) {
checkNotNull(conf, "conf");
if (!conf.contains(RestOptions.PORT)) {
// explicitly set this option so that it's not set to 0 later
conf.setInteger(RestOptions.PORT, RestOptions.PORT.defaultValue());
}
return createLocalEnvironment(conf, -1);
}
| 3.26 |
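A small sketch of starting a local environment with the web UI, assuming the `flink-runtime-web` dependency is on the classpath; the port 8082 is an arbitrary choice overriding the 8081 default mentioned above.

```java
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;

public class LocalWebUIExample {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Arbitrary example port; without this, the 8081 default is used.
        conf.setInteger(RestOptions.PORT, 8082);

        // Assumption: flink-runtime-web must be on the classpath for the UI to show up.
        ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);

        env.fromElements(1, 2, 3).print();
    }
}
```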
flink_ExecutionEnvironment_fromParallelCollection_rdh
|
// private helper for passing different call location names
private <X> DataSource<X> fromParallelCollection(SplittableIterator<X> iterator, TypeInformation<X> type, String callLocationName) {
return new DataSource<>(this, new ParallelIteratorInputFormat<>(iterator), type, callLocationName);
}
| 3.26 |
flink_ExecutionEnvironment_createLocalEnvironment_rdh
|
/**
* Creates a {@link LocalEnvironment} which is used for executing Flink jobs.
*
* @param configuration
* to start the {@link LocalEnvironment} with
* @param defaultParallelism
* to initialize the {@link LocalEnvironment} with
* @return {@link LocalEnvironment}
*/
private static LocalEnvironment createLocalEnvironment(Configuration configuration, int defaultParallelism) {
final LocalEnvironment localEnvironment = new LocalEnvironment(configuration);
if (defaultParallelism > 0) {
localEnvironment.setParallelism(defaultParallelism);
}
return localEnvironment;
}
| 3.26 |
flink_ExecutionEnvironment_setRestartStrategy_rdh
|
/**
* Sets the restart strategy configuration. The configuration specifies which restart strategy
* will be used for the execution graph in case of a restart.
*
* @param restartStrategyConfiguration
* Restart strategy configuration to be set
*/
@PublicEvolving
public void setRestartStrategy(RestartStrategies.RestartStrategyConfiguration restartStrategyConfiguration) {
config.setRestartStrategy(restartStrategyConfiguration);
}
| 3.26 |
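A brief sketch of configuring a restart strategy through this setter, using the fixed-delay strategy from `RestartStrategies`; the retry count and delay are arbitrary example values.

```java
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.ExecutionEnvironment;

public class RestartStrategyExample {

    public static void main(String[] args) {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Retry a failed job 3 times, waiting 10 seconds between attempts (example values).
        env.setRestartStrategy(
                RestartStrategies.fixedDelayRestart(3, Time.of(10, TimeUnit.SECONDS)));
    }
}
```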
flink_ExecutionEnvironment_fromCollection_rdh
|
/**
* Creates a DataSet from the given iterator. Because the iterator will remain unmodified until
* the actual execution happens, the type of data returned by the iterator must be given
* explicitly in the form of the type information. This method is useful for cases where the
* type is generic. In that case, the type class (as given in {@link #fromCollection(Iterator,
 * Class)}) does not supply all type information.
*
* <p>Note that this operation will result in a non-parallel data source, i.e. a data source
* with a parallelism of one.
*
* @param data
* The collection of elements to create the data set from.
* @param type
* The TypeInformation for the produced data set.
* @return A DataSet representing the elements in the iterator.
* @see #fromCollection(Iterator, Class)
*/
public <X> DataSource<X> fromCollection(Iterator<X> data, TypeInformation<X> type) {
return new DataSource<>(this, new IteratorInputFormat<>(data), type, Utils.getCallLocationName());
}
| 3.26 |
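A usage sketch for the iterator-based `fromCollection` variant. The iterator that is shipped to the cluster has to be serializable, so the example defines a tiny serializable iterator (`CountingIterator`, introduced only for illustration) and supplies the generic tuple type via a `TypeHint`.

```java
import java.io.Serializable;
import java.util.Iterator;

import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class FromIteratorExample {

    // The iterator is wrapped into an IteratorInputFormat and shipped with the job,
    // hence the explicit Serializable marker.
    public static class CountingIterator
            implements Iterator<Tuple2<String, Integer>>, Serializable {
        private int next = 0;

        @Override
        public boolean hasNext() {
            return next < 3;
        }

        @Override
        public Tuple2<String, Integer> next() {
            int i = next++;
            return Tuple2.of("key-" + i, i);
        }
    }

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // The generic tuple type cannot be extracted from the iterator alone,
        // so it is given explicitly through a TypeHint.
        DataSet<Tuple2<String, Integer>> data =
                env.fromCollection(
                        new CountingIterator(),
                        TypeInformation.of(new TypeHint<Tuple2<String, Integer>>() {}));

        data.print();
    }
}
```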
flink_ExecutionEnvironment_setDefaultLocalParallelism_rdh
|
/**
* Sets the default parallelism that will be used for the local execution environment created by
* {@link #createLocalEnvironment()}.
*
* @param parallelism
* The parallelism to use as the default local parallelism.
*/
public static void setDefaultLocalParallelism(int parallelism) {
defaultLocalDop = parallelism;
}
| 3.26 |
flink_ExecutionEnvironment_createRemoteEnvironment_rdh
|
/**
* Creates a {@link RemoteEnvironment}. The remote environment sends (parts of) the program to a
* cluster for execution. Note that all file paths used in the program must be accessible from
* the cluster. The execution will use the specified parallelism.
*
* @param host
* The host name or address of the master (JobManager), where the program should be
* executed.
* @param port
* The port of the master (JobManager), where the program should be executed.
* @param parallelism
* The parallelism to use during the execution.
* @param jarFiles
* The JAR files with code that needs to be shipped to the cluster. If the
* program uses user-defined functions, user-defined input formats, or any libraries, those
* must be provided in the JAR files.
* @return A remote environment that executes the program on a cluster.
*/
public static ExecutionEnvironment createRemoteEnvironment(String host, int port, int parallelism, String... jarFiles) {
RemoteEnvironment rec = new RemoteEnvironment(host, port, jarFiles);
rec.setParallelism(parallelism);
return rec;
}
| 3.26 |
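A hedged sketch of submitting a program to a remote cluster via `createRemoteEnvironment`; host name, port, parallelism, and the jar/file paths are placeholders only.

```java
import org.apache.flink.api.java.ExecutionEnvironment;

public class RemoteEnvironmentExample {

    public static void main(String[] args) throws Exception {
        // Host, port, parallelism, and jar path are placeholders for illustration only.
        ExecutionEnvironment env =
                ExecutionEnvironment.createRemoteEnvironment(
                        "jobmanager.example.com", 8081, 4, "/path/to/program-with-udfs.jar");

        // The input and output paths must be accessible from the cluster.
        env.readTextFile("hdfs://namenode:9000/input/words.txt")
                .writeAsText("hdfs://namenode:9000/output/words-copy.txt");

        env.execute("remote example");
    }
}
```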
flink_ExecutionEnvironment_getRestartStrategy_rdh
|
/**
* Returns the specified restart strategy configuration.
*
* @return The restart strategy configuration to be used
*/
@PublicEvolving
public RestartStrategyConfiguration getRestartStrategy() {
return config.getRestartStrategy();
}
/**
* Sets the number of times that failed tasks are re-executed. A value of zero effectively
* disables fault tolerance. A value of {@code -1} indicates that the system default value (as
* defined in the configuration) should be used.
*
* @param numberOfExecutionRetries
* The number of times the system will try to re-execute failed
* tasks.
* @deprecated This method will be replaced by {@link #setRestartStrategy}. The {@link RestartStrategies.FixedDelayRestartStrategyConfiguration}
| 3.26 |
flink_ExecutionEnvironment_readFile_rdh
|
// ------------------------------------ File Input Format
// -----------------------------------------
public <X> DataSource<X> readFile(FileInputFormat<X> inputFormat, String filePath) {
if (inputFormat == null) {
throw new IllegalArgumentException("InputFormat must not be null.");
}
if (filePath == null) {
throw new IllegalArgumentException("The file path must not be null.");
}
inputFormat.setFilePath(new Path(filePath));
try {
return createInput(inputFormat, TypeExtractor.getInputFormatTypes(inputFormat));
} catch (Exception e) {
throw new InvalidProgramException("The type returned by the input format could not be automatically determined. "
+ "Please specify the TypeInformation of the produced type explicitly by using the "
+ "'createInput(InputFormat, TypeInformation)' method instead.");
}
}
| 3.26 |
flink_ExecutionEnvironment_getDefaultLocalParallelism_rdh
|
// --------------------------------------------------------------------------------------------
// Default parallelism for local execution
// --------------------------------------------------------------------------------------------
/**
* Gets the default parallelism that will be used for the local execution environment created by
* {@link #createLocalEnvironment()}.
*
* @return The default local parallelism
*/
public static int getDefaultLocalParallelism() {
return defaultLocalDop;
}
| 3.26 |
flink_ExecutionEnvironment_resetContextEnvironment_rdh
|
/**
* Un-sets the context environment factory. After this method is called, the call to {@link #getExecutionEnvironment()} will again return a default local execution environment, and it
* is possible to explicitly instantiate the LocalEnvironment and the RemoteEnvironment.
*/
protected static void resetContextEnvironment() {
contextEnvironmentFactory = null;
f0.remove();
}
| 3.26 |
flink_ExecutionEnvironment_setParallelism_rdh
|
/**
* Sets the parallelism for operations executed through this environment. Setting a parallelism
* of x here will cause all operators (such as join, map, reduce) to run with x parallel
* instances.
*
* <p>This method overrides the default parallelism for this environment. The {@link LocalEnvironment} uses by default a value equal to the number of hardware contexts (CPU cores
* / threads). When executing the program via the command line client from a JAR file, the
* default parallelism is the one configured for that setup.
*
* @param parallelism
* The parallelism
*/
public void setParallelism(int parallelism) {
config.setParallelism(parallelism);
}
| 3.26 |
flink_ExecutionEnvironment_configure_rdh
|
/**
 * Sets all relevant options contained in the {@link ReadableConfig}, such as {@link PipelineOptions#CACHED_FILES}. It will reconfigure {@link ExecutionEnvironment} and {@link ExecutionConfig}.
*
* <p>It will change the value of a setting only if a corresponding option was set in the {@code configuration}. If a key is not present, the current value of a field will remain untouched.
*
* @param configuration
* a configuration to read the values from
* @param classLoader
* a class loader to use when loading classes
*/
@PublicEvolving
public void configure(ReadableConfig configuration, ClassLoader classLoader) {
configuration.getOptional(DeploymentOptions.JOB_LISTENERS).ifPresent(listeners -> registerCustomListeners(classLoader, listeners));
configuration.getOptional(PipelineOptions.CACHED_FILES).ifPresent(f -> {
this.cacheFile.clear();
this.cacheFile.addAll(DistributedCache.parseCachedFilesFromString(f));
});
configuration.getOptional(PipelineOptions.NAME).ifPresent(jobName -> this.getConfiguration().set(PipelineOptions.NAME, jobName));
config.configure(configuration, classLoader);
}
| 3.26 |
flink_ExecutionEnvironment_readTextFileWithValue_rdh
|
/**
* Creates a {@link DataSet} that represents the Strings produced by reading the given file line
* wise. This method is similar to {@link #readTextFile(String, String)}, but it produces a
* DataSet with mutable {@link StringValue} objects, rather than Java Strings. StringValues can
* be used to tune implementations to be less object and garbage collection heavy.
*
* <p>The {@link java.nio.charset.Charset} with the given name will be used to read the files.
*
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param charsetName
* The name of the character set used to read the file.
* @param skipInvalidLines
* A flag to indicate whether to skip lines that cannot be read with the
* given character set.
* @return A DataSet that represents the data read from the given file as text lines.
*/
public DataSource<StringValue> readTextFileWithValue(String filePath, String charsetName, boolean skipInvalidLines) {
Preconditions.checkNotNull(filePath, "The file path may not be null.");
TextValueInputFormat format = new TextValueInputFormat(new Path(filePath));
format.setCharsetName(charsetName);
format.setSkipInvalidLines(skipInvalidLines);
return new DataSource<>(this, format, new ValueTypeInfo<>(StringValue.class), Utils.getCallLocationName());
}
// ----------------------------------- Primitive Input Format
// ---------------------------------------
/**
* Creates a {@link DataSet} that represents the primitive type produced by reading the given
 * file line wise. This method is similar to {@link #readCsvFile(String)} with a single field, but
 * it produces a DataSet of the primitive type directly rather than through {@link org.apache.flink.api.java.tuple.Tuple1}.
*
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param typeClass
* The primitive type class to be read.
* @return A {@link DataSet}
| 3.26 |
flink_ExecutionEnvironment_createCollectionsEnvironment_rdh
|
/**
* Creates a {@link CollectionEnvironment} that uses Java Collections underneath. This will
* execute in a single thread in the current JVM. It is very fast but will fail if the data does
 * not fit into memory. The parallelism will always be 1. This is useful during implementation and
* for debugging.
*
* @return A Collection Environment
*/
@PublicEvolving
public static CollectionEnvironment createCollectionsEnvironment() {
CollectionEnvironment v18 = new CollectionEnvironment();
v18.setParallelism(1);
return v18;
}
| 3.26 |
flink_ExecutionEnvironment_createProgramPlan_rdh
|
/**
* Creates the program's {@link Plan}. The plan is a description of all data sources, data
* sinks, and operations and how they interact, as an isolated unit that can be executed with an
* {@link PipelineExecutor}. Obtaining a plan and starting it with an executor is an alternative
* way to run a program and is only possible if the program consists only of distributed
* operations.
*
* @param jobName
* The name attached to the plan (displayed in logs and monitoring).
* @param clearSinks
* Whether or not to start a new stage of execution.
* @return The program's plan.
*/
@Internal
public Plan createProgramPlan(String jobName, boolean clearSinks) {
checkNotNull(jobName);
if (this.sinks.isEmpty()) {
if (wasExecuted) {
throw new RuntimeException("No new data sinks have been defined since the "
+ "last execution. The last execution refers to the latest call to "
+ "'execute()', 'count()', 'collect()', or 'print()'.");
} else {
throw new RuntimeException("No data sinks have been created yet. "
+ "A program needs at least one sink that consumes data. "
+ "Examples are writing the data set or printing it.");
}
}
final PlanGenerator generator = new PlanGenerator(sinks, config, getParallelism(), f1, jobName);
final Plan plan = generator.generate();
// clear all the sinks such that the next execution does not redo everything
if (clearSinks) {
this.sinks.clear();
wasExecuted = true;
}
return plan;
}
| 3.26 |
flink_ExecutionEnvironment_execute_rdh
|
/**
* Triggers the program execution. The environment will execute all parts of the program that
* have resulted in a "sink" operation. Sink operations are for example printing results ({@link DataSet#print()}, writing results (e.g. {@link DataSet#writeAsText(String)}, {@link DataSet#write(org.apache.flink.api.common.io.FileOutputFormat, String)}, or other generic
* data sinks created with {@link DataSet#output(org.apache.flink.api.common.io.OutputFormat)}.
*
* <p>The program execution will be logged and displayed with the given job name.
*
* @return The result of the job execution, containing elapsed time and accumulators.
* @throws Exception
* Thrown, if the program executions fails.
*/
public JobExecutionResult execute(String jobName) throws Exception {
final JobClient jobClient = executeAsync(jobName);
try {
if (configuration.getBoolean(DeploymentOptions.ATTACHED)) {
lastJobExecutionResult = jobClient.getJobExecutionResult().get();
} else {
lastJobExecutionResult = new DetachedJobExecutionResult(jobClient.getJobID());
}
jobListeners.forEach(jobListener -> jobListener.onJobExecuted(lastJobExecutionResult, null));
} catch (Throwable t) {
// get() on the JobExecutionResult Future will throw an ExecutionException. This
// behaviour was largely not there in Flink versions before the PipelineExecutor
// refactoring so we should strip that exception.
Throwable strippedException = ExceptionUtils.stripExecutionException(t);
jobListeners.forEach(jobListener -> {
jobListener.onJobExecuted(null, strippedException);
});
ExceptionUtils.rethrowException(strippedException);
}
return lastJobExecutionResult;
}
| 3.26 |
flink_ExecutionEnvironment_readCsvFile_rdh
|
// ----------------------------------- CSV Input Format ---------------------------------------
/**
* Creates a CSV reader to read a comma separated value (CSV) file. The reader has options to
* define parameters and field types and will eventually produce the DataSet that corresponds to
* the read and parsed CSV input.
*
* @param filePath
* The path of the CSV file.
* @return A CsvReader that can be used to configure the CSV input.
*/
public CsvReader readCsvFile(String filePath) {
return new CsvReader(filePath, this);
}
| 3.26 |
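A short sketch of the fluent `CsvReader` returned by `readCsvFile`; the file path, delimiter, and field types are illustrative assumptions.

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple3;

public class ReadCsvExample {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Placeholder path; the reader is configured fluently and only produces a
        // DataSet once the field types are declared.
        DataSet<Tuple3<String, Integer, Double>> orders =
                env.readCsvFile("file:///tmp/orders.csv")
                        .fieldDelimiter(";")
                        .ignoreFirstLine()
                        .types(String.class, Integer.class, Double.class);

        orders.print();
    }
}
```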
flink_ExecutionEnvironment_getLastJobExecutionResult_rdh
|
/**
* Returns the {@link org.apache.flink.api.common.JobExecutionResult} of the last executed job.
*
* @return The execution result from the latest job execution.
*/
public JobExecutionResult getLastJobExecutionResult() {
return this.lastJobExecutionResult;
}
| 3.26 |
flink_ExecutionEnvironment_initializeContextEnvironment_rdh
|
// --------------------------------------------------------------------------------------------
// Methods to control the context environment and creation of explicit environments other
// than the context environment
// --------------------------------------------------------------------------------------------
/**
* Sets a context environment factory, that creates the context environment for running programs
* with pre-configured environments. Examples are running programs from the command line.
*
* <p>When the context environment factory is set, no other environments can be explicitly used.
*
* @param ctx
* The context environment factory.
*/
protected static void initializeContextEnvironment(ExecutionEnvironmentFactory ctx) {
contextEnvironmentFactory = Preconditions.checkNotNull(ctx);
f0.set(ctx);
}
| 3.26 |
flink_ExecutionEnvironment_getJobName_rdh
|
/**
 * Gets the job name. If a user-defined job name is not found in the configuration, a default
 * name based on the timestamp at which this method is invoked is returned.
*
* @return A job name.
*/
private String getJobName() {
return configuration.getString(PipelineOptions.NAME, "Flink Java Job at " + Calendar.getInstance().getTime());
}
// --------------------------------------------------------------------------------------------
// Instantiation of Execution Contexts
// --------------------------------------------------------------------------------------------
/**
* Creates an execution environment that represents the context in which the program is
* currently executed. If the program is invoked standalone, this method returns a local
* execution environment, as returned by {@link #createLocalEnvironment()}
| 3.26 |
flink_GSCommitRecoverable_getComponentBlobIds_rdh
|
/**
* Returns the list of component blob ids, which have to be resolved from the temporary bucket
* name, prefix, and component ids. Resolving them this way vs. storing the blob ids directly
* allows us to move in-progress blobs by changing options to point to new in-progress
* locations.
*
* @param options
* The GS file system options
* @return The list of component blob ids
*/
List<GSBlobIdentifier> getComponentBlobIds(GSFileSystemOptions options) {
String temporaryBucketName = BlobUtils.getTemporaryBucketName(finalBlobIdentifier, options);
List<GSBlobIdentifier> componentBlobIdentifiers = componentObjectIds.stream()
.map(temporaryObjectId -> BlobUtils.getTemporaryObjectName(finalBlobIdentifier, temporaryObjectId))
.map(temporaryObjectName -> new GSBlobIdentifier(temporaryBucketName, temporaryObjectName))
.collect(Collectors.toList());
f0.trace("Resolved component blob identifiers for blob {}: {}", finalBlobIdentifier, componentBlobIdentifiers);
return componentBlobIdentifiers;
}
| 3.26 |
flink_AvroParquetRecordFormat_createReader_rdh
|
/**
* Creates a new reader to read avro {@link GenericRecord} from Parquet input stream.
*
 * <p>Several wrapper classes have been created to make the Flink abstraction compatible with the
 * Parquet abstraction. Please refer to the inner classes {@link AvroParquetRecordReader},
* {@link ParquetInputFile}, {@code FSDataInputStreamAdapter} for details.
*/
@Override
public Reader<E> createReader(Configuration config, FSDataInputStream stream, long fileLen, long splitEnd) throws IOException {
// current version does not support splitting.
checkNotSplit(fileLen, splitEnd);
return new AvroParquetRecordReader<E>(AvroParquetReader.<E>builder(new ParquetInputFile(stream, fileLen)).withDataModel(getDataModel()).build());
}
| 3.26 |
flink_AvroParquetRecordFormat_getProducedType_rdh
|
/**
* Gets the type produced by this format. This type will be the type produced by the file source
* as a whole.
*/
@Override
public TypeInformation<E> getProducedType() {
return type;
}
| 3.26 |
flink_AvroParquetRecordFormat_restoreReader_rdh
|
/**
 * Restores the reader from a checkpointed position. It is in fact identical to {@link #createReader}, since only {@link CheckpointedPosition#NO_OFFSET} is supported as the {@code restoredOffset}.
*/
@Override
public Reader<E> restoreReader(Configuration config, FSDataInputStream stream, long restoredOffset, long fileLen, long splitEnd) throws IOException {
// current version does not support splitting.
checkNotSplit(fileLen, splitEnd);
checkArgument(restoredOffset == CheckpointedPosition.NO_OFFSET, "The restoredOffset should always be NO_OFFSET");
return createReader(config, stream, fileLen, splitEnd);
}
| 3.26 |
flink_AvroParquetRecordFormat_isSplittable_rdh
|
/**
* Current version does not support splitting.
*/
@Override
public boolean isSplittable() {
return false;
}
| 3.26 |
flink_Tuple4_of_rdh
|
/**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3> Tuple4<T0, T1, T2, T3> of(T0 f0, T1 f1, T2 f2, T3 f3) {
return new Tuple4<>(f0, f1, f2, f3);
}
| 3.26 |
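A trivial usage sketch for the `Tuple4.of` factory; field access is positional via `f0`..`f3`.

```java
import org.apache.flink.api.java.tuple.Tuple4;

public class Tuple4Example {

    public static void main(String[] args) {
        // The factory method lets the compiler infer the four type arguments.
        Tuple4<String, Integer, Double, Boolean> t = Tuple4.of("sensor-1", 42, 17.5, true);

        // Fields are accessed positionally as f0..f3.
        System.out.println(t.f0 + " -> " + t.f2);
    }
}
```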
flink_Tuple4_equals_rdh
|
/**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple4)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple4 tuple = (Tuple4) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
return true;
}
| 3.26 |
flink_Tuple4_copy_rdh
|
/**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple4<T0, T1, T2, T3> copy() {
return new Tuple4<>(this.f0, this.f1, this.f2, this.f3);
}
| 3.26 |
flink_Tuple4_toString_rdh
|
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3), where the
* individual fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "(" + StringUtils.arrayAwareToString(this.f0)
+ "," + StringUtils.arrayAwareToString(this.f1)
+ "," + StringUtils.arrayAwareToString(this.f2)
+ "," + StringUtils.arrayAwareToString(this.f3)
+ ")";
}
| 3.26 |
flink_Tuple4_setFields_rdh
|
/**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
}
| 3.26 |
flink_EvictingWindowSavepointReader_reduce_rdh
|
/**
* Reads window state generated using a {@link ReduceFunction}.
*
* @param uid
* The uid of the operator.
* @param function
* The reduce function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param reduceType
* The type information of the reduce function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the reduce function.
* @param <OUT>
* The output type of the reduce function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataStream<OUT> reduce(String uid, ReduceFunction<T> function, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> reduceType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator = WindowReaderOperator.evictingWindow(new ReduceEvictingWindowReaderFunction<>(readerFunction, function), keyType, windowSerializer, reduceType, env.getConfig());
return readWindowOperator(uid, outputType, operator);
}
| 3.26 |
flink_EvictingWindowSavepointReader_process_rdh
|
/**
* Reads window state generated without any preaggregation such as {@code WindowedStream#apply}
* and {@code WindowedStream#process}.
*
* @param uid
* The uid of the operator.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param stateType
* The type of records stored in state.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the records stored in state.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException
* If the savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataStream<OUT> process(String uid, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> stateType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator = WindowReaderOperator.evictingWindow(new ProcessEvictingWindowReader<>(readerFunction), keyType, windowSerializer, stateType, env.getConfig());
return readWindowOperator(uid, outputType, operator);
}
| 3.26 |
flink_EvictingWindowSavepointReader_aggregate_rdh
|
/**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid
* The uid of the operator.
* @param aggregateFunction
* The aggregate function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param inputType
* The type information of the accumulator function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the values that are aggregated.
* @param <ACC>
* The type of the accumulator (intermediate aggregate state).
* @param <R>
* The type of the aggregated result.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
*/
public <K, T, ACC, R, OUT> DataStream<OUT> aggregate(String uid, AggregateFunction<T, ACC, R> aggregateFunction, WindowReaderFunction<R, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> inputType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator = WindowReaderOperator.evictingWindow(new AggregateEvictingWindowReaderFunction<>(readerFunction, aggregateFunction), keyType, windowSerializer, inputType, env.getConfig());
return readWindowOperator(uid, outputType, operator);
}
| 3.26 |
flink_SocketClientSink_open_rdh
|
// ------------------------------------------------------------------------
// Life cycle
// ------------------------------------------------------------------------
/**
* Initialize the connection with the Socket in the server.
*
* @param openContext
* the context.
*/
@Override
public void open(OpenContext openContext) throws Exception {
try {
synchronized(lock) {
createConnection();
}
} catch (IOException e) {
throw new IOException("Cannot connect to socket server at " + hostName + ":" + port, e);
}
}
| 3.26 |
flink_SocketClientSink_getCurrentNumberOfRetries_rdh
|
// ------------------------------------------------------------------------
// For testing
// ------------------------------------------------------------------------
int getCurrentNumberOfRetries() {
synchronized(lock) {
return retries;
}
}
| 3.26 |
flink_SocketClientSink_invoke_rdh
|
/**
* Called when new data arrives to the sink, and forwards it to Socket.
*
* @param value
* The value to write to the socket.
*/
@Override
public void invoke(IN value) throws Exception {
byte[] msg = schema.serialize(value);
try {
outputStream.write(msg);
if (autoFlush) {
outputStream.flush();
}
} catch (IOException e) {
// if no re-tries are enable, fail immediately
if (maxNumRetries == 0) {
throw new IOException("Failed to send message '" + value + "' to socket server at "
+ hostName + ":" + port + ". Connection re-tries are not enabled.", e);
}
LOG.error("Failed to send message '" + value + "' to socket server at "
+ hostName + ":" + port + ". Trying to reconnect...", e);
// do the retries in locked scope, to guard against concurrent close() calls
// note that the first re-try comes immediately, without a wait!
synchronized(lock) {
IOException lastException = null;
retries = 0;
while (isRunning && ((maxNumRetries < 0) || (retries < maxNumRetries))) {
// first, clean up the old resources
try {
if (outputStream != null) {
outputStream.close();
}
} catch (IOException ee) {
LOG.error("Could not close output stream from failed write attempt", ee);
}
try {
if (client != null) {
client.close();
}
} catch (IOException ee) {
LOG.error("Could not close socket from failed write attempt", ee);
}
// try again
retries++;
try {
// initialize a new connection
createConnection();
// re-try the write
outputStream.write(msg);
// success!
return;
} catch (IOException ee) {
lastException = ee;
LOG.error("Re-connect to socket server and send message failed. Retry time(s): " + retries, ee);
}
// wait before re-attempting to connect
lock.wait(CONNECTION_RETRY_DELAY);
}
// throw an exception if the task is still running, otherwise simply leave the
// method
if (isRunning) {
throw new IOException("Failed to send message '" + value + "' to socket server at "
+ hostName + ":" + port + ". Failed after " + retries + " retries.", lastException);
}
}
}
}
| 3.26 |
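A hedged sketch of how this sink is typically attached to a stream; host, port, and the retry count are placeholders, and `SimpleStringSchema` stands in for any `SerializationSchema`.

```java
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SocketClientSink;

public class SocketSinkExample {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<String> lines = env.fromElements("a", "b", "c");

        // Host and port are placeholders; the last argument allows up to 3 reconnect
        // attempts, which exercises the retry loop of invoke() shown above.
        lines.addSink(new SocketClientSink<>("localhost", 9999, new SimpleStringSchema(), 3));

        env.execute("socket sink example");
    }
}
```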
flink_SocketClientSink_close_rdh
|
/**
* Closes the connection with the Socket server.
*/
@Override
public void close() throws Exception {
// flag this as not running any more
isRunning = false;
// clean up in locked scope, so there is no concurrent change to the stream and client
synchronized(lock) {
// we notify first (this statement cannot fail). The notified thread will not continue
// anyways before it can re-acquire the lock
lock.notifyAll();
try {
if (outputStream != null) {
outputStream.close();
}
} finally {
if (client != null) {
client.close();
}
}
}
}
| 3.26 |
flink_SocketClientSink_createConnection_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
private void createConnection() throws IOException {
client = new Socket(hostName, port);
client.setKeepAlive(true);
client.setTcpNoDelay(true);
outputStream = client.getOutputStream();
}
| 3.26 |
flink_JoinWithSolutionSetSecondDriver_initialize_rdh
|
// --------------------------------------------------------------------------------------------
@Override
@SuppressWarnings("unchecked")
public void initialize() throws Exception {
final TypeSerializer<IT2> solutionSetSerializer;
final TypeComparator<IT2> solutionSetComparator;
// grab a handle to the hash table from the iteration broker
if (taskContext instanceof AbstractIterativeTask) {
AbstractIterativeTask<?, ?> iterativeTaskContext = ((AbstractIterativeTask<?, ?>) (taskContext));
String identifier = iterativeTaskContext.brokerKey();
Object table = SolutionSetBroker.instance().get(identifier);
if (table instanceof CompactingHashTable) {
this.hashTable = ((CompactingHashTable<IT2>) (table));
solutionSetSerializer = this.hashTable.getBuildSideSerializer();
solutionSetComparator = this.hashTable.getBuildSideComparator().duplicate();
} else if (table instanceof JoinHashMap) {
this.objectMap = ((JoinHashMap<IT2>) (table));
solutionSetSerializer = this.objectMap.getBuildSerializer();
solutionSetComparator = this.objectMap.getBuildComparator().duplicate();
} else {
throw new RuntimeException("Unrecognized solution set index: " + table);
}
} else {
throw new Exception("The task context of this driver is no iterative task context.");
}
TaskConfig config = taskContext.getTaskConfig();
ClassLoader classLoader = taskContext.getUserCodeClassLoader();
TypeSerializer<IT1> probeSideSerializer = taskContext.<IT1>getInputSerializer(0).getSerializer();
TypeComparatorFactory<IT1> probeSideComparatorFactory = config.getDriverComparator(0, classLoader);
this.probeSideComparator = probeSideComparatorFactory.createComparator();
ExecutionConfig executionConfig = taskContext.getExecutionConfig();
objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (objectReuseEnabled) {
solutionSideRecord = solutionSetSerializer.createInstance();
probeSideRecord = probeSideSerializer.createInstance();
}
TypePairComparatorFactory<IT1, IT2> factory = taskContext.getTaskConfig().getPairComparatorFactory(taskContext.getUserCodeClassLoader());
pairComparator = factory.createComparator12(this.probeSideComparator, solutionSetComparator);
}
| 3.26 |
flink_JoinWithSolutionSetSecondDriver_setup_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void setup(TaskContext<FlatJoinFunction<IT1, IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
}
| 3.26 |
flink_InputGateDeploymentDescriptor_getConsumedPartitionType_rdh
|
/**
* Returns the type of this input channel's consumed result partition.
*
* @return consumed result partition type
*/
public ResultPartitionType getConsumedPartitionType() {
return f1;
}
| 3.26 |
flink_InputGateDeploymentDescriptor_getConsumedSubpartitionIndexRange_rdh
|
/**
* Return the index range of the consumed subpartitions.
*/
public IndexRange getConsumedSubpartitionIndexRange() {
return consumedSubpartitionIndexRange;
}
| 3.26 |
flink_DistCp_isLocal_rdh
|
// -----------------------------------------------------------------------------------------
// HELPER METHODS
// -----------------------------------------------------------------------------------------
private static boolean isLocal(final ExecutionEnvironment env) {
return env instanceof LocalEnvironment;
}
| 3.26 |
flink_AsyncDynamicTableSinkBuilder_setMaxBufferedRequests_rdh
|
/**
*
* @param maxBufferedRequests
* the maximum buffer length. Callbacks to add elements to the buffer
* and calls to write will block if this length has been reached and will only unblock if
* elements from the buffer have been removed for flushing.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBufferedRequests(int maxBufferedRequests) {
this.maxBufferedRequests = maxBufferedRequests;
return ((ConcreteBuilderT) (this));
}
| 3.26 |
flink_AsyncDynamicTableSinkBuilder_m0_rdh
|
/**
*
* @param maxInFlightRequests
* maximum number of uncompleted calls to submitRequestEntries that
 * the SinkWriter will allow at any given point. Once this point has been reached, writes and
* callbacks to add elements to the buffer may block until one or more requests to
* submitRequestEntries completes.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT m0(int maxInFlightRequests) {
this.maxInFlightRequests = maxInFlightRequests;
return ((ConcreteBuilderT) (this));
}
| 3.26 |
flink_AsyncDynamicTableSinkBuilder_setMaxBatchSize_rdh
|
/**
*
* @param maxBatchSize
* maximum number of elements that may be passed in a list to be written
* downstream.
* @return {@link ConcreteBuilderT} itself
 */
public ConcreteBuilderT setMaxBatchSize(int maxBatchSize) {
this.maxBatchSize = maxBatchSize;
return ((ConcreteBuilderT) (this));
}
| 3.26 |
flink_AsyncDynamicTableSinkBuilder_setMaxTimeInBufferMS_rdh
|
/**
*
* @param maxTimeInBufferMS
* the maximum amount of time an element may remain in the buffer. In
* most cases elements are flushed as a result of the batch size (in bytes or number) being
* reached or during a snapshot. However, there are scenarios where an element may remain in
* the buffer forever or a long period of time. To mitigate this, a timer is constantly
* active in the buffer such that: while the buffer is not empty, it will flush every
* maxTimeInBufferMS milliseconds.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxTimeInBufferMS(long maxTimeInBufferMS) {
this.maxTimeInBufferMS = maxTimeInBufferMS;
return ((ConcreteBuilderT) (this));
}
| 3.26 |
flink_AsyncDynamicTableSinkBuilder_setMaxBufferSizeInBytes_rdh
|
/**
*
* @param maxBufferSizeInBytes
* a flush will be attempted if the most recent call to write
* introduces an element to the buffer such that the total size of the buffer is greater
* than or equal to this threshold value.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBufferSizeInBytes(long maxBufferSizeInBytes) {
this.maxBufferSizeInBytes = maxBufferSizeInBytes;
return ((ConcreteBuilderT) (this));
}
| 3.26 |
flink_MetricQueryService_createMetricQueryService_rdh
|
/**
* Starts the MetricQueryService actor in the given actor system.
*
* @param rpcService
* The rpcService running the MetricQueryService
* @param resourceID
* resource ID to disambiguate the actor name
* @return actor reference to the MetricQueryService
*/
public static MetricQueryService createMetricQueryService(RpcService rpcService, ResourceID resourceID, long maximumFrameSize) {
String endpointId = (resourceID == null) ? METRIC_QUERY_SERVICE_NAME : (METRIC_QUERY_SERVICE_NAME + "_") + resourceID.getResourceIdString();
return new MetricQueryService(rpcService, endpointId, maximumFrameSize);
}
| 3.26 |
flink_MetricQueryService_replaceInvalidChars_rdh
|
/**
* Lightweight method to replace unsupported characters. If the string does not contain any
* unsupported characters, this method creates no new string (and in fact no new objects at
* all).
*
* <p>Replacements:
*
* <ul>
* <li>{@code space : . ,} are replaced by {@code _} (underscore)
* </ul>
*/
private static String replaceInvalidChars(String str) {
char[] chars = null;
final int strLen = str.length();
int pos = 0;
for (int i = 0; i < strLen; i++) {
final char c = str.charAt(i);
switch (c) {
case ' ' :
case '.' :
case ':' :
case ',' :
if (chars == null) {
chars = str.toCharArray();
}
chars[pos++] = '_';
break;
default :
if (chars != null) {
chars[pos] = c;
}
pos++;
}
}
return chars == null ? str : new String(chars, 0, pos);
}
| 3.26 |
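Since `replaceInvalidChars` is private, the following stand-alone sketch only re-implements the documented replacement rule to show its effect; it drops the allocation-avoidance optimization of the original and is not the library method itself.

```java
public class ReplaceInvalidCharsDemo {

    // Stand-alone re-implementation of the documented rule: space, '.', ':' and ','
    // are mapped to '_'; everything else is kept as-is.
    static String replaceInvalidChars(String str) {
        StringBuilder sb = new StringBuilder(str.length());
        for (char c : str.toCharArray()) {
            sb.append(c == ' ' || c == '.' || c == ':' || c == ',' ? '_' : c);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // Prints "taskmanager_host_1_numRecordsIn"
        System.out.println(replaceInvalidChars("taskmanager.host:1 numRecordsIn"));
    }
}
```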
flink_SimpleOperatorFactory_of_rdh
|
/**
* Create a SimpleOperatorFactory from existed StreamOperator.
*/
@SuppressWarnings("unchecked")
public static <OUT> SimpleOperatorFactory<OUT> of(StreamOperator<OUT> operator) {
if (operator == null) {
return null;
} else if ((operator instanceof StreamSource) && (((StreamSource) (operator)).getUserFunction() instanceof InputFormatSourceFunction)) {
return new SimpleInputFormatOperatorFactory<OUT>(((StreamSource) (operator)));
} else if ((operator instanceof UserFunctionProvider) && (((UserFunctionProvider<Function>) (operator)).getUserFunction() instanceof OutputFormatSinkFunction)) {
return new SimpleOutputFormatOperatorFactory<>(((OutputFormatSinkFunction<?>) (((UserFunctionProvider<Function>) (operator)).getUserFunction())).getFormat(), operator);
} else if (operator instanceof AbstractUdfStreamOperator) {
return new SimpleUdfStreamOperatorFactory<OUT>(((AbstractUdfStreamOperator) (operator)));
} else {
return new SimpleOperatorFactory<>(operator);
}
}
| 3.26 |
flink_WatermarkAssignerOperator_processWatermark_rdh
|
/**
* Override the base implementation to completely ignore watermarks propagated from upstream (we
* rely only on the {@link WatermarkGenerator} to emit watermarks from here).
*/
@Override
public void processWatermark(Watermark mark) throws Exception {
// if we receive a Long.MAX_VALUE watermark we forward it since it is used
// to signal the end of input and to not block watermark progress downstream
if ((mark.getTimestamp() == Long.MAX_VALUE) && (currentWatermark != Long.MAX_VALUE)) {
if ((idleTimeout > 0) && currentStatus.equals(WatermarkStatus.IDLE)) {
// mark the channel active
emitWatermarkStatus(WatermarkStatus.ACTIVE);
}
currentWatermark = Long.MAX_VALUE;
output.emitWatermark(mark);
}
}
| 3.26 |
flink_StreamNonDeterministicPhysicalPlanResolver_resolvePhysicalPlan_rdh
|
/**
* Try to resolve the NDU problem if configured {@link OptimizerConfigOptions#TABLE_OPTIMIZER_NONDETERMINISTIC_UPDATE_STRATEGY} is in `TRY_RESOLVE`
* mode. Will raise an error if the NDU issues in the given plan can not be completely solved.
*/
public static List<RelNode> resolvePhysicalPlan(List<RelNode> expanded, TableConfig tableConfig) {
OptimizerConfigOptions.NonDeterministicUpdateStrategy handling = tableConfig.getConfiguration().get(OptimizerConfigOptions.TABLE_OPTIMIZER_NONDETERMINISTIC_UPDATE_STRATEGY);
if (handling == NonDeterministicUpdateStrategy.TRY_RESOLVE) {
Preconditions.checkArgument(expanded.stream().allMatch(rel -> rel instanceof StreamPhysicalRel));
StreamNonDeterministicUpdatePlanVisitor planResolver = new StreamNonDeterministicUpdatePlanVisitor();
return expanded.stream().map(rel -> ((StreamPhysicalRel) (rel))).map(planResolver::visit).collect(Collectors.toList());
}
// do nothing, return original relNodes
return expanded;
}
| 3.26 |
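A hedged sketch of how the `TRY_RESOLVE` mode that triggers this resolver is usually switched on from user code, assuming the standard Table API entry points (`TableEnvironment`, `TableConfig#set`).

```java
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.OptimizerConfigOptions;

public class NduStrategyExample {

    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        // With TRY_RESOLVE, the planner runs the non-deterministic-update resolver
        // shown above and fails the query if the NDU issues cannot be fixed.
        tEnv.getConfig()
                .set(
                        OptimizerConfigOptions.TABLE_OPTIMIZER_NONDETERMINISTIC_UPDATE_STRATEGY,
                        OptimizerConfigOptions.NonDeterministicUpdateStrategy.TRY_RESOLVE);
    }
}
```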
flink_ExecutionConfigAccessor_fromConfiguration_rdh
|
/**
* Creates an {@link ExecutionConfigAccessor} based on the provided {@link Configuration}.
*/
public static ExecutionConfigAccessor fromConfiguration(final Configuration configuration) {
return new ExecutionConfigAccessor(checkNotNull(configuration));
}
| 3.26 |
flink_ExecutionConfigAccessor_fromProgramOptions_rdh
|
/**
* Creates an {@link ExecutionConfigAccessor} based on the provided {@link ProgramOptions} as
* provided by the user through the CLI.
*/
public static <T> ExecutionConfigAccessor fromProgramOptions(final ProgramOptions options, final List<T> jobJars) {
checkNotNull(options);
checkNotNull(jobJars);
final Configuration configuration = new Configuration();
options.applyToConfiguration(configuration);
ConfigUtils.encodeCollectionToConfig(configuration, PipelineOptions.JARS, jobJars, Object::toString);
return new ExecutionConfigAccessor(configuration);
}
| 3.26 |
flink_TGetQueryIdReq_findByThriftIdOrThrow_rdh
|
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
| 3.26 |
flink_TGetQueryIdReq_isSet_rdh
|
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case OPERATION_HANDLE :
return isSetOperationHandle();
}
throw new IllegalStateException();
}
| 3.26 |
flink_TGetQueryIdReq_findByName_rdh
|
/**
 * Find the _Fields constant that matches name, or null if it is not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
| 3.26 |
flink_TGetQueryIdReq_findByThriftId_rdh
|
/**
 * Find the _Fields constant that matches fieldId, or null if it is not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1 :
// OPERATION_HANDLE
return OPERATION_HANDLE;
default :
return null;
}
}
| 3.26 |
flink_TGetQueryIdReq_isSetOperationHandle_rdh
|
/**
* Returns true if field operationHandle is set (has been assigned a value) and false otherwise
*/
public boolean isSetOperationHandle() {
return this.operationHandle != null;
}
| 3.26 |
flink_RequestJobsWithIDsOverview_readResolve_rdh
|
/**
* Preserve the singleton property by returning the singleton instance
*/
private Object readResolve() {
return INSTANCE;
}
| 3.26 |
flink_RequestJobsWithIDsOverview_hashCode_rdh
|
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return RequestJobsWithIDsOverview.class.hashCode();
}
| 3.26 |
flink_OuterJoinPaddingUtil_padLeft_rdh
|
/**
* Returns a padding result with the given left row.
*
* @param leftRow
* the left row to pad
* @return the reusable null padding result
*/
public final RowData padLeft(RowData leftRow) {
return joinedRow.replace(leftRow, rightNullPaddingRow);
}
| 3.26 |
flink_OuterJoinPaddingUtil_m0_rdh
|
/**
* Returns a padding result with the given right row.
*
* @param rightRow
* the right row to pad
* @return the reusable null padding result
*/
public final RowData m0(RowData rightRow) {
return joinedRow.replace(leftNullPaddingRow, rightRow);
}
| 3.26 |
flink_StopWithSavepointTerminationManager_stopWithSavepoint_rdh
|
/**
* Enforces the correct completion order of the passed {@code CompletableFuture} instances in
* accordance to the contract of {@link StopWithSavepointTerminationHandler}.
*
* @param completedSavepointFuture
* The {@code CompletableFuture} of the savepoint creation step.
* @param terminatedExecutionStatesFuture
* The {@code CompletableFuture} of the termination step.
* @param mainThreadExecutor
* The executor the {@code StopWithSavepointTerminationHandler}
* operations run on.
* @return A {@code CompletableFuture} containing the path to the created savepoint.
*/
public CompletableFuture<String> stopWithSavepoint(CompletableFuture<CompletedCheckpoint> completedSavepointFuture, CompletableFuture<Collection<ExecutionState>> terminatedExecutionStatesFuture, ComponentMainThreadExecutor mainThreadExecutor) {
// the completedSavepointFuture could also be completed by CheckpointCanceller
// which doesn't run in the mainThreadExecutor
FutureUtils.assertNoException(completedSavepointFuture.handleAsync((completedSavepoint, throwable) -> {
stopWithSavepointTerminationHandler.handleSavepointCreation(completedSavepoint, throwable);
return null;
}, mainThreadExecutor).thenRun(() ->
// the execution termination has to run in a separate Runnable to disconnect it
// from any previous task failure handling
FutureUtils.assertNoException(terminatedExecutionStatesFuture.thenAcceptAsync(stopWithSavepointTerminationHandler::handleExecutionsTermination, mainThreadExecutor))));
return stopWithSavepointTerminationHandler.getSavepointPath();
}
| 3.26 |
flink_ArrowWriter_getFieldWriters_rdh
|
/**
* Gets the field writers.
*/
public ArrowFieldWriter<IN>[] getFieldWriters() {
return fieldWriters;
}
| 3.26 |
flink_ArrowWriter_write_rdh
|
/**
* Writes the specified row which is serialized into Arrow format.
*/
public void write(IN row) {
for (int i = 0; i < fieldWriters.length; i++) {
fieldWriters[i].write(row, i);
}
}
| 3.26 |
flink_ArrowWriter_finish_rdh
|
/**
* Finishes the writing of the current row batch.
*/
public void finish() {
root.setRowCount(fieldWriters[0].getCount());
for (ArrowFieldWriter<IN> fieldWriter : fieldWriters) {
fieldWriter.finish();
}
}
| 3.26 |
flink_ArrowWriter_reset_rdh
|
/**
* Resets the state of the writer to write the next batch of rows.
*/
public void reset() {
root.setRowCount(0);
for (ArrowFieldWriter fieldWriter : fieldWriters) {
fieldWriter.reset();
}
}
| 3.26 |
flink_S3RecoverableFsDataOutputStream_write_rdh
|
// ------------------------------------------------------------------------
// stream methods
// ------------------------------------------------------------------------
@Override
public void write(int b) throws IOException {
f1.write(b);
}
| 3.26 |
flink_S3RecoverableFsDataOutputStream_persist_rdh
|
// ------------------------------------------------------------------------
// recoverable stream methods
// ------------------------------------------------------------------------
@Override
public ResumeRecoverable persist() throws IOException {
lock();
try {
f1.flush();
openNewPartIfNecessary(f0);
// We do not stop writing to the current file, we merely limit the upload to the
// first n bytes of the current file
return upload.snapshotAndGetRecoverable(f1);
} finally {
unlock();
}
}
| 3.26 |
flink_S3RecoverableFsDataOutputStream_openNewPartIfNecessary_rdh
|
// ------------------------------------------------------------------------
private void openNewPartIfNecessary(long sizeThreshold) throws IOException {
final long fileLength = f1.getPos();
if (fileLength >= sizeThreshold) {
lock();
try {
uploadCurrentAndOpenNewPart(fileLength);
} finally {
unlock();
}
}
}
| 3.26 |
flink_S3RecoverableFsDataOutputStream_lock_rdh
|
// ------------------------------------------------------------------------
// locking
// ------------------------------------------------------------------------
private void lock() throws IOException {
try {
lock.lockInterruptibly();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException("interrupted");
}
}
| 3.26 |
flink_S3RecoverableFsDataOutputStream_newStream_rdh
|
// ------------------------------------------------------------------------
// factory methods
// ------------------------------------------------------------------------
public static S3RecoverableFsDataOutputStream newStream(final RecoverableMultiPartUpload upload, final FunctionWithException<File, RefCountedFileWithStream, IOException> tmpFileCreator, final long userDefinedMinPartSize) throws IOException {
checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);
final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(tmpFileCreator, Optional.empty());
return new S3RecoverableFsDataOutputStream(upload, tmpFileCreator, fileStream, userDefinedMinPartSize, 0L);
}
| 3.26 |
flink_SupportsReadingMetadata_supportsMetadataProjection_rdh
|
/**
* Defines whether projections can be applied to metadata columns.
*
* <p>This method is only called if the source does <em>not</em> implement {@link SupportsProjectionPushDown}. By default, the planner will only apply metadata columns which
* have actually been selected in the query regardless. By returning {@code false} instead the
* source can inform the planner to apply all metadata columns defined in the table's schema.
*
* <p>If the source implements {@link SupportsProjectionPushDown}, projections of metadata
* columns are always considered before calling {@link #applyReadableMetadata(List, DataType)}.
*/
default boolean supportsMetadataProjection() {
return true;
}
| 3.26 |
flink_BytesKeyNormalizationUtil_putNormalizedKey_rdh
|
/**
* Writes the normalized key of given record. The normalized key consists of the key serialized
* as bytes and the timestamp of the record.
*
* <p>NOTE: The key does not represent a logical order. It can be used only for grouping keys!
*/
static <IN> void putNormalizedKey(Tuple2<byte[], StreamRecord<IN>> record, int dataLength, MemorySegment target, int offset, int numBytes) {
byte[] data = record.f0;
if (dataLength >= numBytes) {
putBytesArray(target, offset, numBytes, data);
} else {
// whole key fits into the normalized key
putBytesArray(target, offset, dataLength, data);
int v1 = offset + numBytes;
offset += dataLength;
long valueOfTimestamp = record.f1.asRecord().getTimestamp() - Long.MIN_VALUE;
if ((dataLength + TIMESTAMP_BYTE_SIZE) <= numBytes) {
// whole timestamp fits into the normalized key
target.putLong(offset, valueOfTimestamp);
offset += TIMESTAMP_BYTE_SIZE;
// fill in the remaining space with zeros
while (offset < v1) {
target.put(offset++, (byte) 0);
}
} else {
// only part of the timestamp fits into normalized key
for (int i = 0; offset < v1; offset++ , i++) {
target.put(offset, ((byte) (valueOfTimestamp >>> ((7 - i) << 3))));
}
}
}
}
| 3.26 |
flink_CombineValueIterator_set_rdh
|
/**
* Sets the interval for the values that are to be returned by this iterator.
*
* @param first
* The position of the first value to be returned.
* @param last
* The position of the last value to be returned.
*/
public void set(int first, int last) {
this.last = last;
this.position = first;
this.iteratorAvailable = true;
}
| 3.26 |
flink_InputSelection_from_rdh
|
/**
* Returns a {@code Builder} that uses the input mask of the specified {@code selection} as
* the initial mask.
*/
public static Builder from(InputSelection selection) {
Builder builder = new Builder();
builder.inputMask = selection.inputMask;
return builder;
}
| 3.26 |
flink_InputSelection_areAllInputsSelected_rdh
|
/**
* Tests if all inputs are selected.
*
* @return {@code true} if the input mask equals -1, {@code false} otherwise.
*/
public boolean areAllInputsSelected() {
return inputMask == (-1L);
}
/**
* Fairly select one of the two inputs for reading. When {@code inputMask} includes two inputs
* and both inputs are available, alternately select one of them. Otherwise, select the
* available one of {@code inputMask}, or return {@link InputSelection#NONE_AVAILABLE} to
* indicate no input is selected.
*
* <p>Note that this supports only two inputs for performance reasons.
*
* @param availableInputsMask
* The mask of all available inputs.
* @param lastReadInputIndex
* The index of last read input.
* @return the index of the input for reading or {@link InputSelection#NONE_AVAILABLE} (if
{@code inputMask} is empty or the inputs in {@code inputMask}
| 3.26 |
flink_InputSelection_select_rdh
|
/**
* Selects an input identified by the given {@code inputId}.
*
* @param inputId
* the input id numbered starting from 1 to 64, and `1` indicates the first
* input. Specially, `-1` indicates all inputs.
* @return a reference to this object.
*/
public Builder select(int inputId) {
if ((inputId > 0) && (inputId <= 64)) {
inputMask |= 1L << (inputId - 1);
} else if (inputId == (-1L)) {
inputMask = -1L;
} else {
throw new IllegalArgumentException("The inputId must be in the range of 1 to 64, or be -1.");
}
return this;
}
| 3.26 |
flink_InputSelection_build_rdh
|
/**
* Build normalized mask, if all inputs were manually selected, inputMask will be normalized
* to -1.
*/
public InputSelection build(int inputCount) {
long allSelectedMask = (1L << inputCount) - 1;
if (inputMask == allSelectedMask) {
inputMask = -1;
} else if (inputMask > allSelectedMask) {
throw new IllegalArgumentException(String.format("inputMask [%d] selects more than expected number of inputs [%d]", inputMask, inputCount));
}
return build();
}
| 3.26 |
flink_InputSelection_isInputSelected_rdh
|
/**
* Tests if the input specified by {@code inputId} is selected.
*
* @param inputId
* The input id, see the description of {@code inputId} in {@link Builder#select(int)}.
* @return {@code true} if the input is selected, {@code false} otherwise.
*/
public boolean isInputSelected(int inputId) {
return (inputMask & (1L << (inputId - 1))) != 0;
}
| 3.26 |
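A small usage sketch of the `InputSelection.Builder` shown above, combining `select`, the normalizing `build(int)`, and the query methods.

```java
import org.apache.flink.streaming.api.operators.InputSelection;

public class InputSelectionExample {

    public static void main(String[] args) {
        // Select inputs 1 and 2 of a two-input operator; because every input is
        // selected, build(2) normalizes the mask to -1 (i.e. "all inputs").
        InputSelection selection =
                new InputSelection.Builder().select(1).select(2).build(2);

        System.out.println(selection.areAllInputsSelected()); // true
        System.out.println(selection.isInputSelected(1));     // true
    }
}
```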
flink_InMemoryPartition_appendRecord_rdh
|
// --------------------------------------------------------------------------------------------------
/**
* Inserts the given object into the current buffer. This method returns a pointer that can be
* used to address the written record in this partition.
*
* @param record
* The object to be written to the partition.
* @return A pointer to the object in the partition.
* @throws IOException
* Thrown when the write failed.
*/
public final long appendRecord(T record) throws IOException {
long pointer = this.writeView.m1();
try {
this.serializer.serialize(record, this.writeView);
this.recordCounter++;
return pointer;
} catch (EOFException e) {
// we ran out of pages.
// first, reset the pages and then we need to trigger a compaction
// int oldCurrentBuffer =
this.writeView.resetTo(pointer);
// for (int bufNum = this.partitionPages.size() - 1; bufNum > oldCurrentBuffer;
// bufNum--) {
// this.availableMemory.addMemorySegment(this.partitionPages.remove(bufNum));
// }
throw e;
}
}
| 3.26 |
flink_InMemoryPartition_resetOverflowBuckets_rdh
|
/**
* resets overflow bucket counters and returns freed memory and should only be used for resizing
*
* @return freed memory segments
*/
public ArrayList<MemorySegment> resetOverflowBuckets() {
this.numOverflowSegments = 0;
this.nextOverflowBucket = 0;
ArrayList<MemorySegment> result = new ArrayList<MemorySegment>(this.overflowSegments.length);
for (int i = 0; i < this.overflowSegments.length; i++) {
if (this.overflowSegments[i] != null) {
result.add(this.overflowSegments[i]);
}
}
this.overflowSegments = new MemorySegment[2];
return result;
}
| 3.26 |
flink_InMemoryPartition_getBlockCount_rdh
|
/**
*
* @return number of segments owned by partition
*/
public int getBlockCount() {
return this.partitionPages.size();
}
| 3.26 |
flink_InMemoryPartition_getPartitionNumber_rdh
|
// --------------------------------------------------------------------------------------------------
/**
* Gets the partition number of this partition.
*
* @return This partition's number.
*/
public int getPartitionNumber() {
return this.partitionNumber;
}
| 3.26 |
flink_InMemoryPartition_setPartitionNumber_rdh
|
/**
* overwrites partition number and should only be used on compaction partition
*
* @param number
* new partition
*/
public void setPartitionNumber(int number) {
this.partitionNumber = number;
}
| 3.26 |
flink_InMemoryPartition_allocateSegments_rdh
|
/**
 * Attempts to allocate the specified number of segments; should only be used by the compaction
 * partition. Fails silently if not enough segments are available, since the next compaction
 * could still succeed.
*
* @param numberOfSegments
* allocation count
*/
public void allocateSegments(int numberOfSegments) {
while (getBlockCount() < numberOfSegments) {
MemorySegment next = this.availableMemory.nextSegment();
if (next != null) {
this.partitionPages.add(next);
} else {
return;
}
}
}
| 3.26 |
flink_InMemoryPartition_setIsCompacted_rdh
|
/**
* sets compaction status (should only be set <code>true</code> directly after compaction and
* <code>false</code> when garbage was created)
*
* @param compacted
* compaction status
*/
public void setIsCompacted(boolean compacted) {
this.compacted = compacted;
}
| 3.26 |
flink_InMemoryPartition_isCompacted_rdh
|
/**
*
* @return true if garbage exists in partition
*/
public boolean isCompacted() {
return this.compacted;
}
| 3.26 |
flink_InMemoryPartition_m0_rdh
|
/**
* number of records in partition including garbage
*
* @return number record count
*/
public long m0() {
return this.recordCounter;
}
| 3.26 |
flink_InMemoryPartition_resetRWViews_rdh
|
/**
* resets read and write views and should only be used on compaction partition
*/
public void resetRWViews() {
this.writeView.resetTo(0L);
this.readView.setReadPosition(0L);
}
| 3.26 |
flink_InMemoryPartition_resetRecordCounter_rdh
|
/**
* sets record counter to zero and should only be used on compaction partition
*/
public void resetRecordCounter() {
this.recordCounter = 0L;
}
| 3.26 |
flink_InMemoryPartition_clearAllMemory_rdh
|
/**
* releases all of the partition's segments (pages and overflow buckets)
*
* @param target
* memory pool to release segments to
*/
public void clearAllMemory(List<MemorySegment> target) {
// return the overflow segments
if (this.overflowSegments != null) {
for (int k = 0; k < this.numOverflowSegments; k++) {
target.add(this.overflowSegments[k]);
}
}
// return the partition buffers
target.addAll(this.partitionPages);
this.partitionPages.clear();
}
| 3.26 |