name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
flink_Transformation_getBufferTimeout_rdh
|
/**
* Returns the buffer timeout of this {@code Transformation}.
*
* @see #setBufferTimeout(long)
*/
public long getBufferTimeout() {
return bufferTimeout;
}
| 3.26 |
flink_Transformation_getOutputType_rdh
|
/**
* Returns the output type of this {@code Transformation} as a {@link TypeInformation}. Once
* this is used once the output type cannot be changed anymore using {@link #setOutputType}.
*
* @return The output type of this {@code Transformation}
*/
public TypeInformation<T> getOutputType() {
if (outputType instanceof MissingTypeInfo) {
MissingTypeInfo typeInfo = ((MissingTypeInfo) (this.outputType));
throw new InvalidTypesException(
"The return type of function '"
+ typeInfo.getFunctionName()
+ "' could not be determined automatically, due to type erasure. "
+ "You can give type information hints by using the returns(...) "
+ "method on the result of the transformation call, or by letting "
+ "your function implement the 'ResultTypeQueryable' "
+ "interface.",
typeInfo.getTypeException());
}
typeUsed = true;
return this.outputType;
}
| 3.26 |
flink_Transformation_getManagedMemoryOperatorScopeUseCaseWeights_rdh
|
/**
* Get operator scope use cases that this transformation needs managed memory for, and the
* use-case-specific weights for this transformation. The weights are used for sharing managed
* memory across transformations for the use cases. Check the individual {@link ManagedMemoryUseCase} for the specific weight definition.
*/
public Map<ManagedMemoryUseCase, Integer> getManagedMemoryOperatorScopeUseCaseWeights() {
return Collections.unmodifiableMap(managedMemoryOperatorScopeUseCaseWeights);
}
| 3.26 |
flink_Transformation_getUserProvidedNodeHash_rdh
|
/**
* Gets the user provided hash.
*
* @return The user provided hash.
*/
public String getUserProvidedNodeHash() {
return userProvidedNodeHash;
}
| 3.26 |
flink_Transformation_setDescription_rdh
|
/**
* Changes the description of this {@code Transformation}.
*/
public void setDescription(String description) {
this.description = Preconditions.checkNotNull(description);
}
| 3.26 |
flink_Transformation_setUid_rdh
|
/**
* Sets an ID for this {@link Transformation}. This will later be hashed to a uidHash which
* is then used to create the JobVertexID (that is shown in logs and the web ui).
*
* <p>The specified ID is used to assign the same operator ID across job submissions (for
* example when starting a job from a savepoint).
*
* <p><strong>Important</strong>: this ID needs to be unique per transformation and job.
* Otherwise, job submission will fail.
*
* @param uid
* The unique user-specified ID of this transformation.
*/
public void setUid(String uid) {
this.uid = uid;
}
| 3.26 |
flink_Transformation_setParallelism_rdh
|
/**
* Sets the parallelism of this {@code Transformation}.
*
* @param parallelism
* The new parallelism to set on this {@code Transformation}.
*/
public void setParallelism(int parallelism) {
setParallelism(parallelism, true);
}
| 3.26 |
flink_Transformation_setMaxParallelism_rdh
|
/**
* Sets the maximum parallelism for this stream transformation.
*
* @param maxParallelism
* Maximum parallelism for this stream transformation.
*/
public void setMaxParallelism(int maxParallelism) {
OperatorValidationUtils.validateMaxParallelism(maxParallelism, UPPER_BOUND_MAX_PARALLELISM);
this.maxParallelism = maxParallelism;
}
| 3.26 |
flink_Transformation_declareManagedMemoryUseCaseAtOperatorScope_rdh
|
/**
* Declares that this transformation contains certain operator scope managed memory use case.
*
* @param managedMemoryUseCase
* The use case that this transformation declares needing managed
* memory for.
* @param weight
* Use-case-specific weights for this transformation. Used for sharing managed
* memory across transformations for OPERATOR scope use cases. Check the individual {@link ManagedMemoryUseCase} for the specific weight definition.
* @return The previous weight, if it exists.
*/
public Optional<Integer> declareManagedMemoryUseCaseAtOperatorScope(ManagedMemoryUseCase managedMemoryUseCase, int weight) {
Preconditions.checkNotNull(managedMemoryUseCase);
Preconditions.checkArgument(managedMemoryUseCase.scope == Scope.OPERATOR, "Use case is not operator scope.");
Preconditions.checkArgument(weight > 0, "Weights for operator scope use cases must be greater than 0.");
return Optional.ofNullable(managedMemoryOperatorScopeUseCaseWeights.put(managedMemoryUseCase, weight));
}
| 3.26 |
flink_Transformation_setSlotSharingGroup_rdh
|
/**
* Sets the slot sharing group of this transformation. Parallel instances of operations that are
* in the same slot sharing group will be co-located in the same TaskManager slot, if possible.
*
* <p>Initially, an operation is in the default slot sharing group. This can be set explicitly
* by constructing a {@link SlotSharingGroup} with the name {@code "default"}.
*
* @param slotSharingGroup
* which contains name and its resource spec.
*/
public void setSlotSharingGroup(SlotSharingGroup slotSharingGroup) {
this.slotSharingGroup = Optional.of(slotSharingGroup);
}
| 3.26 |
flink_Transformation_getSlotSharingGroup_rdh
|
/**
* Returns the slot sharing group of this transformation if present.
*
* @see #setSlotSharingGroup(SlotSharingGroup)
*/
public Optional<SlotSharingGroup> getSlotSharingGroup() {
return f1;
}
| 3.26 |
flink_Transformation_setBufferTimeout_rdh
|
/**
* Set the buffer timeout of this {@code Transformation}. The timeout defines how long data may
* linger in a partially full buffer before being sent over the network.
*
* <p>Lower timeouts lead to lower tail latencies, but may affect throughput. For Flink 1.5+,
* timeouts of 1ms are feasible for jobs with high parallelism.
*
* <p>A value of -1 means that the default buffer timeout should be used. A value of zero
* indicates that no buffering should happen, and all records/events should be immediately sent
* through the network, without additional buffering.
*/
public void setBufferTimeout(long bufferTimeout) {
checkArgument(bufferTimeout >= (-1));
this.bufferTimeout = bufferTimeout;
}
| 3.26 |
flink_Transformation_getParallelism_rdh
|
/**
* Returns the parallelism of this {@code Transformation}.
*/
public int getParallelism() {
return parallelism;
}
| 3.26 |
flink_Transformation_getDescription_rdh
|
/**
* Returns the description of this {@code Transformation}.
*/
public String getDescription() {
return description;
}
| 3.26 |
flink_Transformation_setOutputType_rdh
|
/**
* Tries to fill in the type information. Type information can be filled in later when the
* program uses a type hint. This method checks whether the type information has ever been
* accessed before and does not allow modifications if the type was accessed already. This
* ensures consistency by making sure different parts of the operation do not assume different
* type information.
*
* @param outputType
* The type information to fill in.
* @throws IllegalStateException
* Thrown, if the type information has been accessed before.
*/
public void setOutputType(TypeInformation<T> outputType) {
if (typeUsed) {
throw new IllegalStateException(
"TypeInformation cannot be filled in for the type after it has been used. "
+ "Please make sure that the type info hints are the first call after"
+ " the transformation function, "
+ "before any access to types or semantic properties, etc.");
}
this.outputType = outputType;
}
| 3.26 |
flink_Transformation_setResources_rdh
|
/**
* Sets the minimum and preferred resources for this stream transformation.
*
* @param minResources
* The minimum resource of this transformation.
* @param preferredResources
* The preferred resource of this transformation.
*/
public void setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
OperatorValidationUtils.validateMinAndPreferredResources(minResources, preferredResources);
this.minResources = checkNotNull(minResources);
this.preferredResources = checkNotNull(preferredResources);
}
| 3.26 |
flink_MultiShotLatch_await_rdh
|
/**
* Waits until {@link #trigger()} is called.
*/
public void await() throws InterruptedException {
synchronized(lock) {
while (!triggered) {
lock.wait();
}
triggered = false;
}
}
| 3.26 |
flink_MultiShotLatch_trigger_rdh
|
/**
* Fires the latch. Code that is blocked on {@link #await()} will now return.
*/
public void trigger() {
synchronized(lock) {
triggered = true;
lock.notifyAll();
}
}
| 3.26 |
flink_ColumnSummary_getTotalCount_rdh
|
/**
* The number of all rows in this column including both nulls and non-nulls.
*/
public long getTotalCount() {
return getNullCount() + getNonNullCount();
}
| 3.26 |
flink_ColumnSummary_containsNonNull_rdh
|
/**
* True if this column contains any non-null values.
*/
public boolean containsNonNull() {
return getNonNullCount() > 0L;
}
| 3.26 |
flink_FileSourceSplit_getReaderPosition_rdh
|
/**
* Gets the (checkpointed) position of the reader, if set. This value is typically absent for
* splits when assigned from the enumerator to the readers, and present when the splits are
* recovered from a checkpoint.
*/
public Optional<CheckpointedPosition> getReaderPosition() {
return Optional.ofNullable(f1);
}
| 3.26 |
flink_FileSourceSplit_length_rdh
|
/**
* Returns the number of bytes in the file region described by this source split.
*/
public long length() {
return length;
}
| 3.26 |
flink_FileSourceSplit_fileSize_rdh
|
/**
* Returns the full file size in bytes, from {@link FileStatus#getLen()}.
*/
public long fileSize() {
return fileSize;
}
| 3.26 |
flink_FileSourceSplit_fileModificationTime_rdh
|
/**
* Returns the modification time of the file, from {@link FileStatus#getModificationTime()}.
*/
public long fileModificationTime() {
return fileModificationTime;
}
| 3.26 |
flink_FileSourceSplit_updateWithCheckpointedPosition_rdh
|
/**
* Creates a copy of this split where the checkpointed position is replaced by the given new
* position.
*
* <p><b>IMPORTANT:</b> Subclasses that add additional information to the split must override
* this method to return that subclass type. This contract is enforced by checks in the file
* source implementation. We did not try to enforce this contract via generics in this split
* class, because it leads to very ugly and verbose use of generics.
*/
public FileSourceSplit updateWithCheckpointedPosition(@Nullable CheckpointedPosition position) {
return new FileSourceSplit(id, filePath, f0, length, fileModificationTime, fileSize, hostnames, position);
}
| 3.26 |
flink_FileSourceSplit_splitId_rdh
|
// ------------------------------------------------------------------------
// split properties
// ------------------------------------------------------------------------
@Override
public String splitId() {
return id;
}
| 3.26 |
flink_FileSourceSplit_m0_rdh
|
// ------------------------------------------------------------------------
// utils
// ------------------------------------------------------------------------
@Override
public String m0() {
final String hosts = (hostnames.length == 0) ? "(no host info)" : " hosts=" + Arrays.toString(hostnames);
return String.format("FileSourceSplit: %s [%d, %d) %s ID=%s position=%s", filePath, f0, f0 + length, hosts, id, f1);
}
| 3.26 |
flink_FileSourceSplit_offset_rdh
|
/**
* Returns the start of the file region referenced by this source split. The position is
* inclusive, the value indicates the first byte that is part of the split.
*/
public long offset() {
return f0;
}
| 3.26 |
flink_FlinkContainersSettings_zookeeperHostname_rdh
|
/**
* Sets the {@code zookeeperHostname} and returns a reference to this Builder enabling
* method chaining.
*
* @param zookeeperHostname
* The Zookeeper hostname.
* @return A reference to this Builder.
*/
public Builder zookeeperHostname(String zookeeperHostname) {
this.zookeeperHostname = zookeeperHostname;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_defaultConfig_rdh
|
/**
* {@code FlinkContainersConfig} based on defaults.
*
* @return The Flink containers config.
*/
public static FlinkContainersSettings defaultConfig() {
return builder().build();
}
| 3.26 |
flink_FlinkContainersSettings_jobManagerHostname_rdh
|
/**
* Sets the job manager hostname and returns a reference to this Builder enabling method
* chaining.
*
* @param jobManagerHostname
* The job manager hostname to set.
* @return A reference to this Builder.
*/
public Builder jobManagerHostname(String jobManagerHostname) {
return setConfigOption(JobManagerOptions.ADDRESS, jobManagerHostname);
}
| 3.26 |
flink_FlinkContainersSettings_checkpointPath_rdh
|
/**
* Sets the {@code checkpointPath} and returns a reference to this Builder enabling method
* chaining.
*
* @param checkpointPath
* The checkpoint path to set.
* @return A reference to this Builder.
*/
public Builder checkpointPath(String checkpointPath) {
this.checkpointPath = checkpointPath;
return setConfigOption(CheckpointingOptions.CHECKPOINTS_DIRECTORY, toUri(checkpointPath));
}
| 3.26 |
flink_FlinkContainersSettings_enableZookeeperHA_rdh
|
/**
* Enables Zookeeper HA. NOTE: this option uses default HA configuration. If you want to use
* non-default configuration, you should provide all settings, including the HA_MODE
* directly via the {@code basedOn()} method instead.
*
* @return A reference to this Builder.
*/
public Builder enableZookeeperHA() {
zookeeperHA = true;
setConfigOption(HighAvailabilityOptions.HA_MODE, "zookeeper");
setConfigOption(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, f1);
setConfigOption(HighAvailabilityOptions.HA_CLUSTER_ID, "flink-container-" + UUID.randomUUID());
setConfigOption(HighAvailabilityOptions.HA_STORAGE_PATH, toUri(DEFAULT_HA_STORAGE_PATH));
return this;
}
| 3.26 |
flink_FlinkContainersSettings_getDefaultCheckpointPath_rdh
|
/**
* Gets default checkpoint path.
*
* @return The default checkpoint path.
*/
public static String getDefaultCheckpointPath() {
return DEFAULT_CHECKPOINT_PATH;
}
| 3.26 |
flink_FlinkContainersSettings_getDefaultFlinkHome_rdh
|
/**
* Gets default flink home.
*
* @return The default flink home path.
*/
public static String getDefaultFlinkHome() {
return DEFAULT_FLINK_HOME;
}
| 3.26 |
flink_FlinkContainersSettings_jarPaths_rdh
|
/**
* Sets the {@code jarPaths} and returns a reference to this Builder enabling method
* chaining.
*
* @param jarPaths
* The {@code jarPaths} to set.
* @return A reference to this Builder.
*/
public Builder jarPaths(Collection<String> jarPaths) {
this.jarPaths = jarPaths;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_getNumSlotsPerTaskManager_rdh
|
/**
* Gets the number of slots per task manager.
*
* @return The number of slots per task manager.
*/
public int getNumSlotsPerTaskManager() {
return numSlotsPerTaskManager;
}
| 3.26 |
flink_FlinkContainersSettings_numTaskManagers_rdh
|
/**
* Sets the {@code numTaskManagers} and returns a reference to this Builder enabling method
* chaining.
*
* @param numTaskManagers
* The {@code numTaskManagers} to set.
* @return A reference to this Builder.
*/
public Builder numTaskManagers(int numTaskManagers) {
this.numTaskManagers = numTaskManagers;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_build_rdh
|
/**
* Returns a {@code FlinkContainersConfig} built from the parameters previously set.
*
* @return A {@code FlinkContainersConfig} built with parameters of this {@code FlinkContainersConfig.Builder}.
*/
public FlinkContainersSettings build() {
return new FlinkContainersSettings(this);
}
| 3.26 |
flink_FlinkContainersSettings_isBuildFromFlinkDist_rdh
|
/**
* Returns whether to build from flink-dist or from an existing base container. Also see the
* {@code baseImage} property.
*/
public Boolean isBuildFromFlinkDist() {
return buildFromFlinkDist;
}
| 3.26 |
flink_FlinkContainersSettings_getHaStoragePath_rdh
|
/**
* Gets HA storage path.
*
* @return The ha storage path.
*/
public String getHaStoragePath() {
return haStoragePath;
}
| 3.26 |
flink_FlinkContainersSettings_setConfigOption_rdh
|
/**
* Sets a single Flink configuration parameter (the options for flink-conf.yaml) and returns
* a reference to this Builder enabling method chaining.
*
* @param <T>
* The type parameter.
* @param option
* The option.
* @param value
* The value.
* @return A reference to this Builder.
*/
public <T> Builder setConfigOption(ConfigOption<T> option, T value) {
this.flinkConfiguration.set(option, value);
return this;
}
| 3.26 |
flink_FlinkContainersSettings_m0_rdh
|
/**
* Gets jar paths.
*
* @return The jar paths.
*/
public Collection<String> m0() {
return jarPaths;
}
| 3.26 |
flink_FlinkContainersSettings_fullConfiguration_rdh
|
/**
* Sets the {@code flinkConfiguration} value to {@code config} and returns a reference to
* this Builder enabling method chaining.
*
* @param <T>
* the type parameter
* @param config
* The {@code config} to set.
* @return A reference to this Builder.
*/
public <T> Builder fullConfiguration(Configuration config) {
this.flinkConfiguration = config;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_flinkDistLocation_rdh
|
/**
* Sets the {@code flinkDistLocation} and returns a reference to this Builder enabling
* method chaining.
*
* @param flinkDistLocation
* The {@code flinkDistLocation} to set.
* @return A reference to this Builder.
*/
public Builder flinkDistLocation(String flinkDistLocation) {
this.flinkDistLocation = flinkDistLocation;
this.buildFromFlinkDist = true;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_flinkHome_rdh
|
/**
* Sets the path of the Flink distribution inside the container. Returns a reference to this
* Builder enabling method chaining.
*
* @param flinkHome
* The {@code flinkHome} to set.
* @return A reference to this Builder.
*/
public Builder flinkHome(String flinkHome) {
this.flinkHome = flinkHome;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_numSlotsPerTaskManager_rdh
|
/**
* Sets the {@code numSlotsPerTaskManager} and returns a reference to this Builder enabling
* method chaining. It also adds this property into the {@code flinkConfiguration} field.
*
* @param numSlotsPerTaskManager
* The {@code numSlotsPerTaskManager} to set.
* @return A reference to this Builder.
*/
public Builder numSlotsPerTaskManager(int numSlotsPerTaskManager) {
this.numSlotsPerTaskManager = numSlotsPerTaskManager;
return setConfigOption(TaskManagerOptions.NUM_TASK_SLOTS, numSlotsPerTaskManager);
}
/**
* Sets the {@code jarPaths} and returns a reference to this Builder enabling method
* chaining.
*
* @param jarPaths
* The {@code jarPaths}
| 3.26 |
flink_FlinkContainersSettings_taskManagerHostnamePrefix_rdh
|
/**
* Sets the {@code taskManagerHostnamePrefix} and returns a reference to this Builder
* enabling method chaining.
*
* @param taskManagerHostnamePrefix
* The {@code taskManagerHostnamePrefix} to set.
* @return A reference to this Builder.
*/
public Builder taskManagerHostnamePrefix(String taskManagerHostnamePrefix) {
this.taskManagerHostnamePrefix = taskManagerHostnamePrefix;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_haStoragePath_rdh
|
/**
* Sets the {@code haStoragePath} and returns a reference to this Builder enabling method
* chaining.
*
* @param haStoragePath
* The path for storing HA data.
* @return A reference to this Builder.
*/
public Builder haStoragePath(String haStoragePath) {
this.haStoragePath = haStoragePath;
return setConfigOption(HighAvailabilityOptions.HA_STORAGE_PATH, toUri(haStoragePath));
}
| 3.26 |
flink_FlinkContainersSettings_baseImage_rdh
|
/**
* Sets the {@code baseImage} and returns a reference to this Builder enabling method
* chaining.
*
* @param baseImage
* The {@code baseImage} to set.
* @return A reference to this Builder.
*/
public Builder baseImage(String baseImage) {
this.baseImage = baseImage;
this.buildFromFlinkDist = false;
return this;
}
| 3.26 |
flink_FlinkContainersSettings_builder_rdh
|
/**
* A new builder for {@code FlinkContainersConfig}.
*
* @return The builder.
*/
public static Builder builder() {
return new Builder();
}
| 3.26 |
flink_FlinkContainersSettings_getBaseImage_rdh
|
/**
* Gets base image.
*
* @return The base image.
*/
public String getBaseImage() {
return baseImage;
}
| 3.26 |
flink_FlinkContainersSettings_basedOn_rdh
|
/**
* Merges the provided {@code config} with the default config, potentially overwriting the
* defaults in case of collisions. Returns a reference to this Builder enabling method
* chaining.
*
* @param <T>
* the type parameter
* @param config
* The {@code config} to add.
* @return A reference to this Builder.
*/
public <T> Builder basedOn(Configuration config) {
this.flinkConfiguration.addAll(config);
return this;
}
| 3.26 |
flink_FlinkContainersSettings_isZookeeperHA_rdh
|
/**
* Returns whether Zookeeper HA is enabled.
*/
public Boolean isZookeeperHA() {
return zookeeperHA;
}
| 3.26 |
flink_FlinkContainersSettings_setLogProperty_rdh
|
/**
* Sets a single Flink logging configuration property in the log4j format and returns a
* reference to this Builder enabling method chaining.
*
* @param key
* The property key.
* @param value
* The property value.
* @return A reference to this Builder.
*/
public Builder setLogProperty(String key, String value) {
this.logProperties.setProperty(key, value);
return this;
}
| 3.26 |
flink_FlinkContainersSettings_getLogProperties_rdh
|
/**
* Gets logging properties.
*
* @return The logging properties.
*/
public Properties getLogProperties() {
return logProperties;
}
| 3.26 |
flink_FlinkContainersSettings_getCheckpointPath_rdh
|
/**
* Gets checkpoint path.
*
* @return The checkpoint path.
*/
public String getCheckpointPath() {
return f0;
}
| 3.26 |
flink_PartitionRequestClientFactory_createPartitionRequestClient_rdh
|
/**
* Atomically establishes a TCP connection to the given remote address and creates a {@link NettyPartitionRequestClient} instance for this connection.
*/
NettyPartitionRequestClient createPartitionRequestClient(ConnectionID connectionId) throws IOException, InterruptedException {
// We map the input ConnectionID to a new value to restrict the number of tcp connections
connectionId = new ConnectionID(connectionId.getResourceID(), connectionId.getAddress(), connectionId.getConnectionIndex() % maxNumberOfConnections);
while (true) {
final CompletableFuture<NettyPartitionRequestClient> newClientFuture = new CompletableFuture<>();
final CompletableFuture<NettyPartitionRequestClient> clientFuture = clients.putIfAbsent(connectionId, newClientFuture);
final NettyPartitionRequestClient client;
if (clientFuture == null) {
try {
client = connectWithRetries(connectionId);
} catch (Throwable e) {
newClientFuture.completeExceptionally(new IOException("Could not create Netty client.", e));
clients.remove(connectionId, newClientFuture);
throw e;
}
newClientFuture.complete(client);
} else {
try {
client = clientFuture.get();
} catch (ExecutionException e) {
ExceptionUtils.rethrowIOException(ExceptionUtils.stripExecutionException(e));
return null;
}
}
// Make sure to increment the reference count before handing a client
// out to ensure correct bookkeeping for channel closing.
if (client.validateClientAndIncrementReferenceCounter()) {
return client;
} else if (client.canBeDisposed()) {
client.closeConnection();
} else {
destroyPartitionRequestClient(connectionId, client);
}
}
}
| 3.26 |
flink_PartitionRequestClientFactory_destroyPartitionRequestClient_rdh
|
/**
* Removes the client for the given {@link ConnectionID}.
*/
void destroyPartitionRequestClient(ConnectionID connectionId, PartitionRequestClient client) {
final CompletableFuture<NettyPartitionRequestClient> future = clients.get(connectionId);
if ((future != null) && future.isDone()) {
future.thenAccept(futureClient -> {
if (client.equals(futureClient)) {
clients.remove(connectionId, future);
}
});
}
}
| 3.26 |
flink_AllGroupCombineDriver_setup_rdh
|
// ------------------------------------------------------------------------
@Override
public void setup(TaskContext<GroupCombineFunction<IN, OUT>, OUT> context) {
this.taskContext = context;
}
| 3.26 |
flink_StreamPhysicalPythonCorrelateRule_findTableFunction_rdh
|
// find only calc and table function
private boolean findTableFunction(FlinkLogicalCalc calc) {
RelNode child = ((RelSubset) (calc.getInput())).getOriginal();
if (child instanceof FlinkLogicalTableFunctionScan) {
FlinkLogicalTableFunctionScan scan = ((FlinkLogicalTableFunctionScan) (child));
return PythonUtil.isPythonCall(scan.getCall(), null);
} else if (child instanceof FlinkLogicalCalc) {
FlinkLogicalCalc childCalc = ((FlinkLogicalCalc) (child));
return findTableFunction(childCalc);
}
return false;
}
| 3.26 |
flink_BridgingSqlAggFunction_of_rdh
|
/**
* Creates an instance of an aggregate function during translation.
*/
public static BridgingSqlAggFunction of(FlinkContext context, FlinkTypeFactory typeFactory, ContextResolvedFunction resolvedFunction) {
final DataTypeFactory dataTypeFactory = context.getCatalogManager().getDataTypeFactory();
final TypeInference typeInference = resolvedFunction.getDefinition().getTypeInference(dataTypeFactory);
return of(dataTypeFactory, typeFactory, SqlKind.OTHER_FUNCTION, resolvedFunction, typeInference);
}
| 3.26 |
flink_BridgingSqlAggFunction_createOrderRequirement_rdh
|
// --------------------------------------------------------------------------------------------
private static boolean createOrderRequirement() {
return false;
}
| 3.26 |
flink_HadoopInputs_createHadoopInput_rdh
|
/**
* Creates a Flink {@link InputFormat} that wraps the given Hadoop {@link org.apache.hadoop.mapreduce.InputFormat}.
*
* @return A Flink InputFormat that wraps the Hadoop InputFormat.
*/
public static <K, V> HadoopInputFormat<K, V> createHadoopInput(InputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, Job job) {
return new HadoopInputFormat<>(mapreduceInputFormat, key, value, job);
}
| 3.26 |
flink_HadoopInputs_readHadoopFile_rdh
|
/**
* Creates a Flink {@link InputFormat} that wraps the given Hadoop {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}.
*
* @return A Flink InputFormat that wraps the Hadoop FileInputFormat.
*/
public static <K, V> HadoopInputFormat<K, V> readHadoopFile(FileInputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, String inputPath) throws IOException {
return readHadoopFile(mapreduceInputFormat, key, value, inputPath, Job.getInstance());
}
| 3.26 |
flink_FlinkResultSetMetaData_getType_rdh
|
/**
* Get column type name according type in {@link Types}.
*
* @param type
* the type in {@link Types}
* @return type class name
*/
static String getType(int type) throws SQLException {
// see javax.sql.rowset.RowSetMetaDataImpl
switch (type) {
case Types.NUMERIC :
case Types.DECIMAL :
return BigDecimal.class.getName();
case Types.BOOLEAN :
case Types.BIT :
return Boolean.class.getName();
case Types.TINYINT :
return Byte.class.getName();
case Types.SMALLINT :
return Short.class.getName();
case Types.INTEGER :
return Integer.class.getName();
case Types.BIGINT :
return Long.class.getName();
case Types.REAL :
case Types.FLOAT :
return Float.class.getName();
case Types.DOUBLE :
return Double.class.getName();
case Types.VARCHAR :
case Types.CHAR :
return String.class.getName();
case Types.BINARY :
case Types.VARBINARY :
case Types.LONGVARBINARY :
return "byte[]";
case Types.DATE :
return Date.class.getName();
case Types.TIME :
return Time.class.getName();
case Types.TIMESTAMP :
return Timestamp.class.getName();
case Types.TIMESTAMP_WITH_TIMEZONE :
return OffsetDateTime.class.getName();
case Types.JAVA_OBJECT :
return Map.class.getName();
case Types.ARRAY :
return Array.class.getName();
case Types.STRUCT :
return RowData.class.getName();
}
throw new SQLFeatureNotSupportedException(String.format("Not support data type [%s]", type));
}
| 3.26 |
flink_LogicalPipelinedRegionComputeUtil_computePipelinedRegions_rdh
|
/**
* Utils for computing {@link LogicalPipelinedRegion}s.
*/public final class LogicalPipelinedRegionComputeUtil {
public static Set<Set<LogicalVertex>> computePipelinedRegions(final Iterable<? extends LogicalVertex> topologicallySortedVertices) {
final Map<LogicalVertex, Set<LogicalVertex>> vertexToRegion = buildRawRegions(topologicallySortedVertices, LogicalPipelinedRegionComputeUtil::getMustBePipelinedConsumedResults);
// Since LogicalTopology is a DAG, there is no need to do cycle detection nor to merge
// regions on cycles.
return uniqueVertexGroups(vertexToRegion);
}
| 3.26 |
flink_GroupReduceOperator_translateToDataFlow_rdh
|
// --------------------------------------------------------------------------------------------
@Override
@SuppressWarnings("unchecked")
protected GroupReduceOperatorBase<?, OUT, ?> translateToDataFlow(Operator<IN> input) {
String name = (getName() != null) ? getName() : "GroupReduce at " + defaultName;
// wrap CombineFunction in GroupCombineFunction if combinable
if (combinable && (function instanceof CombineFunction<?, ?>)) {
this.function = (function instanceof RichGroupReduceFunction<?, ?>) ? new RichCombineToGroupCombineWrapper(((RichGroupReduceFunction<?, ?>) (function))) : new CombineToGroupCombineWrapper(((CombineFunction<?, ?>) (function)));
}
// distinguish between grouped reduce and non-grouped reduce
if (grouper == null) {
// non grouped reduce
UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType());
GroupReduceOperatorBase<IN, OUT, GroupReduceFunction<IN, OUT>> po = new GroupReduceOperatorBase<>(function, operatorInfo, new int[0], name);
po.setCombinable(combinable);
po.setInput(input);
// the parallelism for a non grouped reduce can only be 1
po.setParallelism(1);
return po;
}
if (grouper.getKeys() instanceof SelectorFunctionKeys) {
@SuppressWarnings("unchecked")
SelectorFunctionKeys<IN, ?> selectorKeys = ((SelectorFunctionKeys<IN, ?>) (grouper.getKeys()));
if (grouper instanceof SortedGrouping) {
SortedGrouping<IN> sortedGrouping = ((SortedGrouping<IN>) (grouper));
SelectorFunctionKeys<IN, ?> sortKeys = sortedGrouping.getSortSelectionFunctionKey();
Ordering groupOrder = sortedGrouping.getGroupOrdering();
PlanUnwrappingSortedReduceGroupOperator<IN, OUT, ?, ?> po = translateSelectorFunctionSortedReducer(selectorKeys, sortKeys, groupOrder, function, getResultType(), name, input, isCombinable());
po.setParallelism(this.getParallelism());
po.setCustomPartitioner(grouper.getCustomPartitioner());
return po;
} else {
PlanUnwrappingReduceGroupOperator<IN, OUT, ?> po = translateSelectorFunctionReducer(selectorKeys, function, getResultType(), name, input, isCombinable());
po.setParallelism(this.getParallelism());
po.setCustomPartitioner(grouper.getCustomPartitioner());
return po;
}
} else if (grouper.getKeys() instanceof ExpressionKeys) {
int[] logicalKeyPositions = grouper.getKeys().computeLogicalKeyPositions();
UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType());
GroupReduceOperatorBase<IN, OUT, GroupReduceFunction<IN, OUT>> po = new GroupReduceOperatorBase<>(function, operatorInfo, logicalKeyPositions, name);
po.setCombinable(combinable);
po.setInput(input);
po.setParallelism(getParallelism());
po.setCustomPartitioner(grouper.getCustomPartitioner());
// set group order
if (grouper instanceof SortedGrouping) {
SortedGrouping<IN> sortedGrouper = ((SortedGrouping<IN>) (grouper));
int[] sortKeyPositions = sortedGrouper.getGroupSortKeyPositions();
Order[] sortOrders = sortedGrouper.getGroupSortOrders();
Ordering o = new Ordering();
for (int i = 0; i < sortKeyPositions.length; i++) {
o.appendOrdering(sortKeyPositions[i], null, sortOrders[i]);
}
po.setGroupOrder(o);
}
return po;
} else {
throw new UnsupportedOperationException("Unrecognized key type.");
}
}
| 3.26 |
flink_GroupReduceOperator_translateSelectorFunctionReducer_rdh
|
// --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
private static <IN, OUT, K> PlanUnwrappingReduceGroupOperator<IN, OUT, K> translateSelectorFunctionReducer(SelectorFunctionKeys<IN, ?> rawKeys, GroupReduceFunction<IN, OUT> function, TypeInformation<OUT> outputType, String name, Operator<IN> input, boolean combinable) {
SelectorFunctionKeys<IN, K> keys = ((SelectorFunctionKeys<IN, K>) (rawKeys));
TypeInformation<Tuple2<K, IN>> typeInfoWithKey = KeyFunctions.createTypeWithKey(keys);
Operator<Tuple2<K, IN>> keyedInput = KeyFunctions.appendKeyExtractor(input, keys);
PlanUnwrappingReduceGroupOperator<IN, OUT, K> reducer = new PlanUnwrappingReduceGroupOperator(function, keys, name, outputType, typeInfoWithKey, combinable);
reducer.setInput(keyedInput);
return reducer;
}
| 3.26 |
flink_GroupReduceOperator_isCombinable_rdh
|
// --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
@Internal
public boolean isCombinable() {
return combinable;
}
| 3.26 |
flink_SlotSharingSlotAllocator_determineSlotsPerSharingGroup_rdh
|
/**
* Distributes free slots across the slot-sharing groups of the job. Slots are distributed as
* evenly as possible. If a group requires less than an even share of slots the remainder is
* distributed over the remaining groups.
*/
private static Map<SlotSharingGroupId, Integer> determineSlotsPerSharingGroup(JobInformation jobInformation, int freeSlots, int minRequiredSlots, Map<SlotSharingGroupId, SlotSharingGroupMetaInfo> slotSharingGroupMetaInfo) {
int numUnassignedSlots = freeSlots;
int numUnassignedSlotSharingGroups = jobInformation.getSlotSharingGroups().size();
int numMinSlotsRequiredByRemainingGroups = minRequiredSlots;
final Map<SlotSharingGroupId, Integer> slotSharingGroupParallelism = new HashMap<>();
for (SlotSharingGroupId slotSharingGroup : m0(slotSharingGroupMetaInfo)) {
final int minParallelism = slotSharingGroupMetaInfo.get(slotSharingGroup).getMinLowerBound();
// if we reached this point we know we have more slots than we need to fulfill the
// minimum requirements for each slot sharing group.
// this means that a certain number of slots are already implicitly reserved (to fulfill
// the minimum requirement of other groups); so we only need to distribute the remaining
// "optional" slots while only accounting for the requirements beyond the minimum
// the number of slots this group can use beyond the minimum
final int maxOptionalSlots = slotSharingGroupMetaInfo.get(slotSharingGroup).getMaxUpperBound() - minParallelism;
// the number of slots that are not implicitly reserved for minimum requirements
final int freeOptionalSlots = numUnassignedSlots - numMinSlotsRequiredByRemainingGroups;
// the number of slots this group is allowed to use beyond the minimum requirements
final int optionalSlotShare = freeOptionalSlots / numUnassignedSlotSharingGroups;
final int groupParallelism = minParallelism + Math.min(maxOptionalSlots, optionalSlotShare);
slotSharingGroupParallelism.put(slotSharingGroup, groupParallelism);
numMinSlotsRequiredByRemainingGroups -= minParallelism;
numUnassignedSlots -= groupParallelism;
numUnassignedSlotSharingGroups--;
}
return slotSharingGroupParallelism;
}
| 3.26 |
flink_JoinNode_computeOperatorSpecificDefaultEstimates_rdh
|
/**
* The default estimates build on the principle of inclusion: The smaller input key domain is
* included in the larger input key domain. We also assume that every key from the larger input
* has one join partner in the smaller input. The result cardinality is hence the larger one.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
long card1 = getFirstPredecessorNode().getEstimatedNumRecords();
long card2 = getSecondPredecessorNode().getEstimatedNumRecords();
this.estimatedNumRecords = ((card1 < 0) || (card2 < 0)) ? -1 : Math.max(card1, card2);
if (this.estimatedNumRecords >= 0) {
float width1 = getFirstPredecessorNode().getEstimatedAvgWidthPerOutputRecord();
float width2 = getSecondPredecessorNode().getEstimatedAvgWidthPerOutputRecord();
float width = ((width1 <= 0) || (width2 <= 0)) ? -1 : width1 + width2;
if (width > 0) {
this.estimatedOutputSize = ((long) (width * this.estimatedNumRecords));
}
}
}
| 3.26 |
flink_FileMergingSnapshotManagerBuilder_build_rdh
|
/**
* Create file-merging snapshot manager based on configuration.
*
* <p>TODO (FLINK-32074): Support another type of FileMergingSnapshotManager that merges files
* across different checkpoints.
*
* @return the created manager.
*/
public FileMergingSnapshotManager build() {
return new WithinCheckpointFileMergingSnapshotManager(id, ioExecutor == null ? Runnable::run : ioExecutor);
}
| 3.26 |
flink_FileMergingSnapshotManagerBuilder_setIOExecutor_rdh
|
/**
* Sets the executor for IO operations in the manager. If null (the default), all IO operations
* will be executed synchronously.
*/
public FileMergingSnapshotManagerBuilder setIOExecutor(@Nullable Executor ioExecutor) {
this.ioExecutor = ioExecutor;
return this;
}
| 3.26 |
flink_ProducerMergedPartitionFileIndex_getSize_rdh
|
/**
* Get the total size in bytes of this region, including the fields and the buffers.
*/
@Override
public int getSize() {
return f0 + numBuffers;
}
| 3.26 |
flink_ProducerMergedPartitionFileIndex_getRegion_rdh
|
/**
* Get the subpartition's {@link FixedSizeRegion} containing the specific buffer index.
*
* @param subpartitionId
* the subpartition id
* @param bufferIndex
* the buffer index
* @return the region containing the buffer index, or empty if the region is not found.
*/
Optional<FixedSizeRegion> getRegion(TieredStorageSubpartitionId subpartitionId, int bufferIndex) {
synchronized(lock) {
return indexCache.get(subpartitionId.getSubpartitionId(), bufferIndex);
}
}
| 3.26 |
flink_ProducerMergedPartitionFileIndex_addBuffers_rdh
|
/**
* Add buffers to the index.
*
* @param buffers
* to be added. Note, the provided buffers are required to be physically
* consecutive and in the same order as in the file.
*/
void addBuffers(List<FlushedBuffer> buffers) {
if (buffers.isEmpty()) {
return;
}
Map<Integer, List<FixedSizeRegion>> convertedRegions = convertToRegions(buffers);
synchronized(lock) {
convertedRegions.forEach(indexCache::put);
}
}
| 3.26 |
flink_ProducerMergedPartitionFileIndex_convertToRegions_rdh
|
// ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private static Map<Integer, List<FixedSizeRegion>> convertToRegions(List<FlushedBuffer> buffers) {
Map<Integer, List<FixedSizeRegion>> subpartitionRegionMap = new HashMap<>();
Iterator<FlushedBuffer> iterator = buffers.iterator();
FlushedBuffer firstBufferInRegion = iterator.next();
FlushedBuffer lastBufferInRegion = firstBufferInRegion;
while (iterator.hasNext()) {
FlushedBuffer currentBuffer = iterator.next();
if ((currentBuffer.getSubpartitionId() != firstBufferInRegion.getSubpartitionId()) || (currentBuffer.getBufferIndex() != (lastBufferInRegion.getBufferIndex() + 1))) {
// The current buffer belongs to a new region, add the current region to the map
addRegionToMap(firstBufferInRegion, lastBufferInRegion, subpartitionRegionMap);
firstBufferInRegion = currentBuffer;
}
lastBufferInRegion = currentBuffer;
}
// Add the last region to the map
addRegionToMap(firstBufferInRegion, lastBufferInRegion, subpartitionRegionMap);
return subpartitionRegionMap;
}
| 3.26 |
flink_ExtendedParser_getCompletionHints_rdh
|
/**
* Returns completion hints for the given statement at the given cursor position. The completion
* happens case insensitively.
*
* @param statement
* Partial or slightly incorrect SQL statement
* @param cursor
* cursor position
* @return completion hints that fit at the current cursor position
*/
public String[] getCompletionHints(String statement, int cursor) {
String normalizedStatement = statement.trim().toUpperCase();
List<String> hints = new ArrayList<>();
for (ExtendedParseStrategy strategy : PARSE_STRATEGIES) {
for (String hint : strategy.getHints()) {
if (hint.startsWith(normalizedStatement) && (cursor < hint.length())) {
hints.add(getCompletionHint(normalizedStatement, hint));
}
}
}
return hints.toArray(new String[0]);
}
| 3.26 |
flink_ExtendedParser_parse_rdh
|
/**
* Parse the input statement to the {@link Operation}.
*
* @param statement
* the command to evaluate
* @return parsed operation that represents the command
*/
public Optional<Operation> parse(String statement) {
for (ExtendedParseStrategy strategy : PARSE_STRATEGIES) {
if (strategy.match(statement)) {
return Optional.of(strategy.convert(statement));
}
}
return Optional.empty();
}
| 3.26 |
flink_ConfigUtils_m0_rdh
|
/**
* Puts an array of values of type {@code IN} in a {@link WritableConfig} as a {@link ConfigOption} of type {@link List} of type {@code OUT}. If the {@code values} is {@code null}
* or empty, then nothing is put in the configuration.
*
* @param configuration
* the configuration object to put the list in
* @param key
* the {@link ConfigOption option} to serve as the key for the list in the
* configuration
* @param values
* the array of values to put as value for the {@code key}
* @param mapper
* the transformation function from {@code IN} to {@code OUT}.
*/
public static <IN, OUT> void m0(final WritableConfig configuration, final ConfigOption<List<OUT>> key, @Nullable final IN[] values, final Function<IN, OUT> mapper) {
checkNotNull(configuration);
checkNotNull(key);
checkNotNull(mapper);
if (values == null) {
return;
}
encodeCollectionToConfig(configuration, key, Arrays.asList(values), mapper);
}
| 3.26 |
flink_ConfigUtils_encodeCollectionToConfig_rdh
|
/**
* Puts a {@link Collection} of values of type {@code IN} in a {@link WritableConfig} as a
* {@link ConfigOption} of type {@link List} of type {@code OUT}. If the {@code values} is
* {@code null} or empty, then nothing is put in the configuration.
*
* @param configuration
* the configuration object to put the list in
* @param key
* the {@link ConfigOption option} to serve as the key for the list in the
* configuration
* @param values
* the collection of values to put as value for the {@code key}
* @param mapper
* the transformation function from {@code IN} to {@code OUT}.
*/
public static <IN, OUT> void encodeCollectionToConfig(final WritableConfig configuration, final ConfigOption<List<OUT>> key, @Nullable final Collection<IN> values, final Function<IN, OUT> mapper) {
checkNotNull(configuration);
checkNotNull(key);
checkNotNull(mapper);
if (values == null) {
return;
}
final List<OUT> encodedOption = values.stream().filter(Objects::nonNull).map(mapper).filter(Objects::nonNull).collect(Collectors.toCollection(ArrayList::new));
configuration.set(key, encodedOption);
}
/**
* Gets a {@link List} of values of type {@code IN} from a {@link ReadableConfig} and transforms
* it to a {@link List} of type {@code OUT} based on the provided {@code mapper} function.
*
* @param configuration
* the configuration object to get the value out of
* @param key
* the {@link ConfigOption option} to serve as the key for the list in the
* configuration
* @param mapper
* the transformation function from {@code IN} to {@code OUT}.
* @return the transformed values in a list of type {@code OUT}
| 3.26 |
flink_SortOperationFactory_createLimitWithOffset_rdh
|
/**
* Creates a valid {@link SortQueryOperation} with offset (possibly merged into a preceding
* {@link SortQueryOperation}).
*
* @param offset
* offset to start from
* @param child
* relational expression on top of which to apply the sort operation
* @param postResolverFactory
* factory for creating resolved expressions
* @return valid sort operation with applied offset
*/
QueryOperation createLimitWithOffset(int offset, QueryOperation child, PostResolverFactory postResolverFactory) {
SortQueryOperation previousSort = validateAndGetChildSort(child, postResolverFactory);
if (offset < 0) {
throw new ValidationException("Offset should be greater or equal 0");
}
if (previousSort.getOffset() != (-1)) {
throw new ValidationException("OFFSET already defined");
}
return new SortQueryOperation(previousSort.getOrder(), previousSort.getChild(), offset, -1);
}
| 3.26 |
flink_SortOperationFactory_createSort_rdh
|
/**
* Creates a valid {@link SortQueryOperation}.
*
* <p><b>NOTE:</b> If the collation is not explicitly specified for an expression, the
* expression is wrapped in a default ascending order. If no expression is specified, the result
* is not sorted but only limited.
*
* @param orders
* expressions describing order
* @param child
* relational expression on top of which to apply the sort operation
* @param postResolverFactory
* factory for creating resolved expressions
* @return valid sort operation
*/
QueryOperation createSort(List<ResolvedExpression> orders, QueryOperation child,
PostResolverFactory postResolverFactory) {
final OrderWrapper orderWrapper = new OrderWrapper(postResolverFactory);
List<ResolvedExpression> convertedOrders = orders.stream().map(f -> f.accept(orderWrapper)).collect(Collectors.toList());
return new SortQueryOperation(convertedOrders, child);
}
| 3.26 |
flink_SortOperationFactory_createLimitWithFetch_rdh
|
/**
* Creates a valid {@link SortQueryOperation} with fetch (possibly merged into a preceding
* {@link SortQueryOperation}).
*
* @param fetch
* fetch to limit
* @param child
* relational expression on top of which to apply the sort operation
* @param postResolverFactory
* factory for creating resolved expressions
* @return valid sort operation with applied offset
*/
QueryOperation createLimitWithFetch(int fetch, QueryOperation child, PostResolverFactory postResolverFactory) {
SortQueryOperation previousSort = validateAndGetChildSort(child, postResolverFactory);
if (fetch < 0) {
throw new ValidationException("Fetch should be greater or equal 0");
}
int offset = Math.max(previousSort.getOffset(), 0);
return new SortQueryOperation(previousSort.getOrder(), previousSort.getChild(), offset, fetch);
}
| 3.26 |
flink_SourceReaderContext_currentParallelism_rdh
|
/**
* Get the current parallelism of this Source.
*
* @return the parallelism of the Source.
*/
default int currentParallelism() {
throw new UnsupportedOperationException();
}
| 3.26 |
flink_ResolvedCatalogView_getOptions_rdh
|
// --------------------------------------------------------------------------------------------
// Delegations to original CatalogView
// --------------------------------------------------------------------------------------------
@Override
public Map<String, String> getOptions() {
return origin.getOptions();
}
| 3.26 |
flink_HiveFunctionArguments_create_rdh
|
// create from a CallContext
public static HiveFunctionArguments create(CallContext callContext) {
DataType[] argTypes = callContext.getArgumentDataTypes().toArray(new DataType[0]);
Object[] args = new Object[argTypes.length];
BitSet literalIndices = new BitSet(args.length);
for (int i = 0; i < args.length; i++) {
if (callContext.isArgumentLiteral(i)) {
literalIndices.set(i);
args[i] = callContext.getArgumentValue(i, argTypes[i].getLogicalType().getDefaultConversion()).orElse(null);
// we always use string type for string constant args because that's what Hive UDFs
// expect.
// it may happen that the type is char when the function is called
// in Flink SQL, because Calcite treats string literals as char type.
if (args[i] instanceof String) {
argTypes[i] = DataTypes.STRING();
}
}
}
return new HiveFunctionArguments(args, argTypes, literalIndices);
}
| 3.26 |
flink_GroupReduceCombineDriver_setup_rdh
|
// ------------------------------------------------------------------------
@Override
public void setup(TaskContext<GroupCombineFunction<IN, OUT>, OUT> context) {
this.taskContext = context;
this.running = true;
}
| 3.26 |
flink_FileBasedOneShotLatch_await_rdh
|
/**
* Waits until the latch file is created.
*
* <p>When this method returns, subsequent invocations will not block even after the latch file
* is deleted. Note that this method may not return if the latch file is deleted before this
* method returns.
*
* @throws InterruptedException
* if interrupted while waiting
*/
public void await() throws InterruptedException {
if (isReleasedOrReleasable()) {
return;
}
awaitLatchFile(watchService);
}
| 3.26 |
flink_PythonTableFunctionOperator_isFinishResult_rdh
|
/**
* The received udtf execution result is a finish message when it is a byte with value 0x00.
*/
private boolean isFinishResult(byte[] rawUdtfResult, int length) {
return (length == 1) && (rawUdtfResult[0] == 0x0);
}
| 3.26 |
flink_SingleOutputStreamOperator_m1_rdh
|
/**
* Sets the buffering timeout for data produced by this operation. The timeout defines how long
* data may linger in a partially full buffer before being sent over the network.
*
* <p>Lower timeouts lead to lower tail latencies, but may affect throughput. Timeouts of 1 ms
* still sustain high throughput, even for jobs with high parallelism.
*
* <p>A value of '-1' means that the default buffer timeout should be used. A value of '0'
* indicates that no buffering should happen, and all records/events should be immediately sent
* through the network, without additional buffering.
*
* @param timeoutMillis
* The maximum time between two output flushes.
* @return The operator with buffer timeout set.
*/
public SingleOutputStreamOperator<T> m1(long timeoutMillis) {
checkArgument(timeoutMillis >= (-1), "timeout must be >= -1");
transformation.setBufferTimeout(timeoutMillis);
return this;
}
| 3.26 |
flink_SingleOutputStreamOperator_setChainingStrategy_rdh
|
/**
* Sets the {@link ChainingStrategy} for the given operator affecting the way operators will
* possibly be co-located on the same thread for increased performance.
*
* @param strategy
* The selected {@link ChainingStrategy}
* @return The operator with the modified chaining strategy
*/
@PublicEvolving
private SingleOutputStreamOperator<T> setChainingStrategy(ChainingStrategy strategy) {
if (transformation instanceof PhysicalTransformation) {
((PhysicalTransformation<T>) (transformation)).setChainingStrategy(strategy);
} else {
throw new UnsupportedOperationException("Cannot set chaining strategy on " + transformation);
}
return this;
}
| 3.26 |
flink_SingleOutputStreamOperator_m0_rdh
|
/**
* Sets the parallelism for this operator.
*
* @param parallelism
* The parallelism for this operator.
* @return The operator with set parallelism.
*/
public SingleOutputStreamOperator<T> m0(int parallelism) {
OperatorValidationUtils.validateParallelism(parallelism, canBeParallel());
transformation.setParallelism(parallelism);
return this;
}
| 3.26 |
flink_SingleOutputStreamOperator_setDescription_rdh
|
/**
* Sets the description for this operation.
*
* <p>Description is used in json plan and web ui, but not in logging and metrics where only
* name is available. Description is expected to provide detailed information about the sink,
* while name is expected to be more simple, providing summary information only, so that we can
* have more user-friendly logging messages and metric tags without losing useful messages for
* debugging.
*
* @param description
* The description for this operation.
* @return The operation with new description.
*/
@PublicEvolving
public SingleOutputStreamOperator<T> setDescription(String description) {
transformation.setDescription(description);
return this;
}
| 3.26 |
flink_SingleOutputStreamOperator_slotSharingGroup_rdh
|
/**
* Sets the slot sharing group of this operation. Parallel instances of operations that are in
* the same slot sharing group will be co-located in the same TaskManager slot, if possible.
*
* <p>Operations inherit the slot sharing group of input operations if all input operations are
* in the same slot sharing group and no slot sharing group was explicitly specified.
*
* <p>Initially an operation is in the default slot sharing group. An operation can be put into
* the default group explicitly by setting the slot sharing group with name {@code "default"}.
*
* @param slotSharingGroup
* Which contains name and its resource spec.
*/
@PublicEvolving
public SingleOutputStreamOperator<T> slotSharingGroup(SlotSharingGroup slotSharingGroup) {
transformation.setSlotSharingGroup(slotSharingGroup);
return this;
}
/**
* Gets the {@link DataStream} that contains the elements that are emitted from an operation
* into the side output with the given {@link OutputTag}
| 3.26 |
flink_SingleOutputStreamOperator_setUidHash_rdh
|
/**
* Sets a user-provided hash for this operator. This will be used AS IS to create the
* JobVertexID.
*
* <p>The user provided hash is an alternative to the generated hashes, that is considered when
* identifying an operator through the default hash mechanics fails (e.g. because of changes
* between Flink versions).
*
* <p><strong>Important</strong>: this should be used as a workaround or for trouble shooting.
* The provided hash needs to be unique per transformation and job. Otherwise, job submission
* will fail. Furthermore, you cannot assign user-specified hash to intermediate nodes in an
* operator chain and trying so will let your job fail.
*
* <p>A use case for this is in migration between Flink versions or changing the jobs in a way
* that changes the automatically generated hashes. In this case, providing the previous hashes
* directly through this method (e.g. obtained from old logs) can help to reestablish a lost
* mapping from states to their target operator.
*
* @param uidHash
* The user provided hash for this operator. This will become the JobVertexID,
* which is shown in the logs and web ui.
* @return The operator with the user provided hash.
*/
@PublicEvolving
public SingleOutputStreamOperator<T> setUidHash(String uidHash) {
transformation.setUidHash(uidHash);
return this;
}
| 3.26 |
flink_SingleOutputStreamOperator_disableChaining_rdh
|
/**
* Turns off chaining for this operator so thread co-location will not be used as an
* optimization.
*
* <p>Chaining can be turned off for the whole job via {@link StreamExecutionEnvironment#disableOperatorChaining()}, however this is not advised for
* performance reasons.
*
* @return The operator with chaining disabled
*/
@PublicEvolving
public SingleOutputStreamOperator<T> disableChaining() {
return setChainingStrategy(ChainingStrategy.NEVER);
}
| 3.26 |
flink_SingleOutputStreamOperator_forceNonParallel_rdh
|
/**
* Sets the parallelism and maximum parallelism of this operator to one, and marks that this
* operator cannot be set to a parallelism other than one.
*
* @return The operator with parallelism of one.
*/
@PublicEvolving
public SingleOutputStreamOperator<T> forceNonParallel() {
transformation.setParallelism(1);
transformation.setMaxParallelism(1);
nonParallel = true;
return this;
}
| 3.26 |
flink_SingleOutputStreamOperator_cache_rdh
|
/**
* Caches the intermediate result of the transformation. Only bounded streams are supported, and
* currently only blocking mode is supported. The cache is generated lazily the first time the
* intermediate result is computed. The cache is cleared when {@link CachedDataStream#invalidate()} is called or the {@link StreamExecutionEnvironment} is closed.
*
* @return CachedDataStream that can use in later job to reuse the cached intermediate result.
*/
@PublicEvolving
public CachedDataStream<T> cache() {
if (!(this.transformation instanceof PhysicalTransformation)) {
throw new IllegalStateException("Cache can only be called with physical transformation or side output transformation");
}
return new CachedDataStream<>(this.environment, this.transformation);
}
| 3.26 |
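The last group of rows documents operator-level settings on Flink's `SingleOutputStreamOperator` (uid, parallelism, buffer timeout, slot sharing, caching). As a rough illustration of how those settings compose, below is a minimal sketch against the upstream DataStream API that these Javadocs appear to describe. The dataset itself renames some methods (for example `m0`, `m1`), so the method names used here, the `env` setup, and the sample data are illustrative assumptions rather than part of the dataset.

```java
// Illustrative sketch only: wires together several operator-level settings documented above.
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class OperatorSettingsSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Build a tiny pipeline and apply the operator-level settings described in the Javadocs.
        SingleOutputStreamOperator<String> upper =
                env.fromElements("a", "b", "c")
                        .map(new MapFunction<String, String>() {
                            @Override
                            public String map(String value) {
                                return value.toUpperCase();
                            }
                        })
                        .uid("to-upper")          // stable operator ID across job submissions
                        .setParallelism(2)        // operator parallelism
                        .setBufferTimeout(5);     // flush partially filled network buffers after 5 ms

        upper.print();
        env.execute("operator-settings-sketch");
    }
}
```

The 5 ms value mirrors the trade-off described in the buffer-timeout Javadoc above: lower timeouts reduce tail latency at a possible cost in throughput.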