name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
flink_TableChange_modifyPhysicalColumnType_rdh
|
/**
 * A table change that modifies the physical column data type.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY <column_name> <new_column_type>
* </pre>
*
* @param oldColumn
* the definition of the old column.
* @param newType
* the type of the new column.
 * @return a TableChange that represents the modification.
*/
static ModifyPhysicalColumnType modifyPhysicalColumnType(Column oldColumn, DataType newType) {
    return new ModifyPhysicalColumnType(oldColumn, newType);
}
| 3.26 |
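A minimal usage sketch for the factory above (not part of the dataset row): it assumes the `Column.physical(...)` factory and the `DataTypes` helpers from Flink's public Table API; the table and column names are hypothetical.

```java
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.TableChange;

public class ModifyColumnTypeExample {
    public static void main(String[] args) {
        // Hypothetical existing column definition.
        Column oldColumn = Column.physical("price", DataTypes.INT());

        // Rough equivalent of: ALTER TABLE orders MODIFY price BIGINT
        TableChange change = TableChange.modifyPhysicalColumnType(oldColumn, DataTypes.BIGINT());
        System.out.println(change);
    }
}
```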
flink_TableChange_modifyColumnComment_rdh
|
/**
* A table change to modify the column comment.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY <column_name> <original_column_type> COMMENT '<new_column_comment>'
* </pre>
*
* @param oldColumn
* the definition of the old column.
* @param newComment
* the modified comment.
 * @return a TableChange that represents the modification.
*/
static ModifyColumnComment modifyColumnComment(Column oldColumn, String newComment) {
return new ModifyColumnComment(oldColumn, newComment);
}
| 3.26 |
flink_TableChange_add_rdh
|
/**
* A table change to add a watermark.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> ADD WATERMARK FOR <row_time> AS <row_time_expression>
* </pre>
*
* @param watermarkSpec
* the added watermark definition.
 * @return a TableChange that represents the modification.
*/
static AddWatermark add(WatermarkSpec watermarkSpec) {
    return new AddWatermark(watermarkSpec);
}
| 3.26 |
flink_TableChange_modifyColumnName_rdh
|
/**
* A table change to modify the column name.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> RENAME <old_column_name> TO <new_column_name>
* </pre>
*
* @param oldColumn
* the definition of the old column.
* @param newName
* the name of the new column.
 * @return a TableChange that represents the modification.
*/
static ModifyColumnName modifyColumnName(Column oldColumn, String newName) {
return new ModifyColumnName(oldColumn, newName);
}
| 3.26 |
flink_TableChange_m0_rdh
|
/**
* A table change to add a unique constraint.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> ADD PRIMARY KEY (<column_name>...) NOT ENFORCED
* </pre>
*
* @param constraint
* the added constraint definition.
 * @return a TableChange that represents the modification.
*/
static AddUniqueConstraint m0(UniqueConstraint constraint) {
return new AddUniqueConstraint(constraint);
}
| 3.26 |
flink_TableChange_getConstraint_rdh
|
/**
* Returns the unique constraint to add.
*/
public UniqueConstraint getConstraint() {
return constraint;
}
| 3.26 |
flink_TableChange_getValue_rdh
|
/**
* Returns the Option value to set.
*/
public String getValue() {
return value;
}
| 3.26 |
flink_TableChange_getConstraintName_rdh
|
/**
* Returns the constraint name.
*/
public String getConstraintName() {
return constraintName;
}
| 3.26 |
flink_TableChange_dropColumn_rdh
|
/**
 * A table change to drop a column.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> DROP COLUMN <column_name>
* </pre>
*
* @param columnName
* the column to drop.
 * @return a TableChange that represents the modification.
*/
static DropColumn dropColumn(String columnName) {
return new DropColumn(columnName);
}
| 3.26 |
flink_TableChange_getNewPosition_rdh
|
/**
 * Returns the position of the modified {@link Column} instance. When the return value is
 * null, the column keeps its original position. When the return value is FIRST, the
 * modified column is moved to the first position. When the return value is AFTER, the
 * column is moved after the referenced column.
 */
@Nullable
public ColumnPosition getNewPosition() {
return newPosition;
}
| 3.26 |
flink_TableChange_getNewConstraint_rdh
|
/**
* Returns the modified unique constraint.
*/
public UniqueConstraint getNewConstraint() {
return newConstraint;
}
| 3.26 |
flink_TableChange_after_rdh
|
/**
* Get the position to place the column after the specified column.
*/
static ColumnPosition after(String column) {
return new After(column);
}
| 3.26 |
flink_TableChange_getKey_rdh
|
/**
* Returns the Option key to reset.
 */
public String getKey() {
    return key;
}
| 3.26 |
flink_TableChange_getNewColumnName_rdh
|
/**
 * Returns the new column name after the rename.
 */
public String getNewColumnName() {
    return newColumn.getName();
}
| 3.26 |
flink_TableChange_modify_rdh
|
/**
* A table change to modify a watermark.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY WATERMARK FOR <row_time> AS <row_time_expression>
* </pre>
*
* @param newWatermarkSpec
* the modified watermark definition.
 * @return a TableChange that represents the modification.
*/
static ModifyWatermark modify(WatermarkSpec newWatermarkSpec) {
return new ModifyWatermark(newWatermarkSpec);
}
| 3.26 |
flink_TableChange_dropConstraint_rdh
|
/**
 * A table change to drop a constraint.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> DROP CONSTRAINT <constraint_name>
* </pre>
*
* @param constraintName
* the constraint to drop.
 * @return a TableChange that represents the modification.
*/
static DropConstraint dropConstraint(String constraintName) {
return new DropConstraint(constraintName);
}
| 3.26 |
flink_TableChange_getOldColumnName_rdh
|
/**
 * Returns the original column name.
*/
public String getOldColumnName() {
return oldColumn.getName();
}
| 3.26 |
flink_TableChange_getNewWatermark_rdh
|
/**
* Returns the modified watermark.
*/
public WatermarkSpec getNewWatermark() {
return newWatermark;
}
| 3.26 |
flink_TableChange_getNewColumn_rdh
|
/**
* Returns the modified {@link Column} instance.
*/
public Column getNewColumn() {
return newColumn;
}
| 3.26 |
flink_TableChange_getColumnName_rdh
|
/**
* Returns the column name.
*/
public String getColumnName() {
return columnName;
}
| 3.26 |
flink_TableChange_getOldColumn_rdh
|
/**
* Returns the original {@link Column} instance.
*/
public Column getOldColumn() {
return oldColumn;
}
| 3.26 |
flink_TableChange_set_rdh
|
/**
* A table change to set the table option.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> SET '<key>' = '<value>';
* </pre>
*
* @param key
* the option name to set.
* @param value
* the option value to set.
 * @return a TableChange that represents the modification.
*/
static SetOption set(String key, String value) {
return new SetOption(key, value);
}
| 3.26 |
flink_TableChange_modifyColumnPosition_rdh
|
/**
* A table change to modify the column position.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY <column_name> <original_column_type> <column_position>
* </pre>
*
* @param oldColumn
* the definition of the old column.
* @param columnPosition
* the new position of the column.
 * @return a TableChange that represents the modification.
*/
static ModifyColumnPosition modifyColumnPosition(Column oldColumn, ColumnPosition columnPosition) {
return new ModifyColumnPosition(oldColumn, columnPosition);
}
| 3.26 |
flink_TableChange_getWatermark_rdh
|
/**
* Returns the watermark to add.
*/
public WatermarkSpec getWatermark() {
return watermarkSpec;
}
| 3.26 |
flink_TableChange_first_rdh
|
/**
 * Get the position to place the column at the first position.
*/
static ColumnPosition first() {
return First.INSTANCE;
}
| 3.26 |
flink_TableChange_reset_rdh
|
/**
* A table change to reset the table option.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> RESET '<key>'
* </pre>
*
* @param key
* the option name to reset.
 * @return a TableChange that represents the modification.
*/
static ResetOption reset(String key) {
return new ResetOption(key);
}
| 3.26 |
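The SET/RESET/DROP factories in the rows above compose naturally into a list of changes for one ALTER TABLE statement. A hedged sketch that uses only the factory methods shown in this table; the option keys and column name are hypothetical.

```java
import java.util.Arrays;
import java.util.List;

import org.apache.flink.table.catalog.TableChange;

public class AlterTableChangesExample {
    public static void main(String[] args) {
        // Rough equivalent of:
        //   ALTER TABLE t SET 'format' = 'json'
        //   ALTER TABLE t RESET 'scan.startup.mode'
        //   ALTER TABLE t DROP COLUMN legacy_col
        List<TableChange> changes = Arrays.asList(
                TableChange.set("format", "json"),
                TableChange.reset("scan.startup.mode"),
                TableChange.dropColumn("legacy_col"));
        changes.forEach(System.out::println);
    }
}
```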
flink_TaskManagerRuntimeInfo_getTaskManagerBindAddress_rdh
|
/**
 * Gets the bind address of the TaskManager.
*
* @return The bind address of the TaskManager.
*/
default String getTaskManagerBindAddress() {
return m0().getString(TaskManagerOptions.BIND_HOST);
}
| 3.26 |
flink_HiveFunctionWrapper_deserializeUDF_rdh
|
/**
 * Deserializes the UDF from the serialized bytes that this wrapper holds.
 *
 * @return the deserialized UDF
*/
@SuppressWarnings("unchecked")
private UDFType deserializeUDF() {
return ((UDFType) (deserializeObjectFromKryo(udfSerializedBytes, ((Class<Serializable>) (getUDFClass())))));
}
| 3.26 |
flink_HiveFunctionWrapper_createFunction_rdh
|
/**
* Instantiate a Hive function instance.
*
* @return a Hive function instance
*/
public UDFType createFunction() {
    if (udfSerializedBytes != null) {
        // deserialize the bytes into a UDF instance
        return deserializeUDF();
    } else if (instance != null) {
        return instance;
    } else {
        UDFType func;
        try {
            func = functionClz.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
            throw new FlinkHiveUDFException(
                    String.format("Failed to create function from %s", functionClz.getName()), e);
        }
        if (!(func instanceof UDF)) {
            // We cache the function if it is not a simple UDF,
            // as we always have to create a new instance for a simple UDF.
            instance = func;
        }
        return func;
    }
}
| 3.26 |
flink_EitherTypeInfo_getLeftType_rdh
|
// --------------------------------------------------------------------------------------------
public TypeInformation<L> getLeftType() {
return leftType;
}
| 3.26 |
flink_ConfigOptions_durationType_rdh
|
/**
* Defines that the value of the option should be of {@link Duration} type.
*/
public TypedConfigOptionBuilder<Duration> durationType() {
return new TypedConfigOptionBuilder<>(key, Duration.class);
}
| 3.26 |
flink_ConfigOptions_defaultValue_rdh
|
/**
* Creates a ConfigOption with the given default value.
*
* @param value
* The default value for the config option
* @return The config option with the default value.
*/
public ConfigOption<T> defaultValue(T value) {
return new ConfigOption<>(key, clazz,
ConfigOption.EMPTY_DESCRIPTION, value, false);
}
| 3.26 |
flink_ConfigOptions_mapType_rdh
|
/**
* Defines that the value of the option should be a set of properties, which can be
* represented as {@code Map<String, String>}.
*/
public TypedConfigOptionBuilder<Map<String, String>> mapType() {
return new TypedConfigOptionBuilder<>(key, PROPERTIES_MAP_CLASS);
}
/**
* Creates a ConfigOption with the given default value.
*
* <p>This method does not accept "null". For options with no default value, choose one of
* the {@code noDefaultValue}
| 3.26 |
flink_ConfigOptions_asList_rdh
|
/**
 * Defines that the option's type should be a list of the previously defined atomic type.
*/
public ListConfigOptionBuilder<T> asList() {
return new ListConfigOptionBuilder<>(key, clazz);
}
| 3.26 |
flink_ConfigOptions_booleanType_rdh
|
/**
* Defines that the value of the option should be of {@link Boolean} type.
*/
public TypedConfigOptionBuilder<Boolean> booleanType() {
    return new TypedConfigOptionBuilder<>(key, Boolean.class);
}
| 3.26 |
flink_ConfigOptions_enumType_rdh
|
/**
* Defines that the value of the option should be of {@link Enum} type.
*
* @param enumClass
* Concrete type of the expected enum.
*/
public <T extends Enum<T>> TypedConfigOptionBuilder<T> enumType(Class<T> enumClass) {
return new TypedConfigOptionBuilder<>(key, enumClass);
}
| 3.26 |
flink_ConfigOptions_intType_rdh
|
/**
* Defines that the value of the option should be of {@link Integer} type.
*/
public TypedConfigOptionBuilder<Integer> intType() {
return new TypedConfigOptionBuilder<>(key, Integer.class);
}
| 3.26 |
flink_ConfigOptions_defaultValues_rdh
|
/**
* Creates a ConfigOption with the given default value.
*
* @param values
* The list of default values for the config option
* @return The config option with the default value.
*/
@SafeVarargs
public final ConfigOption<List<E>> defaultValues(E... values) {
return new ConfigOption<>(key, clazz, ConfigOption.EMPTY_DESCRIPTION, Arrays.asList(values), true);
}
| 3.26 |
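A short sketch of how `asList()` and `defaultValues(...)` combine with the other builder methods shown in this table; the option key is hypothetical.

```java
import java.util.List;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

public class ListOptionExample {
    // A list-typed option with several default values.
    public static final ConfigOption<List<Integer>> RETRY_CODES =
            ConfigOptions.key("example.retry-codes") // hypothetical key
                    .intType()
                    .asList()
                    .defaultValues(500, 502, 503);
}
```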
flink_ConfigOptions_doubleType_rdh
|
/**
* Defines that the value of the option should be of {@link Double} type.
*/
public TypedConfigOptionBuilder<Double> doubleType() {
return new TypedConfigOptionBuilder<>(key, Double.class);
}
| 3.26 |
flink_ConfigOptions_floatType_rdh
|
/**
* Defines that the value of the option should be of {@link Float} type.
*/
public TypedConfigOptionBuilder<Float> floatType() {
return new TypedConfigOptionBuilder<>(key, Float.class);
}
| 3.26 |
flink_ConfigOptions_memoryType_rdh
|
/**
* Defines that the value of the option should be of {@link MemorySize} type.
*/
public TypedConfigOptionBuilder<MemorySize> memoryType() {
return new TypedConfigOptionBuilder<>(key, MemorySize.class);
}
| 3.26 |
flink_ConfigOptions_key_rdh
|
/**
* Starts building a new {@link ConfigOption}.
*
* @param key
* The key for the config option.
* @return The builder for the config option with the given key.
*/
public static OptionBuilder key(String key)
{
checkNotNull(key);
return new OptionBuilder(key);
}
| 3.26 |
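`key(...)` is the entry point of the builder chain. A minimal sketch combining it with `durationType()` and `defaultValue(...)` from the snippets above; the key name is hypothetical.

```java
import java.time.Duration;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

public class TimeoutOptionExample {
    // key() starts the chain, durationType() fixes the type, defaultValue() finishes it.
    public static final ConfigOption<Duration> REQUEST_TIMEOUT =
            ConfigOptions.key("example.request-timeout") // hypothetical key
                    .durationType()
                    .defaultValue(Duration.ofSeconds(30));
}
```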
flink_ConfigOptions_longType_rdh
|
/**
* Defines that the value of the option should be of {@link Long} type.
*/
public TypedConfigOptionBuilder<Long> longType() {
return new TypedConfigOptionBuilder<>(key, Long.class);
}
| 3.26 |
flink_ConfigOptions_stringType_rdh
|
/**
* Defines that the value of the option should be of {@link String} type.
*/
public TypedConfigOptionBuilder<String> stringType() {
return new TypedConfigOptionBuilder<>(key, String.class);
}
| 3.26 |
flink_OverWindowPartitionedOrdered_as_rdh
|
/**
* Assigns an alias for this window that the following {@code select()} clause can refer to.
*
* @param alias
* alias for this over window
* @return the fully defined over window
*/
public OverWindow as(Expression alias) {
return new OverWindow(alias, partitionBy, orderBy, valueLiteral(OverWindowRange.UNBOUNDED_RANGE), Optional.empty());
}
| 3.26 |
flink_OverWindowPartitionedOrdered_preceding_rdh
|
/**
 * Set the preceding offset (based on time or row-count intervals) for the over window.
*
* @param preceding
* preceding offset relative to the current row.
* @return an over window with defined preceding
*/
public OverWindowPartitionedOrderedPreceding preceding(Expression preceding) {
return new OverWindowPartitionedOrderedPreceding(partitionBy, orderBy, preceding);
}
| 3.26 |
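A hedged sketch of how `preceding(...)` and `as(...)` fit into the Table API over-window builder; it assumes the standard `Over` entry point and the `Expressions.$`/`UNBOUNDED_RANGE` helpers, and omits the surrounding `table.window(...).select(...)` call.

```java
import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.UNBOUNDED_RANGE;

import org.apache.flink.table.api.Over;
import org.apache.flink.table.api.OverWindow;

public class OverWindowExample {
    public static void main(String[] args) {
        // Partitioned by "user", ordered by "rowtime", unbounded preceding range,
        // aliased as "w" so a later select() can refer to it.
        OverWindow w = Over.partitionBy($("user"))
                .orderBy($("rowtime"))
                .preceding(UNBOUNDED_RANGE)
                .as($("w"));
        System.out.println(w);
    }
}
```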
flink_JarManifestParser_findOnlyEntryClass_rdh
|
/**
* Returns a JAR file with its entry class as specified in the manifest.
*
* @param jarFiles
* JAR files to parse
* @throws NoSuchElementException
* if no JAR file contains an entry class attribute
* @throws IllegalArgumentException
* if multiple JAR files contain an entry class manifest
* attribute
*/
static JarFileWithEntryClass findOnlyEntryClass(Iterable<File> jarFiles) throws IOException {
List<JarFileWithEntryClass> jarsWithEntryClasses = new ArrayList<>();
for (File jarFile : jarFiles) {
findEntryClass(jarFile).ifPresent(entryClass -> jarsWithEntryClasses.add(new JarFileWithEntryClass(jarFile, entryClass)));
}
int size = jarsWithEntryClasses.size();
if (size == 0)
{
throw new NoSuchElementException("No JAR with manifest attribute for entry class");
}
if (size == 1) {
return jarsWithEntryClasses.get(0);
}
// else: size > 1
throw new IllegalArgumentException("Multiple JARs with manifest attribute for entry class: " + jarsWithEntryClasses);
}
| 3.26 |
flink_JarManifestParser_findEntryClass_rdh
|
/**
* Returns the entry class as specified in the manifest of the provided JAR file.
*
* <p>The following manifest attributes are checked in order to find the entry class:
*
* <ol>
* <li>{@link PackagedProgram#MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS}
* <li>{@link PackagedProgram#MANIFEST_ATTRIBUTE_MAIN_CLASS}
* </ol>
*
* @param jarFile
* JAR file to parse
* @return Optional holding entry class
* @throws IOException
* If there is an error accessing the JAR
*/
@VisibleForTesting
static Optional<String> findEntryClass(File jarFile) throws IOException {
return findFirstManifestAttribute(jarFile, PackagedProgram.MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS, PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS);
}
| 3.26 |
flink_AvroRowDeserializationSchema_convertAvroRecordToRow_rdh
|
// --------------------------------------------------------------------------------------------
private Row convertAvroRecordToRow(Schema schema, RowTypeInfo typeInfo, IndexedRecord record) {
final List<Schema.Field> fields = schema.getFields();
final TypeInformation<?>[] fieldInfo = typeInfo.getFieldTypes();
final int length = fields.size();
final Row row = new Row(length);
for (int i = 0; i < length; i++) {
final Schema.Field v7 = fields.get(i);
row.setField(i, convertAvroType(v7.schema(), fieldInfo[i], record.get(i)));
}
return row;
}
| 3.26 |
flink_SqlShowPartitions_getPartitionKVs_rdh
|
/**
* Get partition spec as key-value strings.
 */
public LinkedHashMap<String, String> getPartitionKVs() {
return SqlPartitionUtils.getPartitionKVs(getPartitionSpec());
}
| 3.26 |
flink_SpillingBuffer_moveAll_rdh
|
/**
* Utility method that moves elements. It avoids copying the data into a dedicated array first,
* as the {@link ArrayList#addAll(java.util.Collection)} method does.
*
* @param <E>
* @param source
* @param target
*/
private static final <E> void moveAll(ArrayList<E> source, ArrayList<E> target) {
target.ensureCapacity(target.size() + source.size());
for (int i = source.size() - 1; i >= 0; i--) {
target.add(source.remove(i));
}
}
| 3.26 |
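The utility above removes from the tail of the source list so that `ArrayList.remove(int)` never has to shift remaining elements; the elements end up in reverse order, which is presumably acceptable here because the caller only collects memory segments. A standalone sketch of the same idea:

```java
import java.util.ArrayList;
import java.util.Arrays;

public class MoveAllSketch {
    // Remove from the tail so ArrayList.remove(int) is O(1) per element.
    static <E> void moveAll(ArrayList<E> source, ArrayList<E> target) {
        target.ensureCapacity(target.size() + source.size());
        for (int i = source.size() - 1; i >= 0; i--) {
            target.add(source.remove(i));
        }
    }

    public static void main(String[] args) {
        ArrayList<String> src = new ArrayList<>(Arrays.asList("a", "b", "c"));
        ArrayList<String> dst = new ArrayList<>();
        moveAll(src, dst);
        System.out.println(dst); // [c, b, a] -- order is reversed
        System.out.println(src); // []
    }
}
```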
flink_SpillingBuffer_close_rdh
|
/**
*
* @return A list with all memory segments that have been taken from the memory segment source.
*/
public List<MemorySegment> close() throws IOException
{
final ArrayList<MemorySegment> segments = new ArrayList<MemorySegment>(this.fullSegments.size() + this.numMemorySegmentsInWriter);
// if the buffer is still being written, clean that up
    if (getCurrentSegment() != null) {
        segments.add(getCurrentSegment());
        clear();
}
moveAll(this.fullSegments, segments);
this.fullSegments.clear();
// clean up the writer
if (this.writer != null) {
// closing before the first flip, collect the memory in the writer
this.writer.close();
for (int i = this.numMemorySegmentsInWriter; i > 0; i--) {
segments.add(this.writer.getNextReturnedBlock());
}
this.writer.closeAndDelete();
this.writer = null;
}
// clean up the views
    if (this.inMemInView != null) {
        this.inMemInView = null;
}
if (this.externalInView != null) {
if (!this.externalInView.isClosed()) {
this.externalInView.close();
}
this.externalInView = null;
}
return segments;
}
| 3.26 |
flink_ProcessorArchitecture_getProcessorArchitecture_rdh
|
// ------------------------------------------------------------------------
/**
* Gets the ProcessorArchitecture of the system running this process.
*/
public static ProcessorArchitecture getProcessorArchitecture() {
    return CURRENT;
}
| 3.26 |
flink_ProcessorArchitecture_getAlternativeNames_rdh
|
/**
* Gets the alternative names for the processor architecture. Alternative names are for example
* "i586" for "x86", or "x86_64" for "amd64".
*/
public List<String> getAlternativeNames() {
return alternativeNames;
}
| 3.26 |
flink_ProcessorArchitecture_getAddressSize_rdh
|
/**
* Gets the address size of the memory (32 bit, 64 bit).
*/
public MemoryAddressSize getAddressSize() {
return addressSize;
}
| 3.26 |
flink_SchedulerFactory_create_rdh
|
/**
* Create a {@link ScheduledThreadPoolExecutor} using the provided corePoolSize. The following
* behaviour is configured:
*
* <ul>
* <li>rejected executions are logged if the executor is {@link java.util.concurrent.ThreadPoolExecutor#isShutdown shutdown}
* <li>otherwise, {@link RejectedExecutionException} is thrown
* <li>any uncaught exception fails the JVM (using {@link org.apache.flink.runtime.util.FatalExitExceptionHandler FatalExitExceptionHandler})
* </ul>
*/
public static ScheduledThreadPoolExecutor create(int corePoolSize, String name, Logger log) {
    AtomicInteger cnt = new AtomicInteger(0);
    return new ScheduledThreadPoolExecutor(corePoolSize, runnable -> {
        Thread thread = new Thread(runnable);
        thread.setName(name + "-" + cnt.incrementAndGet());
thread.setUncaughtExceptionHandler(INSTANCE);
return thread;
}, new IgnoreShutdownRejectedExecutionHandler(log));
}
| 3.26 |
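A generic, simplified sketch of the same pattern (named threads, an uncaught-exception handler, a custom rejection handler) using only the JDK; unlike the Flink factory above, it merely logs rejections to stderr instead of distinguishing the shutdown case or exiting the JVM on uncaught exceptions.

```java
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedSchedulerSketch {
    public static ScheduledThreadPoolExecutor create(int corePoolSize, String name) {
        AtomicInteger cnt = new AtomicInteger(0);
        RejectedExecutionHandler rejectionHandler =
                (runnable, executor) -> System.err.println("Rejected task: " + runnable);
        return new ScheduledThreadPoolExecutor(
                corePoolSize,
                runnable -> {
                    Thread thread = new Thread(runnable);
                    thread.setName(name + "-" + cnt.incrementAndGet());
                    thread.setUncaughtExceptionHandler(
                            (t, e) -> System.err.println("Uncaught exception in " + t.getName() + ": " + e));
                    return thread;
                },
                rejectionHandler);
    }
}
```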
flink_TaskSlot_add_rdh
|
// ----------------------------------------------------------------------------------
// State changing methods
// ----------------------------------------------------------------------------------
/**
* Add the given task to the task slot. This is only possible if there is not already another
* task with the same execution attempt id added to the task slot. In this case, the method
* returns true. Otherwise the task slot is left unchanged and false is returned.
*
* <p>In case that the task slot state is not active an {@link IllegalStateException} is thrown.
* In case that the task's job id and allocation id don't match with the job id and allocation
* id for which the task slot has been allocated, an {@link IllegalArgumentException} is thrown.
*
* @param task
* to be added to the task slot
* @throws IllegalStateException
* if the task slot is not in state active
* @return true if the task was added to the task slot; otherwise false
*/
public boolean add(T task) {
// Check that this slot has been assigned to the job sending this task
Preconditions.checkArgument(task.getJobID().equals(jobId), "The task's job id does not match the " + "job id for which the slot has been allocated.");
Preconditions.checkArgument(task.getAllocationId().equals(allocationId), "The task's allocation " + "id does not match the allocation id for which the slot has been allocated.");
Preconditions.checkState(TaskSlotState.ACTIVE == state, "The task slot is not in state active.");
T oldTask = tasks.put(task.getExecutionId(), task);
    if (oldTask != null) {
tasks.put(task.getExecutionId(), oldTask);
return false;
} else {
return true;
}
}
| 3.26 |
flink_TaskSlot_markInactive_rdh
|
/**
* Mark the slot as inactive/allocated. A slot can only be marked as inactive/allocated if it's
* in state allocated or active.
*
* @return True if the new state of the slot is allocated; otherwise false
*/
public boolean markInactive() {
if ((TaskSlotState.ACTIVE == state) || (TaskSlotState.ALLOCATED == state)) {
state = TaskSlotState.ALLOCATED;
return true;
} else {
return false;}
}
| 3.26 |
flink_TaskSlot_generateSlotOffer_rdh
|
/**
* Generate the slot offer from this TaskSlot.
*
 * @return The slot offer which this task slot can provide
 */
public SlotOffer generateSlotOffer() {
    Preconditions.checkState(
            (TaskSlotState.ACTIVE == state) || (TaskSlotState.ALLOCATED == state),
            "The task slot is not in state active or allocated.");
    Preconditions.checkState(allocationId != null, "The task slot is not allocated.");
    return new SlotOffer(allocationId, index, resourceProfile);
}
| 3.26 |
flink_TaskSlot_getTasks_rdh
|
/**
* Get all tasks running in this task slot.
*
* @return Iterator to all currently contained tasks in this task slot.
*/
public Iterator<T> getTasks() {
    return tasks.values().iterator();
}
| 3.26 |
flink_TaskSlot_m1_rdh
|
/**
* Remove the task identified by the given execution attempt id.
*
* @param executionAttemptId
* identifying the task to be removed
* @return The removed task if there was any; otherwise null.
*/
public T m1(ExecutionAttemptID executionAttemptId) {
return tasks.remove(executionAttemptId);
}
| 3.26 |
flink_TaskSlot_markActive_rdh
|
/**
* Mark this slot as active. A slot can only be marked active if it's in state allocated.
*
* <p>The method returns true if the slot was set to active. Otherwise it returns false.
*
* @return True if the new state of the slot is active; otherwise false
*/
public boolean markActive() {
if ((TaskSlotState.ALLOCATED
== state) || (TaskSlotState.ACTIVE == state)) {
state = TaskSlotState.ACTIVE;
return true;
} else {
return false;
}
}
| 3.26 |
flink_TaskSlot_clear_rdh
|
/**
* Removes all tasks from this task slot.
*/
public void clear() {
    tasks.clear();
}
| 3.26 |
flink_TaskSlot_getIndex_rdh
|
// ----------------------------------------------------------------------------------
// State accessors
// ----------------------------------------------------------------------------------
public int getIndex() {
return index;
}
| 3.26 |
flink_TaskSlot_closeAsync_rdh
|
/**
* Close the task slot asynchronously.
*
 * <p>The slot is moved to the {@link TaskSlotState#RELEASING} state only once. If there are
 * active tasks running in the slot, they are failed. The future that completes once all tasks
 * have terminated and the slot has been cleaned up is created only once and is always returned
 * on repeated attempts to close the slot.
 *
 * @param cause
 * cause of closing
 * @return future that completes once all running tasks (if any) are done and the slot is cleaned up.
*/
CompletableFuture<Void> closeAsync(Throwable cause) {
if (!isReleasing()) {
state = TaskSlotState.RELEASING;
if (!isEmpty()) {
// we couldn't free the task slot because it still contains task, fail the tasks
// and set the slot state to releasing so that it gets eventually freed
tasks.values().forEach(task -> task.failExternally(cause));
}
final CompletableFuture<Void> shutdownFuture = FutureUtils.waitForAll(tasks.values().stream().map(TaskSlotPayload::getTerminationFuture).collect(Collectors.toList())).thenRun(memoryManager::shutdown);
verifyAllManagedMemoryIsReleasedAfter(shutdownFuture);
FutureUtils.forward(shutdownFuture, closingFuture);
}
return closingFuture;
}
| 3.26 |
flink_Executors_newDirectExecutorServiceWithNoOpShutdown_rdh
|
/**
* Creates a new {@link ExecutorService} that runs the passed tasks in the calling thread but
 * doesn't implement proper shutdown behavior. Tasks can still be submitted even after {@link ExecutorService#shutdown()} is called.
*
* @see #newDirectExecutorService()
*/
public static ExecutorService newDirectExecutorServiceWithNoOpShutdown() {
return new DirectExecutorService(false);
}
| 3.26 |
flink_Executors_directExecutor_rdh
|
/**
* Return a direct executor. The direct executor directly executes the runnable in the calling
* thread.
*
* @return Direct executor
*/
public static Executor directExecutor() {
    return DirectExecutor.INSTANCE;
}
| 3.26 |
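A direct executor simply runs the submitted task in the calling thread. A plain-JDK sketch of the same behavior (Flink's `Executors.directExecutor()` returns a shared instance with these semantics):

```java
import java.util.concurrent.Executor;

public class DirectExecutorExample {
    public static void main(String[] args) {
        // The simplest possible direct executor: execute == run in the caller's thread.
        Executor direct = Runnable::run;
        direct.execute(() -> System.out.println("runs in " + Thread.currentThread().getName()));
    }
}
```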
flink_Executors_newDirectExecutorService_rdh
|
/**
 * Creates a new {@link ExecutorService} that runs the passed tasks in the calling thread.
 */
public static ExecutorService newDirectExecutorService() {
    return new DirectExecutorService(true);
}
| 3.26 |
flink_CompositeBuffer_getFullBufferData_rdh
|
/**
 * Returns the full buffer data in one piece of {@link MemorySegment}. If there are multiple
 * partial buffers, the partial data will be copied to the given target {@link MemorySegment}.
*/
public Buffer getFullBufferData(MemorySegment segment) {
checkState(!partialBuffers.isEmpty());
checkState(currentLength <= segment.size());
if (partialBuffers.size() == 1) {
return partialBuffers.get(0);
}
int offset = 0;
for (Buffer buffer : partialBuffers) {
segment.put(offset, buffer.getNioBufferReadable(), buffer.readableBytes());
offset += buffer.readableBytes();
}
recycleBuffer();
return new NetworkBuffer(segment, DummyBufferRecycler.INSTANCE, dataType, isCompressed, currentLength);
}
| 3.26 |
flink_LongParser_parseField_rdh
|
/**
* Static utility to parse a field of type long from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws NumberFormatException
* Thrown when the value cannot be parsed because the text
 * does not represent a correct number.
*/
public static final long parseField(byte[] bytes, int startPos, int length, char delimiter) {
long val = 0;
boolean neg = false;
if (bytes[startPos] == delimiter) {
throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if ((length == 0) || (bytes[startPos] == delimiter)) {
throw new NumberFormatException("Orphaned minus sign.");
}
}
for (; length > 0; startPos++ , length--) {
if (bytes[startPos] == delimiter) {
return neg ? -val : val;
}
if ((bytes[startPos] < 48) || (bytes[startPos] > 57))
{
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
// check for overflow / underflow
if (val < 0) {
// this is an overflow/underflow, unless we hit exactly the Long.MIN_VALUE
if (neg && (val == Long.MIN_VALUE)) {
if ((length == 1) || (bytes[startPos + 1] == delimiter)) {
return Long.MIN_VALUE;
} else {
throw new NumberFormatException("value overflow");
}
} else {
throw new NumberFormatException("value overflow");
}
}
}
return neg ? -val : val;
}
| 3.26 |
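A small usage sketch for the static parser above, assuming the class lives in its usual package `org.apache.flink.types.parser`; the input bytes model a delimited text field.

```java
import java.nio.charset.StandardCharsets;

import org.apache.flink.types.parser.LongParser;

public class LongParserExample {
    public static void main(String[] args) {
        // A delimited text field "12345|rest" -- parsing stops at the delimiter.
        byte[] bytes = "12345|rest".getBytes(StandardCharsets.US_ASCII);
        long value = LongParser.parseField(bytes, 0, bytes.length, '|');
        System.out.println(value); // 12345
    }
}
```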
flink_NonReusingBuildSecondHashJoinIterator_open_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void open() throws IOException, MemoryAllocationException, InterruptedException {
this.hashJoin.open(this.secondInput, this.firstInput,
buildSideOuterJoin);
}
| 3.26 |
flink_JobGraphGenerator_connectJobVertices_rdh
|
// ------------------------------------------------------------------------
// Connecting Vertices
// ------------------------------------------------------------------------
/**
 * NOTE: The channels for the global and local strategies are different if we connect a union. The
* global strategy channel is then the channel into the union node, the local strategy channel
* the one from the union to the actual target operator.
*
* @throws CompilerException
*/
private DistributionPattern connectJobVertices(Channel channel, int inputNumber, final JobVertex sourceVertex, final TaskConfig sourceConfig, final JobVertex targetVertex, final TaskConfig targetConfig, boolean isBroadcast) throws CompilerException {
// ------------ connect the vertices to the job graph --------------
final DistributionPattern distributionPattern;
switch (channel.getShipStrategy()) {
case FORWARD :
distributionPattern = DistributionPattern.POINTWISE;
break;
case PARTITION_RANDOM :
case BROADCAST :
case PARTITION_HASH :
            case PARTITION_CUSTOM :
            case PARTITION_RANGE :
case PARTITION_FORCED_REBALANCE :
distributionPattern = DistributionPattern.ALL_TO_ALL;
break;
default :
throw new RuntimeException("Unknown runtime ship strategy: " + channel.getShipStrategy());
}
final ResultPartitionType resultType;
        switch (channel.getDataExchangeMode()) {
            case PIPELINED :
resultType = ResultPartitionType.PIPELINED;
break;
case BATCH :
// BLOCKING results are currently not supported in closed loop iterations
//
// See https://issues.apache.org/jira/browse/FLINK-1713 for details
resultType = (channel.getSource().isOnDynamicPath()) ? ResultPartitionType.PIPELINED : ResultPartitionType.BLOCKING;
break;
case PIPELINE_WITH_BATCH_FALLBACK :
throw new UnsupportedOperationException(("Data exchange mode "
+ channel.getDataExchangeMode()) + " currently not supported.");
default :
throw new UnsupportedOperationException("Unknown data exchange mode.");
}
JobEdge edge = targetVertex.connectNewDataSetAsInput(sourceVertex, distributionPattern, resultType, isBroadcast);
// -------------- configure the source task's ship strategy strategies in task config
// --------------
        final int outputIndex = sourceConfig.getNumOutputs();
sourceConfig.addOutputShipStrategy(channel.getShipStrategy());
if (outputIndex == 0) {
sourceConfig.setOutputSerializer(channel.getSerializer());
}
if (channel.getShipStrategyComparator() != null) {
sourceConfig.setOutputComparator(channel.getShipStrategyComparator(), outputIndex);
}
if (channel.getShipStrategy() == ShipStrategyType.PARTITION_RANGE) {
final DataDistribution dataDistribution = channel.getDataDistribution();
if (dataDistribution != null) {
sourceConfig.setOutputDataDistribution(dataDistribution, outputIndex);
} else {
throw new RuntimeException("Range partitioning requires data distribution.");
}
}
        if (channel.getShipStrategy() == ShipStrategyType.PARTITION_CUSTOM) {
if (channel.getPartitioner() != null) {
sourceConfig.setOutputPartitioner(channel.getPartitioner(), outputIndex);
} else {
throw new CompilerException("The ship strategy was set to custom partitioning, but no partitioner was set.");
}
}
// ---------------- configure the receiver -------------------
if (isBroadcast) {
targetConfig.addBroadcastInputToGroup(inputNumber);
} else {
targetConfig.addInputToGroup(inputNumber);
}
// ---------------- attach the additional infos to the job edge -------------------
String shipStrategy = JsonMapper.getShipStrategyString(channel.getShipStrategy());
if ((channel.getShipStrategyKeys() != null) && (channel.getShipStrategyKeys().size() > 0)) {
shipStrategy += " on " + (channel.getShipStrategySortOrder()
== null ? channel.getShipStrategyKeys().toString() : Utils.createOrdering(channel.getShipStrategyKeys(), channel.getShipStrategySortOrder()).toString());
}
String localStrategy;
        if ((channel.getLocalStrategy() == null) || (channel.getLocalStrategy() == LocalStrategy.NONE)) {
            localStrategy = null;
        } else {
localStrategy = JsonMapper.getLocalStrategyString(channel.getLocalStrategy());
if (((localStrategy != null) && (channel.getLocalStrategyKeys() != null)) && (channel.getLocalStrategyKeys().size() > 0)) {
localStrategy += " on " + (channel.getLocalStrategySortOrder() == null ? channel.getLocalStrategyKeys().toString() : Utils.createOrdering(channel.getLocalStrategyKeys(), channel.getLocalStrategySortOrder()).toString());
}
}
String caching = (channel.getTempMode() == TempMode.NONE) ? null : channel.getTempMode().toString();
edge.setShipStrategyName(shipStrategy);
edge.setForward(channel.getShipStrategy() == ShipStrategyType.FORWARD);
edge.setPreProcessingOperationName(localStrategy);
edge.setOperatorLevelCachingDescription(caching);
return distributionPattern;
}
| 3.26 |
flink_JobGraphGenerator_compileJobGraph_rdh
|
/**
* Translates a {@link org.apache.flink.optimizer.plan.OptimizedPlan} into a {@link org.apache.flink.runtime.jobgraph.JobGraph}.
*
* @param program
* Optimized plan that is translated into a JobGraph.
* @return JobGraph generated from the plan.
*/
public JobGraph compileJobGraph(OptimizedPlan program) {
return compileJobGraph(program, null);
}
| 3.26 |
flink_JobGraphGenerator_postVisit_rdh
|
/**
* This method implements the post-visit during the depth-first traversal. When the post visit
* happens, all of the descendants have been processed, so this method connects all of the
* current node's predecessors to the current node.
*
* @param node
* The node currently processed during the post-visit.
 * @see org.apache.flink.util.Visitor#postVisit(org.apache.flink.util.Visitable)
*/
@Override
public void postVisit(PlanNode node) {
try {
// --------- check special cases for which we handle post visit differently ----------
// skip data source node (they have no inputs)
// also, do nothing for union nodes, we connect them later when gathering the inputs for
// a task
// solution sets have no input. the initial solution set input is connected when the
// iteration node is in its postVisit
if (((node instanceof SourcePlanNode) || (node instanceof NAryUnionPlanNode)) || (node instanceof SolutionSetPlanNode)) {
return;
}
// if this is a blocking shuffle vertex, we add one IntermediateDataSetID to its
// predecessor and return
if (checkAndConfigurePersistentIntermediateResult(node)) {
return;
}
// check if we have an iteration. in that case, translate the step function now
if (node instanceof IterationPlanNode) {
// prevent nested iterations
if (node.isOnDynamicPath()) {
throw new CompilerException("Nested Iterations are not possible at the moment!");
}
// if we recursively go into an iteration (because the constant path of one
// iteration contains
// another one), we push the current one onto the stack
if (this.currentIteration != null) {
this.iterationStack.add(this.currentIteration);
}
this.currentIteration = ((IterationPlanNode) (node));
this.currentIteration.acceptForStepFunction(this);
// pop the current iteration from the stack
if (this.iterationStack.isEmpty()) {
this.currentIteration = null;
                } else {
                    this.currentIteration = this.iterationStack.remove(this.iterationStack.size() - 1);
}
// inputs for initial bulk partial solution or initial workset are already connected
// to the iteration head in the head's post visit.
// connect the initial solution set now.
if (node instanceof WorksetIterationPlanNode) {
// connect the initial solution set
WorksetIterationPlanNode wsNode = ((WorksetIterationPlanNode) (node));
JobVertex headVertex = this.iterations.get(wsNode).getHeadTask();
TaskConfig v25 = new TaskConfig(headVertex.getConfiguration());
int inputIndex = v25.getDriverStrategy().getNumInputs();
v25.setIterationHeadSolutionSetInputIndex(inputIndex);
translateChannel(wsNode.getInitialSolutionSetInput(), inputIndex, headVertex, v25, false);
}
return;
}
final JobVertex targetVertex = this.vertices.get(node);
// --------- Main Path: Translation of channels ----------
//
// There are two paths of translation: One for chained tasks (or merged tasks in
// general),
// which do not have their own task vertex. The other for tasks that have their own
// vertex,
// or are the primary task in a vertex (to which the others are chained).
// check whether this node has its own task, or is merged with another one
if (targetVertex == null) {
// node's task is merged with another task. it is either chained, of a merged head
// vertex
// from an iteration
                final TaskInChain chainedTask;
if ((chainedTask = this.chainedTasks.get(node)) != null) {
// Chained Task. Sanity check first...
final Iterator<Channel> inConns = node.getInputs().iterator();
if (!inConns.hasNext()) {
                        throw new CompilerException("Bug: Found chained task with no input.");
                    }
                    final Channel inConn = inConns.next();
if (inConns.hasNext()) {
throw new CompilerException("Bug: Found a chained task with more than one input!");
}
if ((inConn.getLocalStrategy() != null) && (inConn.getLocalStrategy() != LocalStrategy.NONE)) {
throw new CompilerException("Bug: Found a chained task with an input local strategy.");
}
                    if ((inConn.getShipStrategy() != null) && (inConn.getShipStrategy() != ShipStrategyType.FORWARD)) {
                        throw new CompilerException("Bug: Found a chained task with an input ship strategy other than FORWARD.");
                    }
                    JobVertex container = chainedTask.getContainingVertex();
                    if (container == null) {
final PlanNode sourceNode = inConn.getSource();
container = this.vertices.get(sourceNode);
if (container == null) {
// predecessor is itself chained
container = this.chainedTasks.get(sourceNode).getContainingVertex();
if (container == null)
{
throw new IllegalStateException("Bug: Chained task predecessor has not been assigned its containing vertex.");
}
} else {
// predecessor is a proper task job vertex and this is the first chained
// task. add a forward connection entry.
new TaskConfig(container.getConfiguration()).addOutputShipStrategy(ShipStrategyType.FORWARD);
}
chainedTask.setContainingVertex(container);
}
// add info about the input serializer type
chainedTask.getTaskConfig().setInputSerializer(inConn.getSerializer(), 0);
// update name of container task
String containerTaskName = container.getName();
if (containerTaskName.startsWith("CHAIN ")) {
container.setName((containerTaskName + " -> ") + chainedTask.getTaskName());
} else {
container.setName((("CHAIN " + containerTaskName) + " -> ") + chainedTask.getTaskName());
}
// update resource of container task
container.setResources(container.getMinResources().merge(node.getMinResources()), container.getPreferredResources().merge(node.getPreferredResources()));
this.chainedTasksInSequence.add(chainedTask);
                    return;
                } else if ((node instanceof BulkPartialSolutionPlanNode) || (node instanceof WorksetPlanNode)) {
// merged iteration head task. the task that the head is merged with will take
// care of it
return;
} else {
throw new CompilerException("Bug: Unrecognized merged task vertex.");
}
}
// -------- Here, we translate non-chained tasks -------------
if (this.currentIteration != null) {
JobVertex head = this.iterations.get(this.currentIteration).getHeadTask();
// Exclude static code paths from the co-location constraint, because otherwise
// their execution determines the deployment slots of the co-location group
if (node.isOnDynamicPath()) {
targetVertex.setStrictlyCoLocatedWith(head);
}
}
// create the config that will contain all the description of the inputs
            final TaskConfig targetVertexConfig = new TaskConfig(targetVertex.getConfiguration());
// get the inputs. if this node is the head of an iteration, we obtain the inputs from
// the
// enclosing iteration node, because the inputs are the initial inputs to the iteration.
final Iterator<Channel> inConns;
if (node instanceof BulkPartialSolutionPlanNode) {
inConns = ((BulkPartialSolutionPlanNode) (node)).getContainingIterationNode().getInputs().iterator();
// because the partial solution has its own vertex, is has only one (logical) input.
// note this in the task configuration
targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
} else if (node instanceof WorksetPlanNode) {
WorksetPlanNode wspn = ((WorksetPlanNode) (node));
// input that is the initial workset
inConns = Collections.singleton(wspn.getContainingIterationNode().getInput2()).iterator();
// because we have a stand-alone (non-merged) workset iteration head, the initial
// workset will
// be input 0 and the solution set will be input 1
targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
targetVertexConfig.setIterationHeadSolutionSetInputIndex(1);
} else {
inConns = node.getInputs().iterator();
}
if (!inConns.hasNext()) {
throw new CompilerException("Bug: Found a non-source task with no input.");
}
int v38 = 0;
while (inConns.hasNext()) {
Channel input = inConns.next();
v38 += translateChannel(input, v38, targetVertex, targetVertexConfig, false);
}
// broadcast variables
int broadcastInputIndex = 0;
for (NamedChannel broadcastInput : node.getBroadcastInputs()) {
int broadcastInputIndexDelta = translateChannel(broadcastInput, broadcastInputIndex, targetVertex, targetVertexConfig, true);
targetVertexConfig.setBroadcastInputName(broadcastInput.getName(), broadcastInputIndex);
targetVertexConfig.setBroadcastInputSerializer(broadcastInput.getSerializer(), broadcastInputIndex);
broadcastInputIndex += broadcastInputIndexDelta;
}
} catch (Exception e) {
throw new CompilerException("An error occurred while translating the optimized plan to a JobGraph: " + e.getMessage(), e);
}
}
| 3.26 |
flink_JobGraphGenerator_preVisit_rdh
|
/**
 * This method implements the pre-visit during a depth-first traversal. It creates the job
 * vertex and sets the local strategy.
*
* @param node
* The node that is currently processed.
* @return True, if the visitor should descend to the node's children, false if not.
* @see org.apache.flink.util.Visitor#preVisit(org.apache.flink.util.Visitable)
*/
@Override
public boolean preVisit(PlanNode node) {
// check if we have visited this node before. in non-tree graphs, this happens
if ((this.vertices.containsKey(node) || this.chainedTasks.containsKey(node)) || this.iterations.containsKey(node)) {
// return false to prevent further descend
return false;
}
// the vertex to be created for the current node
final JobVertex vertex;
try {
if (node instanceof SinkPlanNode) {
vertex = createDataSinkVertex(((SinkPlanNode) (node)));
} else if (node instanceof SourcePlanNode) {
vertex = createDataSourceVertex(((SourcePlanNode) (node)));
} else if (node instanceof BulkIterationPlanNode) {
BulkIterationPlanNode iterationNode = ((BulkIterationPlanNode) (node));
// for the bulk iteration, we skip creating anything for now. we create the graph
// for the step function in the post visit.
// check that the root of the step function has the same parallelism as the
// iteration.
// because the tail must have the same parallelism as the head, we can only merge
// the last
// operator with the tail, if they have the same parallelism. not merging is
// currently not
// implemented
PlanNode root = iterationNode.getRootOfStepFunction();
if (root.getParallelism() != node.getParallelism()) {
throw new CompilerException("Error: The final operator of the step "
+ "function has a different parallelism than the iteration operator itself.");
}
IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
this.iterations.put(iterationNode, descr);
vertex = null;
} else if (node instanceof WorksetIterationPlanNode) {
WorksetIterationPlanNode iterationNode =
((WorksetIterationPlanNode) (node));
// we have the same constraints as for the bulk iteration
                PlanNode v12 = iterationNode.getNextWorkSetPlanNode();
                PlanNode solutionSetDelta = iterationNode.getSolutionSetDeltaPlanNode();
if (v12.getParallelism() != node.getParallelism()) {
throw new CompilerException("It is currently not supported that the final operator of the step " + "function has a different parallelism than the iteration operator itself.");
}
if (solutionSetDelta.getParallelism() != node.getParallelism()) {
throw new CompilerException("It is currently not supported that the final operator of the step " + "function has a different parallelism than the iteration operator itself.");
}
IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
this.iterations.put(iterationNode, descr);
vertex = null;
} else if (node instanceof SingleInputPlanNode) {
vertex =
createSingleInputVertex(((SingleInputPlanNode) (node)));
} else if (node instanceof DualInputPlanNode) {
vertex = createDualInputVertex(((DualInputPlanNode) (node)));
            } else if (node instanceof NAryUnionPlanNode) {
                // skip the union for now
                vertex = null;
            } else if (node instanceof BulkPartialSolutionPlanNode) {
                // create a head node (or not, if it is merged into its successor)
vertex = createBulkIterationHead(((BulkPartialSolutionPlanNode) (node)));
} else if (node instanceof SolutionSetPlanNode) {
// this represents an access into the solution set index.
// we do not create a vertex for the solution set here (we create the head at the
// workset place holder)
// we adjust the joins / cogroups that go into the solution set here
for (Channel c : node.getOutgoingChannels()) {
DualInputPlanNode target = ((DualInputPlanNode) (c.getTarget()));
JobVertex accessingVertex = this.vertices.get(target);
TaskConfig conf = new TaskConfig(accessingVertex.getConfiguration());
int inputNum = (c == target.getInput1()) ? 0 : c == target.getInput2() ? 1 : -1;
// sanity checks
if (inputNum == (-1)) {
throw new CompilerException();
                    }
                    // adjust the driver
if (conf.getDriver().equals(JoinDriver.class)) {
conf.setDriver(inputNum == 0 ? JoinWithSolutionSetFirstDriver.class : JoinWithSolutionSetSecondDriver.class);
} else if (conf.getDriver().equals(CoGroupDriver.class)) {
conf.setDriver(inputNum == 0 ? CoGroupWithSolutionSetFirstDriver.class : CoGroupWithSolutionSetSecondDriver.class);
} else {
throw new CompilerException("Found join with solution set using incompatible operator (only Join/CoGroup are valid).");
}
}
// make sure we do not visit this node again. for that, we add a 'already seen'
// entry into one of the sets
this.chainedTasks.put(node, ALREADY_VISITED_PLACEHOLDER);
vertex = null;
} else if (node instanceof WorksetPlanNode) {
// create the iteration head here
vertex = createWorksetIterationHead(((WorksetPlanNode) (node)));
} else {
throw new CompilerException("Unrecognized node type: " + node.getClass().getName());
}
} catch (Exception e) {
throw new CompilerException((("Error translating node '" + node) + "': ") + e.getMessage(), e);
}
// check if a vertex was created, or if it was chained or skipped
if (vertex != null) {
// set parallelism
int pd = node.getParallelism();
vertex.setParallelism(pd);
vertex.setMaxParallelism(pd);
vertex.setSlotSharingGroup(sharingGroup);
// check whether this vertex is part of an iteration step function
if (this.currentIteration != null) {
// check that the task has the same parallelism as the iteration as such
PlanNode iterationNode = ((PlanNode) (this.currentIteration));
if (iterationNode.getParallelism() < pd) {
throw new CompilerException("Error: All functions that are part of an iteration must have the same, or a lower, parallelism than the iteration operator.");
}
// store the id of the iterations the step functions participate in
IterationDescriptor descr = this.iterations.get(this.currentIteration);
new TaskConfig(vertex.getConfiguration()).setIterationId(descr.getId());
}
// store in the map
this.vertices.put(node, vertex);
}
// returning true causes deeper descend
return true;
}
| 3.26 |
flink_JobGraphGenerator_createSingleInputVertex_rdh
|
// ------------------------------------------------------------------------
// Methods for creating individual vertices
// ------------------------------------------------------------------------
private JobVertex createSingleInputVertex(SingleInputPlanNode node) throws CompilerException {
final String taskName = node.getNodeName();
final DriverStrategy ds = node.getDriverStrategy();
// check, whether chaining is possible
boolean chaining;
{
Channel inConn = node.getInput();
PlanNode pred = inConn.getSource();
            chaining = (ds.getPushChainDriverClass() != null)
                    && (!(pred instanceof NAryUnionPlanNode)) // first op after union is stand-alone, because union is merged
                    && (!(pred instanceof BulkPartialSolutionPlanNode)) // partial solution merges anyways
                    && (!(pred instanceof WorksetPlanNode)) // workset merges anyways
                    && (!(pred instanceof IterationPlanNode)) // cannot chain with iteration heads currently
                    && (inConn.getShipStrategy() == ShipStrategyType.FORWARD)
                    && (inConn.getLocalStrategy() == LocalStrategy.NONE)
                    && (pred.getOutgoingChannels().size() == 1)
                    && (node.getParallelism() == pred.getParallelism())
                    && node.getBroadcastInputs().isEmpty();
            // cannot chain the nodes that produce the next workset or the next solution set,
            // if they are not in a tail
if ((this.currentIteration instanceof WorksetIterationPlanNode) && (node.getOutgoingChannels().size() > 0)) {
WorksetIterationPlanNode wspn = ((WorksetIterationPlanNode) (this.currentIteration));
if ((wspn.getSolutionSetDeltaPlanNode() == pred) || (wspn.getNextWorkSetPlanNode() == pred)) {
chaining = false;
}
}
// cannot chain the nodes that produce the next workset in a bulk iteration if a
// termination criterion follows
if (this.currentIteration instanceof BulkIterationPlanNode) {
BulkIterationPlanNode wspn = ((BulkIterationPlanNode) (this.currentIteration));
if ((node == wspn.getRootOfTerminationCriterion()) && (wspn.getRootOfStepFunction() == pred)) {
chaining = false;
}
else if ((node.getOutgoingChannels().size() > 0) && ((wspn.getRootOfStepFunction() == pred) || (wspn.getRootOfTerminationCriterion() == pred))) {
chaining = false;
}
}
}
final JobVertex vertex;
final TaskConfig config;
if (chaining) {
            vertex = null;
            config = new TaskConfig(new Configuration());
            this.chainedTasks.put(node, new TaskInChain(node, ds.getPushChainDriverClass(), config, taskName));
        } else {
// create task vertex
vertex = new JobVertex(taskName);
vertex.setResources(node.getMinResources(), node.getPreferredResources());
vertex.setInvokableClass((this.currentIteration != null) && node.isOnDynamicPath() ? IterationIntermediateTask.class : BatchTask.class);
config = new TaskConfig(vertex.getConfiguration());
config.setDriver(ds.getDriverClass());
}
// set user code
config.setStubWrapper(node.getProgramOperator().getUserCodeWrapper());
config.setStubParameters(node.getProgramOperator().getParameters());
// set the driver strategy
config.setDriverStrategy(ds);
for (int i = 0; i < ds.getNumRequiredComparators(); i++) {
config.setDriverComparator(node.getComparator(i), i);
}
// assign memory, file-handles, etc.
assignDriverResources(node, config);
return vertex;
}
| 3.26 |
flink_AsyncWaitOperator_outputCompletedElement_rdh
|
/**
* Outputs one completed element. Watermarks are always completed if it's their turn to be
* processed.
*
* <p>This method will be called from {@link #processWatermark(Watermark)} and from a mail
* processing the result of an async function call.
*/
private void outputCompletedElement() {
if (queue.hasCompletedElements()) {
// emit only one element to not block the mailbox thread unnecessarily
queue.emitCompletedElement(timestampedCollector);
// if there are more completed elements, emit them with subsequent mails
if (queue.hasCompletedElements()) {
try {
mailboxExecutor.execute(this::outputCompletedElement, "AsyncWaitOperator#outputCompletedElement");
} catch (RejectedExecutionException mailboxClosedException) {
// This exception can only happen if the operator is cancelled which means all
// pending records can be safely ignored since they will be processed one more
// time after recovery.
LOG.debug("Attempt to complete element is ignored since the mailbox rejected the execution.", mailboxClosedException);
}
}
}
}
| 3.26 |
flink_AsyncWaitOperator_tryOnce_rdh
|
/**
 * Increments the number of attempts and fires the attempt.
*/
private void tryOnce(RetryableResultHandlerDelegator resultHandlerDelegator) throws Exception {
// increment current attempt number
resultHandlerDelegator.currentAttempts++;
// fire a new attempt
userFunction.asyncInvoke(resultHandlerDelegator.resultHandler.inputRecord.getValue(), resultHandlerDelegator);
}
| 3.26 |
flink_AsyncWaitOperator_timerTriggered_rdh
|
/**
* Rewrite the timeout process to deal with retry state.
*/
private void timerTriggered() throws Exception {
if (!resultHandler.completed.get()) {
// cancel delayed retry timer first
cancelRetryTimer();
// force reset retryAwaiting to prevent the handler to trigger retry unnecessarily
retryAwaiting.set(false);
userFunction.timeout(resultHandler.inputRecord.getValue(), this);
}
}
| 3.26 |
flink_AsyncWaitOperator_registerTimer_rdh
|
/**
* Utility method to register timeout timer.
*/
private ScheduledFuture<?> registerTimer(
        ProcessingTimeService processingTimeService, long timeout, ThrowingConsumer<Void, Exception> callback) {
final long timeoutTimestamp = timeout + processingTimeService.getCurrentProcessingTime();
return processingTimeService.registerTimer(timeoutTimestamp, timestamp -> callback.accept(null));
}
| 3.26 |
flink_FromJarEntryClassInformationProvider_getJarFile_rdh
|
/**
* Returns the specified {@code jarFile}.
*
* @return The specified {@code jarFile}.
* @see #getJobClassName()
*/
@Override
public Optional<File> getJarFile() {
return Optional.of(jarFile);
}
/**
* Returns the specified job class name that is either available in the corresponding {@code jarFile}. It can return an empty {@code Optional} if the job class is the entry class of the
* jar.
*
* @return Returns the job class that can be found in the respective {@code jarFile}. It can
also return an empty {@code Optional}
| 3.26 |
flink_FromJarEntryClassInformationProvider_createFromCustomJar_rdh
|
/**
* Creates a {@code FromJarEntryClassInformationProvider} for a custom Jar archive. At least the
* {@code jarFile} or the {@code jobClassName} has to be set.
*
* @param jarFile
* The Jar archive.
* @param jobClassName
* The name of the job class.
* @return The {@code FromJarEntryClassInformationProvider} referring to the passed information.
*/
public static FromJarEntryClassInformationProvider createFromCustomJar(File jarFile, @Nullable
String jobClassName) {
return new FromJarEntryClassInformationProvider(jarFile, jobClassName);
}
| 3.26 |
flink_FromJarEntryClassInformationProvider_createFromPythonJar_rdh
|
/**
* Creates a {@code FromJarEntryClassInformationProvider} for a job implemented in Python.
*
* @return A {@code FromJarEntryClassInformationProvider} for a job implemented in Python
*/
public static FromJarEntryClassInformationProvider createFromPythonJar() {
return new FromJarEntryClassInformationProvider(new File(PackagedProgramUtils.getPythonJar().getPath()), PackagedProgramUtils.getPythonDriverClassName());
}
| 3.26 |
flink_ListView_setList_rdh
|
/**
* Replaces the entire view's content with the content of the given {@link List}.
*/
public void setList(List<T> list) {
this.list = list;
}
| 3.26 |
flink_ListView_getList_rdh
|
/**
* Returns the entire view's content as an instance of {@link List}.
*/
public List<T> getList() {
return list;
}
| 3.26 |
flink_ListView_newListViewDataType_rdh
|
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
* Utility method for creating a {@link DataType} of {@link ListView} explicitly.
*/
public static DataType newListViewDataType(DataType elementDataType) {
return DataTypes.STRUCTURED(ListView.class, DataTypes.FIELD("list", DataTypes.ARRAY(elementDataType).bridgedTo(List.class)));
}
| 3.26 |
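For illustration, the explicit data type of a ListView holding strings, e.g. when declaring an accumulator type by hand; the variable name is arbitrary.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.dataview.ListView;
import org.apache.flink.table.types.DataType;

// Structured type wrapping ARRAY<STRING> bridged to java.util.List, as built by the utility above.
DataType listViewType = ListView.newListViewDataType(DataTypes.STRING());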
flink_ListView_add_rdh
|
/**
* Adds the given value to the list.
*
* @throws Exception
* Thrown if the system cannot add data.
* @param value
* The element to be appended to this list view.
*/
public void add(T value) throws Exception {
list.add(value);
}
| 3.26 |
flink_ListView_m0_rdh
|
/**
* Returns an iterable of the list view.
*
* @throws Exception
* Thrown if the system cannot get data.
* @return The iterable of the list.
*/
public Iterable<T> m0() throws Exception {
return list;
}
| 3.26 |
flink_ListView_clear_rdh
|
/**
* Removes all of the elements from this list view.
*/
@Override
public void clear() {
list.clear();
}
| 3.26 |
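A sketch of where a ListView is typically used: as a public field of an AggregateFunction accumulator, so the planner can back it with state. The function below is hypothetical, and it assumes the obfuscated m0() above corresponds to ListView#get().

import org.apache.flink.table.api.dataview.ListView;
import org.apache.flink.table.functions.AggregateFunction;

// Hypothetical aggregate that concatenates every observed string.
public class ConcatAgg extends AggregateFunction<String, ConcatAgg.Acc> {

    public static class Acc {
        // Backed by Flink state when used inside an accumulator.
        public ListView<String> values = new ListView<>();
    }

    @Override
    public Acc createAccumulator() {
        return new Acc();
    }

    // accumulate is resolved by reflection, hence no @Override.
    public void accumulate(Acc acc, String value) throws Exception {
        acc.values.add(value);
    }

    @Override
    public String getValue(Acc acc) {
        StringBuilder sb = new StringBuilder();
        try {
            for (String s : acc.values.get()) {
                sb.append(s);
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return sb.toString();
    }
}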
flink_ExecutionEnvironment_m0_rdh
|
// ----------------------------------- Generic Input Format
// ---------------------------------------
/**
* Generic method to create an input {@link DataSet} with in {@link InputFormat}. The DataSet
* will not be immediately created - instead, this method returns a DataSet that will be lazily
* created from the input format once the program is executed.
*
* <p>Since all data sets need specific information about their types, this method needs to
* determine the type of the data produced by the input format. It will attempt to determine the
* data type by reflection, unless the input format implements the {@link ResultTypeQueryable}
* interface. In the latter case, this method will invoke the {@link ResultTypeQueryable#getProducedType()} method to determine the data type produced by the input
* format.
*
* @param inputFormat
* The input format used to create the data set.
* @return A {@link DataSet} that represents the data created by the input format.
* @see #createInput(InputFormat, TypeInformation)
*/
public <X> DataSource<X> m0(InputFormat<X, ?> inputFormat) {
if (inputFormat == null) {
throw new IllegalArgumentException("InputFormat must not be null.");
}
try {
return createInput(inputFormat, TypeExtractor.getInputFormatTypes(inputFormat));
} catch (Exception e) {
throw new InvalidProgramException("The type returned by the input format could not be automatically determined. " + "Please specify the TypeInformation of the produced type explicitly by using the " + "'createInput(InputFormat, TypeInformation)' method instead.", e);
}
}
| 3.26 |
flink_ExecutionEnvironment_generateSequence_rdh
|
/**
* Creates a new data set that contains a sequence of numbers. The data set will be created in
* parallel, so there is no guarantee about the order of the elements.
*
* @param from
* The number to start at (inclusive).
* @param to
* The number to stop at (inclusive).
* @return A DataSet, containing all number in the {@code [from, to]} interval.
*/
public DataSource<Long> generateSequence(long from, long to) {
return fromParallelCollection(new NumberSequenceIterator(from, to), BasicTypeInfo.LONG_TYPE_INFO, Utils.getCallLocationName());
}
| 3.26 |
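A quick usage sketch; the range and the follow-up filter are arbitrary.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// 1 through 1_000_000 inclusive; produced in parallel, so the order is not guaranteed.
DataSet<Long> numbers = env.generateSequence(1, 1_000_000);
DataSet<Long> evens = numbers.filter(n -> n % 2 == 0);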
flink_ExecutionEnvironment_fromElements_rdh
|
/**
* Creates a new data set that contains the given elements. The framework will determine the
* type according to the base type supplied by the user. The elements must be of the base type
* or a subclass of it. The sequence of elements must not be empty. Note that this
* operation will result in a non-parallel data source, i.e. a data source with a parallelism of
* one.
*
* @param type
* The base class type for every element in the collection.
* @param data
* The elements to make up the data set.
* @return A DataSet representing the given list of elements.
*/
@SafeVarargs
public final <X> DataSource<X> fromElements(Class<X> type, X... data) {
if (data == null) {
throw new IllegalArgumentException("The data must not be null.");
}
if (data.length == 0) {
throw new IllegalArgumentException("The number of elements must not be zero.");
}
TypeInformation<X> typeInfo;
try {
typeInfo = TypeExtractor.getForClass(type);
} catch (Exception e) {
throw new RuntimeException("Could not create TypeInformation for type " + type.getName() + "; please specify the TypeInformation manually via " + "ExecutionEnvironment#fromElements(Collection, TypeInformation)", e);
}
return fromCollection(Arrays.asList(data), typeInfo, Utils.getCallLocationName());
}
| 3.26 |
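For illustration, mixing subclasses under a common base type; the values are arbitrary.

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// Integer, Long and Double all extend Number, so Number.class serves as the base type.
DataSource<Number> numbers = env.fromElements(Number.class, 1, 2L, 3.5);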
flink_ExecutionEnvironment_registerJobListener_rdh
|
/**
* Register a {@link JobListener} in this environment. The {@link JobListener} will be notified
* on specific job status changed.
*/
@PublicEvolving
public void registerJobListener(JobListener jobListener) {
checkNotNull(jobListener, "JobListener cannot be null");
jobListeners.add(jobListener);
}
| 3.26 |
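A sketch of a listener that only logs the callbacks, assuming the org.apache.flink.core.execution.JobListener interface with its two notification methods; exactly one of the two arguments is non-null in each callback.

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.core.execution.JobListener;

env.registerJobListener(new JobListener() {
    @Override
    public void onJobSubmitted(JobClient jobClient, Throwable throwable) {
        System.out.println("submitted: " + (throwable == null ? jobClient.getJobID() : throwable));
    }

    @Override
    public void onJobExecuted(JobExecutionResult result, Throwable throwable) {
        System.out.println("finished: " + (throwable == null ? result.getNetRuntime() + " ms" : throwable));
    }
});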
flink_ExecutionEnvironment_getConfig_rdh
|
// --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
/**
* Gets the config object that defines execution parameters.
*
* @return The environment's execution configuration.
*/
public ExecutionConfig getConfig() {
return config;
}
| 3.26 |
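For example, execution parameters can be tuned through the returned config; the specific settings below are only illustrative.

import org.apache.flink.api.common.ExecutionConfig;

ExecutionConfig config = env.getConfig();
config.enableObjectReuse(); // let the runtime reuse objects between function calls
config.setParallelism(4);   // default parallelism for operators without an explicit setting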
flink_ExecutionEnvironment_addDefaultKryoSerializer_rdh
|
/**
* Adds a new Kryo default serializer to the Runtime.
*
* @param type
* The class of the types serialized with the given serializer.
* @param serializerClass
* The class of the serializer to use.
*/
public void addDefaultKryoSerializer(Class<?> type, Class<? extends Serializer<?>> serializerClass) {
config.addDefaultKryoSerializer(type, serializerClass);
}
| 3.26 |
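A sketch using a made-up type and serializer, assuming the shaded Kryo 2.x Serializer API that Flink bundles; the serializer class needs a no-arg constructor because Flink instantiates it itself.

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

// Hypothetical user type, not part of Flink.
class LegacyEvent {
    String payload;
}

// Hypothetical serializer for that type.
class LegacyEventSerializer extends Serializer<LegacyEvent> {
    @Override
    public void write(Kryo kryo, Output output, LegacyEvent event) {
        output.writeString(event.payload);
    }

    @Override
    public LegacyEvent read(Kryo kryo, Input input, Class<LegacyEvent> type) {
        LegacyEvent event = new LegacyEvent();
        event.payload = input.readString();
        return event;
    }
}

// Register the serializer as the Kryo default for LegacyEvent.
env.addDefaultKryoSerializer(LegacyEvent.class, LegacyEventSerializer.class);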
flink_ExecutionEnvironment_getExecutionPlan_rdh
|
/**
* Creates the plan with which the system will execute the program, and returns it as a String
* using a JSON representation of the execution data flow graph.
*
* @return The execution plan of the program, as a JSON String.
* @throws Exception
* Thrown, if the compiler could not be instantiated.
*/
public String getExecutionPlan() throws Exception {
Plan p = createProgramPlan(getJobName(), false);
return ExecutionPlanUtil.getExecutionPlanAsJSON(p);
}
| 3.26 |
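A small, self-contained sketch that prints the JSON plan without executing the job; the pipeline itself is arbitrary.

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;

public class PlanPreview {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // The plan is only complete once at least one sink has been attached.
        env.generateSequence(1, 1000)
                .filter(n -> n % 2 == 0)
                .output(new DiscardingOutputFormat<>());
        System.out.println(env.getExecutionPlan()); // JSON data flow graph; nothing is executed
    }
}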
flink_ExecutionEnvironment_registerDataSink_rdh
|
/**
* Adds the given sink to this environment. Only sinks that have been added will be executed
* once the {@link #execute()} or {@link #execute(String)} method is called.
*
* @param sink
* The sink to add for execution.
*/
@Internal
void registerDataSink(DataSink<?> sink) {
this.sinks.add(sink);
}
| 3.26 |
flink_ExecutionEnvironment_readTextFile_rdh
|
/**
* Creates a {@link DataSet} that represents the Strings produced by reading the given file line
* wise. The {@link java.nio.charset.Charset} with the given name will be used to read the
* files.
*
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param charsetName
* The name of the character set used to read the file.
* @return A {@link DataSet} that represents the data read from the given file as text lines.
*/
public DataSource<String> readTextFile(String filePath, String charsetName) {
Preconditions.checkNotNull(filePath, "The file path may not be null.");
TextInputFormat format = new TextInputFormat(new Path(filePath));
format.setCharsetName(charsetName);
return new DataSource<>(this, format, BasicTypeInfo.STRING_TYPE_INFO, Utils.getCallLocationName());
}
| 3.26 |
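A usage sketch; the path and charset are placeholders, and any name accepted by java.nio.charset.Charset works.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<String> lines = env.readTextFile("file:///tmp/input.txt", "ISO-8859-1");
DataSet<String> nonEmpty = lines.filter(line -> !line.isEmpty());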
flink_ExecutionEnvironment_m1_rdh
|
/**
* Creates a new data set that contains elements in the iterator. The iterator is splittable,
* allowing the framework to create a parallel data source that returns the elements in the
* iterator.
*
* <p>Because the iterator will remain unmodified until the actual execution happens, the type
* of data returned by the iterator must be given explicitly in the form of the type
* information. This method is useful for cases where the type is generic. In that case, the
* type class (as given in {@link #fromParallelCollection(SplittableIterator, Class)} does not
* supply all type information.
*
* @param iterator
* The iterator that produces the elements of the data set.
* @param type
* The TypeInformation for the produced data set.
* @return A DataSet representing the elements in the iterator.
* @see #fromParallelCollection(SplittableIterator, Class)
*/
public <X> DataSource<X> m1(SplittableIterator<X> iterator, TypeInformation<X> type) {
return fromParallelCollection(iterator, type, Utils.getCallLocationName());
}
| 3.26 |
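Assuming the obfuscated m1 corresponds to ExecutionEnvironment#fromParallelCollection(SplittableIterator, TypeInformation), a usage sketch with the built-in NumberSequenceIterator.

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.util.NumberSequenceIterator;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// NumberSequenceIterator is a SplittableIterator, so the source can be split across parallel tasks.
DataSet<Long> numbers = env.fromParallelCollection(new NumberSequenceIterator(1L, 10_000L), BasicTypeInfo.LONG_TYPE_INFO);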
flink_ExecutionEnvironment_createInput_rdh
|
/**
* Generic method to create an input DataSet with in {@link InputFormat}. The {@link DataSet}
* will not be immediately created - instead, this method returns a {@link DataSet} that will be
* lazily created from the input format once the program is executed.
*
* <p>The {@link DataSet} is typed to the given TypeInformation. This method is intended for
* input formats where the return type cannot be determined by reflection analysis, and
* that do not implement the {@link ResultTypeQueryable} interface.
*
* @param inputFormat
* The input format used to create the data set.
* @param producedType
* The TypeInformation for the type produced by the input format.
* @return A {@link DataSet} that represents the data created by the input format.
* @see #createInput(InputFormat)
*/
public <X> DataSource<X> createInput(InputFormat<X, ?> inputFormat, TypeInformation<X> producedType) {
if (inputFormat == null) {
throw new IllegalArgumentException("InputFormat must not be null.");
}
if (producedType == null) {
throw new IllegalArgumentException("Produced type information must not be null.");
}
return new DataSource<>(this, inputFormat, producedType, Utils.getCallLocationName());
}
| 3.26 |
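A usage sketch with an explicitly stated produced type; the file path is a placeholder.

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
TextInputFormat format = new TextInputFormat(new Path("file:///tmp/input.txt"));
// The produced type is declared explicitly instead of being derived by reflection.
DataSet<String> lines = env.createInput(format, BasicTypeInfo.STRING_TYPE_INFO);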