name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
flink_InPlaceMutableHashTable_updateTableEntryWithReduce_rdh
|
/**
* Looks up the table entry that has the same key as the given record, and updates it by
* performing a reduce step.
*
* @param record
* The record to update.
* @throws Exception
*/
public void updateTableEntryWithReduce(T record) throws Exception {
T match = prober.getMatchFor(record, reuse);
if (match == null) {
prober.insertAfterNoMatch(record);
} else {
// do the reduce step
T res = reducer.reduce(match, record);
// We have given reuse to the reducer UDF, so create new one if object reuse is
// disabled
if (!objectReuseEnabled) {
reuse = buildSideSerializer.createInstance();
}
prober.updateMatch(res);
}
}
| 3.26 |
flink_InPlaceMutableHashTable_overwriteRecordAt_rdh
|
/**
* Overwrites a record at the specified position. The record is read from a DataInputView
* (this will be the staging area). WARNING: The record must not be larger than the original
* record.
*
* @param pointer
* Points to the position to overwrite.
* @param input
* The DataInputView to read the record from
* @param size
* The size of the record
* @throws IOException
*/
public void overwriteRecordAt(long pointer, DataInputView input, int size) throws IOException {
setWritePosition(pointer);
outView.write(input, size);
}
| 3.26 |
flink_InPlaceMutableHashTable_insertAfterNoMatch_rdh
|
/**
* This method can be called after getMatchFor returned null. It inserts the given record to
* the hash table. Important: The given record should have the same key as the record that
* was given to getMatchFor! WARNING: Don't do any modifications to the table between
* getMatchFor and insertAfterNoMatch!
*
* @throws IOException
* (EOFException specifically, if memory ran out)
*/
public void insertAfterNoMatch(T record) throws IOException {
if (closed) {
return;
}
// create new link
long pointerToAppended;
try {
pointerToAppended = recordArea.appendPointerAndRecord(END_OF_LIST, record);
} catch (EOFException ex) {
m0();
insert(record);
return;
}
// add new link to the end of the list
if (f1 == INVALID_PREV_POINTER) {
// list was empty
bucketSegments[bucketSegmentIndex].putLong(bucketOffset, pointerToAppended);
} else {
// update the pointer of the last element of the list.
recordArea.overwritePointerAt(f1, pointerToAppended);
}
numElements++;
resizeTableIfNecessary();
}
| 3.26 |
flink_InPlaceMutableHashTable_setReadPosition_rdh
|
// ----------------------- Input -----------------------
public void setReadPosition(long position) {
inView.setReadPosition(position);
}
| 3.26 |
flink_InPlaceMutableHashTable_getMatchFor_rdh
|
/**
* Searches the hash table for the record with the given key. (If there are multiple
* matches, only one is returned.)
*
* @param record
* The record whose key we are searching for
* @param targetForMatch
* If a match is found, it will be written here
* @return targetForMatch if a match is found, otherwise null.
*/
@Override
public T getMatchFor(PT record, T targetForMatch) {
if (closed) {
return null;
}
final int hashCode = MathUtils.jenkinsHash(probeTypeComparator.hash(record));
final int bucket = hashCode & numBucketsMask;
bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
curElemPtr = bucketSegment.getLong(bucketOffset);
pairComparator.setReference(record);
T currentRecordInList = targetForMatch;
f1 = INVALID_PREV_POINTER;
try {
while ((curElemPtr != END_OF_LIST) && (!closed)) {
recordArea.setReadPosition(curElemPtr);
nextPtr = recordArea.readPointer();
currentRecordInList = recordArea.readRecord(currentRecordInList);
recordEnd = recordArea.getReadPosition();
if (pairComparator.equalToReference(currentRecordInList)) {
// we found an element with a matching key, and not just a hash collision
return currentRecordInList;
}
f1 = curElemPtr;
curElemPtr = nextPtr;
}
} catch (IOException ex) {
throw new RuntimeException("Error deserializing record from the hashtable: " + ex.getMessage(), ex);
}
return null;
}
| 3.26 |
flink_RowTimeMiniBatchDeduplicateFunction_miniBatchDeduplicateOnRowTime_rdh
|
/**
* Processes elements to deduplicate on keys with row-time semantics, sends the current element if
* it is the last or first row, and retracts the previous element if needed.
*
* @param state
* state of function
* @param bufferedRows
* the latest rows received by the deduplicate function
* @param out
* underlying collector
* @param generateUpdateBefore
* flag to generate UPDATE_BEFORE message or not
* @param generateInsert
* flag to generate INSERT message or not
* @param rowtimeIndex
* the index of rowtime field
* @param keepLastRow
* flag to keep last row or keep first row
*/
private static void miniBatchDeduplicateOnRowTime(ValueState<RowData> state, List<RowData> bufferedRows, Collector<RowData> out, boolean generateUpdateBefore, boolean generateInsert, int rowtimeIndex, boolean keepLastRow) throws Exception {
if (bufferedRows.isEmpty()) {
return;
}
RowData preRow = state.value();
// Note: we output all changelog here rather than comparing the first and the last
// record in buffer then output at most two changelog.
// The motivation is we need all changelog in versioned table of temporal join.
for (RowData currentRow : bufferedRows) {
checkInsertOnly(currentRow);
if (isDuplicate(preRow, currentRow, rowtimeIndex, keepLastRow)) {
updateDeduplicateResult(generateUpdateBefore, generateInsert, preRow, currentRow, out);
preRow = currentRow;
}
}
state.update(preRow);
}
| 3.26 |
flink_FlinkTypeSystem_m1_rdh
|
/**
* Java numeric types always have invalid precision/scale; use their default decimal
* precision/scale instead.
*/
private RelDataType m1(RelDataTypeFactory typeFactory, RelDataType relDataType) {
return RelDataTypeFactoryImpl.isJavaType(relDataType) ? typeFactory.decimalOf(relDataType) : relDataType;
}
| 3.26 |
flink_CloseableIterable_empty_rdh
|
/**
* Returns an empty iterator.
*/
static <T> CloseableIterable<T> empty() {
return new CloseableIterable.Empty<>();
}
| 3.26 |
flink_FlinkCompletableFutureAssert_m0_rdh
|
/**
* An equivalent of {@link #succeedsWithin(Duration)} that doesn't rely on timeouts.
*
* @return a new assertion object on the future's result
*/
public ObjectAssert<T> m0() {
final T object = assertEventuallySucceeds(info, actual);
return new ObjectAssert<>(object);
}
| 3.26 |
flink_FlinkCompletableFutureAssert_eventuallyFailsWith_rdh
|
/**
* An equivalent of {@link #failsWithin(Duration)} that doesn't rely on timeouts.
*
* @param exceptionClass
* type of the exception we expect the future to complete with
* @return a new assertion instance on the future's exception.
* @param <E>
* type of the exception we expect the future to complete with
*/
public <E extends Throwable> ThrowableAssertAlternative<E> eventuallyFailsWith(Class<E> exceptionClass) {
return eventuallyFails().withThrowableOfType(exceptionClass);
}
| 3.26 |
flink_FlinkCompletableFutureAssert_withThrowableOfType_rdh
|
/**
* Checks that the underlying throwable is of the given type and returns a {@link ThrowableAssertAlternative} to chain further assertions on the underlying throwable.
*
* @param type
* the expected {@link Throwable} type
* @param <T>
* the expected {@link Throwable} type
* @return a {@link ThrowableAssertAlternative} built with underlying throwable.
*/
public <T extends Throwable> ThrowableAssertAlternative<T> withThrowableOfType(Class<T> type) {
final ThrowableAssertAlternative<Throwable> throwableAssert = new ThrowableAssertAlternative<>(throwable).isInstanceOf(type);
@SuppressWarnings("unchecked")
final ThrowableAssertAlternative<T> cast = ((ThrowableAssertAlternative<T>) (throwableAssert));
return cast;
}
| 3.26 |
flink_DataSet_join_rdh
|
/**
* Initiates a Join transformation.
*
* <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
* and provides multiple ways to combine joining elements into one DataSet.
*
* <p>This method returns a {@link JoinOperatorSets} on which one of the {@code where} methods
* can be called to define the join key of the first joining (i.e., this) DataSet.
*
* @param other
* The other DataSet with which this DataSet is joined.
* @param strategy
* The strategy that should be used to execute the join. If {@code null} is given,
* then the optimizer will pick the join strategy.
* @return A JoinOperatorSets to continue the definition of the Join transformation.
* @see JoinOperatorSets
* @see DataSet
*/
public <R> JoinOperatorSets<T, R> join(DataSet<R> other, JoinHint strategy) {
return new JoinOperatorSets<>(this, other, strategy);
}
| 3.26 |
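A minimal usage sketch for this join(DataSet, JoinHint) variant, added here for illustration and not part of the dataset row above. It assumes the standard Flink Java DataSet API is on the classpath; the class name and sample data are hypothetical.

import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical example class; names and data are illustrative only.
public class JoinHintExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> users =
                env.fromElements(Tuple2.of(1, "alice"), Tuple2.of(2, "bob"));
        DataSet<Tuple2<Integer, Double>> scores =
                env.fromElements(Tuple2.of(1, 0.9), Tuple2.of(2, 0.7));
        // Join on the first tuple field of both inputs; the hint asks the optimizer
        // to broadcast and hash the second (scores) input.
        users.join(scores, JoinHint.BROADCAST_HASH_SECOND)
                .where(0)
                .equalTo(0)
                .print();
    }
}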
flink_DataSet_leftOuterJoin_rdh
|
/**
* Initiates a Left Outer Join transformation.
*
* <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
* equality and provides multiple ways to combine joining elements into one DataSet.
*
* <p>Elements of the <b>left</b> DataSet (i.e. {@code this}) that do not have a matching
* element on the other side are joined with {@code null} and emitted to the resulting DataSet.
*
* @param other
* The other DataSet with which this DataSet is joined.
* @param strategy
* The strategy that should be used to execute the join. If {@code null} is given,
* then the optimizer will pick the join strategy.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
* @see DataSet
*/
public <R> JoinOperatorSetsBase<T, R> leftOuterJoin(DataSet<R> other, JoinHint strategy) {
switch (strategy) {
case OPTIMIZER_CHOOSES :
case REPARTITION_SORT_MERGE :
case REPARTITION_HASH_FIRST :
case REPARTITION_HASH_SECOND :
case BROADCAST_HASH_SECOND :
return new JoinOperatorSetsBase<>(this, other, strategy, JoinType.LEFT_OUTER);
default :
throw new InvalidProgramException("Invalid JoinHint for LeftOuterJoin: " + strategy);
}
}
| 3.26 |
flink_DataSet_m0_rdh
|
/**
* Syntactic sugar for {@link #aggregate(Aggregations, int)} using {@link Aggregations#MAX} as
* the aggregation function.
*
* <p><strong>Note:</strong> This operation is not to be confused with {@link #maxBy(int...)},
* which selects one element with maximum value at the specified field positions.
*
* @param field
* The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the max'ed DataSet.
* @see #aggregate(Aggregations, int)
* @see #maxBy(int...)
*/
public AggregateOperator<T> m0(int field) {
return aggregate(Aggregations.MAX, field);
}
| 3.26 |
flink_DataSet_partitionCustom_rdh
|
/**
* Partitions a DataSet on the key returned by the selector, using a custom partitioner. This
* method takes the key selector to get the key to partition on, and a partitioner that accepts
* the key type.
*
* <p>Note: This method works only on single field keys, i.e. the selector cannot return tuples
* of fields.
*
* @param partitioner
* The partitioner to assign partitions to keys.
* @param keyExtractor
* The KeyExtractor with which the DataSet is partitioned.
* @return The partitioned DataSet.
* @see KeySelector
*/
public <K extends Comparable<K>> PartitionOperator<T> partitionCustom(Partitioner<K> partitioner, KeySelector<T, K> keyExtractor) {
final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
return new PartitionOperator<>(this, new Keys.SelectorFunctionKeys<>(keyExtractor, getType(), keyType), clean(partitioner), Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_coGroup_rdh
|
// --------------------------------------------------------------------------------------------
// Co-Grouping
// --------------------------------------------------------------------------------------------
/**
* Initiates a CoGroup transformation.
*
* <p>A CoGroup transformation combines the elements of two {@link DataSet DataSets} into one
* DataSet. It groups each DataSet individually on a key and gives groups of both DataSets with
* equal keys together into a {@link org.apache.flink.api.common.functions.RichCoGroupFunction}.
* If a DataSet has a group with no matching key in the other DataSet, the CoGroupFunction is
* called with an empty group for the non-existing group.
*
* <p>The CoGroupFunction can iterate over the elements of both groups and return any number of
* elements including none.
*
* <p>This method returns a {@link CoGroupOperatorSets} on which one of the {@code where}
* methods can be called to define the join key of the first joining (i.e., this) DataSet.
*
* @param other
* The other DataSet of the CoGroup transformation.
* @return A CoGroupOperatorSets to continue the definition of the CoGroup transformation.
* @see CoGroupOperatorSets
* @see CoGroupOperator
* @see DataSet
*/
public <R> CoGroupOperator.CoGroupOperatorSets<T, R> coGroup(DataSet<R> other) {
return new CoGroupOperator.CoGroupOperatorSets<>(this, other);
}
| 3.26 |
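A minimal usage sketch for coGroup, added for illustration and not part of the snippet above. It assumes the standard DataSet API; the class name, keys, and data are hypothetical.

import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

// Hypothetical example class; counts group sizes per key on both sides.
public class CoGroupExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> left =
                env.fromElements(Tuple2.of(1, "a"), Tuple2.of(2, "b"));
        DataSet<Tuple2<Integer, String>> right =
                env.fromElements(Tuple2.of(1, "x"), Tuple2.of(3, "y"));
        // Keys that exist on only one side are passed in with an empty group on the other side.
        left.coGroup(right)
                .where(0)
                .equalTo(0)
                .with(new CoGroupFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, String>() {
                    @Override
                    public void coGroup(
                            Iterable<Tuple2<Integer, String>> first,
                            Iterable<Tuple2<Integer, String>> second,
                            Collector<String> out) {
                        int l = 0;
                        int r = 0;
                        for (Tuple2<Integer, String> ignored : first) { l++; }
                        for (Tuple2<Integer, String> ignored : second) { r++; }
                        out.collect("left=" + l + ", right=" + r);
                    }
                })
                .print();
    }
}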
flink_DataSet_aggregate_rdh
|
// --------------------------------------------------------------------------------------------
// Non-grouped aggregations
// --------------------------------------------------------------------------------------------
/**
* Applies an Aggregate transformation on a non-grouped {@link Tuple} {@link DataSet}.
*
* <p><b>Note: Only Tuple DataSets can be aggregated.</b> The transformation applies a built-in
* {@link Aggregations Aggregation} on a specified field of a Tuple DataSet. Additional
* aggregation functions can be added to the resulting {@link AggregateOperator} by calling
* {@link AggregateOperator#and(Aggregations, int)}.
*
* @param agg
* The built-in aggregation function that is computed.
* @param field
* The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the aggregated DataSet.
* @see Tuple
* @see Aggregations
* @see AggregateOperator
* @see DataSet
*/
public AggregateOperator<T> aggregate(Aggregations agg, int field) {
return new AggregateOperator<>(this, agg, field, Utils.getCallLocationName());
}
| 3.26 |
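A minimal usage sketch for aggregate(Aggregations, int), added for illustration (not from the source); the class name and data are hypothetical. It also shows chaining a second aggregation with and(...), as mentioned in the Javadoc above.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.aggregation.Aggregations;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical example class; sums field 0 and takes the max of field 1.
public class AggregateExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, Double>> data =
                env.fromElements(Tuple2.of(1, 2.0), Tuple2.of(3, 4.0), Tuple2.of(5, 6.0));
        // SUM over field 0, then chain a MAX over field 1 on the same operator.
        data.aggregate(Aggregations.SUM, 0)
                .and(Aggregations.MAX, 1)
                .print();
    }
}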
flink_DataSet_count_rdh
|
/**
* Convenience method to get the count (number of elements) of a DataSet.
*
* @return A long integer that represents the number of elements in the data set.
*/
public long count() throws Exception {
final String id = new AbstractID().toString();
output(new Utils.CountHelper<T>(id)).name("count()");
JobExecutionResult res = getExecutionEnvironment().execute();
return res.<Long>getAccumulatorResult(id);
}
| 3.26 |
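A minimal usage sketch for count(), added for illustration (not from the source); the class name and data are hypothetical.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

// Hypothetical example class.
public class CountExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<String> words = env.fromElements("a", "b", "c");
        // count() adds a sink and triggers job execution immediately.
        long n = words.count();
        System.out.println("number of elements: " + n);
    }
}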
flink_DataSet_fillInType_rdh
|
// --------------------------------------------------------------------------------------------
// Type Information handling
// --------------------------------------------------------------------------------------------
/**
* Tries to fill in the type information. Type information can be filled in later when the
* program uses a type hint. This method checks whether the type information has ever been
* accessed before and does not allow modifications if the type was accessed already. This
* ensures consistency by making sure different parts of the operation do not assume different
* type information.
*
* @param typeInfo
* The type information to fill in.
* @throws IllegalStateException
* Thrown, if the type information has been accessed before.
*/
protected void fillInType(TypeInformation<T> typeInfo) {
if (typeUsed) {
throw new IllegalStateException(("TypeInformation cannot be filled in for the type after it has been used. " + "Please make sure that the type info hints are the first call after the transformation function, ") + "before any access to types or semantic properties, etc.");
}
this.type = typeInfo;
}
| 3.26 |
flink_DataSet_joinWithTiny_rdh
|
/**
* Initiates a Join transformation.
*
* <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
* and provides multiple ways to combine joining elements into one DataSet.
*
* <p>This method also gives the hint to the optimizer that the second DataSet to join is much
* smaller than the first one.
*
* <p>This method returns a {@link JoinOperatorSets} on which {@link JoinOperatorSets#where(String...)} needs to be called to define the join key of the first
* joining (i.e., this) DataSet.
*
* @param other
* The other DataSet with which this DataSet is joined.
* @return A JoinOperatorSets to continue the definition of the Join transformation.
* @see JoinOperatorSets
* @see DataSet
*/
public <R> JoinOperatorSets<T, R> joinWithTiny(DataSet<R> other) {
return new JoinOperatorSets<>(this, other, JoinHint.BROADCAST_HASH_SECOND);
}
| 3.26 |
flink_DataSet_crossWithTiny_rdh
|
/**
* Initiates a Cross transformation.
*
* <p>A Cross transformation combines the elements of two {@link DataSet DataSets} into one
* DataSet. It builds all pair combinations of elements of both DataSets, i.e., it builds a
* Cartesian product. This method also gives the hint to the optimizer that the second DataSet
* to cross is much smaller than the first one.
*
* <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps
* each pair of crossed elements into a {@link Tuple2}, with the element of the first input
* being the first field of the tuple and the element of the second input being the second field
* of the tuple.
*
* <p>Call {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)}
* to define a {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
* each pair of crossed elements. The CrossFunction returns exactly one element for each pair
* of input elements.
*
* @param other
* The other DataSet with which this DataSet is crossed.
* @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
* @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
* @see org.apache.flink.api.common.functions.CrossFunction
* @see DataSet
* @see Tuple2
*/
public <R> CrossOperator.DefaultCross<T, R> crossWithTiny(DataSet<R> other) {
return new CrossOperator.DefaultCross<>(this, other, CrossHint.SECOND_IS_SMALL, Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_cross_rdh
|
// --------------------------------------------------------------------------------------------
// Cross
// --------------------------------------------------------------------------------------------
/**
* Continues a Join transformation and defines the {@link Tuple} fields of the second join
* {@link DataSet} that should be used as join keys.
*
* <p><b>Note: Fields can only be selected as join keys on Tuple DataSets.</b>
*
* <p>The resulting {@link DefaultJoin} wraps each pair of joining elements into a {@link Tuple2}, with the element of the first input being the first field of the tuple and the
* element of the second input being the second field of the tuple.
*
* @param fields
* The indexes of the Tuple fields of the second join DataSet that should be used
* as keys.
* @return A DefaultJoin that represents the joined DataSet.
*/
/**
* Initiates a Cross transformation.
*
* <p>A Cross transformation combines the elements of two {@link DataSet DataSets} into one
* DataSet. It builds all pair combinations of elements of both DataSets, i.e., it builds a
* Cartesian product.
*
* <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps
* each pair of crossed elements into a {@link Tuple2}, with the element of the first input
* being the first field of the tuple and the element of the second input being the second field
* of the tuple.
*
* <p>Call {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)}
* to define a {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
* each pair of crossed elements. The CrossFunction returns exactly one element for each pair
* of input elements.
*
* @param other
* The other DataSet with which this DataSet is crossed.
* @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
* @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
* @see org.apache.flink.api.common.functions.CrossFunction
* @see DataSet
* @see Tuple2
*/
public <R> CrossOperator.DefaultCross<T, R> cross(DataSet<R> other) {
return new CrossOperator.DefaultCross<>(this, other, CrossHint.OPTIMIZER_CHOOSES, Utils.getCallLocationName());
}
| 3.26 |
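A minimal usage sketch for cross with a CrossFunction, added for illustration (not from the source); the class name and data are hypothetical.

import org.apache.flink.api.common.functions.CrossFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

// Hypothetical example class; builds the Cartesian product of two small inputs.
public class CrossExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Integer> left = env.fromElements(1, 2);
        DataSet<String> right = env.fromElements("a", "b");
        // Without with(...), each pair would be emitted as a Tuple2; here a
        // CrossFunction turns each pair into a formatted string instead.
        left.cross(right)
                .with(new CrossFunction<Integer, String, String>() {
                    @Override
                    public String cross(Integer i, String s) {
                        return i + "-" + s;
                    }
                })
                .print();
    }
}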
flink_DataSet_writeAsFormattedText_rdh
|
/**
* Writes a DataSet as text file(s) to the specified location.
*
* <p>For each element of the DataSet the result of {@link TextFormatter#format(Object)} is
* written.
*
* @param filePath
* The path pointing to the location the text file is written to.
* @param writeMode
* Control the behavior for existing files. Options are NO_OVERWRITE and
* OVERWRITE.
* @param formatter
* formatter that is applied on every element of the DataSet.
* @return The DataSink that writes the DataSet.
* @see TextOutputFormat
* @see DataSet#writeAsText(String) Output files and directories
*/
public DataSink<String> writeAsFormattedText(String filePath, WriteMode writeMode, TextFormatter<T> formatter) {
return map(new FormattingMapper<>(clean(formatter))).writeAsText(filePath, writeMode);
}
| 3.26 |
flink_DataSet_maxBy_rdh
|
/**
* Selects an element with maximum value.
*
* <p>The maximum is computed over the specified fields in lexicographical order.
*
* <p><strong>Example 1</strong>: Given a data set with elements <code>[0, 1], [1, 0]</code>,
* the results will be:
*
* <ul>
* <li><code>maxBy(0)</code>: <code>[1, 0]</code>
* <li><code>maxBy(1)</code>: <code>[0, 1]</code>
* </ul>
*
* <p><strong>Example 2</strong>: Given a data set with elements <code>[0, 0], [0, 1]</code>,
* the results will be:
*
* <ul>
* <li><code>maxBy(0, 1)</code>: <code>[0, 1]</code>
* </ul>
*
* <p>If multiple values with maximum value at the specified fields exist, a random one will be
* picked.
*
* <p>Internally, this operation is implemented as a {@link ReduceFunction}.
*
* @param fields
* Field positions to compute the maximum over
* @return A {@link ReduceOperator} representing the maximum
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public ReduceOperator<T> maxBy(int... fields) {
if ((!getType().isTupleType()) || (!(getType() instanceof TupleTypeInfo))) {
throw new InvalidProgramException("DataSet#maxBy(int...) only works on Tuple types.");
}
return new ReduceOperator<>(this, new SelectByMaxFunction(((TupleTypeInfo) (getType())), fields), Utils.getCallLocationName());
}
| 3.26 |
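A minimal usage sketch for maxBy, added for illustration (not from the source); the class name and data are hypothetical and mirror Example 1 of the Javadoc above.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical example class.
public class MaxByExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, Integer>> data =
                env.fromElements(Tuple2.of(0, 1), Tuple2.of(1, 0));
        // Selects the whole element with the maximum value in field 0, i.e. (1, 0).
        data.maxBy(0).print();
    }
}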
flink_DataSet_filter_rdh
|
/**
* Applies a Filter transformation on a {@link DataSet}.
*
* <p>The transformation calls a {@link org.apache.flink.api.common.functions.RichFilterFunction} for each element of the DataSet and
* retains only those elements for which the function returns true. Elements for which the
* function returns false are filtered out.
*
* @param filter
* The FilterFunction that is called for each element of the DataSet.
* @return A FilterOperator that represents the filtered DataSet.
* @see org.apache.flink.api.common.functions.RichFilterFunction
* @see FilterOperator
* @see DataSet
*/
public FilterOperator<T> filter(FilterFunction<T> filter) {
if (filter == null) {
throw new NullPointerException("Filter function must not be null.");
}
return new FilterOperator<>(this, clean(filter), Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_getType_rdh
|
/**
* Returns the {@link TypeInformation} for the type of this DataSet.
*
* @return The TypeInformation for the type of this DataSet.
* @see TypeInformation
*/
public TypeInformation<T> getType() {
if (type instanceof MissingTypeInfo) {
MissingTypeInfo typeInfo = ((MissingTypeInfo) (type));
throw new InvalidTypesException((((("The return type of function '" + typeInfo.getFunctionName()) + "' could not be determined automatically, due to type erasure. ") + "You can give type information hints by using the returns(...) method on the result of ") + "the transformation call, or by letting your function implement the 'ResultTypeQueryable' ") + "interface.", typeInfo.getTypeException());
}
typeUsed = true;
return this.type;
}
| 3.26 |
flink_DataSet_runOperation_rdh
|
// --------------------------------------------------------------------------------------------
// Custom Operators
// -------------------------------------------------------------------------------------------
/**
* Runs a {@link CustomUnaryOperation} on the data set. Custom operations are typically complex
* operators that are composed of multiple steps.
*
* @param operation
* The operation to run.
* @return The data set produced by the operation.
*/
public <X> DataSet<X> runOperation(CustomUnaryOperation<T, X> operation) {
Preconditions.checkNotNull(operation, "The custom operator must not be null.");
operation.setInput(this);
return operation.createResult();
}
| 3.26 |
flink_DataSet_collect_rdh
|
/**
* Convenience method to get the elements of a DataSet as a List. As a DataSet can contain a lot
* of data, this method should be used with caution.
*
* @return A List containing the elements of the DataSet
*/
public List<T> collect() throws Exception {
final String v9 = new AbstractID().toString();
final TypeSerializer<T> serializer = getType().createSerializer(getExecutionEnvironment().getConfig());
this.output(new Utils.CollectHelper<>(v9, serializer)).name("collect()");
JobExecutionResult res = getExecutionEnvironment().execute();
ArrayList<byte[]> accResult = res.getAccumulatorResult(v9);
if (accResult != null) {
try {
return SerializedListAccumulator.deserializeList(accResult, serializer);
} catch (ClassNotFoundException e) {
throw new RuntimeException("Cannot find type class of collected data type.",
e);
} catch (IOException e) {
throw new RuntimeException("Serialization error while deserializing collected data", e);
}
} else {
throw new RuntimeException("The call to collect() could not retrieve the DataSet.");
}
}
| 3.26 |
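A minimal usage sketch for collect(), added for illustration (not from the source); the class name and data are hypothetical.

import java.util.List;

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

// Hypothetical example class.
public class CollectExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Integer> numbers = env.fromElements(1, 2, 3);
        // collect() triggers execution and ships all elements to the client,
        // so it should only be used on small result sets.
        List<Integer> result = numbers.collect();
        System.out.println(result);
    }
}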
flink_DataSet_checkSameExecutionContext_rdh
|
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
protected static void checkSameExecutionContext(DataSet<?> set1, DataSet<?> set2) {
if (set1.getExecutionEnvironment() != set2.getExecutionEnvironment()) {
throw new IllegalArgumentException("The two inputs have different execution contexts.");
}
}
| 3.26 |
flink_DataSet_first_rdh
|
/**
* Returns a new set containing the first n elements in this {@link DataSet}.
*
* @param n
* The desired number of elements.
* @return A ReduceGroupOperator that represents the DataSet containing the elements.
*/
public GroupReduceOperator<T, T> first(int n) {
if (n < 1) {
throw new InvalidProgramException("Parameter n of first(n) must be at least 1.");
}
return reduceGroup(new FirstReducer<T>(n));
}
| 3.26 |
flink_DataSet_joinWithHuge_rdh
|
/**
* Initiates a Join transformation.
*
* <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
* and provides multiple ways to combine joining elements into one DataSet.
*
* <p>This method also gives the hint to the optimizer that the second DataSet to join is much
* larger than the first one.
*
* <p>This method returns a {@link JoinOperatorSets} on which one of the {@code where} methods
* can be called to define the join key of the first joining (i.e., this) DataSet.
*
* @param other
* The other DataSet with which this DataSet is joined.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see JoinOperatorSets
* @see DataSet
*/
public <R> JoinOperatorSets<T, R> joinWithHuge(DataSet<R> other) {
return new JoinOperatorSets<>(this, other, JoinHint.BROADCAST_HASH_FIRST);
}
| 3.26 |
flink_DataSet_mapPartition_rdh
|
/**
* Applies a Map-style operation to the entire partition of the data. The function is called
* once per parallel partition of the data, and the entire partition is available through the
* given Iterator. The number of elements that each instance of the MapPartition function sees
* is non-deterministic and depends on the parallelism of the operation.
*
* <p>This function is intended for operations that cannot transform individual elements and
* require no grouping of elements. To transform individual elements, the use of {@code map()}
* and {@code flatMap()} is preferable.
*
* @param mapPartition
* The MapPartitionFunction that is called for the full DataSet.
* @return A MapPartitionOperator that represents the transformed DataSet.
* @see MapPartitionFunction
* @see MapPartitionOperator
*/
public <R> MapPartitionOperator<T, R> mapPartition(MapPartitionFunction<T, R> mapPartition) {
if (mapPartition == null) {
throw new NullPointerException("MapPartition function must not be null.");
}
String callLocation = Utils.getCallLocationName();
TypeInformation<R> resultType = TypeExtractor.getMapPartitionReturnTypes(mapPartition, getType(), callLocation, true);
return new MapPartitionOperator<>(this, resultType, clean(mapPartition), callLocation);
}
| 3.26 |
flink_DataSet_print_rdh
|
/**
* Writes a DataSet to the standard output stream (stdout).
*
* <p>For each element of the DataSet the result of {@link Object#toString()} is written.
*
* @param sinkIdentifier
* The string to prefix the output with.
* @return The DataSink that writes the DataSet.
* @deprecated Use {@link #printOnTaskManager(String)} instead.
*/
@Deprecated
@PublicEvolving
public DataSink<T> print(String sinkIdentifier) {
return output(new PrintingOutputFormat<T>(sinkIdentifier, false));
}
| 3.26 |
flink_DataSet_output_rdh
|
/**
* Emits a DataSet using an {@link OutputFormat}. This method adds a data sink to the program.
* Programs may have multiple data sinks. A DataSet may also have multiple consumers (data sinks
* or transformations) at the same time.
*
* @param outputFormat
* The OutputFormat to process the DataSet.
* @return The DataSink that processes the DataSet.
* @see OutputFormat
* @see DataSink
*/
public DataSink<T> output(OutputFormat<T> outputFormat) {
Preconditions.checkNotNull(outputFormat);
// configure the type if needed
if (outputFormat instanceof InputTypeConfigurable) {
((InputTypeConfigurable) (outputFormat)).setInputType(getType(), context.getConfig());
}
DataSink<T> sink = new DataSink<>(this, outputFormat, getType());
this.context.registerDataSink(sink);
return sink;
}
| 3.26 |
flink_DataSet_partitionByRange_rdh
|
/**
* Range-partitions a DataSet using the specified KeySelector.
*
* <p><b>Important:</b> This operation requires an extra pass over the DataSet to compute the
* range boundaries and shuffles the whole DataSet over the network. This can take a significant
* amount of time.
*
* @param keyExtractor
* The KeyExtractor with which the DataSet is range-partitioned.
* @return The partitioned DataSet.
* @see KeySelector
*/
public <K extends Comparable<K>> PartitionOperator<T> partitionByRange(KeySelector<T, K> keyExtractor) {
final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
return new PartitionOperator<>(this, PartitionMethod.RANGE, new Keys.SelectorFunctionKeys<>(clean(keyExtractor), this.getType(), keyType), Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_iterateDelta_rdh
|
/**
* Initiates a delta iteration. A delta iteration is similar to a regular iteration (as started
* by {@link #iterate(int)}), but maintains state across the individual iteration steps. The
* solution set, which represents the current state at the beginning of each iteration, can be
* obtained via {@link org.apache.flink.api.java.operators.DeltaIteration#getSolutionSet()}. It
* can be accessed by joining (or CoGrouping) with it. The DataSet that represents the
* workset of an iteration can be obtained via {@link org.apache.flink.api.java.operators.DeltaIteration#getWorkset()}. The solution set is updated
* by producing a delta for it, which is merged into the solution set at the end of each
* iteration step.
*
* <p>The delta iteration must be closed by calling {@link org.apache.flink.api.java.operators.DeltaIteration#closeWith(DataSet, DataSet)}. The two
* parameters are the delta for the solution set and the new workset (the data set that will be
* fed back). The return value of the {@code closeWith(DataSet, DataSet)} method is the
* resulting data set after the iteration has terminated. Delta iterations terminate when the
* feed back data set (the workset) is empty. In addition, a maximum number of steps is given as
* a fall back termination guard.
*
* <p>Elements in the solution set are uniquely identified by a key. When merging the solution
* set delta, contained elements with the same key are replaced.
*
* <p><b>NOTE:</b> Delta iterations currently support only tuple valued data types. This
* restriction will be removed in the future. The key is specified by the tuple position.
*
* <p>A code example for a delta iteration is as follows
*
* <pre>{@code DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
* initialState.iterateDelta(initialFeedbackSet, 100, 0);
*
* DataSet<Tuple2<Long, Long>> delta = iteration.groupBy(0).aggregate(Aggregations.AVG, 1)
* .join(iteration.getSolutionSet()).where(0).equalTo(0)
* .flatMap(new ProjectAndFilter());
*
* DataSet<Tuple2<Long, Long>> feedBack = delta.join(someOtherSet).where(...).equalTo(...).with(...);
*
* // close the delta iteration (delta and new workset are identical)
* DataSet<Tuple2<Long, Long>> result = iteration.closeWith(delta, feedBack);}</pre>
*
* @param workset
* The initial version of the data set that is fed back to the next iteration
* step (the workset).
* @param maxIterations
* The maximum number of iteration steps, as a fall back safeguard.
* @param keyPositions
* The position of the tuple fields that is used as the key of the solution
* set.
* @return The DeltaIteration that marks the start of a delta iteration.
* @see org.apache.flink.api.java.operators.DeltaIteration
*/
public <R> DeltaIteration<T, R> iterateDelta(DataSet<R> workset, int maxIterations, int... keyPositions) {
Preconditions.checkNotNull(workset);
Preconditions.checkNotNull(keyPositions);
Keys.ExpressionKeys<T> keys = new Keys.ExpressionKeys<>(keyPositions, getType());
return new DeltaIteration<>(getExecutionEnvironment(), getType(), this, workset, keys, maxIterations);
}
| 3.26 |
flink_DataSet_union_rdh
|
// --------------------------------------------------------------------------------------------
// Union
// --------------------------------------------------------------------------------------------
/**
* Creates a union of this DataSet with another DataSet. The other DataSet must be of the same
* data type.
*
* @param other
* The other DataSet which is unioned with the current DataSet.
* @return The resulting DataSet.
*/
public UnionOperator<T> union(DataSet<T> other) {
return new UnionOperator<>(this, other, Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_printOnTaskManager_rdh
|
/**
* Writes a DataSet to the standard output streams (stdout) of the TaskManagers that execute the
* program (or more specifically, the data sink operators). On a typical cluster setup, the data
* will appear in the TaskManagers' <i>.out</i> files.
*
* <p>To print the data to the console or stdout stream of the client process instead, use the
* {@link #print()} method.
*
* <p>For each element of the DataSet the result of {@link Object#toString()} is written.
*
* @param prefix
* The string to prefix each line of the output with. This helps identifying
* outputs from different printing sinks.
* @return The DataSink operator that writes the DataSet.
* @see #print()
*/
public DataSink<T> printOnTaskManager(String prefix) {
return output(new PrintingOutputFormat<T>(prefix, false));
}
| 3.26 |
flink_DataSet_sum_rdh
|
/**
* Syntactic sugar for aggregate (SUM, field).
*
* @param field
* The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the summed DataSet.
* @see org.apache.flink.api.java.operators.AggregateOperator
*/
public AggregateOperator<T> sum(int field) {
return aggregate(Aggregations.SUM, field);
}
| 3.26 |
flink_DataSet_minBy_rdh
|
/**
* Selects an element with minimum value.
*
* <p>The minimum is computed over the specified fields in lexicographical order.
*
* <p><strong>Example 1</strong>: Given a data set with elements <code>[0, 1], [1, 0]</code>,
* the results will be:
*
* <ul>
* <li><code>minBy(0)</code>: <code>[0, 1]</code>
* <li><code>minBy(1)</code>: <code>[1, 0]</code>
* </ul>
*
* <p><strong>Example 2</strong>: Given a data set with elements <code>[0, 0], [0, 1]</code>,
* the results will be:
*
* <ul>
* <li><code>minBy(0, 1)</code>: <code>[0, 0]</code>
* </ul>
*
* <p>If multiple values with minimum value at the specified fields exist, a random one will be
* picked.
*
* <p>Internally, this operation is implemented as a {@link ReduceFunction}.
*
* @param fields
* Field positions to compute the minimum over
* @return A {@link ReduceOperator} representing the minimum
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public ReduceOperator<T> minBy(int... fields) {
if ((!getType().isTupleType()) || (!(getType() instanceof TupleTypeInfo))) {
throw new InvalidProgramException("DataSet#minBy(int...) only works on Tuple types.");}
return new ReduceOperator<>(this, new SelectByMinFunction(((TupleTypeInfo) (getType())), fields), Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_writeAsCsv_rdh
|
/**
* Writes a {@link Tuple} DataSet as CSV file(s) to the specified location with the specified
* field and line delimiters.
*
* <p><b>Note: Only a Tuple DataSet can written as a CSV file.</b> For each Tuple field the
* result of {@link Object#toString()} is written.
*
* @param filePath
* The path pointing to the location the CSV file is written to.
* @param rowDelimiter
* The row delimiter to separate Tuples.
* @param fieldDelimiter
* The field delimiter to separate Tuple fields.
* @param writeMode
* The behavior regarding existing files. Options are NO_OVERWRITE and
* OVERWRITE.
* @see Tuple
* @see CsvOutputFormat
* @see DataSet#writeAsText(String) Output files and directories
*/
public DataSink<T> writeAsCsv(String filePath, String rowDelimiter, String fieldDelimiter, WriteMode writeMode) {
return internalWriteAsCsv(new Path(filePath), rowDelimiter, fieldDelimiter, writeMode);
}
| 3.26 |
flink_DataSet_sortPartition_rdh
|
/**
* Locally sorts the partitions of the DataSet on the extracted key in the specified order. The
* DataSet can be sorted on multiple values by returning a tuple from the KeySelector.
*
* <p>Note that no additional sort keys can be appended to KeySelector sort keys. To sort the
* partitions by multiple values using KeySelector, the KeySelector must return a tuple
* consisting of the values.
*
* @param keyExtractor
* The KeySelector function which extracts the key values from the DataSet
* on which the DataSet is sorted.
* @param order
* The order in which the DataSet is sorted.
* @return The DataSet with sorted local partitions.
*/
public <K> SortPartitionOperator<T> sortPartition(KeySelector<T, K> keyExtractor, Order order) {
final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
return new SortPartitionOperator<>(this, new Keys.SelectorFunctionKeys<>(clean(keyExtractor), getType(), keyType), order, Utils.getCallLocationName());
}
| 3.26 |
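A minimal usage sketch for sortPartition with a KeySelector, added for illustration (not from the source); the class name and data are hypothetical.

import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical example class; sorts each local partition, not the whole DataSet.
public class SortPartitionExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> data =
                env.fromElements(Tuple2.of(3, "c"), Tuple2.of(1, "a"), Tuple2.of(2, "b"));
        // Sort each partition by the first tuple field; no global order is established.
        data.sortPartition(new KeySelector<Tuple2<Integer, String>, Integer>() {
                    @Override
                    public Integer getKey(Tuple2<Integer, String> value) {
                        return value.f0;
                    }
                }, Order.ASCENDING)
                .print();
    }
}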
flink_DataSet_fullOuterJoin_rdh
|
/**
* Initiates a Full Outer Join transformation.
*
* <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
* equality and provides multiple ways to combine joining elements into one DataSet.
*
* <p>Elements of <b>both</b> DataSets that do not have a matching element on the opposing side
* are joined with {@code null} and emitted to the resulting DataSet.
*
* @param other
* The other DataSet with which this DataSet is joined.
* @param strategy
* The strategy that should be used to execute the join. If {@code null} is given,
* then the optimizer will pick the join strategy.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
* @see DataSet
*/
public <R> JoinOperatorSetsBase<T, R> fullOuterJoin(DataSet<R> other, JoinHint strategy) {
switch (strategy) {
case OPTIMIZER_CHOOSES :
case REPARTITION_SORT_MERGE :
case REPARTITION_HASH_FIRST :
case REPARTITION_HASH_SECOND :
return new JoinOperatorSetsBase<>(this, other, strategy, JoinType.FULL_OUTER);
default :
throw new InvalidProgramException("Invalid JoinHint for FullOuterJoin: " + strategy);}
}
| 3.26 |
flink_DataSet_distinct_rdh
|
/**
* Returns a distinct set of a {@link DataSet}.
*
* <p>If the input is a {@link org.apache.flink.api.common.typeutils.CompositeType} (Tuple or
* Pojo type), distinct is performed on all fields and each field must be a key type
*
* @return A DistinctOperator that represents the distinct DataSet.
*/
public DistinctOperator<T> distinct() {
return new DistinctOperator<>(this, null, Utils.getCallLocationName());
}
// --------------------------------------------------------------------------------------------
// Grouping
// --------------------------------------------------------------------------------------------
/**
* Groups a {@link DataSet} using a {@link KeySelector} function. The KeySelector function is
* called for each element of the DataSet and extracts a single key value on which the DataSet
* is grouped.
*
* <p>This method returns an {@link UnsortedGrouping} on which one of the following grouping
* transformation can be applied.
*
* <ul>
* <li>{@link UnsortedGrouping#sortGroup(int, org.apache.flink.api.common.operators.Order)} to
* get a {@link SortedGrouping}.
* <li>{@link UnsortedGrouping#aggregate(Aggregations, int)} to apply an Aggregate
* transformation.
* <li>{@link UnsortedGrouping#reduce(org.apache.flink.api.common.functions.ReduceFunction)}
* to apply a Reduce transformation.
* <li>{@link UnsortedGrouping#reduceGroup(org.apache.flink.api.common.functions.GroupReduceFunction)}
* to apply a GroupReduce transformation.
* </ul>
*
* @param keyExtractor
* The {@link KeySelector} function which extracts the key values from the
* DataSet on which it is grouped.
* @return An {@link UnsortedGrouping}
| 3.26 |
flink_DataSet_partitionByHash_rdh
|
/**
* Partitions a DataSet using the specified KeySelector.
*
* <p><b>Important:</b> This operation shuffles the whole DataSet over the network and can take
* a significant amount of time.
*
* @param keyExtractor
* The KeyExtractor with which the DataSet is hash-partitioned.
* @return The partitioned DataSet.
* @see KeySelector
*/
public <K extends Comparable<K>> PartitionOperator<T> partitionByHash(KeySelector<T, K> keyExtractor) {
final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
return new PartitionOperator<>(this, PartitionMethod.HASH, new Keys.SelectorFunctionKeys<>(clean(keyExtractor), this.getType(), keyType), Utils.getCallLocationName());
}
| 3.26 |
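A minimal usage sketch for partitionByHash with a KeySelector, added for illustration (not from the source); the class name and data are hypothetical.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical example class.
public class PartitionByHashExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<String, Integer>> data =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("b", 2), Tuple2.of("a", 3));
        // Hash-partition by the extracted key so that equal keys land in the same partition.
        data.partitionByHash(new KeySelector<Tuple2<String, Integer>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Integer> value) {
                        return value.f0;
                    }
                })
                .print();
    }
}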
flink_DataSet_flatMap_rdh
|
/**
* Applies a FlatMap transformation on a {@link DataSet}.
*
* <p>The transformation calls a {@link org.apache.flink.api.common.functions.RichFlatMapFunction} for each element of the DataSet.
* Each FlatMapFunction call can return any number of elements including none.
*
* @param flatMapper
* The FlatMapFunction that is called for each element of the DataSet.
* @return A FlatMapOperator that represents the transformed DataSet.
* @see org.apache.flink.api.common.functions.RichFlatMapFunction
* @see FlatMapOperator
* @see DataSet
*/
public <R> FlatMapOperator<T, R> flatMap(FlatMapFunction<T, R> flatMapper) {
if (flatMapper == null) {
throw new NullPointerException("FlatMap function must not be null.");
}
String callLocation = Utils.getCallLocationName();
TypeInformation<R> resultType = TypeExtractor.getFlatMapReturnTypes(flatMapper, getType(), callLocation, true);
return new FlatMapOperator<>(this, resultType, clean(flatMapper), callLocation);
}
| 3.26 |
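A minimal usage sketch for flatMap, added for illustration (not from the source); the class name and data are hypothetical.

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.util.Collector;

// Hypothetical example class; splits lines into words.
public class FlatMapExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<String> lines = env.fromElements("to be", "or not");
        // A single input element may produce any number of output elements, including none.
        lines.flatMap(new FlatMapFunction<String, String>() {
                    @Override
                    public void flatMap(String line, Collector<String> out) {
                        for (String word : line.split(" ")) {
                            out.collect(word);
                        }
                    }
                })
                .print();
    }
}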
flink_DataSet_rebalance_rdh
|
/**
* Enforces a re-balancing of the DataSet, i.e., the DataSet is evenly distributed over all
* parallel instances of the following task. This can help to improve performance in case of
* heavy data skew and compute intensive operations.
*
* <p><b>Important:</b> This operation shuffles the whole DataSet over the network and can take
* a significant amount of time.
*
* @return The re-balanced DataSet.
*/
public PartitionOperator<T> rebalance() {
return new PartitionOperator<>(this, PartitionMethod.REBALANCE, Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_write_rdh
|
/**
* Writes a DataSet using a {@link FileOutputFormat} to a specified location. This method adds a
* data sink to the program.
*
* @param outputFormat
* The FileOutputFormat to write the DataSet.
* @param filePath
* The path to the location where the DataSet is written.
* @param writeMode
* The mode of writing, indicating whether to overwrite existing files.
* @return The DataSink that writes the DataSet.
* @see FileOutputFormat
*/
public DataSink<T> write(FileOutputFormat<T> outputFormat, String filePath, WriteMode writeMode) {
Preconditions.checkNotNull(filePath, "File path must not be null.");
Preconditions.checkNotNull(writeMode, "Write mode must not be null.");
Preconditions.checkNotNull(outputFormat, "Output format must not be null.");
outputFormat.setOutputFilePath(new Path(filePath));
outputFormat.setWriteMode(writeMode);
return output(outputFormat);
}
| 3.26 |
flink_DataSet_reduceGroup_rdh
|
/**
* Applies a GroupReduce transformation on a non-grouped {@link DataSet}.
*
* <p>The transformation calls a {@link org.apache.flink.api.common.functions.RichGroupReduceFunction} once with the full DataSet.
* The GroupReduceFunction can iterate over all elements of the DataSet and emit any number of
* output elements including none.
*
* @param reducer
* The GroupReduceFunction that is applied on the DataSet.
* @return A GroupReduceOperator that represents the reduced DataSet.
* @see org.apache.flink.api.common.functions.RichGroupReduceFunction
* @see org.apache.flink.api.java.operators.GroupReduceOperator
* @see DataSet
*/
public <R> GroupReduceOperator<T, R> reduceGroup(GroupReduceFunction<T, R> reducer) {
if (reducer == null) {
throw new NullPointerException("GroupReduce function must not be null.");
}
String callLocation = Utils.getCallLocationName();
TypeInformation<R> resultType = TypeExtractor.getGroupReduceReturnTypes(reducer, getType(), callLocation, true);
return new GroupReduceOperator<>(this, resultType, clean(reducer), callLocation);
}
| 3.26 |
flink_DataSet_rightOuterJoin_rdh
|
/**
* Initiates a Right Outer Join transformation.
*
* <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
* equality and provides multiple ways to combine joining elements into one DataSet.
*
* <p>Elements of the <b>right</b> DataSet (i.e. {@code other}) that do not have a matching
* element on {@code this} side are joined with {@code null} and emitted to the resulting
* DataSet.
*
* @param other
* The other DataSet with which this DataSet is joined.
* @param strategy
* The strategy that should be used to execute the join. If {@code null} is given,
* then the optimizer will pick the join strategy.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
* @see DataSet
*/
public <R> JoinOperatorSetsBase<T, R> rightOuterJoin(DataSet<R> other, JoinHint strategy) {
switch (strategy) {
case OPTIMIZER_CHOOSES :
case REPARTITION_SORT_MERGE :
case REPARTITION_HASH_FIRST :
case REPARTITION_HASH_SECOND :
case BROADCAST_HASH_FIRST :
return new JoinOperatorSetsBase<>(this, other, strategy, JoinType.RIGHT_OUTER);
default :
throw new InvalidProgramException("Invalid JoinHint for RightOuterJoin: " + strategy);
}
}
| 3.26 |
flink_DataSet_reduce_rdh
|
/**
* Applies a Reduce transformation on a non-grouped {@link DataSet}.
*
* <p>The transformation consecutively calls a {@link org.apache.flink.api.common.functions.RichReduceFunction} until only a single element remains
* which is the result of the transformation. A ReduceFunction combines two elements into one
* new element of the same type.
*
* @param reducer
* The ReduceFunction that is applied on the DataSet.
* @return A ReduceOperator that represents the reduced DataSet.
* @see org.apache.flink.api.common.functions.RichReduceFunction
* @see ReduceOperator
* @see DataSet
*/
public ReduceOperator<T> reduce(ReduceFunction<T> reducer) {
if (reducer == null) {
throw new NullPointerException("Reduce function must not be null.");
}
return new ReduceOperator<>(this, clean(reducer), Utils.getCallLocationName());
}
| 3.26 |
flink_DataSet_map_rdh
|
// --------------------------------------------------------------------------------------------
// Filter & Transformations
// --------------------------------------------------------------------------------------------
/**
* Applies a Map transformation on this DataSet.
*
* <p>The transformation calls a {@link org.apache.flink.api.common.functions.MapFunction} for
* each element of the DataSet. Each MapFunction call returns exactly one element.
*
* @param mapper
* The MapFunction that is called for each element of the DataSet.
* @return A MapOperator that represents the transformed DataSet.
* @see org.apache.flink.api.common.functions.MapFunction
* @see org.apache.flink.api.common.functions.RichMapFunction
* @see MapOperator
*/
public <R> MapOperator<T, R> map(MapFunction<T, R> mapper) {
if (mapper == null) {
throw new NullPointerException("Map function must not be null.");
}
String v1 = Utils.getCallLocationName();
TypeInformation<R> resultType = TypeExtractor.getMapReturnTypes(mapper, getType(), v1, true);
return new MapOperator<>(this, resultType, clean(mapper), v1);
}
| 3.26 |
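A minimal usage sketch for map, added for illustration (not from the source); the class name and data are hypothetical.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

// Hypothetical example class.
public class MapExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Integer> numbers = env.fromElements(1, 2, 3);
        // Each input element produces exactly one output element.
        numbers.map(new MapFunction<Integer, Integer>() {
                    @Override
                    public Integer map(Integer value) {
                        return value * 2;
                    }
                })
                .print();
    }
}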
flink_DataSet_writeAsText_rdh
|
/**
* Writes a DataSet as text file(s) to the specified location.
*
* <p>For each element of the DataSet the result of {@link Object#toString()} is written.
*
* @param filePath
* The path pointing to the location the text file is written to.
* @param writeMode
* Control the behavior for existing files. Options are NO_OVERWRITE and
* OVERWRITE.
* @return The DataSink that writes the DataSet.
* @see TextOutputFormat
* @see DataSet#writeAsText(String) Output files and directories
*/
public DataSink<T> writeAsText(String filePath, WriteMode writeMode) {
TextOutputFormat<T> tof = new TextOutputFormat<>(new Path(filePath));
tof.setWriteMode(writeMode);
return output(tof);
}
| 3.26 |
flink_DataSet_printToErr_rdh
|
/**
* Writes a DataSet to the standard error stream (stderr).
*
* <p>For each element of the DataSet the result of {@link Object#toString()} is written.
*
* @param sinkIdentifier
* The string to prefix the output with.
* @return The DataSink that writes the DataSet.
* @deprecated Use {@link #printOnTaskManager(String)} instead, or the {@link PrintingOutputFormat}.
*/
@Deprecated
@PublicEvolving
public DataSink<T> printToErr(String sinkIdentifier) {
return output(new PrintingOutputFormat<T>(sinkIdentifier, true));
}
| 3.26 |
flink_DataSet_min_rdh
|
/**
* Syntactic sugar for {@link #aggregate(Aggregations, int)} using {@link Aggregations#MIN} as
* the aggregation function.
*
* <p><strong>Note:</strong> This operation is not to be confused with {@link #minBy(int...)},
* which selects one element with the minimum value at the specified field positions.
*
* @param field
* The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the min'ed DataSet.
* @see #aggregate(Aggregations, int)
* @see #minBy(int...)
*/
public AggregateOperator<T> min(int field) {
return aggregate(Aggregations.MIN, field);
}
| 3.26 |
flink_DataSet_combineGroup_rdh
|
/**
* Applies a GroupCombineFunction on a non-grouped {@link DataSet}. A CombineFunction is similar
* to a GroupReduceFunction but does not perform a full data exchange. Instead, the
* CombineFunction calls the combine method once per partition for combining a group of results.
* This operator is suitable for combining values into an intermediate format before doing a
* proper groupReduce where the data is shuffled across the nodes for further reduction. The
* GroupReduce operator can also be supplied with a combiner by implementing the RichGroupReduce
* function. The combine method of the RichGroupReduce function requires the input and output types to
* be the same. The CombineFunction, on the other hand, can have an arbitrary output type.
*
* @param combiner
* The GroupCombineFunction that is applied on the DataSet.
* @return A GroupCombineOperator which represents the combined DataSet.
*/
public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) {
if (combiner == null) {
throw new NullPointerException("GroupCombine function must not be null.");
}
String callLocation = Utils.getCallLocationName();
TypeInformation<R> resultType = TypeExtractor.getGroupCombineReturnTypes(combiner, getType(), callLocation, true);
return new GroupCombineOperator<>(this, resultType, clean(combiner), callLocation);
}
| 3.26 |
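A minimal usage sketch for combineGroup, added for illustration (not from the source); the class name and data are hypothetical.

import org.apache.flink.api.common.functions.GroupCombineFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.util.Collector;

// Hypothetical example class; pre-aggregates without a full data exchange.
public class CombineGroupExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Integer> numbers = env.fromElements(1, 2, 3, 4);
        // Per the Javadoc above, combine is called once per partition,
        // so each partition emits one partial sum.
        numbers.combineGroup(new GroupCombineFunction<Integer, Integer>() {
                    @Override
                    public void combine(Iterable<Integer> values, Collector<Integer> out) {
                        int sum = 0;
                        for (int v : values) {
                            sum += v;
                        }
                        out.collect(sum);
                    }
                })
                .print();
    }
}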
flink_DataSet_project_rdh
|
// --------------------------------------------------------------------------------------------
// Projections
// --------------------------------------------------------------------------------------------
/**
* Applies a Project transformation on a {@link Tuple} {@link DataSet}.
*
* <p><b>Note: Only Tuple DataSets can be projected using field indexes.</b>
*
* <p>The transformation projects each Tuple of the DataSet onto a (sub)set of fields.
*
* <p>Additional fields can be added to the projection by calling {@link ProjectOperator#project(int[])}.
*
* <p><b>Note: With the current implementation, the Project transformation loses type
* information.</b>
*
* @param fieldIndexes
* The field indexes of the input tuple that are retained. The order of
* fields in the output tuple corresponds to the order of field indexes.
* @return A ProjectOperator that represents the projected DataSet.
* @see Tuple
* @see DataSet
* @see ProjectOperator
*/
public <OUT extends Tuple> ProjectOperator<?, OUT> project(int... fieldIndexes) {
return new Projection<>(this, fieldIndexes).projectTupleX();
}
| 3.26 |
flink_DataSet_crossWithHuge_rdh
|
/**
* Initiates a Cross transformation.
*
* <p>A Cross transformation combines the elements of two {@link DataSet DataSets} into one
* DataSet. It builds all pair combinations of elements of both DataSets, i.e., it builds a
* Cartesian product. This method also gives the hint to the optimizer that the second DataSet
* to cross is much larger than the first one.
*
* <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps
* each pair of crossed elements into a {@link Tuple2}, with the element of the first input
* being the first field of the tuple and the element of the second input being the second field
* of the tuple.
*
* <p>Call {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)}
* to define a {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
* each pair of crossed elements. The CrossFunction returns exactly one element for each pair
* of input elements.
*
* @param other
* The other DataSet with which this DataSet is crossed.
* @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
* @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
* @see org.apache.flink.api.common.functions.CrossFunction
* @see DataSet
* @see Tuple2
*/
public <R> CrossOperator.DefaultCross<T, R> crossWithHuge(DataSet<R> other) {
return new CrossOperator.DefaultCross<>(this, other, CrossHint.FIRST_IS_SMALL, Utils.getCallLocationName());
}
// --------------------------------------------------------------------------------------------
// Iterations
// --------------------------------------------------------------------------------------------
/**
* Initiates an iterative part of the program that executes multiple times and feeds back data
* sets. The iterative part needs to be closed by calling {@link org.apache.flink.api.java.operators.IterativeDataSet#closeWith(DataSet)}. The data set given
* to the {@code closeWith(DataSet)} method is the data set that will be fed back and used as
* the input to the next iteration. The return value of the {@code closeWith(DataSet)} method is
* the resulting data set after the iteration has terminated.
*
* <p>An example of an iterative computation is as follows:
*
* <pre>{@code DataSet<Double> input = ...;
*
* DataSet<Double> startOfIteration = input.iterate(10);
* DataSet<Double> toBeFedBack = startOfIteration
* .map(new MyMapper())
* .groupBy(...).reduceGroup(new MyReducer());
* DataSet<Double> result = startOfIteration.closeWith(toBeFedBack);}</pre>
*
* <p>The iteration has a maximum number of times that it executes. A dynamic termination can be
* realized by using a termination criterion (see {@link org.apache.flink.api.java.operators.IterativeDataSet#closeWith(DataSet, DataSet)}).
*
* @param maxIterations
* The maximum number of times that the iteration is executed.
* @return An IterativeDataSet that marks the start of the iterative part and needs to be closed
 * by {@link org.apache.flink.api.java.operators.IterativeDataSet#closeWith(DataSet)}.
 */
public IterativeDataSet<T> iterate(int maxIterations) {
    return new IterativeDataSet<>(getExecutionEnvironment(), getType(), this, maxIterations);
}
| 3.26 |
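A short usage sketch for crossWithHuge (the inputs and the file path are placeholders); the anonymous CrossFunction turns each crossed pair into a single String instead of the default Tuple2:

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Integer> small = env.fromElements(1, 2, 3);
DataSet<String> huge = env.readTextFile("hdfs:///path/to/large/input"); // placeholder path

DataSet<String> tagged =
        small.crossWithHuge(huge)
                .with(new CrossFunction<Integer, String, String>() {
                    @Override
                    public String cross(Integer id, String line) {
                        return id + ":" + line;
                    }
                });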
flink_KryoSerializerSnapshotData_writeSnapshotData_rdh
|
// --------------------------------------------------------------------------------------------
// Write
// --------------------------------------------------------------------------------------------
void writeSnapshotData(DataOutputView out) throws IOException {
writeTypeClass(out);
writeKryoRegistrations(out, kryoRegistrations);
writeDefaultKryoSerializers(out, f0);
writeDefaultKryoSerializerClasses(out, defaultKryoSerializerClasses);
}
| 3.26 |
flink_KryoSerializerSnapshotData_readTypeClass_rdh
|
// --------------------------------------------------------------------------------------------
// Read
// --------------------------------------------------------------------------------------------
private static <T> Class<T> readTypeClass(DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
return InstantiationUtil.resolveClassByName(in, userCodeClassLoader);
}
| 3.26 |
flink_KryoSerializerSnapshotData_createFrom_rdh
|
// --------------------------------------------------------------------------------------------
// Factories
// --------------------------------------------------------------------------------------------
static <T> KryoSerializerSnapshotData<T> createFrom(
        Class<T> typeClass,
        LinkedHashMap<Class<?>, SerializableSerializer<?>> defaultKryoSerializers,
        LinkedHashMap<Class<?>, Class<? extends Serializer<?>>> defaultKryoSerializerClasses,
        LinkedHashMap<String, KryoRegistration> kryoRegistrations) {
    return new KryoSerializerSnapshotData<>(
            typeClass,
            optionalMapOf(defaultKryoSerializers, Class::getName),
            optionalMapOf(defaultKryoSerializerClasses, Class::getName),
            optionalMapOf(kryoRegistrations, Function.identity()));
}
| 3.26 |
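A hedged sketch of a call to this package-private factory (MyType is a placeholder class); empty maps mean the snapshot captures no default Kryo serializers and no explicit registrations:

KryoSerializerSnapshotData<MyType> snapshotData =
        KryoSerializerSnapshotData.createFrom(
                MyType.class,
                new LinkedHashMap<>(),  // default Kryo serializer instances
                new LinkedHashMap<>(),  // default Kryo serializer classes
                new LinkedHashMap<>()); // explicit Kryo registrations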
flink_KryoSerializerSnapshotData_getTypeClass_rdh
|
// --------------------------------------------------------------------------------------------
// Getters
// --------------------------------------------------------------------------------------------
Class<T> getTypeClass() {
return typeClass;
}
| 3.26 |
flink_Printer_close_rdh
|
/**
* Close the resource of the {@link Printer}.
 */
@Override
default void close() {
}
| 3.26 |
flink_Printer_createClearCommandPrinter_rdh
|
// --------------------------------------------------------------------------------------------
static ClearCommandPrinter createClearCommandPrinter() {
return ClearCommandPrinter.INSTANCE;
}
| 3.26 |
flink_AvroSerializationSchema_forSpecific_rdh
|
/**
 * Creates an {@link AvroSerializationSchema} that serializes {@link SpecificRecord} instances
 * using the schema derived from the given class.
 *
 * @param tClass
 * the type to be serialized
 * @param encoding
 * the Avro encoding to use
 * @return an AvroSerializationSchema that serializes records of the given type into byte arrays
*/
public static <T extends SpecificRecord> AvroSerializationSchema<T> forSpecific(Class<T> tClass, AvroEncoding encoding) {
return new AvroSerializationSchema<>(tClass, null, encoding);
}
| 3.26 |
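A minimal sketch, assuming an Avro-generated SpecificRecord class named User with the usual generated builder, and assuming AvroEncoding exposes a BINARY constant:

AvroSerializationSchema<User> serializer =
        AvroSerializationSchema.forSpecific(User.class, AvroEncoding.BINARY);

User user = User.newBuilder().setName("alice").setAge(30).build(); // hypothetical generated fields
byte[] payload = serializer.serialize(user);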
flink_AvroSerializationSchema_forGeneric_rdh
|
/**
 * Creates an {@link AvroSerializationSchema} that serializes {@link GenericRecord} using the
 * provided schema.
 *
 * @param schema
 * the schema that will be used for serialization
 * @param encoding
 * the Avro encoding to use
 * @return an AvroSerializationSchema that serializes generic records into byte arrays
*/
public static AvroSerializationSchema<GenericRecord> forGeneric(Schema schema, AvroEncoding encoding) {
return new AvroSerializationSchema<>(GenericRecord.class, schema, encoding);
}
| 3.26 |
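A corresponding sketch for the generic variant, building the schema and record with the plain Avro API (the record layout is made up; BINARY is again assumed to be a valid AvroEncoding constant):

Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Sensor\","
                + "\"fields\":[{\"name\":\"id\",\"type\":\"string\"},{\"name\":\"value\",\"type\":\"double\"}]}");

GenericRecord record = new GenericData.Record(schema);
record.put("id", "sensor-1");
record.put("value", 42.0);

AvroSerializationSchema<GenericRecord> serializer =
        AvroSerializationSchema.forGeneric(schema, AvroEncoding.BINARY);
byte[] payload = serializer.serialize(record);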
flink_TestcontainersSettings_logger_rdh
|
/**
 * Sets the {@code logger} and returns a reference to this Builder enabling method
* chaining.
*
* @param logger
* The {@code logger} to set.
* @return A reference to this Builder.
*/
public Builder logger(Logger logger) {
this.logger = logger;
return this;
}
| 3.26 |
flink_TestcontainersSettings_getNetwork_rdh
|
/**
*
* @return The network.
*/
public Network getNetwork() {
return network;
}
| 3.26 |
flink_TestcontainersSettings_getEnvVars_rdh
|
/**
*
* @return The environment variables.
*/
public Map<String, String> getEnvVars() {
return envVars;
}
| 3.26 |
flink_TestcontainersSettings_build_rdh
|
/**
* Returns a {@code TestContainersSettings} built from the parameters previously set.
*
* @return A {@code TestContainersSettings} built with parameters of this {@code TestContainersSettings.Builder}
*/
public TestcontainersSettings build() {
return new TestcontainersSettings(this);
}
| 3.26 |
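Pulling the builder methods from the surrounding entries together, a typical configuration could look like this (the image tag and logger name are placeholders; Network comes from Testcontainers):

TestcontainersSettings settings =
        TestcontainersSettings.builder()
                .baseImage("flink:1.17-scala_2.12-java11") // placeholder image tag
                .environmentVariable("TZ", "UTC")
                .network(Network.newNetwork())
                .logger(LoggerFactory.getLogger("MyIntegrationTest"))
                .build();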
flink_TestcontainersSettings_network_rdh
|
/**
* Sets the {@code network} and returns a reference to this Builder enabling method
* chaining.
*
* @param network
* The {@code network} to set.
* @return A reference to this Builder.
*/
public Builder network(Network network) {
this.network = network;
return this;
}
| 3.26 |
flink_TestcontainersSettings_getDependencies_rdh
|
/**
*
* @return The dependencies (other containers).
*/
public Collection<GenericContainer<?>> getDependencies() {
    return dependencies;
}
| 3.26 |
flink_TestcontainersSettings_builder_rdh
|
/**
* A new builder for {@code TestcontainersSettings}.
*
* @return The builder.
*/
public static Builder builder() {
    return new Builder();
}
| 3.26 |
flink_TestcontainersSettings_getLogger_rdh
|
/**
*
* @return The logger.
*/
public Logger getLogger() {
return logger;
}
| 3.26 |
flink_TestcontainersSettings_environmentVariable_rdh
|
/**
* Sets an environment variable and returns a reference to this Builder enabling method
* chaining.
*
* @param name
* The name of the environment variable.
* @param value
* The value of the environment variable.
* @return A reference to this Builder.
*/
public Builder environmentVariable(String name, String value) {
this.envVars.put(name, value);
return this;
}
| 3.26 |
flink_TestcontainersSettings_baseImage_rdh
|
/**
* Sets the {@code baseImage} and returns a reference to this Builder enabling method
* chaining.
*
* @param baseImage
* The {@code baseImage} to set.
* @return A reference to this Builder.
*/
public Builder baseImage(String baseImage) {
this.baseImage = baseImage;
return this;
}
| 3.26 |
flink_TestcontainersSettings_m0_rdh
|
/**
*
* @return The base image.
*/
public String m0() {
    return baseImage;
}
| 3.26 |
flink_RowtimeAttributeDescriptor_getAttributeName_rdh
|
/**
* Returns the name of the rowtime attribute.
*/
public String getAttributeName() {
return attributeName;
}
| 3.26 |
flink_RowtimeAttributeDescriptor_getTimestampExtractor_rdh
|
/**
 * Returns the {@link TimestampExtractor} for the attribute.
 */
public TimestampExtractor getTimestampExtractor() {
    return timestampExtractor;
}
| 3.26 |
flink_DelimitedInputFormat_fillBuffer_rdh
|
/**
* Fills the read buffer with bytes read from the file starting from an offset.
*/
private boolean fillBuffer(int offset) throws IOException {
int maxReadLength = this.readBuffer.length - offset;
// special case for reading the whole split.
if (this.splitLength == FileInputFormat.READ_WHOLE_SPLIT_FLAG) {
int read = this.stream.read(this.readBuffer, offset, maxReadLength);
if (read == (-1)) {
this.stream.close();
this.stream = null;
return false;
} else {
this.readPos = offset;
this.limit = read + offset;
return true;
}
}
// else ..
int toRead;
if (this.splitLength > 0) {
// if we have more data, read that
toRead = (this.splitLength > maxReadLength) ? maxReadLength : ((int) (this.splitLength));
} else {
    // if we have exhausted our split, we need to complete the current record, or read one
    // more across the next split.
    // the reason is that the next split will skip over the beginning until it finds the first
    // delimiter, discarding it as an incomplete chunk of data that belongs to the last record
    // in the previous split.
toRead = maxReadLength;
this.overLimit = true;
}
int read = this.stream.read(this.readBuffer, offset, toRead);
if (read == (-1)) {
this.stream.close();
this.stream = null;
return false;
        } else {
            this.splitLength -= read;
            this.readPos = offset; // position from where to start reading
            this.limit = read + offset; // number of valid bytes in the read buffer
            return true;
        }
    }
| 3.26 |
flink_DelimitedInputFormat_loadGlobalConfigParams_rdh
|
/**
*
 * @deprecated Please use {@code loadConfigParameters(Configuration config)}
 */
@Deprecated
protected static void loadGlobalConfigParams() {
loadConfigParameters(GlobalConfiguration.loadConfiguration());
}
| 3.26 |
flink_DelimitedInputFormat_configure_rdh
|
// --------------------------------------------------------------------------------------------
// Pre-flight: Configuration, Splits, Sampling
// --------------------------------------------------------------------------------------------
/**
* Configures this input format by reading the path to the file from the configuration and the
* string that defines the record delimiter.
*
* @param parameters
* The configuration object to read the parameters from.
*/
@Override
public void configure(Configuration parameters) {
super.configure(parameters);
// the if() clauses are to prevent the configure() method from
// overwriting the values set by the setters
if (Arrays.equals(delimiter, new byte[]{ '\n' })) {
String delimString = parameters.getString(RECORD_DELIMITER, null);
if (delimString != null) {
setDelimiter(delimString);
}
}
// set the number of samples
if (numLineSamples == NUM_SAMPLES_UNDEFINED) {
String samplesString = parameters.getString(NUM_STATISTICS_SAMPLES, null);
if (samplesString != null) {
try {
setNumLineSamples(Integer.parseInt(samplesString));
} catch (NumberFormatException e) {
if (LOG.isWarnEnabled()) {
    LOG.warn("Invalid value for number of samples to take: " + samplesString + ". Skipping sampling.");
}
setNumLineSamples(0);
}
}
}
}
| 3.26 |
flink_DelimitedInputFormat_setCharset_rdh
|
/**
* Set the name of the character set used for the row delimiter. This is also used by subclasses
* to interpret field delimiters, comment strings, and for configuring {@link FieldParser}s.
*
* <p>These fields are interpreted when set. Changing the charset thereafter may cause
* unexpected results.
*
* @param charset
* name of the charset
*/
@PublicEvolving
public void setCharset(String charset) {
this.charsetName = Preconditions.checkNotNull(charset);
this.charset = null;
if (this.delimiterString != null) {
this.delimiter = delimiterString.getBytes(getCharset());
}
}
| 3.26 |
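The row delimiter set from a String is re-encoded when the charset changes, but fields interpreted by subclasses are not, so it is safest to configure the charset first. A small sketch using TextInputFormat, which extends DelimitedInputFormat (the path is a placeholder):

TextInputFormat format = new TextInputFormat(new Path("file:///tmp/input")); // placeholder path
format.setCharset("UTF-8");
format.setDelimiter("\r\n"); // interpreted with the charset configured at this point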
flink_DelimitedInputFormat_close_rdh
|
/**
* Closes the input by releasing all buffers and closing the file input stream.
*
* @throws IOException
* Thrown, if the closing of the file stream causes an I/O error.
*/
@Override
public void close() throws IOException {
this.wrapBuffer = null;
this.readBuffer = null;
super.close();
}
| 3.26 |
flink_DelimitedInputFormat_getCurrentState_rdh
|
// --------------------------------------------------------------------------------------------
// Checkpointing
// --------------------------------------------------------------------------------------------
@PublicEvolving
@Override
public Long getCurrentState() throws IOException {
return this.offset;
}
| 3.26 |
flink_DelimitedInputFormat_readLine_rdh
|
// --------------------------------------------------------------------------------------------
protected final boolean readLine() throws IOException {
if ((this.stream == null) || this.overLimit) {
return false;
}
int countInWrapBuffer = 0;
// position of matching positions in the delimiter byte array
int delimPos = 0;
while (true) {
if (this.readPos >= this.limit) {
// readBuffer is completely consumed. Fill it again but keep partially read
// delimiter bytes.
if (!fillBuffer(delimPos)) {
int countInReadBuffer = delimPos;
if ((countInWrapBuffer + countInReadBuffer) > 0) {
// we have bytes left to emit
if (countInReadBuffer > 0) {
// we have bytes left in the readBuffer. Move them into the wrapBuffer
if ((this.wrapBuffer.length - countInWrapBuffer) < countInReadBuffer) {
// reallocate
byte[] tmp = new byte[countInWrapBuffer + countInReadBuffer];
System.arraycopy(this.wrapBuffer, 0, tmp, 0, countInWrapBuffer);
this.wrapBuffer = tmp;
}
// copy readBuffer bytes to wrapBuffer
System.arraycopy(this.readBuffer, 0, this.wrapBuffer, countInWrapBuffer, countInReadBuffer);
countInWrapBuffer += countInReadBuffer;
}
this.offset += countInWrapBuffer;
setResult(this.wrapBuffer, 0, countInWrapBuffer);
return true;
} else {
return false;
}
}
}
int startPos = this.readPos - delimPos;
int count;
// Search for next occurrence of delimiter in read buffer.
while ((this.readPos < this.limit) && (delimPos < this.delimiter.length)) {
if (this.readBuffer[this.readPos] == this.delimiter[delimPos]) {
// Found the expected delimiter character. Continue looking for the next
// character of delimiter.
delimPos++;
} else {
// Delimiter does not match.
// We have to reset the read position to the character after the first matching
// character
// and search for the whole delimiter again.
readPos -= delimPos;
delimPos = 0;
}
readPos++;
}
// check why we dropped out
if (delimPos == this.delimiter.length) {
// we found a delimiter
int readBufferBytesRead = this.readPos - startPos;
this.offset += countInWrapBuffer + readBufferBytesRead;
count = readBufferBytesRead - this.delimiter.length;
// copy to byte array
if (countInWrapBuffer > 0) {
// check wrap buffer size
if (this.wrapBuffer.length < (countInWrapBuffer + count)) {
final byte[] nb = new byte[countInWrapBuffer + count];
System.arraycopy(this.wrapBuffer, 0, nb, 0, countInWrapBuffer);
    this.wrapBuffer = nb;
}
if (count >= 0) {
System.arraycopy(this.readBuffer, 0, this.wrapBuffer, countInWrapBuffer, count);
}
setResult(this.wrapBuffer, 0, countInWrapBuffer + count);
return true;
} else {
setResult(this.readBuffer, startPos, count);
return true;
}
} else {// we reached the end of the readBuffer
count = this.limit - startPos;
// check against the maximum record length
if ((((long) (countInWrapBuffer)) + count) > this.lineLengthLimit) {
throw new IOException(("The record length exceeded the maximum record length (" + this.lineLengthLimit) + ").");
}
// Compute number of bytes to move to wrapBuffer
// Chars of partially read delimiter must remain in the readBuffer. We might need to
// go back.
int bytesToMove = count - delimPos;
// ensure wrapBuffer is large enough
if ((this.wrapBuffer.length - countInWrapBuffer) < bytesToMove) {
// reallocate
byte[] tmp = new byte[Math.max(this.wrapBuffer.length * 2, countInWrapBuffer + bytesToMove)];
System.arraycopy(this.wrapBuffer, 0, tmp, 0, countInWrapBuffer);
this.wrapBuffer = tmp;
}
// copy readBuffer to wrapBuffer (except delimiter chars)
System.arraycopy(this.readBuffer, startPos, this.wrapBuffer, countInWrapBuffer, bytesToMove);
countInWrapBuffer += bytesToMove;
// move delimiter chars to the beginning of the readBuffer
System.arraycopy(this.readBuffer, this.readPos - delimPos, this.readBuffer, 0, delimPos);
            }
}
}
| 3.26 |
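The core of readLine() is a byte-wise scan for a possibly multi-byte delimiter with backtracking after a partial match. The standalone sketch below (not Flink code) shows the same scan over a single buffer, leaving out the wrapBuffer handling that readLine() needs for records spanning buffer refills:

// Returns the absolute index at which the delimiter starts in buffer[from, limit), or -1 if absent.
static int indexOfDelimiter(byte[] buffer, int from, int limit, byte[] delimiter) {
    int delimPos = 0; // number of delimiter bytes matched so far
    int pos = from;
    while (pos < limit && delimPos < delimiter.length) {
        if (buffer[pos] == delimiter[delimPos]) {
            delimPos++;      // keep matching the next delimiter byte
        } else {
            pos -= delimPos; // rewind to just after the first byte of the failed match
            delimPos = 0;    // and restart matching the delimiter from its beginning
        }
        pos++;
    }
    return delimPos == delimiter.length ? pos - delimiter.length : -1;
}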
flink_DelimitedInputFormat_initializeSplit_rdh
|
/**
* Initialization method that is called after opening or reopening an input split.
*
* @param split
* Split that was opened or reopened
* @param state
* Checkpointed state if the split was reopened
* @throws IOException
*/
protected void initializeSplit(FileInputSplit split, @Nullable Long state) throws IOException {
}
| 3.26 |
flink_DelimitedInputFormat_m0_rdh
|
/**
* Opens the given input split. This method opens the input stream to the specified file,
* allocates read buffers and positions the stream at the correct position, making sure that any
* partial record at the beginning is skipped.
*
* @param split
* The input split to open.
* @see org.apache.flink.api.common.io.FileInputFormat#open(org.apache.flink.core.fs.FileInputSplit)
 */
@Override
public void m0(FileInputSplit split) throws IOException {
super.open(split);
initBuffers();
this.offset = splitStart;
if (this.splitStart != 0) {
this.stream.seek(offset);
readLine();
// if the first partial record already pushes the stream over
// the limit of our split, then no record starts within this split
if (this.overLimit) {
this.end = true;
}
} else {
fillBuffer(0);
}
initializeSplit(split, null);
}
| 3.26 |
flink_DelimitedInputFormat_getCharset_rdh
|
/**
* Get the character set used for the row delimiter. This is also used by subclasses to
* interpret field delimiters, comment strings, and for configuring {@link FieldParser}s.
*
* @return the charset
*/
@PublicEvolving
public Charset getCharset() {
if (this.charset == null) {
this.charset = Charset.forName(charsetName);
}
return this.charset;
}
| 3.26 |
flink_DelimitedInputFormat_reachedEnd_rdh
|
/**
* Checks whether the current split is at its end.
*
* @return True, if the split is at its end, false otherwise.
*/
@Override
public boolean reachedEnd() {
    return this.end;
}
| 3.26 |
flink_CepOperator_processEvent_rdh
|
/**
* Process the given event by giving it to the NFA and outputting the produced set of matched
* event sequences.
*
* @param nfaState
* Our NFAState object
* @param event
* The current event to be processed
* @param timestamp
* The timestamp of the event
*/
private void processEvent(NFAState nfaState, IN event, long timestamp) throws Exception {
try (SharedBufferAccessor<IN> sharedBufferAccessor = partialMatches.getAccessor()) {
Collection<Map<String, List<IN>>> patterns = nfa.process(sharedBufferAccessor, nfaState, event, timestamp, afterMatchSkipStrategy, cepTimerService);
if ((nfa.getWindowTime() > 0) && nfaState.isNewStartPartialMatch()) {
registerTimer(timestamp + nfa.getWindowTime());
}
processMatchedSequences(patterns, timestamp);
}
}
| 3.26 |
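For context, this operator is normally created indirectly through the CEP pattern API rather than instantiated by hand; a hedged sketch of the public entry point (the Event type and its getName() accessor are placeholders):

static PatternStream<Event> detectStart(DataStream<Event> events) {
    Pattern<Event, ?> pattern =
            Pattern.<Event>begin("start")
                    .where(new SimpleCondition<Event>() {
                        @Override
                        public boolean filter(Event e) {
                            return "start".equals(e.getName()); // placeholder accessor
                        }
                    })
                    .within(Time.seconds(10)); // org.apache.flink.streaming.api.windowing.time.Time
    return CEP.pattern(events, pattern);
}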
flink_CepOperator_hasNonEmptySharedBuffer_rdh
|
// //////////////////// Testing Methods //////////////////////
@VisibleForTesting
boolean hasNonEmptySharedBuffer(KEY key) throws Exception {
setCurrentKey(key);
return !partialMatches.isEmpty();
}
| 3.26 |
flink_CepOperator_advanceTime_rdh
|
/**
* Advances the time for the given NFA to the given timestamp. This means that no more events
* with timestamp <b>lower</b> than the given timestamp should be passed to the nfa, This can
* lead to pruning and timeouts.
*/
private void advanceTime(NFAState nfaState, long timestamp) throws Exception {
try (SharedBufferAccessor<IN> sharedBufferAccessor = partialMatches.getAccessor()) {
Tuple2<Collection<Map<String, List<IN>>>, Collection<Tuple2<Map<String, List<IN>>, Long>>> pendingMatchesAndTimeout = nfa.advanceTime(sharedBufferAccessor, nfaState, timestamp, afterMatchSkipStrategy);
Collection<Map<String, List<IN>>> pendingMatches = pendingMatchesAndTimeout.f0;
Collection<Tuple2<Map<String, List<IN>>, Long>> timedOut = pendingMatchesAndTimeout.f1;
if (!pendingMatches.isEmpty()) {
processMatchedSequences(pendingMatches, timestamp);
}
if (!timedOut.isEmpty()) {
processTimedOutSequences(timedOut);
}
}
}
| 3.26 |
flink_Transformation_declareManagedMemoryUseCaseAtSlotScope_rdh
|
/**
* Declares that this transformation contains certain slot scope managed memory use case.
*
* @param managedMemoryUseCase
* The use case that this transformation declares needing managed
* memory for.
*/
public void declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase managedMemoryUseCase) {
Preconditions.checkNotNull(managedMemoryUseCase);
Preconditions.checkArgument(managedMemoryUseCase.scope == Scope.SLOT);
managedMemorySlotScopeUseCases.add(managedMemoryUseCase);
}
| 3.26 |
flink_Transformation_setCoLocationGroupKey_rdh
|
/**
* <b>NOTE:</b> This is an internal undocumented feature for now. It is not clear whether this
* will be supported and stable in the long term.
*
* <p>Sets the key that identifies the co-location group. Operators with the same co-location
* key will have their corresponding subtasks placed into the same slot by the scheduler.
*
* <p>Setting this to null means there is no co-location constraint.
*/
public void setCoLocationGroupKey(@Nullable String coLocationGroupKey) {
this.coLocationGroupKey = coLocationGroupKey;
}
| 3.26 |
flink_Transformation_getName_rdh
|
/**
* Returns the name of this {@code Transformation}.
*/
public String getName() {
return name;
}
| 3.26 |
flink_Transformation_getId_rdh
|
/**
* Returns the unique ID of this {@code Transformation}.
*/
public int getId() {
    return id;
}
| 3.26 |
flink_Transformation_getManagedMemorySlotScopeUseCases_rdh
|
/**
* Get slot scope use cases that this transformation needs managed memory for.
*/
public Set<ManagedMemoryUseCase> getManagedMemorySlotScopeUseCases() {
return Collections.unmodifiableSet(managedMemorySlotScopeUseCases);
}
| 3.26 |
flink_Transformation_setUidHash_rdh
|
/**
 * Sets a user provided hash for this operator. This will be used AS IS to create the
* JobVertexID.
*
 * <p>The user provided hash is an alternative to the generated hashes. It is considered when
 * identifying an operator through the default hash mechanics fails (e.g. because of changes
* between Flink versions).
*
* <p><strong>Important</strong>: this should be used as a workaround or for trouble shooting.
* The provided hash needs to be unique per transformation and job. Otherwise, job submission
* will fail. Furthermore, you cannot assign user-specified hash to intermediate nodes in an
* operator chain and trying so will let your job fail.
*
* <p>A use case for this is in migration between Flink versions or changing the jobs in a way
* that changes the automatically generated hashes. In this case, providing the previous hashes
* directly through this method (e.g. obtained from old logs) can help to reestablish a lost
* mapping from states to their target operator.
*
* @param uidHash
* The user provided hash for this operator. This will become the JobVertexID,
* which is shown in the logs and web ui.
*/
public void setUidHash(String uidHash) {
Preconditions.checkNotNull(uidHash);
Preconditions.checkArgument(uidHash.matches("^[0-9A-Fa-f]{32}$"), "Node hash must be a 32 character String that describes a hex code. Found: " + uidHash);
this.userProvidedNodeHash = uidHash;
}
| 3.26 |
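At the DataStream API level this surfaces as SingleOutputStreamOperator#setUidHash; a small sketch with a made-up 32-character hex hash, e.g. one recovered from old logs:

static DataStream<String> upperCaseWithStableHash(DataStream<String> input) {
    return input
            .map(new MapFunction<String, String>() {
                @Override
                public String map(String value) {
                    return value.toUpperCase();
                }
            })
            .setUidHash("42b52da8b8e34c1c9bf4c8c3e6a0cf1a"); // hypothetical hash, must be 32 hex chars
}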
flink_Transformation_setName_rdh
|
/**
* Changes the name of this {@code Transformation}.
*/
public void setName(String name) {
this.name = name;
}
| 3.26 |