name | code_snippet | score
---|---|---|
flink_StateTtlConfig_m0_rdh | /**
* Sets the ttl time.
*
* @param ttl
* The ttl time.
*/
@Nonnull
public Builder m0(@Nonnull Time ttl) {
    this.ttl = ttl;
    return this;
} | 3.26 |
flink_StateTtlConfig_setStateVisibility_rdh | /**
* Sets the state visibility.
*
* @param stateVisibility
 * The state visibility configures whether an expired user value can be
 * returned or not.
*/
@Nonnull
public Builder setStateVisibility(@Nonnull StateVisibility stateVisibility) {
this.stateVisibility = stateVisibility;
return this;
} | 3.26 |
flink_StateTtlConfig_disableCleanupInBackground_rdh | /**
 * Disables the default cleanup of expired state in background (enabled by default).
 *
 * <p>If some specific cleanup is configured, e.g. {@link #cleanupIncrementally(int,
 * boolean)}, {@link #cleanupInRocksdbCompactFilter(long)} or
 * {@link #cleanupInRocksdbCompactFilter(long, Time)}, this setting does not disable it.
*/
@Nonnull
public Builder disableCleanupInBackground() {
isCleanupInBackground = false;
return this;
} | 3.26 |
flink_StateTtlConfig_setTtlTimeCharacteristic_rdh | /**
* Sets the time characteristic.
*
* @param ttlTimeCharacteristic
* The time characteristic configures time scale to use for
* ttl.
*/
@Nonnull
public Builder setTtlTimeCharacteristic(@Nonnull TtlTimeCharacteristic ttlTimeCharacteristic) {
this.ttlTimeCharacteristic = ttlTimeCharacteristic;
return this;
} | 3.26 |
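The four `StateTtlConfig.Builder` rows above are easiest to read as one chained configuration. Below is a minimal sketch, assuming Flink's public `StateTtlConfig.newBuilder(Time)` entry point and its standard enum constants; the obfuscated `m0` above corresponds to the TTL setter.

```java
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.time.Time;

public class TtlConfigSketch {
    // Chains the builder setters shown in the rows above into one TTL configuration.
    static StateTtlConfig oneDayTtl() {
        return StateTtlConfig.newBuilder(Time.days(1)) // TTL duration (the obfuscated m0 setter)
                .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
                .setTtlTimeCharacteristic(StateTtlConfig.TtlTimeCharacteristic.ProcessingTime)
                .disableCleanupInBackground()
                .build();
    }
}
```

The resulting config would then be passed to a state descriptor via `StateDescriptor#enableTimeToLive(StateTtlConfig)`.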
flink_AsynchronousBufferFileWriter_writeBlock_rdh | /**
* Writes the given block asynchronously.
*
* @param buffer
* the buffer to be written (will be recycled when done)
* @throws IOException
* thrown if adding the write operation fails
*/
@Override
public void writeBlock(Buffer buffer) throws IOException {
try {
// if successfully added, the buffer will be recycled after the write operation
addRequest(new BufferWriteRequest(this, buffer));
} catch (Throwable e) {
// if not added, we need to recycle here
buffer.recycleBuffer();
ExceptionUtils.rethrowIOException(e);
}
} | 3.26 |
flink_ShortValue_getMaxNormalizedKeyLen_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getMaxNormalizedKeyLen() {
return 2;
} | 3.26 |
flink_ShortValue_setValue_rdh | /**
* Sets the encapsulated short to the specified value.
*
* @param value
* the new value of the encapsulated short.
*/
public void setValue(short value) {
this.value = value;
} | 3.26 |
flink_ShortValue_getBinaryLength_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getBinaryLength() {
return 2;
} | 3.26 |
flink_ShortValue_read_rdh | // --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
this.value = in.readShort();
} | 3.26 |
flink_SingleInputUdfOperator_setSemanticProperties_rdh | /**
* Sets the semantic properties for the user-defined function (UDF). The semantic properties
* define how fields of tuples and other objects are modified or preserved through this UDF. The
* configured properties can be retrieved via {@link UdfOperator#getSemanticProperties()}.
*
* @param properties
* The semantic properties for the UDF.
* @see UdfOperator#getSemanticProperties()
*/
@Internal
public void setSemanticProperties(SingleInputSemanticProperties properties) {
    this.udfSemantics = properties;
    this.analyzedUdfSemantics = false;
} | 3.26 |
flink_SingleInputUdfOperator_withParameters_rdh | // --------------------------------------------------------------------------------------------
// Fluent API methods
// --------------------------------------------------------------------------------------------
@Override
public O withParameters(Configuration parameters) {
    this.parameters = parameters;
    @SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_SingleInputUdfOperator_getBroadcastSets_rdh | // --------------------------------------------------------------------------------------------
// Accessors
// --------------------------------------------------------------------------------------------
@Override
@Internal
public Map<String, DataSet<?>> getBroadcastSets() {
return this.broadcastVariables == null ? Collections.<String, DataSet<?>>emptyMap() : Collections.unmodifiableMap(this.broadcastVariables);
} | 3.26 |
flink_SingleInputUdfOperator_returns_rdh | /**
* Adds a type information hint about the return type of this operator. This method can be used
* in cases where Flink cannot determine automatically what the produced type of a function is.
* That can be the case if the function uses generic type variables in the return type that
* cannot be inferred from the input type.
*
* <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are
* preferable.
*
* @param typeInfo
* The type information for the returned data type.
* @return This operator using the given type information for the return type.
*/
public O returns(TypeInformation<OUT> typeInfo) {
requireNonNull(typeInfo, "TypeInformation must not be null");
fillInType(typeInfo);
@SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_SingleInputUdfOperator_withForwardedFields_rdh | /**
* Adds semantic information about forwarded fields of the user-defined function. The forwarded
* fields information declares fields which are never modified by the function and which are
* forwarded at the same position to the output or unchanged copied to another position in the
* output.
*
* <p>Fields that are forwarded at the same position are specified by their position. The
* specified position must be valid for the input and output data type and have the same type.
* For example <code>withForwardedFields("f2")</code> declares that the third field of a Java
* input tuple is copied to the third field of an output tuple.
*
* <p>Fields which are unchanged copied to another position in the output are declared by
* specifying the source field reference in the input and the target field reference in the
* output. {@code withForwardedFields("f0->f2")} denotes that the first field of the Java input
* tuple is unchanged copied to the third field of the Java output tuple. When using a wildcard
* ("*") ensure that the number of declared fields and their types in input and output type
* match.
*
* <p>Multiple forwarded fields can be annotated in one ({@code withForwardedFields("f2; f3->f0;
* f4")}) or separate Strings ({@code withForwardedFields("f2", "f3->f0", "f4")}). Please refer
* to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or Flink's
* documentation for details on field references such as nested fields and wildcard.
*
* <p>It is not possible to override existing semantic information about forwarded fields which
* was for example added by a {@link org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields} class annotation.
*
* <p><b>NOTE: Adding semantic information for functions is optional! If used correctly,
* semantic information can help the Flink optimizer to generate more efficient execution plans.
* However, incorrect semantic information can cause the optimizer to generate incorrect
* execution plans which compute wrong results! So be careful when adding semantic information.
* </b>
*
* @param forwardedFields
* A list of field forward expressions.
* @return This operator with annotated forwarded field information.
* @see org.apache.flink.api.java.functions.FunctionAnnotation
* @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields
*/
public O withForwardedFields(String... forwardedFields) {
if (this.udfSemantics == null) {
// extract semantic properties from function annotations
setSemanticProperties(extractSemanticAnnotations(getFunction().getClass()));
}
if ((this.udfSemantics == null) || this.analyzedUdfSemantics) {
// discard analyzed semantic properties
setSemanticProperties(new SingleInputSemanticProperties());
SemanticPropUtil.getSemanticPropsSingleFromString(this.udfSemantics, forwardedFields, null, null, this.getInputType(), this.getResultType());
} else if (udfWithForwardedFieldsAnnotation(getFunction().getClass())) {
// refuse semantic information as it would override the function annotation
throw new SemanticProperties.InvalidSemanticAnnotationException(("Forwarded field information " + "has already been added by a function annotation for this operator. ") + "Cannot overwrite function annotations.");
} else {
SemanticPropUtil.getSemanticPropsSingleFromString(this.udfSemantics, forwardedFields, null, null, this.getInputType(), this.getResultType());
}
@SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
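As a usage illustration of `withForwardedFields`, here is a small, hypothetical DataSet job; the mapper class, tuple layout, and data are assumptions made for the example, not taken from the snippet above.

```java
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class ForwardedFieldsSketch {

    /** Keeps f0 unchanged and upper-cases f1, so "f0" can safely be declared as forwarded. */
    public static class UpperCaseF1 implements MapFunction<Tuple2<Long, String>, Tuple2<Long, String>> {
        @Override
        public Tuple2<Long, String> map(Tuple2<Long, String> value) {
            return Tuple2.of(value.f0, value.f1.toUpperCase());
        }
    }

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Long, String>> result = env
                .fromElements(Tuple2.of(1L, "a"), Tuple2.of(2L, "b"))
                .map(new UpperCaseF1())
                .withForwardedFields("f0"); // the first field is forwarded unchanged
        result.print();
    }
}
```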
flink_Sink_metadataConsumer_rdh | /**
* Returns a metadata consumer, the {@link SinkWriter} can publish metadata events of type
* {@link MetaT} to the consumer.
*
 * <p>It is recommended to use a separate thread pool to publish the metadata, because
 * enqueuing a lot of these messages in the mailbox may lead to a performance decrease;
 * publishing from the mailbox thread is only advisable if the {@link Consumer#accept}
 * method executes very fast.
 */
@Experimental
default <MetaT> Optional<Consumer<MetaT>> metadataConsumer() {
    return Optional.empty();
} | 3.26 |
flink_SlideWithSizeAndSlideOnTime_as_rdh | /**
 * Assigns an alias for this window that the following {@code groupBy()} and {@code select()}
 * clauses can refer to. The {@code select()} statement can access window properties such as
 * window start or end time.
*
* @param alias
* alias for this window
* @return this window
*/
public SlideWithSizeAndSlideOnTimeWithAlias as(Expression alias) {
return new SlideWithSizeAndSlideOnTimeWithAlias(alias, timeField, size, slide);
} | 3.26 |
flink_CustomSinkOperatorUidHashes_setCommitterUidHash_rdh | /**
* Sets the uid hash of the committer operator used to recover state.
*
* @param committerUidHash
* uid hash denoting the committer operator
* @return {@link SinkOperatorUidHashesBuilder}
*/
public SinkOperatorUidHashesBuilder setCommitterUidHash(String committerUidHash) {
this.committerUidHash = committerUidHash;
    return this;
} | 3.26 |
flink_CustomSinkOperatorUidHashes_setGlobalCommitterUidHash_rdh | /**
* Sets the uid hash of the global committer operator used to recover state.
*
* @param globalCommitterUidHash
* uid hash denoting the global committer operator
* @return {@link SinkOperatorUidHashesBuilder}
*/
public SinkOperatorUidHashesBuilder setGlobalCommitterUidHash(String globalCommitterUidHash) {
this.globalCommitterUidHash = globalCommitterUidHash;
return this;
} | 3.26 |
flink_CustomSinkOperatorUidHashes_builder_rdh | /**
* Creates a builder to construct {@link CustomSinkOperatorUidHashes}.
*
* @return {@link SinkOperatorUidHashesBuilder}
 */
public static SinkOperatorUidHashesBuilder builder() {
return new SinkOperatorUidHashesBuilder();
} | 3.26 |
flink_CustomSinkOperatorUidHashes_build_rdh | /**
* Constructs the {@link CustomSinkOperatorUidHashes} with the given uid hashes.
*
* @return {@link CustomSinkOperatorUidHashes}
*/
public CustomSinkOperatorUidHashes build() {
return new CustomSinkOperatorUidHashes(writerUidHash, committerUidHash, globalCommitterUidHash);
} | 3.26 |
flink_CustomSinkOperatorUidHashes_setWriterUidHash_rdh | /**
* Sets the uid hash of the writer operator used to recover state.
*
* @param writerUidHash
* uid hash denoting writer operator
* @return {@link SinkOperatorUidHashesBuilder}
*/
public SinkOperatorUidHashesBuilder setWriterUidHash(String writerUidHash) {
this.writerUidHash = writerUidHash;
return this;
} | 3.26 |
flink_AbstractFileStateBackend_resolveCheckpoint_rdh | // ------------------------------------------------------------------------
// Initialization and metadata storage
// ------------------------------------------------------------------------
@Override
public CompletedCheckpointStorageLocation resolveCheckpoint(String pointer) throws IOException {
return AbstractFsCheckpointStorageAccess.resolveCheckpointPointer(pointer);
} | 3.26 |
flink_AbstractFileStateBackend_validatePath_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Checks the validity of the path's scheme and path.
*
* @param path
* The path to check.
* @return The URI as a Path.
* @throws IllegalArgumentException
* Thrown, if the URI misses scheme or path.
*/
private static Path validatePath(Path path) {
final URI uri = path.toUri();
final String scheme = uri.getScheme();
final String pathPart = uri.getPath();
// some validity checks
    if (scheme == null) {
throw new IllegalArgumentException("The scheme (hdfs://, file://, etc) is null. " + "Please specify the file system scheme explicitly in the URI.");
}
if (pathPart == null) {
throw new IllegalArgumentException("The path to store the checkpoint data in is null. " + "Please specify a directory path for the checkpoint data.");
}
if ((pathPart.length() == 0) || pathPart.equals("/")) {
throw new IllegalArgumentException("Cannot use the root directory for checkpoints.");
}
return path;
} | 3.26 |
flink_AbstractFileStateBackend_getCheckpointPath_rdh | // ------------------------------------------------------------------------
/**
* Gets the checkpoint base directory. Jobs will create job-specific subdirectories for
* checkpoints within this directory. May be null, if not configured.
*
* @return The checkpoint base directory
*/
@Nullable
public Path getCheckpointPath() {
return baseCheckpointPath;
} | 3.26 |
flink_WindowedStateTransformation_aggregate_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggregateFunction
* The aggregation function that is used for incremental aggregation.
* @param windowFunction
* The window function.
* @param accumulatorType
* Type information for the internal accumulator type of the aggregation
* function
* @return The data stream that is the result of applying the window function to the window.
* @param <ACC>
* The type of the AggregateFunction's accumulator
* @param <V>
* The type of AggregateFunction's result, and the WindowFunction's input
* @param <R>
* The type of the elements in the resulting stream, equal to the WindowFunction's
* result type
*/
@PublicEvolving
public <ACC, V, R> StateBootstrapTransformation<T> aggregate(AggregateFunction<T, ACC, V> aggregateFunction, ProcessWindowFunction<V, R, K, W> windowFunction, TypeInformation<ACC> accumulatorType) {
checkNotNull(aggregateFunction, "aggregateFunction");
checkNotNull(windowFunction, "windowFunction");
checkNotNull(accumulatorType, "accumulatorType");
if (aggregateFunction instanceof RichFunction) {
throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction.");
}
// clean the closures
windowFunction = input.getExecutionEnvironment().clean(windowFunction);
aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction);
WindowOperator<K, T, ?, R, W> operator =
builder.aggregate(aggregateFunction, windowFunction, accumulatorType);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new StateBootstrapTransformation<>(input, operatorMaxParallelism, factory,
keySelector, keyType);
} | 3.26 |
flink_WindowedStateTransformation_process_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function
* The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
public <R> StateBootstrapTransformation<T> process(ProcessWindowFunction<T, R, K, W> function) {
WindowOperator<K, T, ?, R, W> operator = builder.process(function);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new StateBootstrapTransformation<>(input, operatorMaxParallelism, factory, keySelector, keyType);
} | 3.26 |
flink_WindowedStateTransformation_evictor_rdh | /**
* Sets the {@code Evictor} that should be used to evict elements from a window before emission.
*
 * <p>Note: When using an evictor, window performance will degrade significantly, since
 * incremental aggregation of window results cannot be used.
 */
@PublicEvolving
public WindowedStateTransformation<T, K, W> evictor(Evictor<? super T, ? super W> evictor) {
    builder.evictor(evictor);
    return this;
} | 3.26 |
flink_WindowedStateTransformation_reduce_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given reducer.
*
* @param reduceFunction
* The reduce function that is used for incremental aggregation.
* @param function
* The window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@Internal
public <R> StateBootstrapTransformation<T> reduce(ReduceFunction<T> reduceFunction, ProcessWindowFunction<T, R, K, W> function) {
// clean the closures
function = input.getExecutionEnvironment().clean(function);
reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);
WindowOperator<K, T, ?, R, W> operator =
builder.reduce(reduceFunction, function);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new StateBootstrapTransformation<>(input,
operatorMaxParallelism, factory, keySelector, keyType);
} | 3.26 |
flink_WindowedStateTransformation_m0_rdh | // Aggregation Function
// ------------------------------------------------------------------------
/**
* Applies the given aggregation function to each window. The aggregation function is called for
* each element, aggregating values incrementally and keeping the state to one accumulator per
* key and window.
*
* @param function
* The aggregation function.
 * @return The data stream that is the result of applying the aggregation function to the window.
* @param <ACC>
* The type of the AggregateFunction's accumulator
* @param <R>
* The type of the elements in the resulting stream, equal to the AggregateFunction's
* result type
*/
@PublicEvolving
public <ACC, R> StateBootstrapTransformation<T> m0(AggregateFunction<T, ACC, R> function) {
checkNotNull(function, "function");
if (function instanceof RichFunction) {
throw new UnsupportedOperationException("This aggregation function cannot be a RichFunction.");
}
TypeInformation<ACC> accumulatorType = TypeExtractor.getAggregateFunctionAccumulatorType(function, input.getType(), null, false);
return aggregate(function, accumulatorType);
} | 3.26 |
flink_WindowedStateTransformation_trigger_rdh | /**
* Sets the {@code Trigger} that should be used to trigger window emission.
*/
@PublicEvolving
public WindowedStateTransformation<T, K, W> trigger(Trigger<? super T, ? super W> trigger) {
builder.trigger(trigger);
return this;
} | 3.26 |
flink_WindowedStateTransformation_apply_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function
* The window function.
* @param resultType
* Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
*/
public <R> StateBootstrapTransformation<T> apply(WindowFunction<T, R, K, W> function, TypeInformation<R> resultType) {
function = input.getExecutionEnvironment().clean(function);
WindowOperator<K, T, ?, R, W> operator = builder.apply(function);
SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new StateBootstrapTransformation<>(input, operatorMaxParallelism, factory, keySelector, keyType);
} | 3.26 |
flink_TumblingProcessingTimeWindows_of_rdh | /**
* Creates a new {@code TumblingProcessingTimeWindows} {@link WindowAssigner} that assigns
* elements to time windows based on the element timestamp, offset and a staggering offset,
* depending on the staggering policy.
*
* @param size
* The size of the generated windows.
* @param offset
* The offset which window start would be shifted by.
* @param windowStagger
* The utility that produces staggering offset in runtime.
* @return The time policy.
*/
@PublicEvolving
public static TumblingProcessingTimeWindows of(Time size, Time offset, WindowStagger windowStagger) {
return new TumblingProcessingTimeWindows(size.toMilliseconds(), offset.toMilliseconds(), windowStagger);
} | 3.26 |
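A brief usage sketch of the overload above; the one-hour size, 15-minute offset, and `WindowStagger.RANDOM` policy are illustrative choices, and the assigner would normally be passed to `keyedStream.window(...)`.

```java
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.assigners.WindowStagger;
import org.apache.flink.streaming.api.windowing.time.Time;

public class StaggeredTumblingWindowSketch {
    // One-hour tumbling processing-time windows, shifted by 15 minutes and staggered
    // randomly per subtask so that parallel windows do not all fire at the same instant.
    static TumblingProcessingTimeWindows assigner() {
        return TumblingProcessingTimeWindows.of(Time.hours(1), Time.minutes(15), WindowStagger.RANDOM);
    }
}
```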
flink_QuickSort_sort_rdh | /**
 * Sort the given range of items using quick sort. {@inheritDoc} If the recursion depth falls
 * below {@link #getMaxDepth}, then switch to {@link HeapSort}.
 */
public void sort(final IndexedSortable s, int p, int r) {
    int recordsPerSegment = s.recordsPerSegment();
    int recordSize = s.recordSize();
    int maxOffset = recordSize * (recordsPerSegment - 1);
    int pN = p / recordsPerSegment;
    int pO = (p % recordsPerSegment) * recordSize;
    int rN = r / recordsPerSegment;
    int rO = (r % recordsPerSegment) * recordSize;
    sortInternal(s, recordsPerSegment, recordSize, maxOffset, p, pN, pO, r, rN, rO, m1(r - p));
} | 3.26 |
flink_QuickSort_sortInternal_rdh | /**
* Sort the given range of items using quick sort. If the recursion depth falls below {@link #getMaxDepth}, then switch to {@link HeapSort}.
*
* @param s
* paged sortable
* @param recordsPerSegment
* number of records per memory segment
* @param recordSize
* number of bytes per record
* @param maxOffset
* offset of a last record in a memory segment
* @param p
* index of first record in range
* @param pN
* page number of first record in range
* @param pO
* page offset of first record in range
* @param r
* index of last-plus-one'th record in range
* @param rN
* page number of last-plus-one'th record in range
* @param rO
* page offset of last-plus-one'th record in range
* @param depth
* recursion depth
* @see #sort(IndexedSortable, int, int)
*/
private static void sortInternal(final IndexedSortable s, int recordsPerSegment, int recordSize, int maxOffset, int p, int pN, int pO, int r, int rN, int rO, int depth) {
    while (true) {
        if ((r - p) < 13) {
            // switch to insertion sort
            int i = p + 1;
            int iN;
            int iO;
            if (pO == maxOffset) {
                iN = pN + 1;
                iO = 0;
            } else {
                iN = pN;
                iO = pO + recordSize;
            }
            while (i < r) {
                int v10 = i;
                int jN = iN;
                int jO = iO;
                int jd = v10 - 1;
                int jdN;
                int jdO;
                if (jO == 0) {
                    jdN = jN - 1;
                    jdO = maxOffset;
                } else {
                    jdN = jN;
                    jdO = jO - recordSize;
                }
                while ((v10 > p) && (s.compare(jdN, jdO, jN, jO) > 0)) {
                    s.swap(jN, jO, jdN, jdO);
                    v10 = jd;
                    jN = jdN;
                    jO = jdO;
                    jd--;
                    if (jdO == 0) {
                        jdN--;
                        jdO = maxOffset;
                    } else {
                        jdO -= recordSize;
                    }
                }
                i++;
                if (iO == maxOffset) {
                    iN++;
                    iO = 0;
                } else {
                    iO += recordSize;
                }
            }
            return;
        }
        if ((--depth) < 0) {
            // switch to heap sort
            alt.sort(s, p, r);
            return;
        }
        int rdN;
        int rdO;
        if (rO == 0) {
            rdN = rN - 1;
            rdO = maxOffset;
        } else {
            rdN = rN;
            rdO = rO - recordSize;
        }
        int m = (p + r) >>> 1;
        int mN = m / recordsPerSegment;
        int mO = (m % recordsPerSegment) * recordSize;
        // select, move pivot into first position
        m0(s, mN, mO, pN, pO);
        m0(s, mN, mO, rdN, rdO);
        m0(s, pN, pO, rdN, rdO);
        // Divide
        int i = p;
        int iN = pN;
        int v23 = pO;
        int j = r;
        int jN = rN;
        int jO = rO;
        int ll = p;
        int llN = pN;
        int llO = pO;
        int rr = r;
        int rrN = rN;
        int rrO = rO;
        int cr;
        while (true) {
            i++;
            if (v23 == maxOffset) {
                iN++;
                v23 = 0;
            } else {
                v23 += recordSize;
            }
            while (i < j) {
                if ((cr = s.compare(iN, v23, pN, pO)) > 0) {
                    break;
                }
                if (0 == cr) {
                    ll++;
                    if (llO == maxOffset) {
                        llN++;
                        llO = 0;
                    } else {
                        llO += recordSize;
                    }
                    if (ll != i) {
                        s.swap(llN, llO, iN, v23);
                    }
                }
                i++;
                if (v23 == maxOffset) {
                    iN++;
                    v23 = 0;
                } else {
                    v23 += recordSize;
                }
            }
            j--;
            if (jO == 0) {
                jN--;
                jO = maxOffset;
            } else {
                jO -= recordSize;
            }
            while (j > i) {
                if ((cr = s.compare(pN, pO, jN, jO)) > 0) {
                    break;
                }
                if (0 == cr) {
                    rr--;
                    if (rrO == 0) {
                        rrN--;
                        rrO = maxOffset;
                    } else {
                        rrO -= recordSize;
                    }
                    if (rr != j) {
                        s.swap(rrN, rrO, jN, jO);
                    }
                }
                j--;
                if (jO == 0) {
                    jN--;
                    jO = maxOffset;
                } else {
                    jO -= recordSize;
                }
            }
            if (i < j) {
                s.swap(iN, v23, jN, jO);
            } else {
                break;
            }
        }
        j = i;
        jN = iN;
        jO = v23;
        // swap pivot- and all eq values- into position
        while (ll >= p) {
            i--;
            if (v23 == 0) {
                iN--;
                v23 = maxOffset;
            } else {
                v23 -= recordSize;
            }
            s.swap(llN, llO, iN, v23);
            ll--;
            if (llO == 0) {
                llN--;
                llO = maxOffset;
            } else {
                llO -= recordSize;
            }
        }
        while (rr < r) {
            s.swap(rrN, rrO, jN, jO);
            rr++;
            if (rrO == maxOffset) {
                rrN++;
                rrO = 0;
            } else {
                rrO += recordSize;
            }
            j++;
            if (jO == maxOffset) {
                jN++;
                jO = 0;
            } else {
                jO += recordSize;
            }
        }
        // Conquer
        // Recurse on smaller interval first to keep stack shallow
        assert i != j;
        if ((i - p) < (r - j)) {
            sortInternal(s, recordsPerSegment, recordSize, maxOffset, p, pN, pO, i, iN, v23, depth);
            p = j;
            pN = jN;
            pO = jO;
        } else {
            sortInternal(s, recordsPerSegment, recordSize, maxOffset, j, jN, jO, r, rN, rO, depth);
            r = i;
            rN = iN;
            rO = v23;
        }
    }
} | 3.26 |
flink_QuickSort_m1_rdh | /**
* Deepest recursion before giving up and doing a heapsort. Returns 2 * ceil(log(n)).
*/
protected static int m1(int x) {
if (x <= 0) {
throw new IllegalArgumentException("Undefined for " + x);
}
return (32 - Integer.numberOfLeadingZeros(x - 1)) << 2;
} | 3.26 |
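A small worked example of the bound computed by the expression above, for a range of 1000 records:

```java
public class MaxDepthExample {
    public static void main(String[] args) {
        int x = 1000;
        // 999 fits in 10 bits, so Integer.numberOfLeadingZeros(999) == 22 and
        // 32 - 22 == 10 == ceil(log2(1000)); shifting left by 2 yields 40, the
        // recursion depth after which the sort falls back to heap sort.
        int depth = (32 - Integer.numberOfLeadingZeros(x - 1)) << 2;
        System.out.println(depth); // prints 40
    }
}
```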
flink_QuickSort_m0_rdh | /**
* Fix the records into sorted order, swapping when the first record is greater than the second
* record.
*
* @param s
* paged sortable
* @param pN
* page number of first record
* @param pO
* page offset of first record
* @param rN
* page number of second record
* @param rO
* page offset of second record
*/
private static void m0(IndexedSortable s, int pN, int pO, int rN, int rO) {if (s.compare(pN, pO, rN,
rO) > 0) {
s.swap(pN, pO,
rN, rO);
}
} | 3.26 |
flink_HsSubpartitionConsumer_getSubpartitionBacklog_rdh | // -------------------------------
// Internal Methods
// -------------------------------
@SuppressWarnings("FieldAccessNotGuarded")
private int getSubpartitionBacklog() {
if ((memoryDataView == null) || (diskDataView == null)) {
return 0;
}
return Math.max(memoryDataView.getBacklog(), diskDataView.getBacklog());
} | 3.26 |
flink_HsSubpartitionConsumer_setDiskDataView_rdh | /**
* Set {@link HsDataView} for this subpartition, this method only called when {@link HsSubpartitionFileReader} is creating.
*/
void setDiskDataView(HsDataView diskDataView) {
synchronized(lock) {
checkState(this.diskDataView == null, "repeatedly set disk data view is not allowed.");
this.diskDataView = diskDataView;
}
} | 3.26 |
flink_HsSubpartitionConsumer_setMemoryDataView_rdh | /**
* Set {@link HsDataView} for this subpartition, this method only called when {@link HsSubpartitionFileReader} is creating.
*/
void setMemoryDataView(HsDataView memoryDataView) {
synchronized(lock) {
checkState(this.memoryDataView == null, "repeatedly set memory data view is not allowed.");
this.memoryDataView = memoryDataView;
}
} | 3.26 |
flink_BackgroundTask_finishedBackgroundTask_rdh | /**
* Creates a finished background task which can be used as the start of a background task chain.
*
* @param <V>
* type of the background task
* @return A finished background task
*/
static <V> BackgroundTask<V> finishedBackgroundTask() {
return new BackgroundTask<>();
} | 3.26 |
flink_BackgroundTask_initialBackgroundTask_rdh | /**
* Creates an initial background task. This means that this background task has no predecessor.
*
* @param task
* task to run
* @param executor
* executor to run the task
* @param <V>
* type of the result
* @return initial {@link BackgroundTask} representing the task to execute
*/
static <V> BackgroundTask<V> initialBackgroundTask(SupplierWithException<? extends V, ? extends Exception> task, Executor executor) {
return new BackgroundTask<>(FutureUtils.completedVoidFuture(), task, executor);
} | 3.26 |
flink_BackgroundTask_m1_rdh | /**
* Runs the given task after this background task has completed (normally or exceptionally).
*
* @param task
* task to run after this background task has completed
* @param executor
* executor to run the task
* @param <V>
* type of the result
* @return new {@link BackgroundTask} representing the new task to execute
*/
<V> BackgroundTask<V> m1(SupplierWithException<? extends V, ? extends Exception> task, Executor executor) {
return new BackgroundTask<>(terminationFuture, task, executor);
} | 3.26 |
flink_BackgroundTask_abort_rdh | /**
 * Aborts the execution of this background task. This method only has an effect if the
 * background task has not been started yet.
*/
void abort() {
isAborted = true;
} | 3.26 |
flink_JobStatusPollingUtils_getJobResult_rdh | /**
* Polls the {@link JobStatus} of a job periodically and when the job has reached a terminal
* state, it requests its {@link JobResult}.
*
* @param dispatcherGateway
* the {@link DispatcherGateway} to be used for requesting the details
* of the job.
* @param jobId
* the id of the job
* @param scheduledExecutor
* the executor to be used to periodically request the status of the
* job
* @param rpcTimeout
* the timeout of the rpc
* @param retryPeriod
* the interval between two consecutive job status requests
* @return a future that will contain the job's {@link JobResult}.
*/
static CompletableFuture<JobResult> getJobResult(final DispatcherGateway dispatcherGateway, final JobID jobId, final ScheduledExecutor scheduledExecutor, final Time rpcTimeout, final Time retryPeriod) {
    return pollJobResultAsync(
            () -> dispatcherGateway.requestJobStatus(jobId, rpcTimeout),
            () -> dispatcherGateway.requestJobResult(jobId, rpcTimeout),
            scheduledExecutor,
            retryPeriod.toMilliseconds());
} | 3.26 |
flink_DeclarationRewriter_extractLocalVariable_rdh | /**
*
* @return new name.
*/
private String extractLocalVariable(JavaParser.VariableDeclaratorIdContext decId, JavaParser.TypeTypeContext typeType, boolean forceNewName) {
String name = decId.getText();
if (forceNewName || allVarNames.contains(name)) {
// here we assume that the original code can be successfully compiled.
// that is to say, the scope of two variables with the same name will not
// overlap.
String newName = CodeSplitUtil.newName("local");
replaceMap.put(name, newName);
newLocalVariables.append(typeType.getText()).append(" ").append(newName).append(";\n");
return newName;
} else {
newLocalVariables.append(typeType.getText()).append(" ").append(name).append(";\n");
        allVarNames.add(name);
        return name;
}
} | 3.26 |
flink_GenericInMemoryCatalogStoreFactory_createCatalogStore_rdh | /**
* Catalog store factory for {@link GenericInMemoryCatalogStore}.
*/
@Internal
public class GenericInMemoryCatalogStoreFactory implements CatalogStoreFactory {
@Override
public CatalogStore createCatalogStore() {
return new GenericInMemoryCatalogStore();
} | 3.26 |
flink_CommonTestUtils_containsCause_rdh | /**
* Checks whether the given throwable contains the given cause as a cause. The cause is not
* checked on equality but on type equality.
*
* @param throwable
* Throwable to check for the cause
* @param cause
* Cause to look for
* @return True if the given Throwable contains the given cause (type equality); otherwise false
*/
public static boolean containsCause(Throwable throwable, Class<? extends Throwable> cause) {
Throwable current = throwable;
while (current != null) {
        if (cause.isAssignableFrom(current.getClass())) {
            return true;
}
current = current.getCause();
}
return false;
} | 3.26 |
flink_CommonTestUtils_createTempFile_rdh | /**
* Creates a temporary file that contains the given string. The file is written with the
* platform's default encoding.
*
* <p>The temp file is automatically deleted on JVM exit.
*
* @param contents
* The contents to be written to the file.
* @return The temp file URI.
*/
public static String createTempFile(String contents) throws IOException {
    File f = File.createTempFile("flink_test_", ".tmp");
f.deleteOnExit();
try (BufferedWriter out = new BufferedWriter(new FileWriter(f))) {
out.write(contents);
}
return f.toURI().toString();
} | 3.26 |
flink_CommonTestUtils_setEnv_rdh | // ------------------------------------------------------------------------
// Manipulation of environment
// ------------------------------------------------------------------------
public static void setEnv(Map<String, String> newenv) {
    m0(newenv, true);
} | 3.26 |
flink_CommonTestUtils_assertThrows_rdh | /**
* Checks whether an exception with a message occurs when running a piece of code.
*/
public static void assertThrows(String msg, Class<? extends Exception> expected, Callable<?> code) {
try {
Object result = code.call();
Assert.fail("Previous method call should have failed but it returned: " + result);
} catch (Exception e) {
assertThat(e, instanceOf(expected));
assertThat(e.getMessage(), containsString(msg));
}
} | 3.26 |
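A minimal, hypothetical use of the `assertThrows` helper above, assuming the usual `org.apache.flink.core.testutils` package; the message fragment and exception type are made up for the example.

```java
import org.apache.flink.core.testutils.CommonTestUtils;

public class AssertThrowsSketch {
    static void demo() {
        // Passes because the callable throws an IllegalArgumentException whose
        // message contains the expected fragment.
        CommonTestUtils.assertThrows(
                "Undefined for",
                IllegalArgumentException.class,
                () -> {
                    throw new IllegalArgumentException("Undefined for 0");
                });
    }
}
```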
flink_CommonTestUtils_waitUtil_rdh | /**
 * Waits until the given condition is met or the timeout elapses.
*
* @param condition
* the condition to wait for.
* @param timeout
* the maximum time to wait for the condition to become true.
* @param errorMsg
* the error message to include in the <code>TimeoutException</code> if the
* condition was not met before timeout.
* @throws TimeoutException
* if the condition is not met before timeout.
* @throws InterruptedException
* if the thread is interrupted.
*/
public static void waitUtil(Supplier<Boolean> condition, Duration timeout, String errorMsg) throws TimeoutException, InterruptedException {
waitUtil(condition, timeout, Duration.ofMillis(1), errorMsg);
} | 3.26 |
flink_CommonTestUtils_blockForeverNonInterruptibly_rdh | /**
* Permanently blocks the current thread. The thread cannot be woken up via {@link Thread#interrupt()}.
*/
public static void blockForeverNonInterruptibly() {
final Object lock = new Object();
// noinspection InfiniteLoopStatement
while (true) {
try {
// noinspection SynchronizationOnLocalVariableOrMethodParameter
synchronized(lock) {
lock.wait();
}
        } catch (InterruptedException ignored) {
}
}
} | 3.26 |
flink_CommonTestUtils_createCopySerializable_rdh | /**
* Creates a copy of an object via Java Serialization.
*
* @param original
* The original object.
* @return The copied object.
*/
public static <T extends Serializable> T createCopySerializable(T original) throws IOException {
if (original == null) {
throw new IllegalArgumentException();
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(baos);
oos.writeObject(original);
oos.close();
baos.close();
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
try (ObjectInputStream ois = new ObjectInputStream(bais)) {
@SuppressWarnings("unchecked")
T copy = ((T) (ois.readObject()));
return copy;
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
}
} | 3.26 |
flink_CommonTestUtils_m0_rdh | // This code is taken slightly modified from: http://stackoverflow.com/a/7201825/568695
// it changes the environment variables of this JVM. Use only for testing purposes!
@SuppressWarnings("unchecked")
public static void m0(Map<String, String> newenv, boolean clearExisting) {
try {
Map<String, String> env = System.getenv();
Class<?> clazz = env.getClass();
Field field = clazz.getDeclaredField("m");
field.setAccessible(true);
Map<String, String> map = ((Map<String, String>) (field.get(env)));
if (clearExisting) {
map.clear();
}
map.putAll(newenv);
// only for Windows
Class<?> processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment");
try {
Field theCaseInsensitiveEnvironmentField = processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment");
theCaseInsensitiveEnvironmentField.setAccessible(true);
Map<String, String> cienv = ((Map<String, String>) (theCaseInsensitiveEnvironmentField.get(null)));
if (clearExisting) {
cienv.clear();
}
cienv.putAll(newenv);
} catch (NoSuchFieldException ignored) {
}
    } catch (Exception e1) {
throw new RuntimeException(e1);
}
} | 3.26 |
flink_TableFactoryUtil_findAndCreateTableSource_rdh | /**
* Creates a {@link TableSource} from a {@link CatalogTable}.
*
* <p>It considers {@link Catalog#getFactory()} if provided.
*/
@SuppressWarnings("unchecked")
public static <T> TableSource<T> findAndCreateTableSource(@Nullable Catalog catalog, ObjectIdentifier objectIdentifier, CatalogTable catalogTable, ReadableConfig configuration, boolean isTemporary) {
TableSourceFactory.Context context = new TableSourceFactoryContextImpl(objectIdentifier, catalogTable, configuration, isTemporary);
Optional<TableFactory> factoryOptional = (catalog == null) ? Optional.empty() : catalog.getTableFactory();
if (factoryOptional.isPresent()) {
TableFactory v2 = factoryOptional.get();
if (v2 instanceof TableSourceFactory) {
return ((TableSourceFactory<T>) (v2)).createTableSource(context);
} else {
throw new ValidationException("Cannot query a sink-only table. " + "TableFactory provided by catalog must implement TableSourceFactory");
}
    } else {
return findAndCreateTableSource(context);
}
} | 3.26 |
flink_TableFactoryUtil_buildCatalogStoreFactoryContext_rdh | /**
* Build a {@link CatalogStoreFactory.Context} for opening the {@link CatalogStoreFactory}.
*
* <p>The configuration format should be as follows:
*
* <pre>{@code table.catalog-store.kind: {identifier}
* table.catalog-store.{identifier}.{param1}: xxx
* table.catalog-store.{identifier}.{param2}: xxx}</pre>
*/
public static Context buildCatalogStoreFactoryContext(Configuration configuration, ClassLoader classLoader) {
String identifier = configuration.getString(CommonCatalogOptions.TABLE_CATALOG_STORE_KIND);
String catalogStoreOptionPrefix = (CommonCatalogOptions.TABLE_CATALOG_STORE_OPTION_PREFIX + identifier) + ".";
Map<String, String> options = new DelegatingConfiguration(configuration, catalogStoreOptionPrefix).toMap();
CatalogStoreFactory.Context context = new FactoryUtil.DefaultCatalogStoreContext(options, configuration, classLoader);
return context;
} | 3.26 |
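A sketch of how the documented key format might be set programmatically; the `file` store kind and its `path` option are illustrative assumptions, not taken from the snippet above.

```java
import org.apache.flink.configuration.Configuration;

public class CatalogStoreConfigSketch {
    static Configuration catalogStoreConfig() {
        Configuration configuration = new Configuration();
        // table.catalog-store.kind: {identifier}
        configuration.setString("table.catalog-store.kind", "file");
        // table.catalog-store.{identifier}.{param}: value
        configuration.setString("table.catalog-store.file.path", "/tmp/catalog-store");
        return configuration;
    }
}
```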
flink_TableFactoryUtil_findAndCreateCatalogStoreFactory_rdh | /**
* Finds and creates a {@link CatalogStoreFactory} using the provided {@link Configuration} and
* user classloader.
*
* <p>The configuration format should be as follows:
*
* <pre>{@code table.catalog-store.kind: {identifier}
* table.catalog-store.{identifier}.{param1}: xxx
* table.catalog-store.{identifier}.{param2}: xxx}</pre>
*/
public static CatalogStoreFactory findAndCreateCatalogStoreFactory(Configuration configuration, ClassLoader classLoader) {
String identifier = configuration.getString(CommonCatalogOptions.TABLE_CATALOG_STORE_KIND);
CatalogStoreFactory catalogStoreFactory = FactoryUtil.discoverFactory(classLoader, CatalogStoreFactory.class, identifier);
return catalogStoreFactory;
} | 3.26 |
flink_TableFactoryUtil_m0_rdh | /**
* Creates a table sink for a {@link CatalogTable} using table factory associated with the
* catalog.
 */
public static Optional<TableSink> m0(Catalog catalog, TableSinkFactory.Context context) {
TableFactory tableFactory = catalog.getTableFactory().orElse(null);
if (tableFactory instanceof TableSinkFactory) {
return Optional.ofNullable(((TableSinkFactory) (tableFactory)).createTableSink(context));
}
return Optional.empty();
} | 3.26 |
flink_TableFactoryUtil_findCatalogModificationListenerList_rdh | /**
* Find and create modification listener list from configuration.
 */
public static List<CatalogModificationListener> findCatalogModificationListenerList(final ReadableConfig configuration, final ClassLoader classLoader) {
return configuration.getOptional(TableConfigOptions.TABLE_CATALOG_MODIFICATION_LISTENERS).orElse(Collections.emptyList()).stream().map(identifier -> FactoryUtil.discoverFactory(classLoader, CatalogModificationListenerFactory.class, identifier).createListener(new CatalogModificationListenerFactory.Context() {
@Override
public ReadableConfig getConfiguration() {
return configuration;
}
@Override
public ClassLoader getUserClassLoader() {
return classLoader;
}
})).collect(Collectors.toList());
} | 3.26 |
flink_TableFactoryUtil_isLegacyConnectorOptions_rdh | /**
* Checks whether the {@link CatalogTable} uses legacy connector sink options.
*/
public static boolean isLegacyConnectorOptions(@Nullable Catalog catalog, ReadableConfig configuration, boolean isStreamingMode, ObjectIdentifier objectIdentifier, CatalogTable catalogTable, boolean isTemporary) {
// normalize option keys
DescriptorProperties properties = new DescriptorProperties(true);
properties.putProperties(catalogTable.getOptions());
if (properties.containsKey(ConnectorDescriptorValidator.CONNECTOR_TYPE)) {
return true;
} else {
try {
// try to create legacy table source using the options,
// some legacy factories may use the 'type' key
TableFactoryUtil.findAndCreateTableSink(catalog, objectIdentifier, catalogTable, configuration, isStreamingMode, isTemporary);
// success, then we will use the legacy factories
return true;
} catch (Throwable ignore) {
// fail, then we will use new factories
return false;
}
}
} | 3.26 |
flink_TableFactoryUtil_findAndCreateTableSink_rdh | /**
* Creates a {@link TableSink} from a {@link CatalogTable}.
*
* <p>It considers {@link Catalog#getFactory()} if provided.
*/
@SuppressWarnings("unchecked")
public static <T> TableSink<T> findAndCreateTableSink(@Nullable Catalog catalog, ObjectIdentifier objectIdentifier, CatalogTable catalogTable, ReadableConfig configuration, boolean isStreamingMode, boolean isTemporary) {
TableSinkFactory.Context context = new TableSinkFactoryContextImpl(objectIdentifier,
catalogTable, configuration, !isStreamingMode, isTemporary);
if (catalog == null) {
return findAndCreateTableSink(context);
} else {
return createTableSinkForCatalogTable(catalog, context).orElseGet(() -> findAndCreateTableSink(context));
}
} | 3.26 |
flink_AvroFactory_extractAvroSpecificSchema_rdh | /**
* Extracts an Avro {@link Schema} from a {@link SpecificRecord}. We do this either via {@link SpecificData} or by instantiating a record and extracting the schema from the instance.
*/
public static <T> Schema extractAvroSpecificSchema(Class<T> type, SpecificData specificData) {
Optional<Schema> newSchemaOptional = m0(type);
return newSchemaOptional.orElseGet(() -> specificData.getSchema(type));
} | 3.26 |
flink_AvroFactory_m0_rdh | /**
* Extracts an Avro {@link Schema} from a {@link SpecificRecord}. We do this by creating an
* instance of the class using the zero-argument constructor and calling {@link SpecificRecord#getSchema()} on it.
*/
private static Optional<Schema> m0(Class<?> type) {
try {
SpecificRecord instance = ((SpecificRecord) (type.newInstance()));
return Optional.ofNullable(instance.getSchema());
} catch (InstantiationException | IllegalAccessException e) {
LOG.warn("Could not extract schema from Avro-generated SpecificRecord class {}: {}.", type, e); return Optional.empty();
}
} | 3.26 |
flink_AvroFactory_create_rdh | /**
* Creates Avro Writer and Reader for a specific type.
*
* <p>Given an input type, and possible the current schema, and a previously known schema (also
* known as writer schema) create will deduce the best way to initialize a reader and writer
* according to the following rules:
*
* <ul>
* <li>If type is an Avro generated class (an {@link SpecificRecord} then the reader would use
* the previousSchema for reading (if present) otherwise it would use the schema attached
* to the auto generated class.
* <li>If the type is a GenericRecord then the reader and the writer would be created with the
* supplied (mandatory) schema.
* <li>Otherwise, we use Avro's reflection based reader and writer that would deduce the
* schema via reflection. If the previous schema is also present (when restoring a
* serializer for example) then the reader would be created with both schemas.
* </ul>
*/
static <T> AvroFactory<T> create(Class<T> type, @Nullable Schema currentSchema, @Nullable Schema previousSchema) {
final ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (SpecificRecord.class.isAssignableFrom(type)) {
return fromSpecific(type, cl, Optional.ofNullable(previousSchema));
}
if (GenericRecord.class.isAssignableFrom(type)) {
return fromGeneric(cl, currentSchema);
}
return fromReflective(type, cl, Optional.ofNullable(previousSchema));
} | 3.26 |
flink_AvroFactory_getSpecificDataForClass_rdh | /**
* Creates a {@link SpecificData} object for a given class. Possibly uses the specific data from
* the generated class with logical conversions applied (avro >= 1.9.x).
*
* <p>Copied over from {@code SpecificData#getForClass(Class<T> c)} we do not use the method
* directly, because we want to be API backwards compatible with older Avro versions which did
* not have this method
*/
public static <T extends SpecificData> SpecificData getSpecificDataForClass(Class<T> type, ClassLoader cl) {
try {
Field specificDataField = type.getDeclaredField("MODEL$");
specificDataField.setAccessible(true);
        return ((SpecificData) (specificDataField.get(((Object) (null)))));
    } catch (IllegalAccessException e) {
throw new FlinkRuntimeException("Could not access the MODEL$ field of avro record", e);
} catch (NoSuchFieldException e) {
return new SpecificData(cl);
}
} | 3.26 |
flink_SelectByMinFunction_reduce_rdh | /**
* Reduce implementation, returns smaller tuple or value1 if both tuples are equal. Comparison
* highly depends on the order and amount of fields chosen as indices. All given fields (at
* construction time) are checked in the same order as defined (at construction time). If both
* tuples are equal in one index, the next index is compared. Or if no next index is available
* value1 is returned. The tuple which has a smaller value at one index will be returned.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public T reduce(T value1, T value2) throws Exception {
for (int position : fields) {
// Save position of compared key
// Get both values - both implement comparable
Comparable comparable1 = value1.getFieldNotNull(position);
        Comparable comparable2 = value2.getFieldNotNull(position);
        // Compare values
        int comp = comparable1.compareTo(comparable2);
        // If comp is smaller than 0 comparable 1 is smaller.
        // Return the smaller value.
        if (comp < 0) {
            return value1;
        } else if (comp > 0) {
            return value2;
        }
}
return value1;
} | 3.26 |
flink_LookupFunctionProvider_of_rdh | /**
* Helper function for creating a static provider.
*/
static LookupFunctionProvider of(LookupFunction lookupFunction) {
return () -> lookupFunction;
} | 3.26 |
flink_ReadOnlySlicedNetworkBuffer_getMemorySegment_rdh | /**
* Returns the underlying memory segment.
*
 * <p><strong>BEWARE:</strong> Although we cannot set the memory segment read-only, it should be
 * handled as if it were!
*
* @return the memory segment backing this buffer
*/
@Override
public MemorySegment getMemorySegment() {
return getBuffer().getMemorySegment();
} | 3.26 |
flink_ArchivedExecution_getAttemptId_rdh | // --------------------------------------------------------------------------------------------
// Accessors
// --------------------------------------------------------------------------------------------
@Override
public ExecutionAttemptID getAttemptId() {
return attemptId;
} | 3.26 |
flink_StopWithSavepointTerminationHandlerImpl_terminateSuccessfully_rdh | /**
* Handles the successful termination of the {@code StopWithSavepointTerminationHandler}.
*
* @param completedSavepoint
* the completed savepoint
*/
private void terminateSuccessfully(CompletedCheckpoint completedSavepoint) {
result.complete(completedSavepoint.getExternalPointer());
} | 3.26 |
flink_StopWithSavepointTerminationHandlerImpl_terminateExceptionally_rdh | /**
 * Handles the termination of the {@code StopWithSavepointTerminationHandler} exceptionally,
 * without triggering a global job fail-over. It restarts the checkpoint scheduling.
*
* @param throwable
* the error that caused the exceptional termination.
*/
private void terminateExceptionally(Throwable throwable) {
checkpointScheduling.startCheckpointScheduler();
result.completeExceptionally(throwable);
} | 3.26 |
flink_StopWithSavepointTerminationHandlerImpl_terminateExceptionallyWithGlobalFailover_rdh | /**
* Handles the termination of the {@code StopWithSavepointTerminationHandler} exceptionally
* after triggering a global job fail-over.
*
* @param unfinishedExecutionStates
* the unfinished states that caused the failure.
* @param savepointPath
* the path to the successfully created savepoint.
*/
private void terminateExceptionallyWithGlobalFailover(Iterable<ExecutionState> unfinishedExecutionStates, String savepointPath) {
    StopWithSavepointStoppingException inconsistentFinalStateException = new StopWithSavepointStoppingException(savepointPath, jobId);
    log.warn("Inconsistent execution state after stopping with savepoint. At least one" + " execution is still in one of the following states: {}.", StringUtils.join(unfinishedExecutionStates, ", "), inconsistentFinalStateException);
scheduler.handleGlobalFailure(inconsistentFinalStateException);
result.completeExceptionally(inconsistentFinalStateException);
} | 3.26 |
flink_DeclarativeSlotManager_checkInit_rdh | // ---------------------------------------------------------------------------------------------
// Internal utility methods
// ---------------------------------------------------------------------------------------------
private void checkInit() {
Preconditions.checkState(started, "The slot manager has not been started.");
} | 3.26 |
flink_DeclarativeSlotManager_start_rdh | // ---------------------------------------------------------------------------------------------
// Component lifecycle methods
// ---------------------------------------------------------------------------------------------
/**
* Starts the slot manager with the given leader id and resource manager actions.
*
* @param newResourceManagerId
* to use for communication with the task managers
* @param newMainThreadExecutor
* to use to run code in the ResourceManager's main thread
* @param newResourceAllocator
* to use for resource (de-)allocations
* @param newBlockedTaskManagerChecker
* to query whether a task manager is blocked
*/
@Override
public void start(ResourceManagerId newResourceManagerId, Executor newMainThreadExecutor, ResourceAllocator newResourceAllocator, ResourceEventListener newResourceEventListener, BlockedTaskManagerChecker newBlockedTaskManagerChecker) {
LOG.debug("Starting the slot manager.");
this.resourceManagerId = Preconditions.checkNotNull(newResourceManagerId);
mainThreadExecutor = Preconditions.checkNotNull(newMainThreadExecutor);
    resourceEventListener = Preconditions.checkNotNull(newResourceEventListener);
    taskExecutorManager = taskExecutorManagerFactory.apply(newMainThreadExecutor, newResourceAllocator);
blockedTaskManagerChecker = Preconditions.checkNotNull(newBlockedTaskManagerChecker);
started = true;
registerSlotManagerMetrics();
} | 3.26 |
flink_DeclarativeSlotManager_checkResourceRequirementsWithDelay_rdh | // ---------------------------------------------------------------------------------------------
// Requirement matching
// ---------------------------------------------------------------------------------------------
/**
* Depending on the implementation of {@link ResourceAllocationStrategy}, checking resource
* requirements and potentially making a re-allocation can be heavy. In order to cover more
* changes with each check, thus reduce the frequency of unnecessary re-allocations, the checks
* are performed with a slight delay.
*/
private void checkResourceRequirementsWithDelay() {
if (requirementsCheckDelay.toMillis() <= 0) {
checkResourceRequirements();
    } else if ((requirementsCheckFuture == null) || requirementsCheckFuture.isDone()) {
requirementsCheckFuture = new CompletableFuture<>();
scheduledExecutor.schedule(() -> mainThreadExecutor.execute(() -> {
checkResourceRequirements();
Preconditions.checkNotNull(requirementsCheckFuture).complete(null);
}), requirementsCheckDelay.toMillis(), TimeUnit.MILLISECONDS);
}
} | 3.26 |
flink_DeclarativeSlotManager_reportSlotStatus_rdh | /**
* Reports the current slot allocations for a task manager identified by the given instance id.
*
* @param instanceId
* identifying the task manager for which to report the slot status
* @param slotReport
* containing the status for all of its slots
* @return true if the slot status has been updated successfully, otherwise false
*/
@Override
public boolean reportSlotStatus(InstanceID instanceId, SlotReport slotReport) {
checkInit();
LOG.debug("Received slot report from instance {}: {}.", instanceId, slotReport);
if (taskExecutorManager.isTaskManagerRegistered(instanceId)) {
if (slotTracker.notifySlotStatus(slotReport)) {
checkResourceRequirementsWithDelay();
}
return true;
} else {
LOG.debug("Received slot report for unknown task manager with instance id {}. Ignoring this report.", instanceId);
return false;
}
} | 3.26 |
flink_DeclarativeSlotManager_close_rdh | /**
* Closes the slot manager.
*
* @throws Exception
* if the close operation fails
*/
@Override
public void close() throws Exception {
LOG.info("Closing the slot manager.");
suspend();
} | 3.26 |
flink_DeclarativeSlotManager_freeSlot_rdh | /**
* Free the given slot from the given allocation. If the slot is still allocated by the given
* allocation id, then the slot will be marked as free and will be subject to new slot requests.
*
* @param slotId
* identifying the slot to free
* @param allocationId
* with which the slot is presumably allocated
*/
@Override
public void freeSlot(SlotID slotId, AllocationID allocationId) {
checkInit();
LOG.debug("Freeing slot {}.", slotId);
slotTracker.notifyFree(slotId);
checkResourceRequirementsWithDelay();
} | 3.26 |
flink_DeclarativeSlotManager_clearResourceRequirements_rdh | // ---------------------------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------------------------
@Override
public void clearResourceRequirements(JobID jobId) {
checkInit();
maybeReclaimInactiveSlots(jobId);
jobMasterTargetAddresses.remove(jobId);
resourceTracker.notifyResourceRequirements(jobId, Collections.emptyList());
} | 3.26 |
flink_DeclarativeSlotManager_getNumberRegisteredSlots_rdh | // ---------------------------------------------------------------------------------------------
// Legacy APIs
// ---------------------------------------------------------------------------------------------
@Override
public int getNumberRegisteredSlots() {
return taskExecutorManager.getNumberRegisteredSlots();
} | 3.26 |
flink_DeclarativeSlotManager_checkResourceRequirements_rdh | /**
* Matches resource requirements against available resources. In a first round requirements are
* matched against free slot, and any match results in a slot allocation. The remaining
* unfulfilled requirements are matched against pending slots, allocating more workers if no
* matching pending slot could be found. If the requirements for a job could not be fulfilled
* then a notification is sent to the job master informing it as such.
*
 * <p>Performance notes: At its core this method loops, for each job, over all free/pending
 * slots for each required slot, trying to find a matching slot. One should generally go in with
 * the assumption that this runs in numberOfJobsRequiringResources * numberOfRequiredSlots *
 * numberOfFreeOrPendingSlots. This is especially important when dealing with pending slots, as
 * matches between requirements and pending slots are not persisted and are recomputed on each
 * call. This may require further refinements in the future; e.g., persisting the matches between
 * requirements and pending slots, or not matching against pending slots at all.
*
* <p>When dealing with unspecific resource profiles (i.e., {@link ResourceProfile#ANY}/{@link ResourceProfile#UNKNOWN}), then the number of free/pending slots is not relevant because we
* only need exactly 1 comparison to determine whether a slot can be fulfilled or not, since
* they are all the same anyway.
*
* <p>When dealing with specific resource profiles things can be a lot worse, with the classical
* cases where either no matches are found, or only at the very end of the iteration. In the
* absolute worst case, with J jobs, requiring R slots each with a unique resource profile such
* each pair of these profiles is not matching, and S free/pending slots that don't fulfill any
* requirement, then this method does a total of J*R*S resource profile comparisons.
*
* <p>DO NOT call this method directly. Use {@link #checkResourceRequirementsWithDelay()}
* instead.
*/
private void checkResourceRequirements() {
final Map<JobID, Collection<ResourceRequirement>> missingResources = resourceTracker.getMissingResources();
if (missingResources.isEmpty()) {
taskExecutorManager.clearPendingTaskManagerSlots();
return;
}
final Map<JobID, ResourceCounter> unfulfilledRequirements = new LinkedHashMap<>();
for (Map.Entry<JobID, Collection<ResourceRequirement>> resourceRequirements : missingResources.entrySet()) {
    final JobID jobId = resourceRequirements.getKey();
    final ResourceCounter unfulfilledJobRequirements = tryAllocateSlotsForJob(jobId, resourceRequirements.getValue());
    if (!unfulfilledJobRequirements.isEmpty()) {
        unfulfilledRequirements.put(jobId, unfulfilledJobRequirements);
    }
}
if (unfulfilledRequirements.isEmpty()) {
    return;
}
ResourceCounter freePendingSlots = ResourceCounter.withResources(taskExecutorManager.getPendingTaskManagerSlots().stream().collect(Collectors.groupingBy(PendingTaskManagerSlot::getResourceProfile, Collectors.summingInt(x -> 1))));
for (Map.Entry<JobID, ResourceCounter> unfulfilledRequirement : unfulfilledRequirements.entrySet()) {
freePendingSlots = tryFulfillRequirementsWithPendingSlots(unfulfilledRequirement.getKey(), unfulfilledRequirement.getValue().getResourcesWithCount(), freePendingSlots);
}
if (!freePendingSlots.isEmpty()) {
taskExecutorManager.removePendingTaskManagerSlots(freePendingSlots);
}
} | 3.26 |
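To make the performance note above concrete, here is a minimal, self-contained sketch (not part of DeclarativeSlotManager) that simply evaluates the worst-case J*R*S comparison count described in the javadoc; the figures for jobs, required slots, and free/pending slots are made up for illustration.

```java
public class RequirementMatchingCostSketch {
    public static void main(String[] args) {
        // Hypothetical figures, chosen only to illustrate the J * R * S bound.
        long jobsRequiringResources = 10; // J
        long requiredSlotsPerJob = 20;    // R
        long freeOrPendingSlots = 50;     // S

        // Worst case: every requirement is compared against every free/pending slot.
        long worstCaseComparisons =
                jobsRequiringResources * requiredSlotsPerJob * freeOrPendingSlots;
        System.out.println("Worst-case resource profile comparisons: " + worstCaseComparisons); // 10000
    }
}
```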
flink_DeclarativeSlotManager_suspend_rdh | /**
* Suspends the component. This clears the internal state of the slot manager.
*/
@Override
public void suspend() {
if (!started) {
    return;
}
LOG.info("Suspending the slot manager.");
slotManagerMetricGroup.close();
resourceTracker.clear();
if (taskExecutorManager != null) {
taskExecutorManager.close();
for (InstanceID registeredTaskManager : taskExecutorManager.getTaskExecutors()) {
unregisterTaskManager(registeredTaskManager, new SlotManagerException("The slot manager is being suspended."));
}
}
taskExecutorManager = null;
resourceManagerId = null;
resourceEventListener = null;
blockedTaskManagerChecker = null;
started = false;
} | 3.26 |
flink_DeclarativeSlotManager_internalTryAllocateSlots_rdh | /**
* Tries to allocate slots for the given requirement. If there are not enough slots available,
* the resource manager is informed to allocate more resources.
*
* @param jobId
* job to allocate slots for
* @param targetAddress
* address of the jobmaster
* @param resourceRequirement
* required slots
* @return the number of missing slots
*/
private int internalTryAllocateSlots(JobID jobId, String targetAddress, ResourceRequirement resourceRequirement) {
final ResourceProfile requiredResource = resourceRequirement.getResourceProfile();
// Use LinkedHashMap to retain the original order
final Map<SlotID, TaskManagerSlotInformation> availableSlots = new LinkedHashMap<>();
for (TaskManagerSlotInformation freeSlot : slotTracker.getFreeSlots()) {
    if (!isBlockedTaskManager(freeSlot.getTaskManagerConnection().getResourceID())) {
        availableSlots.put(freeSlot.getSlotId(), freeSlot);
    }
}
int numUnfulfilled = 0;
for (int i = 0; i < resourceRequirement.getNumberOfRequiredSlots(); i++) {
    final Optional<TaskManagerSlotInformation> reservedSlot = slotMatchingStrategy.findMatchingSlot(requiredResource, availableSlots.values(), this::getNumberRegisteredSlotsOf);
    if (reservedSlot.isPresent()) {
        allocateSlot(reservedSlot.get(), jobId, targetAddress, requiredResource);
        availableSlots.remove(reservedSlot.get().getSlotId());
    } else {
        // exit loop early; we won't find a matching slot for this requirement
        int numRemaining = resourceRequirement.getNumberOfRequiredSlots() - i;
        numUnfulfilled += numRemaining;
        break;
    }
}
return numUnfulfilled;
} | 3.26 |
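The loop above follows a common pattern: reserve one matching slot per required slot and, as soon as no match is possible, count everything left over as unfulfilled. The sketch below shows that pattern in isolation, using plain placeholder types rather than Flink's slot classes.

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Optional;

// Simplified stand-in for the matching loop above: take one "slot" per required unit
// from an available pool and count how many requirements stay unfulfilled.
public class SlotMatchingSketch {

    static int tryAllocate(int numberOfRequiredSlots, Deque<String> availableSlots) {
        int numUnfulfilled = 0;
        for (int i = 0; i < numberOfRequiredSlots; i++) {
            Optional<String> reserved = Optional.ofNullable(availableSlots.poll());
            if (reserved.isPresent()) {
                System.out.println("Allocated " + reserved.get());
            } else {
                // No slot left that could match: all remaining requirements are unfulfilled.
                numUnfulfilled += numberOfRequiredSlots - i;
                break;
            }
        }
        return numUnfulfilled;
    }

    public static void main(String[] args) {
        Deque<String> pool = new ArrayDeque<>();
        pool.add("slot-1");
        pool.add("slot-2");
        System.out.println("Unfulfilled: " + tryAllocate(3, pool)); // prints 1
    }
}
```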
flink_DeclarativeSlotManager_allocateSlot_rdh | /**
* Allocates the given slot. This entails sending a registration message to the task manager and
* handling failures.
*
* @param taskManagerSlot
* slot to allocate
* @param jobId
* job for which the slot should be allocated for
* @param targetAddress
* address of the job master
* @param resourceProfile
* resource profile for the requirement for which the slot is used
*/
private void allocateSlot(TaskManagerSlotInformation taskManagerSlot, JobID jobId, String targetAddress, ResourceProfile resourceProfile) {
final SlotID slotId = taskManagerSlot.getSlotId();
LOG.debug("Starting allocation of slot {} for job {} with resource profile {}.", slotId, jobId, resourceProfile);
final InstanceID instanceId = taskManagerSlot.getInstanceId();
if (!taskExecutorManager.isTaskManagerRegistered(instanceId)) {
throw new IllegalStateException(("Could not find a registered task manager for instance id " + instanceId) + '.');
}
final TaskExecutorConnection taskExecutorConnection = taskManagerSlot.getTaskManagerConnection();
final TaskExecutorGateway gateway = taskExecutorConnection.getTaskExecutorGateway();
final AllocationID allocationId = new AllocationID();
slotTracker.notifyAllocationStart(slotId, jobId);
taskExecutorManager.markUsed(instanceId);
pendingSlotAllocations.put(slotId, allocationId);
// RPC call to the task manager
CompletableFuture<Acknowledge> requestFuture = gateway.requestSlot(slotId, jobId, allocationId, resourceProfile, targetAddress, resourceManagerId, taskManagerRequestTimeout);
CompletableFuture<Void> slotAllocationResponseProcessingFuture = requestFuture.handleAsync((Acknowledge acknowledge, Throwable throwable) -> {
final AllocationID currentAllocationForSlot = pendingSlotAllocations.get(slotId);
if ((currentAllocationForSlot == null) || (!currentAllocationForSlot.equals(allocationId))) {
LOG.debug("Ignoring slot allocation update from task executor {} for slot {} and job {}, because the allocation was already completed or cancelled.", instanceId, slotId, jobId);
return null;
}
if (acknowledge != null) {
LOG.trace("Completed allocation of slot {} for job {}.", slotId, jobId);
slotTracker.notifyAllocationComplete(slotId, jobId);
} else {
if (throwable instanceof SlotOccupiedException) {
SlotOccupiedException exception = ((SlotOccupiedException) (throwable));
LOG.debug("Tried allocating slot {} for job {}, but it was already allocated for job {}.", slotId, jobId, exception.getJobId());
// report as a slot status to force the state transition
// this could be a problem if we ever assume that the task
// executor always reports about all slots
slotTracker.notifySlotStatus(Collections.singleton(new SlotStatus(slotId, taskManagerSlot.getResourceProfile(), exception.getJobId(), exception.getAllocationId())));
} else {
LOG.warn("Slot allocation for slot {} for job {} failed.", slotId, jobId,
throwable);
slotTracker.notifyFree(slotId);
}
checkResourceRequirementsWithDelay();
}
return null;
}, mainThreadExecutor);
FutureUtils.assertNoException(slotAllocationResponseProcessingFuture);
} | 3.26 |
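The handleAsync callback above guards against stale responses by checking that the slot's pending allocation ID is still the one issued for this request. A generic version of that guard, with made-up names and a simple token map instead of Flink's types, could look like this:

```java
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;

// Generic version of the "ignore stale responses" guard used above: remember the token
// of the in-flight request per key and drop any response whose token no longer matches.
public class StaleResponseGuardSketch {

    private final Map<String, UUID> pendingRequests = new ConcurrentHashMap<>();

    public CompletableFuture<Void> request(String slotKey, CompletableFuture<String> rpcFuture, Executor mainThread) {
        UUID token = UUID.randomUUID();
        pendingRequests.put(slotKey, token);
        return rpcFuture.handleAsync((response, failure) -> {
            if (!token.equals(pendingRequests.get(slotKey))) {
                // A newer request replaced this one (or it was cancelled): ignore the outcome.
                return null;
            }
            if (failure == null) {
                System.out.println("Request for " + slotKey + " completed: " + response);
            } else {
                System.out.println("Request for " + slotKey + " failed: " + failure.getMessage());
            }
            return null;
        }, mainThread);
    }
}
```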
flink_FrontMetricGroup_getLogicalScope_rdh | /**
*
* @deprecated work against the LogicalScopeProvider interface instead.
*/
@Override
@Deprecated
public String getLogicalScope(CharacterFilter filter, char delimiter) {
return parentMetricGroup.getLogicalScope(getDelimiterFilter(this.settings, filter), delimiter, this.settings.getReporterIndex());
} | 3.26 |
flink_SchemaTestUtils_open_rdh | /**
* Opens the given schema with a mock initialization context.
*
* @param schema
* to open
* @throws RuntimeException
* if the schema throws an exception
*/
public static void open(DeserializationSchema<?> schema) {
try {
schema.open(new DummyInitializationContext());
} catch (Exception e) {
    throw new RuntimeException(e);
}
} | 3.26 |
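As a usage sketch, a test could open a schema such as Flink's SimpleStringSchema before deserializing. The surrounding class and the assumption that SchemaTestUtils is importable from the test utilities are illustrative only.

```java
import java.nio.charset.StandardCharsets;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;

// Assumes SchemaTestUtils (shown above) is on the classpath of this test.
class SchemaTestUtilsUsageSketch {
    static String deserializeOnce() throws Exception {
        DeserializationSchema<String> schema = new SimpleStringSchema();
        // Initialize the schema with the dummy context before using it.
        SchemaTestUtils.open(schema);
        return schema.deserialize("hello".getBytes(StandardCharsets.UTF_8));
    }
}
```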
flink_TableSinkFactory_createTableSink_rdh | /**
* Creates and configures a {@link TableSink} based on the given {@link Context}.
*
* @param context
* context of this table sink.
* @return the configured table sink.
*/
default TableSink<T> createTableSink(Context context) {
    return createTableSink(context.getObjectIdentifier().toObjectPath(), context.getTable());
} | 3.26 |
flink_DataStreamSink_name_rdh | /**
* Sets the name of this sink. This name is used by the visualization and logging during
* runtime.
*
* @return The named sink.
*/
public DataStreamSink<T> name(String name) {
    transformation.setName(name);
    return this;
} | 3.26 |
flink_DataStreamSink_disableChaining_rdh | /**
* Turns off chaining for this operator so thread co-location will not be used as an
* optimization.
*
* <p>Chaining can be turned off for the whole job by {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#disableOperatorChaining()}
* however it is not advised for performance considerations.
*
* @return The sink with chaining disabled
*/
@PublicEvolving
public DataStreamSink<T> disableChaining() {
this.transformation.setChainingStrategy(ChainingStrategy.NEVER);
return this;
}
/**
* Sets the slot sharing group of this operation. Parallel instances of operations that are in
* the same slot sharing group will be co-located in the same TaskManager slot, if possible.
*
* <p>Operations inherit the slot sharing group of input operations if all input operations are
* in the same slot sharing group and no slot sharing group was explicitly specified.
*
* <p>Initially an operation is in the default slot sharing group. An operation can be put into
* the default group explicitly by setting the slot sharing group to {@code "default"} | 3.26 |
flink_DataStreamSink_setDescription_rdh | /**
* Sets the description for this sink.
*
* <p>Description is used in json plan and web ui, but not in logging and metrics where only
* name is available. Description is expected to provide detailed information about the sink,
* while name is expected to be more simple, providing summary information only, so that we can
* have more user-friendly logging messages and metric tags without losing useful messages for
* debugging.
*
* @param description
* The description for this sink.
* @return The sink with new description.
*/
@PublicEvolving
public DataStreamSink<T> setDescription(String description) {
transformation.setDescription(description);
return this;
} | 3.26 |
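A hedged usage sketch combining name() and setDescription(): the PrintSinkFunction and the wording of the description are stand-ins, not a recommended production setup.

```java
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;

class SinkNamingSketch {
    static void configure() {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStreamSink<String> sink = env.fromElements("a", "b", "c").addSink(new PrintSinkFunction<>());
        // Short name for logging/metrics, longer description for the JSON plan and web UI.
        sink.name("print-sink")
            .setDescription("Prints every incoming element to stdout; used here only for illustration");
    }
}
```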
flink_DataStreamSink_setParallelism_rdh | /**
* Sets the parallelism for this sink. The degree must be higher than zero.
*
* @param parallelism
* The parallelism for this sink.
* @return The sink with set parallelism.
*/
public DataStreamSink<T> setParallelism(int parallelism) {
transformation.setParallelism(parallelism);
return this;
} | 3.26 |
flink_DataStreamSink_uid_rdh | /**
* Sets an ID for this operator.
*
* <p>The specified ID is used to assign the same operator ID across job submissions (for
* example when starting a job from a savepoint).
*
* <p><strong>Important</strong>: this ID needs to be unique per transformation and job.
* Otherwise, job submission will fail.
*
* @param uid
* The unique user-specified ID of this transformation.
* @return The operator with the specified ID.
*/
@PublicEvolving
public DataStreamSink<T> uid(String uid) {
transformation.setUid(uid);
return this;
} | 3.26 |
flink_DataStreamSink_setUidHash_rdh | /**
* Sets a user-provided hash for this operator. This will be used AS IS to create the
* JobVertexID.
*
* <p>The user provided hash is an alternative to the generated hashes, that is considered when
* identifying an operator through the default hash mechanics fails (e.g. because of changes
* between Flink versions).
*
* <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting.
* The provided hash needs to be unique per transformation and job. Otherwise, job submission
* will fail. Furthermore, you cannot assign a user-specified hash to intermediate nodes in an
* operator chain, and trying to do so will make your job fail.
*
* <p>A use case for this is in migration between Flink versions or changing the jobs in a way
* that changes the automatically generated hashes. In this case, providing the previous hashes
* directly through this method (e.g. obtained from old logs) can help to reestablish a lost
* mapping from states to their target operator.
*
* @param uidHash
* The user provided hash for this operator. This will become the JobVertexID,
* which is shown in the logs and web ui.
* @return The operator with the user provided hash.
*/
@PublicEvolving
public DataStreamSink<T> setUidHash(String uidHash) {
if (!(transformation instanceof LegacySinkTransformation)) {
throw new UnsupportedOperationException("Cannot set a custom UID hash on a non-legacy sink");
}
transformation.setUidHash(uidHash);
return this;
} | 3.26 |
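A sketch of how uid() and setUidHash() might be used during a migration; per the snippet above, setUidHash() only works for legacy sinks created via addSink(), and the 32-character hex hash is a made-up placeholder, not a value from any real job.

```java
import org.apache.flink.streaming.api.datastream.DataStreamSink;

class SinkUidSketch {
    // uid() is the normal way to get stable state assignment across savepoints.
    // setUidHash() only applies when re-attaching to a hash generated by an older job.
    static <T> void pinIds(DataStreamSink<T> legacySink) {
        legacySink.uid("events-sink");
        // Made-up 32-character hex placeholder, e.g. taken from an old job's logs.
        legacySink.setUidHash("82e9f2a1c3d44b5e9f001122aabbccdd");
    }
}
```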
flink_DataStreamSink_setMaxParallelism_rdh | /**
* Sets the max parallelism for this sink.
*
* <p>The maximum parallelism specifies the upper bound for dynamic scaling. The degree must be
* higher than zero and less than the upper bound.
*
* @param maxParallelism
* The max parallelism for this sink.
* @return The sink with set parallelism.
*/
public DataStreamSink<T> setMaxParallelism(int maxParallelism) {
OperatorValidationUtils.validateMaxParallelism(maxParallelism, true);
transformation.setMaxParallelism(maxParallelism);
return this;
} | 3.26 |
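A small sketch combining setParallelism() and setMaxParallelism(); the concrete values are arbitrary, the only constraint being that the parallelism must not exceed the max parallelism.

```java
import org.apache.flink.streaming.api.datastream.DataStreamSink;

class SinkParallelismSketch {
    // Run with a modest parallelism now while reserving headroom for rescaling later.
    static <T> void scale(DataStreamSink<T> sink) {
        sink.setParallelism(4);
        sink.setMaxParallelism(128);
    }
}
```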
flink_DataStreamSink_setResources_rdh | /**
* Sets the resources for this sink, the minimum and preferred resources are the same by
* default.
*
* @param resources
* The resources for this sink.
* @return The sink with set minimum and preferred resources.
*/
private DataStreamSink<T> setResources(ResourceSpec resources) {
transformation.setResources(resources, resources);
return this;
} | 3.26 |
flink_DataStreamSink_m1_rdh | /**
* Returns the transformation that contains the actual sink operator of this sink.
*/
@Internal
public Transformation<T> m1() {
return transformation;
} | 3.26 |
flink_DataStreamSink_slotSharingGroup_rdh | /**
* Sets the slot sharing group of this operation. Parallel instances of operations that are in
* the same slot sharing group will be co-located in the same TaskManager slot, if possible.
*
* <p>Operations inherit the slot sharing group of input operations if all input operations are
* in the same slot sharing group and no slot sharing group was explicitly specified.
*
* <p>Initially an operation is in the default slot sharing group. An operation can be put into
* the default group explicitly by setting the slot sharing group with name {@code "default"}.
*
* @param slotSharingGroup
* which contains name and its resource spec.
*/
@PublicEvolving
public DataStreamSink<T> slotSharingGroup(SlotSharingGroup slotSharingGroup) {
transformation.setSlotSharingGroup(slotSharingGroup);
return this;
} | 3.26 |
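A usage sketch, assuming the SlotSharingGroup builder API (newBuilder / setCpuCores / setTaskHeapMemoryMB) from fine-grained resource management; treat the exact builder methods and resource figures as assumptions rather than a verified recipe.

```java
import org.apache.flink.api.common.operators.SlotSharingGroup;
import org.apache.flink.streaming.api.datastream.DataStreamSink;

class SlotSharingGroupSketch {
    // Place the sink in a dedicated slot sharing group with its own (illustrative) resource spec.
    static <T> DataStreamSink<T> isolateSink(DataStreamSink<T> sink) {
        SlotSharingGroup sinkGroup = SlotSharingGroup.newBuilder("sink-group")
                .setCpuCores(1.0)
                .setTaskHeapMemoryMB(256)
                .build();
        return sink.slotSharingGroup(sinkGroup);
    }
}
```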
flink_VertexFlameGraph_waiting_rdh | // Indicates that it is waiting for the first samples to create the flame graph
public static VertexFlameGraph waiting() {
    return new VertexFlameGraph(-3, null);
} | 3.26 |
flink_PackagingTestUtils_assertJarContainsServiceEntry_rdh | /**
* Verifies that the given jar contains a service entry file for the given service.
*
* <p>Caution: This only checks that the file exists; the content is not verified.
*/
public static void assertJarContainsServiceEntry(Path jarPath, Class<?> service) throws Exception {
final URI jar = jarPath.toUri();
try (final FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:file", jar.getHost(), jar.getPath(), jar.getFragment()), Collections.emptyMap())) {
assertThat(fileSystem.getPath("META-INF", "services", service.getName())).exists();
}
} | 3.26 |
flink_PackagingTestUtils_assertJarContainsOnlyFilesMatching_rdh | /**
* Verifies that all files in the jar match one of the provided allow strings.
*
* <p>An allow item ending with a {@code "/"} is treated as an allowed parent directory.
* Otherwise, it is treated as an allowed file.
*
* <p>For example, given a jar containing a file {@code META-INF/NOTICES}:
*
* <p>These would pass:
*
* <ul>
* <li>{@code "META-INF/"}
* <li>{@code "META-INF/NOTICES"}
* </ul>
*
* <p>These would fail:
*
* <ul>
* <li>{@code "META-INF"}
* <li>{@code "META-INF/NOTICE"}
* <li>{@code "META-INF/NOTICES/"}
* </ul>
*/
public static void assertJarContainsOnlyFilesMatching(Path jarPath, Collection<String> allowedPaths) throws Exception {
final URI jar = jarPath.toUri();
try (final FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:file", jar.getHost(), jar.getPath(), jar.getFragment()), Collections.emptyMap())) {
try (Stream<Path> walk = Files.walk(fileSystem.getPath("/"))) {
walk.filter(file -> !Files.isDirectory(file))
        .map(file -> file.toAbsolutePath().toString())
        .map(file -> file.startsWith("/") ? file.substring(1) : file)
        .forEach(file -> assertThat(allowedPaths)
                .as("Bad file in JAR: %s", file)
                .anySatisfy(allowedPath -> {
                    if (allowedPath.endsWith("/")) {
                        assertThat(file).startsWith(allowedPath);
                    } else {
                        assertThat(file).isEqualTo(allowedPath);
                    }
                }));
}
}
} | 3.26 |
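A hypothetical packaging test tying both utilities together; the jar path, allow-list entries, and the choice of Factory as the service interface are examples only.

```java
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;

import org.apache.flink.table.factories.Factory;

// PackagingTestUtils (shown above) is assumed to be importable from the Flink test utilities.
class ConnectorPackagingTestSketch {
    static void verify() throws Exception {
        Path jar = Paths.get("target/my-connector.jar"); // made-up jar path
        PackagingTestUtils.assertJarContainsOnlyFilesMatching(
                jar,
                Arrays.asList(
                        "META-INF/",                     // everything under META-INF is allowed
                        "org/apache/flink/connector/")); // plus the connector's own classes
        PackagingTestUtils.assertJarContainsServiceEntry(jar, Factory.class);
    }
}
```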