name | code_snippet | score |
---|---|---|
flink_Transition_eventType_rdh | // ------------------------------------------------------------------------
public EventType eventType() {
return eventType;
} | 3.26 |
flink_AbstractMapTypeInfo_getValueTypeInfo_rdh | /**
* Returns the type information for the values in the map.
*
* @return The type information for the values in the map.
*/
public TypeInformation<V> getValueTypeInfo() {
return valueTypeInfo;
} | 3.26 |
flink_AbstractMapTypeInfo_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
AbstractMapTypeInfo<?, ?, ?> that =
((AbstractMapTypeInfo<?, ?, ?>) (o));
return keyTypeInfo.equals(that.keyTypeInfo) && valueTypeInfo.equals(that.valueTypeInfo);
} | 3.26 |
flink_AbstractMapTypeInfo_isBasicType_rdh | // ------------------------------------------------------------------------
@Override
public boolean isBasicType() {
return false;
} | 3.26 |
flink_AbstractMapTypeInfo_getKeyTypeInfo_rdh | // ------------------------------------------------------------------------
/**
* Returns the type information for the keys in the map.
*
* @return The type information for the keys in the map.
*/
public TypeInformation<K> getKeyTypeInfo() {
return keyTypeInfo;
} | 3.26 |
flink_DeclineCheckpoint_getSerializedCheckpointException_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the reason why the checkpoint was declined.
*
* @return The reason why the checkpoint was declined
*/
@Nonnull
public SerializedCheckpointException getSerializedCheckpointException() {
return serializedCheckpointException;
} | 3.26 |
flink_DeclineCheckpoint_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return String.format("Declined Checkpoint %d for (%s/%s): %s", getCheckpointId(), getJob(), getTaskExecutionId(), serializedCheckpointException.getCheckpointFailureReason());
} | 3.26 |
flink_FunctionAnnotation_readDualForwardAnnotations_rdh | // --------------------------------------------------------------------------------------------
/**
* Reads the annotations of a user defined function with two inputs and returns semantic
* properties according to the forwarded fields annotated.
*
* @param udfClass
* The user defined function, represented by its class.
* @return The set of annotations describing the forwarded fields.
*/
@Internal
public static Set<Annotation> readDualForwardAnnotations(Class<?> udfClass) {
// get readSet annotation from stub
ForwardedFieldsFirst forwardedFields1 = udfClass.getAnnotation(FunctionAnnotation.ForwardedFieldsFirst.class);
ForwardedFieldsSecond forwardedFields2 = udfClass.getAnnotation(FunctionAnnotation.ForwardedFieldsSecond.class);
// get readSet annotation from stub
NonForwardedFieldsFirst nonForwardedFields1 = udfClass.getAnnotation(FunctionAnnotation.NonForwardedFieldsFirst.class);
NonForwardedFieldsSecond nonForwardedFields2 = udfClass.getAnnotation(FunctionAnnotation.NonForwardedFieldsSecond.class);
ReadFieldsFirst readSet1 = udfClass.getAnnotation(FunctionAnnotation.ReadFieldsFirst.class);
ReadFieldsSecond readSet2 = udfClass.getAnnotation(FunctionAnnotation.ReadFieldsSecond.class);
Set<Annotation> annotations = new HashSet<Annotation>();
if ((nonForwardedFields1 != null) && (forwardedFields1 != null)) {
throw new InvalidProgramException(((("Either " + FunctionAnnotation.ForwardedFieldsFirst.class.getSimpleName()) + " or ") + FunctionAnnotation.NonForwardedFieldsFirst.class.getSimpleName()) + " can be annotated to a function, not both.");
} else if (forwardedFields1 != null) {
annotations.add(forwardedFields1);
} else if (nonForwardedFields1 != null) {
annotations.add(nonForwardedFields1);
}
if ((forwardedFields2 != null) && (nonForwardedFields2 != null)) {
throw new InvalidProgramException(((("Either " + FunctionAnnotation.ForwardedFieldsSecond.class.getSimpleName()) + " or ") + FunctionAnnotation.NonForwardedFieldsSecond.class.getSimpleName()) + " can be annotated to a function, not both.");
} else if (forwardedFields2 != null) {
annotations.add(forwardedFields2);
} else if (nonForwardedFields2 != null) {
annotations.add(nonForwardedFields2);
}
if (readSet1 != null) {
annotations.add(readSet1);
}
if (readSet2 != null) {
annotations.add(readSet2);
}
return !annotations.isEmpty() ? annotations : null;
} | 3.26 |
flink_FunctionAnnotation_readSingleForwardAnnotations_rdh | // --------------------------------------------------------------------------------------------
// Function Annotation Handling
// --------------------------------------------------------------------------------------------
/**
* Reads the annotations of a user defined function with one input and returns semantic
* properties according to the forwarded fields annotated.
*
* @param udfClass
* The user defined function, represented by its class.
* @return The set of annotations describing the forwarded fields.
*/
@Internal
public static Set<Annotation> readSingleForwardAnnotations(Class<?> udfClass) {
ForwardedFields forwardedFields = udfClass.getAnnotation(FunctionAnnotation.ForwardedFields.class);
NonForwardedFields nonForwardedFields = udfClass.getAnnotation(FunctionAnnotation.NonForwardedFields.class);
ReadFields readSet = udfClass.getAnnotation(FunctionAnnotation.ReadFields.class);
Set<Annotation> annotations = new HashSet<Annotation>();
if (forwardedFields != null) {
annotations.add(forwardedFields);
}
if (nonForwardedFields != null) {
if (!annotations.isEmpty()) {
throw new InvalidProgramException(((("Either " + FunctionAnnotation.ForwardedFields.class.getSimpleName())
+ " or ") + FunctionAnnotation.NonForwardedFields.class.getSimpleName()) + " can be annotated to a function, not both.");
}
annotations.add(nonForwardedFields);
}
if (readSet != null) {
annotations.add(readSet);
}
return !annotations.isEmpty() ? annotations : null;
} | 3.26 |
flink_PatternStreamBuilder_build_rdh | /**
* Creates a data stream containing the results of applying a {@link PatternProcessFunction} to
* fully matched event patterns.
*
* @param processFunction
* function to be applied to matching event sequences
* @param outTypeInfo
* output TypeInformation of {@link PatternProcessFunction#processMatch(Map,
* PatternProcessFunction.Context, Collector)}
* @param <OUT>
* type of output events
* @return Data stream containing fully matched event sequence with applied {@link PatternProcessFunction}
*/
<OUT, K> SingleOutputStreamOperator<OUT> build(final TypeInformation<OUT> outTypeInfo, final PatternProcessFunction<IN, OUT> processFunction) {
checkNotNull(outTypeInfo);
checkNotNull(processFunction);
final TypeSerializer<IN> inputSerializer = inputStream.getType().createSerializer(inputStream.getExecutionConfig());
final boolean isProcessingTime = timeBehaviour == TimeBehaviour.ProcessingTime;
final boolean timeoutHandling = processFunction instanceof TimedOutPartialMatchHandler;
final NFACompiler.NFAFactory<IN> nfaFactory = NFACompiler.compileFactory(pattern, timeoutHandling);
final CepOperator<IN, K, OUT> v4 = new CepOperator<>(inputSerializer, isProcessingTime, nfaFactory, comparator, pattern.getAfterMatchSkipStrategy(), processFunction, lateDataOutputTag);
final SingleOutputStreamOperator<OUT> patternStream;
if (inputStream instanceof KeyedStream) {
KeyedStream<IN, K> keyedStream = ((KeyedStream<IN, K>) (inputStream));
patternStream = keyedStream.transform("CepOperator", outTypeInfo, v4);
} else {
KeySelector<IN, Byte> keySelector = new NullByteKeySelector<>();
patternStream = inputStream.keyBy(keySelector).transform("GlobalCepOperator", outTypeInfo, v4).forceNonParallel();
}
return patternStream;
} | 3.26 |
flink_PatternStreamBuilder_forStreamAndPattern_rdh | // ---------------------------------------- factory-like methods
// ---------------------------------------- //
static <IN> PatternStreamBuilder<IN> forStreamAndPattern(final DataStream<IN> inputStream, final Pattern<IN, ?> pattern) {
return new PatternStreamBuilder<>(inputStream, pattern, TimeBehaviour.EventTime, null, null);
} | 3.26 |
flink_Acknowledge_get_rdh | /**
* Gets the singleton instance.
*
* @return The singleton instance.
*/
public static Acknowledge get() {
return INSTANCE;
} | 3.26 |
flink_Acknowledge_readResolve_rdh | /**
* Read resolve to preserve the singleton object property. (per best practices, this should have
* visibility 'protected')
*/
protected Object readResolve() throws ObjectStreamException {
return INSTANCE;
} | 3.26 |
flink_AbstractBinaryWriter_write_rdh | /**
* Writes the specified byte to this output stream. The general contract for <code>write</code>
* is that one byte is written to the output stream. The byte to be written is the eight
* low-order bits of the argument <code>b</code>. The 24 high-order bits of <code>b</code>
* are ignored.
*/
@Override
public void write(int b) throws IOException {
ensureCapacity(1);
segment.put(f0, ((byte) (b)));
f0 += 1;
} | 3.26 |
flink_AbstractBinaryWriter_writeString_rdh | /**
* See {@link BinarySegmentUtils#readStringData(MemorySegment[], int, int, long)}.
*/
@Override
public void writeString(int pos, StringData input) {
BinaryStringData string = ((BinaryStringData) (input));
if (string.getSegments() == null) {
String javaObject = string.toString();
writeBytes(pos, javaObject.getBytes(StandardCharsets.UTF_8));
} else {
int len = string.getSizeInBytes();
if (len <= 7) {
byte[] bytes = BinarySegmentUtils.allocateReuseBytes(len);
BinarySegmentUtils.copyToBytes(string.getSegments(), string.getOffset(), bytes, 0, len);
writeBytesToFixLenPart(segment, getFieldOffset(pos), bytes, len);
} else {
writeSegmentsToVarLenPart(pos, string.getSegments(), string.getOffset(), len);
}
}
} | 3.26 |
flink_ResourceManagerPartitionTrackerImpl_setHostedDataSetsAndCheckCorruption_rdh | /**
* Updates the data sets for which the given task executor is hosting partitions and returns
* data sets that were corrupted due to a loss of partitions.
*
* @param taskExecutorId
* ID of the hosting TaskExecutor
* @param reportEntries
* IDs of data sets for which partitions are hosted
* @return corrupted data sets
*/
private Set<IntermediateDataSetID> setHostedDataSetsAndCheckCorruption(ResourceID taskExecutorId, Collection<ClusterPartitionReport.ClusterPartitionReportEntry> reportEntries) {
final Set<IntermediateDataSetID> currentlyHostedDatasets = reportEntries.stream().map(ClusterPartitionReport.ClusterPartitionReportEntry::getDataSetId).collect(Collectors.toSet());
final Set<IntermediateDataSetID> previouslyHostedDataSets = taskExecutorToDataSets.put(taskExecutorId, currentlyHostedDatasets);
// previously tracked data sets may be corrupted since we may be tracking less partitions
// than before
final Set<IntermediateDataSetID> v7 = Optional.ofNullable(previouslyHostedDataSets).orElse(new HashSet<>(0));
// update data set -> task executor mapping and find datasets for which lost a partition
reportEntries.forEach(hostedPartition -> {
final Map<ResourceID, Set<ResultPartitionID>> taskExecutorHosts = dataSetToTaskExecutors.computeIfAbsent(hostedPartition.getDataSetId(), ignored -> new HashMap<>());
final Set<ResultPartitionID> previouslyHostedPartitions =
taskExecutorHosts.put(taskExecutorId, hostedPartition.getHostedPartitions());
final boolean noPartitionLost = (previouslyHostedPartitions == null) || hostedPartition.getHostedPartitions().containsAll(previouslyHostedPartitions);
if (noPartitionLost) {
v7.remove(hostedPartition.getDataSetId());
}
});
// now only contains data sets for which a partition is no longer tracked
return v7;
} | 3.26 |
flink_ResourceManagerPartitionTrackerImpl_areAllMapsEmpty_rdh | /**
* Returns whether all maps are empty; used for checking for resource leaks in case entries
* aren't properly removed.
*
* @return whether all contained maps are empty
*/
@VisibleForTesting
boolean areAllMapsEmpty() {
return ((taskExecutorToDataSets.isEmpty() && dataSetToTaskExecutors.isEmpty()) && dataSetMetaInfo.isEmpty()) && partitionReleaseCompletionFutures.isEmpty();
} | 3.26 |
flink_MailboxMetricsController_startLatencyMeasurement_rdh | /**
* Starts mailbox latency measurement. This requires setup of latency measurement via {@link MailboxMetricsController#setupLatencyMeasurement(TimerService, MailboxExecutor)}. Latency is
* measured through execution of a mail that is triggered by default in the interval defined by
* {@link MailboxMetricsController#defaultLatencyMeasurementInterval}.
*
* <p>Note: For each instance, latency measurement can be started only once.
*/
public void startLatencyMeasurement() {
checkState(!isLatencyMeasurementStarted(), "latency measurement has already been started");
checkState(isLatencyMeasurementSetup(), "timer service and mailbox executor must be setup for latency measurement");
scheduleLatencyMeasurement();
f0 = true;
} | 3.26 |
flink_MailboxMetricsController_isLatencyMeasurementStarted_rdh | /**
* Indicates if latency measurement has been started.
*
* @return True if latency measurement has been started.
*/
public boolean isLatencyMeasurementStarted() {
return f0;
} | 3.26 |
flink_MailboxMetricsController_isLatencyMeasurementSetup_rdh | /**
* Indicates if latency measurement has been setup.
*
* @return True if latency measurement has been setup.
*/
public boolean isLatencyMeasurementSetup() {
return (this.timerService != null) && (this.mailboxExecutor != null);
}
/**
* Gets {@link Counter} for number of mails processed.
*
* @return {@link Counter} | 3.26 |
flink_MailboxMetricsController_setupLatencyMeasurement_rdh | /**
* Sets up latency measurement with required {@link TimerService} and {@link MailboxExecutor}.
*
* <p>Note: For each instance, latency measurement can be set up only once.
*
* @param timerService
* {@link TimerService} used for latency measurement.
* @param mailboxExecutor
* {@link MailboxExecutor} used for latency measurement.
*/
public void setupLatencyMeasurement(TimerService timerService, MailboxExecutor mailboxExecutor) {
checkState(!isLatencyMeasurementSetup(), "latency measurement has already been setup and cannot be setup twice");
this.timerService = timerService;
this.mailboxExecutor = mailboxExecutor;
} | 3.26 |
flink_ConfluentRegistryAvroSerializationSchema_forSpecific_rdh | /**
* Creates {@link AvroSerializationSchema} that produces byte arrays that were generated from
* Avro schema and writes the writer schema to Confluent Schema Registry.
*
* @param tClass
* the type to be serialized
* @param subject
* subject of schema registry to produce
* @param schemaRegistryUrl
* URL of schema registry to connect
* @param registryConfigs
* map with additional schema registry configs (for example SSL
* properties)
* @return the serialization schema
*/
public static <T extends SpecificRecord> ConfluentRegistryAvroSerializationSchema<T> forSpecific(Class<T> tClass, String subject, String schemaRegistryUrl, @Nullable
Map<String, ?> registryConfigs) {
return new ConfluentRegistryAvroSerializationSchema<>(tClass, null, new CachedSchemaCoderProvider(subject, schemaRegistryUrl, DEFAULT_IDENTITY_MAP_CAPACITY, registryConfigs));
} | 3.26 |
flink_ConfluentRegistryAvroSerializationSchema_forGeneric_rdh | /**
* Creates {@link AvroSerializationSchema} that produces byte arrays that were generated from
* Avro schema and writes the writer schema to Confluent Schema Registry.
*
* @param subject
* subject of schema registry to produce
* @param schema
* schema that will be used for serialization
* @param schemaRegistryUrl
* URL of schema registry to connect
* @param registryConfigs
* map with additional schema registry configs (for example SSL
* properties)
* @return the serialization schema
*/
public static ConfluentRegistryAvroSerializationSchema<GenericRecord> forGeneric(String subject, Schema schema, String schemaRegistryUrl, @Nullable
Map<String, ?> registryConfigs) {
return new ConfluentRegistryAvroSerializationSchema<>(GenericRecord.class, schema, new CachedSchemaCoderProvider(subject, schemaRegistryUrl, DEFAULT_IDENTITY_MAP_CAPACITY, registryConfigs));
} | 3.26 |
flink_YarnLocalResourceDescriptor_toLocalResource_rdh | /**
* Transforms this local resource descriptor to a {@link LocalResource}.
*
* @return YARN resource
*/
public LocalResource toLocalResource() {
return Utils.registerLocalResource(path, size, modificationTime,
visibility, resourceType);
} | 3.26 |
flink_FunctionUtils_nullFn_rdh | /**
* Function which returns {@code null} (type: Void).
*
* @param <T>
* input type
* @return Function which returns {@code null}.
*/
@SuppressWarnings("unchecked")
public static <T> Function<T, Void> nullFn() {
return ((Function<T, Void>) (f0));
} | 3.26 |
flink_FunctionUtils_asCallable_rdh | /**
* Converts {@link RunnableWithException} into a {@link Callable} that will return the {@code result}.
*/
public static <T> Callable<T> asCallable(RunnableWithException command, T result) {
return () -> {
command.run();
return result;
};
} | 3.26 |
flink_FunctionUtils_ignoreFn_rdh | /**
* Consumer which ignores the input.
*
* @param <T>
* type of the input
* @return Ignoring {@link Consumer}
*/
@SuppressWarnings("unchecked")
public static <T> Consumer<T> ignoreFn() {
return ((Consumer<T>) (IGNORE_FN));
} | 3.26 |
flink_FunctionUtils_uncheckedConsumer_rdh | /**
* Converts a {@link ThrowingConsumer} into a {@link Consumer} which throws checked exceptions
* as unchecked.
*
* @param throwingConsumer
* to convert into a {@link Consumer}
* @param <A>
* input type
* @return {@link Consumer} which throws all checked exceptions as unchecked
*/
public static <A> Consumer<A> uncheckedConsumer(ThrowingConsumer<A, ?> throwingConsumer) {
return (A value) -> {
try {
throwingConsumer.accept(value);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
};
}
/**
* Converts a {@link SupplierWithException} into a {@link Supplier} which throws all checked
* exceptions as unchecked.
*
* @param supplierWithException
* to convert into a {@link Supplier}
* @return {@link Supplier} | 3.26 |
flink_FunctionUtils_uncheckedFunction_rdh | /**
* Converts a {@link FunctionWithException} into a {@link Function}.
*
* @param functionWithException
* function with exception to convert into a function
* @param <A>
* input type
* @param <B>
* output type
* @return {@link Function} which throws all checked exception as an unchecked exception.
*/
public static <A, B> Function<A, B> uncheckedFunction(FunctionWithException<A, B, ?> functionWithException) {
return (A value) -> {
try {
return functionWithException.apply(value);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
// we need this to appease the compiler :-(
return null;
}
};
} | 3.26 |
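The converters above share one pattern: wrap the throwing call, rethrow checked exceptions as unchecked, and add an unreachable `return` to satisfy the compiler. Below is a minimal, self-contained sketch of that pattern that assumes nothing from Flink; `ThrowingFunction`, `sneakyThrow`, and `unchecked` are illustrative names, with the "sneaky throw" generic trick standing in for `ExceptionUtils.rethrow`.

```java
import java.util.function.Function;

public class UncheckedFunctionSketch {

    /** Illustrative stand-in for a function type that may throw checked exceptions. */
    @FunctionalInterface
    interface ThrowingFunction<A, B> {
        B apply(A value) throws Exception;
    }

    /** Rethrows any Throwable without declaring it ("sneaky throw" generic trick). */
    @SuppressWarnings("unchecked")
    static <E extends Throwable> void sneakyThrow(Throwable t) throws E {
        throw (E) t;
    }

    /** Wraps a throwing function into a plain java.util.function.Function. */
    static <A, B> Function<A, B> unchecked(ThrowingFunction<A, B> fn) {
        return value -> {
            try {
                return fn.apply(value);
            } catch (Exception e) {
                sneakyThrow(e);
                return null; // unreachable; appeases the compiler
            }
        };
    }

    public static void main(String[] args) {
        // getBytes(String) declares UnsupportedEncodingException, a checked exception.
        Function<String, byte[]> toBytes = unchecked(s -> s.getBytes("UTF-8"));
        System.out.println(toBytes.apply("hello").length); // prints 5
    }
}
```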
flink_SortedGrouping_first_rdh | /**
* Returns a new set containing the first n elements in this grouped and sorted {@link DataSet}.
*
* @param n
* The desired number of elements for each group.
* @return A GroupReduceOperator that represents the DataSet containing the elements.
*/
public GroupReduceOperator<T, T> first(int n) {
if (n < 1) {
throw new InvalidProgramException("Parameter n of first(n) must be at least 1.");
}
return reduceGroup(new FirstReducer<T>(n));
} | 3.26 |
flink_SortedGrouping_withPartitioner_rdh | /**
* Uses a custom partitioner for the grouping.
*
* @param partitioner
* The custom partitioner.
* @return The grouping object itself, to allow for method chaining.
*/
public SortedGrouping<T> withPartitioner(Partitioner<?> partitioner) {
Preconditions.checkNotNull(partitioner);
getKeys().validateCustomPartitioner(partitioner,
null);
this.customPartitioner = partitioner;
return this;
} | 3.26 |
flink_SortedGrouping_reduceGroup_rdh | /**
* Applies a GroupReduce transformation on a grouped and sorted {@link DataSet}.
*
* <p>The transformation calls a {@link org.apache.flink.api.common.functions.RichGroupReduceFunction} for each group of the DataSet.
* A GroupReduceFunction can iterate over all elements of a group and emit any number of output
* elements including none.
*
* @param reducer
* The GroupReduceFunction that is applied on each group of the DataSet.
* @return A GroupReduceOperator that represents the reduced DataSet.
* @see org.apache.flink.api.common.functions.RichGroupReduceFunction
* @see GroupReduceOperator
* @see DataSet
*/
public <R> GroupReduceOperator<T, R> reduceGroup(GroupReduceFunction<T, R> reducer) {
if (reducer == null) {
throw new NullPointerException("GroupReduce function must not be null.");
}
TypeInformation<R> resultType = TypeExtractor.getGroupReduceReturnTypes(reducer, inputDataSet.getType(), Utils.getCallLocationName(), true);
return new GroupReduceOperator<>(this, resultType, inputDataSet.clean(reducer), Utils.getCallLocationName());
} | 3.26 |
flink_SortedGrouping_sortGroup_rdh | /**
* Sorts {@link org.apache.flink.api.java.tuple.Tuple} or POJO elements within a group on the
* specified field in the specified {@link Order}.
*
* <p><b>Note: Only groups of Tuple or Pojo elements can be sorted.</b>
*
* <p>Groups can be sorted by multiple fields by chaining {@link #sortGroup(String, Order)}
* calls.
*
* @param field
* The Tuple or Pojo field on which the group is sorted.
* @param order
* The Order in which the specified field is sorted.
* @return A SortedGrouping with specified order of group element.
* @see org.apache.flink.api.java.tuple.Tuple
* @see Order
*/
public SortedGrouping<T> sortGroup(String field, Order order) {
if (groupSortSelectorFunctionKey != null) {
throw new InvalidProgramException("Chaining sortGroup with KeySelector sorting is not supported");
}
if (!Keys.ExpressionKeys.isSortKey(field, inputDataSet.getType())) {
throw new InvalidProgramException("Selected sort key is not a sortable type");
}
ExpressionKeys<T> ek = new ExpressionKeys<>(field, inputDataSet.getType());
addSortGroupInternal(ek, order);
return this;
} | 3.26 |
flink_SortedGrouping_getGroupSortKeyPositions_rdh | // --------------------------------------------------------------------------------------------
protected int[] getGroupSortKeyPositions() {
return this.groupSortKeyPositions;
} | 3.26 |
flink_SortedGrouping_combineGroup_rdh | /**
* Applies a GroupCombineFunction on a grouped {@link DataSet}. A CombineFunction is similar to
* a GroupReduceFunction but does not perform a full data exchange. Instead, the CombineFunction
* calls the combine method once per partition for combining a group of results. This operator
* is suitable for combining values into an intermediate format before doing a proper
* groupReduce where the data is shuffled across the node for further reduction. The GroupReduce
* operator can also be supplied with a combiner by implementing the RichGroupReduce function.
* The combine method of the RichGroupReduce function demands input and output type to be the
* same. The CombineFunction, on the other side, can have an arbitrary output type.
*
* @param combiner
* The GroupCombineFunction that is applied on the DataSet.
* @return A GroupCombineOperator which represents the combined DataSet.
*/
public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) {
if (combiner == null) {
throw new NullPointerException("GroupCombine function must not be null.");
}
TypeInformation<R> resultType = TypeExtractor.getGroupCombineReturnTypes(combiner, this.getInputDataSet().getType(), Utils.getCallLocationName(), true);
return new GroupCombineOperator<>(this, resultType, inputDataSet.clean(combiner), Utils.getCallLocationName());
} | 3.26 |
flink_GSRecoverableWriterCommitter_writeFinalBlob_rdh | /**
* Writes the final blob by composing the temporary blobs and copying, if necessary.
*/
private void writeFinalBlob() {
// do we have any component blobs?
List<GSBlobIdentifier> blobIdentifiers = recoverable.getComponentBlobIds(options);
if (blobIdentifiers.isEmpty()) {
// we have no blob identifiers, so just create an empty target blob
storage.createBlob(recoverable.finalBlobIdentifier);
} else {
// yes, we have component blobs. compose them into the final blob id. if the component
// blob ids are in the same bucket as the final blob id, this can be done directly.
// otherwise, we must compose to a new temporary blob id in the same bucket as the
// component blob ids and then copy that blob to the final blob location
String temporaryBucketName = BlobUtils.getTemporaryBucketName(recoverable.finalBlobIdentifier, options);
if (recoverable.finalBlobIdentifier.bucketName.equals(temporaryBucketName)) {
// compose directly to final blob
composeBlobs(recoverable.getComponentBlobIds(options), recoverable.finalBlobIdentifier);
} else {
// compose to the intermediate blob, then copy
UUID temporaryObjectId = UUID.randomUUID();
GSBlobIdentifier intermediateBlobIdentifier = BlobUtils.getTemporaryBlobIdentifier(recoverable.finalBlobIdentifier, temporaryObjectId, options);
composeBlobs(recoverable.getComponentBlobIds(options), intermediateBlobIdentifier);
storage.copy(intermediateBlobIdentifier, recoverable.finalBlobIdentifier);
}
}
} | 3.26 |
flink_GSRecoverableWriterCommitter_composeBlobs_rdh | /**
* Helper to compose an arbitrary number of blobs into a final blob, staying under the
* composeMaxBlobs limit for any individual compose operation.
*
* @param sourceBlobIdentifiers
* The source blob ids to compose
* @param targetBlobIdentifier
* The target blob id for the composed result
*/
private void composeBlobs(List<GSBlobIdentifier> sourceBlobIdentifiers, GSBlobIdentifier targetBlobIdentifier) {
LOGGER.trace("Composing blobs {} to {} for commit with options {}", sourceBlobIdentifiers, targetBlobIdentifier, options);
Preconditions.checkNotNull(sourceBlobIdentifiers);
Preconditions.checkArgument(sourceBlobIdentifiers.size() > 0);
Preconditions.checkNotNull(targetBlobIdentifier);
// split the source list into two parts; first, the ones we can compose in this operation
// (up to composeMaxBlobs), and, second, whichever blobs are left over
final int composeToIndex = Math.min(composeMaxBlobs, sourceBlobIdentifiers.size());
List<GSBlobIdentifier> composeBlobIds = sourceBlobIdentifiers.subList(0, composeToIndex);
List<GSBlobIdentifier> remainingBlobIds = sourceBlobIdentifiers.subList(composeToIndex, sourceBlobIdentifiers.size());
// determine the resulting blob id for this compose operation. if this is the last compose,
// i.e. if there are no remaining blob ids, then the composed blob id is the originally
// specified target blob id. otherwise, we must create an intermediate blob id to hold the
// result of this compose operation
UUID temporaryObjectId = UUID.randomUUID();
GSBlobIdentifier composedBlobId = (remainingBlobIds.isEmpty()) ? targetBlobIdentifier : BlobUtils.getTemporaryBlobIdentifier(recoverable.finalBlobIdentifier, temporaryObjectId, options);
// compose the blobs
storage.compose(composeBlobIds, composedBlobId);
// if we have remaining blobs, add the composed blob id to the beginning of the list
// of remaining blob ids, and recurse
if (!remainingBlobIds.isEmpty()) {
remainingBlobIds.add(0, composedBlobId);
composeBlobs(remainingBlobIds, targetBlobIdentifier);
}
} | 3.26 |
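The recursion in `composeBlobs` keeps every compose call under the `composeMaxBlobs` limit by composing a head batch, then prepending the intermediate result to the remaining list and recursing. Below is a storage-agnostic sketch of that idea only; the `compose` primitive is hypothetical and simulated here by string concatenation, and the batch limit of 3 is an assumption.

```java
import java.util.ArrayList;
import java.util.List;

public class BatchedComposeSketch {

    private static final int COMPOSE_MAX_BLOBS = 3; // stand-in for the backend's per-call limit

    /** Stand-in for a storage compose primitive: here it just concatenates the "blobs". */
    private static String compose(List<String> parts) {
        return String.join("", parts);
    }

    /**
     * Composes arbitrarily many parts while never passing more than COMPOSE_MAX_BLOBS
     * to a single compose call: compose a head batch, then prepend the intermediate
     * result to the remaining parts and recurse.
     */
    static String composeAll(List<String> parts) {
        int cut = Math.min(COMPOSE_MAX_BLOBS, parts.size());
        String intermediate = compose(parts.subList(0, cut));
        List<String> remaining = new ArrayList<>(parts.subList(cut, parts.size()));
        if (remaining.isEmpty()) {
            return intermediate;
        }
        remaining.add(0, intermediate); // intermediate result feeds the next round
        return composeAll(remaining);
    }

    public static void main(String[] args) {
        System.out.println(composeAll(List.of("a", "b", "c", "d", "e"))); // prints "abcde"
    }
}
```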
flink_GSRecoverableWriterCommitter_cleanupTemporaryBlobs_rdh | /**
* Clean up after a successful commit operation, by deleting any temporary blobs associated with
* the final blob.
*/
private void cleanupTemporaryBlobs() {
LOGGER.trace("Cleaning up temporary blobs for recoverable with options {}: {}", options, recoverable);
// determine the partial name for the temporary objects to be deleted
String temporaryBucketName = BlobUtils.getTemporaryBucketName(recoverable.finalBlobIdentifier, options);
String temporaryObjectPartialName = BlobUtils.getTemporaryObjectPartialName(recoverable.finalBlobIdentifier);
// find all the temp blobs by looking for anything that starts with the temporary
// object partial name. doing it this way finds any orphaned temp blobs as well
List<GSBlobIdentifier> foundTempBlobIdentifiers = storage.list(temporaryBucketName, temporaryObjectPartialName);
if (!foundTempBlobIdentifiers.isEmpty()) {
// delete all the temp blobs, and populate the set with ones that were actually deleted
// normalize in case the blob came back with a generation populated
storage.delete(foundTempBlobIdentifiers);
}
} | 3.26 |
flink_HashPartitionIterator_advanceAndRead_rdh | /* jump to the next partition and continue reading from that */
private BT advanceAndRead() throws IOException {
if (!partitions.hasNext()) {
return null;
}
currentPartition = partitions.next();
currentPartition.setReadPosition(0);
try {
return serializer.deserialize(currentPartition);
} catch (EOFException e) {
return advanceAndRead();
}
} | 3.26 |
flink_RocksDBStateUploader_uploadFilesToCheckpointFs_rdh | /**
* Upload all the files to checkpoint fileSystem using specified number of threads.
*
* @param files
* The files to be uploaded to the checkpoint filesystem.
* @param checkpointStreamFactory
* The checkpoint stream factory used to create the output stream.
* @param stateScope
* @throws Exception
* Thrown if the files cannot be uploaded.
*/
public List<HandleAndLocalPath> uploadFilesToCheckpointFs(@Nonnull
List<Path> files, CheckpointStreamFactory checkpointStreamFactory, CheckpointedStateScope stateScope, CloseableRegistry closeableRegistry, CloseableRegistry tmpResourcesRegistry) throws Exception {
List<CompletableFuture<HandleAndLocalPath>> futures = createUploadFutures(files, checkpointStreamFactory, stateScope, closeableRegistry, tmpResourcesRegistry);
List<HandleAndLocalPath> handles = new ArrayList<>(files.size());
try {
FutureUtils.waitForAll(futures).get();
for (CompletableFuture<HandleAndLocalPath> future : futures) {
handles.add(future.get());
}
} catch (ExecutionException e) {
Throwable throwable = ExceptionUtils.stripExecutionException(e);
throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
if (throwable instanceof IOException) {
throw ((IOException) (throwable));
} else {
throw new FlinkRuntimeException("Failed to upload data for state handles.", e);
}
}
return handles;
} | 3.26 |
flink_OutputEmitter_forward_rdh | // --------------------------------------------------------------------------------------------
private int forward() {
return 0;
} | 3.26 |
flink_OutputEmitter_setup_rdh | // ------------------------------------------------------------------------
@Override
public void setup(int numberOfChannels) {
this.numberOfChannels = numberOfChannels;
} | 3.26 |
flink_DebeziumJsonFormatFactory_validateEncodingFormatOptions_rdh | /**
* Validator for debezium encoding format.
*/
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
// validator for {@link SCHEMA_INCLUDE}
if (tableOptions.get(SCHEMA_INCLUDE)) {
throw new ValidationException(String.format("Debezium JSON serialization doesn't support '%s.%s' option been set to true.", IDENTIFIER, SCHEMA_INCLUDE.key()));
}
} | 3.26 |
flink_PartitionWriterFactory_get_rdh | /**
* Util for get a {@link PartitionWriterFactory}.
*/
static <T> PartitionWriterFactory<T> get(boolean dynamicPartition, boolean grouped, LinkedHashMap<String, String> staticPartitions) {
if (dynamicPartition) {
return grouped ? GroupedPartitionWriter::new : DynamicPartitionWriter::new;
} else {
return ((PartitionWriterFactory<T>) ((context, manager, computer, writerListener) -> new SingleDirectoryWriter<>(context, manager, computer, staticPartitions, writerListener)));
}
} | 3.26 |
flink_ResourceManager_onNewTokensObtained_rdh | // ------------------------------------------------------------------------
// Resource Management
// ------------------------------------------------------------------------
@Override
public void onNewTokensObtained(byte[] tokens) throws Exception {
latestTokens.set(tokens);
log.info("Updating delegation tokens for {} task manager(s).", taskExecutors.size());
if (!taskExecutors.isEmpty()) {
final List<CompletableFuture<Acknowledge>> futures = new ArrayList<>(taskExecutors.size());
for (Map.Entry<ResourceID, WorkerRegistration<WorkerType>> workerRegistrationEntry : taskExecutors.entrySet()) {
WorkerRegistration<WorkerType> registration = workerRegistrationEntry.getValue();
log.info("Updating delegation tokens for node {}.", registration.getNodeId());
final TaskExecutorGateway taskExecutorGateway = registration.getTaskExecutorGateway();
futures.add(taskExecutorGateway.updateDelegationTokens(getFencingToken(), tokens));
}
FutureUtils.combineAll(futures).get();
}
} | 3.26 |
flink_ResourceManager_m5_rdh | /**
* Stops the given worker if supported.
*
* @param worker
* The worker.
*/
public void m5(WorkerType worker) {
if (resourceAllocator.isSupported()) {
resourceAllocator.cleaningUpDisconnectedResource(worker.getResourceID());
}
} | 3.26 |
flink_ResourceManager_setFailUnfulfillableRequest_rdh | /**
* Set {@link SlotManager} whether to fail unfulfillable slot requests.
*
* @param failUnfulfillableRequest
* whether to fail unfulfillable requests
*/
protected void setFailUnfulfillableRequest(boolean failUnfulfillableRequest) {
slotManager.setFailUnfulfillableRequest(failUnfulfillableRequest);
} | 3.26 |
flink_ResourceManager_closeTaskManagerConnection_rdh | /**
* This method should be called by the framework once it detects that a currently registered
* task executor has failed.
*
* @param resourceID
* Id of the TaskManager that has failed.
* @param cause
* The exception which caused the TaskManager to fail.
* @return The {@link WorkerType} of the closed connection, or empty if already removed.
*/
protected Optional<WorkerType> closeTaskManagerConnection(final ResourceID resourceID, final Exception cause) {
taskManagerHeartbeatManager.unmonitorTarget(resourceID);
WorkerRegistration<WorkerType> workerRegistration = taskExecutors.remove(resourceID);
if (workerRegistration != null) {
log.info("Closing TaskExecutor connection {} because: {}", resourceID.getStringWithMetadata(), cause.getMessage(), ExceptionUtils.returnExceptionIfUnexpected(cause.getCause()));
ExceptionUtils.logExceptionIfExcepted(cause.getCause(), log);
// TODO :: suggest failed task executor to stop itself
slotManager.unregisterTaskManager(workerRegistration.getInstanceID(), cause);
clusterPartitionTracker.processTaskExecutorShutdown(resourceID);
workerRegistration.getTaskExecutorGateway().disconnectResourceManager(cause);
} else {
log.debug("No open TaskExecutor connection {}. Ignoring close TaskExecutor connection. Closing reason was: {}", resourceID.getStringWithMetadata(), cause.getMessage());
}
return Optional.ofNullable(workerRegistration).map(WorkerRegistration::getWorker);
} | 3.26 |
flink_ResourceManager_onStart_rdh | // ------------------------------------------------------------------------
// RPC lifecycle methods
// ------------------------------------------------------------------------
@Override
public final void onStart() throws Exception {
try {
log.info("Starting the resource manager.");
startResourceManagerServices();
startedFuture.complete(null);
} catch (Throwable t) {
final ResourceManagerException exception = new ResourceManagerException(String.format("Could not start the ResourceManager %s", getAddress()), t);
onFatalError(exception);
throw exception;
}
} | 3.26 |
flink_ResourceManager_registerTaskExecutorInternal_rdh | /**
* Registers a new TaskExecutor.
*
* @param taskExecutorRegistration
* task executor registration parameters
* @return RegistrationResponse
*/
private RegistrationResponse registerTaskExecutorInternal(TaskExecutorGateway taskExecutorGateway, TaskExecutorRegistration taskExecutorRegistration) {
ResourceID taskExecutorResourceId = taskExecutorRegistration.getResourceId();
WorkerRegistration<WorkerType> oldRegistration = taskExecutors.remove(taskExecutorResourceId);
if (oldRegistration != null) {
// TODO :: suggest old taskExecutor to stop itself
log.debug("Replacing old registration of TaskExecutor {}.", taskExecutorResourceId.getStringWithMetadata());
// remove old task manager registration from slot manager
slotManager.unregisterTaskManager(oldRegistration.getInstanceID(), new ResourceManagerException(String.format("TaskExecutor %s re-connected to the ResourceManager.", taskExecutorResourceId.getStringWithMetadata())));
}
final Optional<WorkerType> newWorkerOptional = getWorkerNodeIfAcceptRegistration(taskExecutorResourceId);
String taskExecutorAddress = taskExecutorRegistration.getTaskExecutorAddress();
if (!newWorkerOptional.isPresent()) {
log.warn("Discard registration from TaskExecutor {} at ({}) because the framework did " + "not recognize it", taskExecutorResourceId.getStringWithMetadata(), taskExecutorAddress);
return new TaskExecutorRegistrationRejection("The ResourceManager does not recognize this TaskExecutor.");
} else {
WorkerType newWorker = newWorkerOptional.get();
WorkerRegistration<WorkerType> registration = new WorkerRegistration<>(taskExecutorGateway, newWorker, taskExecutorRegistration.getDataPort(), taskExecutorRegistration.getJmxPort(), taskExecutorRegistration.getHardwareDescription(), taskExecutorRegistration.getMemoryConfiguration(),
taskExecutorRegistration.getTotalResourceProfile(),
taskExecutorRegistration.getDefaultSlotResourceProfile(), taskExecutorRegistration.getNodeId());
log.info("Registering TaskManager with ResourceID {} ({}) at ResourceManager", taskExecutorResourceId.getStringWithMetadata(), taskExecutorAddress);
taskExecutors.put(taskExecutorResourceId, registration);
taskManagerHeartbeatManager.monitorTarget(taskExecutorResourceId, new TaskExecutorHeartbeatSender(taskExecutorGateway));
return new TaskExecutorRegistrationSuccess(registration.getInstanceID(), resourceId, clusterInformation, latestTokens.get());
}
} | 3.26 |
flink_ResourceManager_onFatalError_rdh | // ------------------------------------------------------------------------
// Error Handling
// ------------------------------------------------------------------------
/**
* Notifies the ResourceManager that a fatal error has occurred and it cannot proceed.
*
* @param t
* The exception describing the fatal error
*/
protected void onFatalError(Throwable t) {
try {
log.error("Fatal error occurred in ResourceManager.", t);
} catch (Throwable ignored) {
}
// The fatal error handler implementation should make sure that this call is non-blocking
fatalErrorHandler.onFatalError(t);
} | 3.26 |
flink_ResourceManager_getNodeIdOfTaskManager_rdh | // ------------------------------------------------------------------------
// Internal methods
// ------------------------------------------------------------------------
@VisibleForTesting
String getNodeIdOfTaskManager(ResourceID taskManagerId) {
checkState(taskExecutors.containsKey(taskManagerId));
return taskExecutors.get(taskManagerId).getNodeId();
} | 3.26 |
flink_ResourceManager_closeJobManagerConnection_rdh | /**
* This method should be called by the framework once it detects that a currently registered job
* manager has failed.
*
* @param jobId
* identifying the job whose leader shall be disconnected.
* @param resourceRequirementHandling
* indicating how existing resource requirements for the
* corresponding job should be handled
* @param cause
* The exception which caused the JobManager to fail.
*/
protected void closeJobManagerConnection(JobID jobId, ResourceRequirementHandling resourceRequirementHandling, Exception cause) {
JobManagerRegistration jobManagerRegistration = jobManagerRegistrations.remove(jobId);
if (jobManagerRegistration != null) {
final ResourceID jobManagerResourceId = jobManagerRegistration.getJobManagerResourceID();
final JobMasterGateway jobMasterGateway = jobManagerRegistration.getJobManagerGateway();
final JobMasterId jobMasterId = jobManagerRegistration.getJobMasterId();
log.info("Disconnect job manager {}@{} for job {} from the resource manager.", jobMasterId, jobMasterGateway.getAddress(), jobId);
jobManagerHeartbeatManager.unmonitorTarget(jobManagerResourceId);
jmResourceIdRegistrations.remove(jobManagerResourceId);
blocklistHandler.deregisterBlocklistListener(jobMasterGateway);
if (resourceRequirementHandling == ResourceRequirementHandling.CLEAR) {
slotManager.clearResourceRequirements(jobId);
}
// tell the job manager about the disconnect
jobMasterGateway.disconnectResourceManager(getFencingToken(), cause);
} else {
log.debug("There was no registered job manager for job {}.", jobId);
}
} | 3.26 |
flink_ResourceManager_m3_rdh | /**
* Registers a new JobMaster.
*
* @param jobMasterGateway
* to communicate with the registering JobMaster
* @param jobId
* of the job for which the JobMaster is responsible
* @param jobManagerAddress
* address of the JobMaster
* @param jobManagerResourceId
* ResourceID of the JobMaster
* @return RegistrationResponse
*/
private RegistrationResponse m3(final JobMasterGateway jobMasterGateway, JobID jobId, String jobManagerAddress, ResourceID jobManagerResourceId) {
if (jobManagerRegistrations.containsKey(jobId)) {
JobManagerRegistration oldJobManagerRegistration = jobManagerRegistrations.get(jobId);
if (Objects.equals(oldJobManagerRegistration.getJobMasterId(), jobMasterGateway.getFencingToken())) {
// same registration
log.debug("Job manager {}@{} was already registered.", jobMasterGateway.getFencingToken(), jobManagerAddress);
} else {
// tell old job manager that he is no longer the job leader
closeJobManagerConnection(oldJobManagerRegistration.getJobID(), ResourceRequirementHandling.f0, new Exception(("New job leader for job " + jobId) + " found."));
JobManagerRegistration jobManagerRegistration = new JobManagerRegistration(jobId, jobManagerResourceId, jobMasterGateway);
jobManagerRegistrations.put(jobId, jobManagerRegistration);
jmResourceIdRegistrations.put(jobManagerResourceId, jobManagerRegistration);
blocklistHandler.registerBlocklistListener(jobMasterGateway);
}
} else {
// new registration for the job
JobManagerRegistration jobManagerRegistration = new JobManagerRegistration(jobId, jobManagerResourceId, jobMasterGateway);
jobManagerRegistrations.put(jobId, jobManagerRegistration);
jmResourceIdRegistrations.put(jobManagerResourceId, jobManagerRegistration);
blocklistHandler.registerBlocklistListener(jobMasterGateway);
}
log.info("Registered job manager {}@{} for job {}.", jobMasterGateway.getFencingToken(), jobManagerAddress, jobId);
jobManagerHeartbeatManager.monitorTarget(jobManagerResourceId, new JobMasterHeartbeatSender(jobMasterGateway));
return new JobMasterRegistrationSuccess(getFencingToken(), resourceId);
} | 3.26 |
flink_ResourceManager_deregisterApplication_rdh | /**
* Cleanup application and shut down cluster.
*
* @param finalStatus
* of the Flink application
* @param diagnostics
* diagnostics message for the Flink application or {@code null}
*/
@Override
public CompletableFuture<Acknowledge> deregisterApplication(final ApplicationStatus finalStatus, @Nullable
final String diagnostics) {
log.info("Shut down cluster because application is in {}, diagnostics {}.", finalStatus, diagnostics);
try {
internalDeregisterApplication(finalStatus, diagnostics);
} catch (ResourceManagerException e) {
log.warn("Could not properly shutdown the application.", e);
}
return CompletableFuture.completedFuture(Acknowledge.get());
} | 3.26 |
flink_ResourceManager_requestTaskExecutorThreadInfoGateway_rdh | // Bug; see FLINK-27954
@Override
@Local
public CompletableFuture<TaskExecutorThreadInfoGateway> requestTaskExecutorThreadInfoGateway(ResourceID taskManagerId, Time timeout) {
final WorkerRegistration<WorkerType> taskExecutor = taskExecutors.get(taskManagerId);
if (taskExecutor == null) {
return FutureUtils.completedExceptionally(new UnknownTaskExecutorException(taskManagerId));
} else {
return CompletableFuture.completedFuture(taskExecutor.getTaskExecutorGateway());
}
} | 3.26 |
flink_ResourceManager_registerJobMaster_rdh | // ------------------------------------------------------------------------
// RPC methods
// ------------------------------------------------------------------------
@Override
public CompletableFuture<RegistrationResponse> registerJobMaster(final JobMasterId jobMasterId, final ResourceID jobManagerResourceId, final String jobManagerAddress, final JobID jobId, final Time timeout) {
checkNotNull(jobMasterId);
checkNotNull(jobManagerResourceId);
checkNotNull(jobManagerAddress);
checkNotNull(jobId);
if (!jobLeaderIdService.containsJob(jobId)) {
try {
jobLeaderIdService.addJob(jobId);
} catch (Exception e) {
ResourceManagerException exception = new ResourceManagerException(("Could not add the job " + jobId) + " to the job id leader service.", e);
onFatalError(exception);
log.error("Could not add job {} to job leader id service.", jobId, e);
return FutureUtils.completedExceptionally(exception);
}
}
log.info("Registering job manager {}@{} for job {}.", jobMasterId, jobManagerAddress,
jobId);
CompletableFuture<JobMasterId> jobMasterIdFuture;
try {
jobMasterIdFuture = jobLeaderIdService.getLeaderId(jobId);
} catch (Exception e) {
// we cannot check the job leader id so let's fail
// TODO: Maybe it's also ok to skip this check in case that we cannot check the leader
// id
ResourceManagerException exception = new ResourceManagerException("Cannot obtain the " + "job leader id future to verify the correct job leader.", e);
onFatalError(exception);
log.debug("Could not obtain the job leader id future to verify the correct job leader.");
return FutureUtils.completedExceptionally(exception);
}
CompletableFuture<JobMasterGateway> jobMasterGatewayFuture = getRpcService().connect(jobManagerAddress, jobMasterId, JobMasterGateway.class);
CompletableFuture<RegistrationResponse> registrationResponseFuture = jobMasterGatewayFuture.thenCombineAsync(jobMasterIdFuture, (JobMasterGateway jobMasterGateway,JobMasterId leadingJobMasterId) -> {
if (Objects.equals(leadingJobMasterId, jobMasterId)) {
return registerJobMasterInternal(jobMasterGateway, jobId, jobManagerAddress, jobManagerResourceId);
} else {
final String declineMessage = String.format("The leading JobMaster id %s did not match the received JobMaster id %s. " + "This indicates that a JobMaster leader change has happened.", leadingJobMasterId, jobMasterId);
log.debug(declineMessage);
return new RegistrationResponse.Failure(new FlinkException(declineMessage));
}
}, getMainThreadExecutor());
// handle exceptions which might have occurred in one of the futures inputs of combine
return registrationResponseFuture.handleAsync((RegistrationResponse registrationResponse,Throwable throwable) -> {
if (throwable != null) {
if (log.isDebugEnabled()) {
log.debug("Registration of job manager {}@{} failed.", jobMasterId, jobManagerAddress, throwable);
} else {
log.info("Registration of job manager {}@{} failed.", jobMasterId, jobManagerAddress);
}
return new RegistrationResponse.Failure(throwable);
} else {
return registrationResponse;
}
}, ioExecutor);
} | 3.26 |
flink_ExecutionPlanUtil_getExecutionPlanAsJSON_rdh | /**
* Extracts the execution plan (as JSON) from the given {@link Plan}.
*/
public static String getExecutionPlanAsJSON(Plan plan) {
checkNotNull(plan);
ExecutionPlanJSONGenerator jsonGenerator = getJSONGenerator();
return jsonGenerator.getExecutionPlan(plan);
} | 3.26 |
flink_BufferSizeEMA_calculateBufferSize_rdh | /**
* Calculates the buffer size based on the total available buffer size and the number of buffers in use.
*
* @param totalBufferSizeInBytes
* Total size of all buffers, in bytes.
* @param totalBuffers
* Total number of buffers in use.
* @return Buffer size calculated according to the implemented algorithm.
*/
public int calculateBufferSize(long totalBufferSizeInBytes, int totalBuffers) {
checkArgument(totalBufferSizeInBytes >= 0, "Size of buffer should be non negative");
checkArgument(totalBuffers > 0, "Number of buffers should be positive");
// Since the result value is always limited by max buffer size while the instant value is
// potentially unlimited. It can lead to an instant change from min to max value in case
// when the instant value is significantly larger than the possible max value.
// The solution is to limit the instant buffer size by twice of current buffer size in order
// to have the same growth and shrink speeds. for example if the instant value is equal to 0
// and the current value is 16000, we can decrease it by at most 1600 (suppose alpha=0.1).
// The idea is to allow increase and decrease size by the same number. So if the instant
// value would be large(for example 100000) it will be possible to increase the current
// value by 1600(the same as decreasing) because the limit will be 2 * currentValue = 32000.
// Example of change speed:
// growing = 32768, 29647, 26823, 24268, 21956, 19864
// shrinking = 19864, 21755, 23826, 26095, 28580, 31301, 32768
long desirableBufferSize = Math.min(totalBufferSizeInBytes / totalBuffers, 2L * lastBufferSize);
lastBufferSize += alpha * (desirableBufferSize - lastBufferSize);
return lastBufferSize = Math.max(minBufferSize, Math.min(lastBufferSize, maxBufferSize));
} | 3.26 |
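`calculateBufferSize` is an exponential moving average whose instantaneous input is clamped to twice the current value, so the size can grow and shrink at comparable speeds. Below is a self-contained sketch of that mechanism under assumed parameters (alpha = 0.1, min = 256, max = 32 KiB); the real class takes these from configuration, so the constants here are illustrative only.

```java
public class BufferSizeEmaSketch {

    // Illustrative parameters; the real operator derives these from configuration.
    private static final int MIN_BUFFER_SIZE = 256;
    private static final int MAX_BUFFER_SIZE = 32 * 1024;
    private static final double ALPHA = 0.1;

    private static int lastBufferSize = MAX_BUFFER_SIZE;

    static int calculateBufferSize(long totalBufferSizeInBytes, int totalBuffers) {
        // Clamp the instantaneous value to twice the current value so the EMA can
        // grow and shrink at comparable speeds even for extreme measurements.
        long desirable = Math.min(totalBufferSizeInBytes / totalBuffers, 2L * lastBufferSize);
        lastBufferSize += ALPHA * (desirable - lastBufferSize);
        lastBufferSize = Math.max(MIN_BUFFER_SIZE, Math.min(lastBufferSize, MAX_BUFFER_SIZE));
        return lastBufferSize;
    }

    public static void main(String[] args) {
        // Feed a tiny measurement repeatedly: the size shrinks smoothly
        // instead of jumping straight from max to min.
        for (int i = 0; i < 6; i++) {
            System.out.println(calculateBufferSize(0, 1));
        }
    }
}
```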
flink_HiveSetProcessor_startWithHiveSpecialVariablePrefix_rdh | /**
* Checks whether the variable's name starts with one of the special variable prefixes that Hive
* reserves.
*/
public static boolean startWithHiveSpecialVariablePrefix(String varname) {
String[] hiveSpecialVariablePrefix = new String[]{ ENV_PREFIX, SYSTEM_PREFIX, HIVECONF_PREFIX, HIVEVAR_PREFIX, METACONF_PREFIX };
for (String prefix : hiveSpecialVariablePrefix) {
if (varname.startsWith(prefix)) {
return true;
}
}
return false;
} | 3.26 |
flink_HiveSetProcessor_setVariable_rdh | /**
* Set variable following Hive's implementation.
*/
public static void setVariable(HiveConf hiveConf, Map<String, String> hiveVariables, String varname, String varvalue) {
if (varname.startsWith(ENV_PREFIX)) {
throw new UnsupportedOperationException("env:* variables can not be set.");
} else if (varname.startsWith(SYSTEM_PREFIX)) {
String propName = varname.substring(SYSTEM_PREFIX.length());
System.getProperties().setProperty(propName, new VariableSubstitution(() -> hiveVariables).substitute(hiveConf, varvalue));
} else if (varname.startsWith(HIVECONF_PREFIX)) {
String propName = varname.substring(HIVECONF_PREFIX.length());
setConf(hiveConf, hiveVariables, varname, propName, varvalue);
} else if (varname.startsWith(HIVEVAR_PREFIX)) {
String propName = varname.substring(HIVEVAR_PREFIX.length());
hiveVariables.put(propName, new VariableSubstitution(() -> hiveVariables).substitute(hiveConf, varvalue));
} else if (varname.startsWith(METACONF_PREFIX)) {
String propName = varname.substring(METACONF_PREFIX.length());
try {
Hive hive = Hive.get(hiveConf);
hive.setMetaConf(propName, new VariableSubstitution(() -> hiveVariables).substitute(hiveConf, varvalue));
} catch (HiveException e) {
throw new FlinkHiveException(String.format("'SET %s=%s' FAILED.", varname, varvalue), e);
}
} else {
// here is a little of different from Hive's behavior,
// if there's no prefix, we also put it to passed hiveVariables for flink
// may use it as its own configurations.
// Otherwise, there's no way to set Flink's configuration using Hive's set command.
hiveVariables.put(varname, new VariableSubstitution(() -> hiveVariables).substitute(hiveConf, varvalue));
setConf(hiveConf, hiveVariables, varname, varname, varvalue);
}
} | 3.26 |
flink_StandaloneLeaderRetrievalService_start_rdh | // ------------------------------------------------------------------------
@Override
public void start(LeaderRetrievalListener listener) {
checkNotNull(listener, "Listener must not be null.");
synchronized(startStopLock) {
checkState(!started, "StandaloneLeaderRetrievalService can only be started once.");
started = true;
// directly notify the listener, because we already know the leading JobManager's
// address
listener.notifyLeaderAddress(leaderAddress, leaderId);
}
} | 3.26 |
flink_HiveTablePartition_ofTable_rdh | /**
* Creates a HiveTablePartition to represent a hive table.
*
* @param hiveConf
* the HiveConf used to connect to HMS
* @param hiveVersion
* the version of hive in use, if it's null the version will be automatically
* detected
* @param dbName
* name of the database
* @param tableName
* name of the table
*/
public static HiveTablePartition ofTable(HiveConf hiveConf, @Nullable
String hiveVersion, String dbName, String tableName) {
HiveShim hiveShim = getHiveShim(hiveVersion);
try (HiveMetastoreClientWrapper client = new HiveMetastoreClientWrapper(hiveConf, hiveShim)) {
Table hiveTable = client.getTable(dbName, tableName);
return new HiveTablePartition(hiveTable.getSd(), HiveReflectionUtils.getTableMetadata(hiveShim, hiveTable));
} catch (TException e) {
throw new FlinkHiveException(String.format("Failed to create HiveTablePartition for hive table %s.%s", dbName, tableName), e);
}
} | 3.26 |
flink_HiveTablePartition_ofPartition_rdh | /**
* Creates a HiveTablePartition to represent a hive partition.
*
* @param hiveConf
* the HiveConf used to connect to HMS
* @param hiveVersion
* the version of hive in use, if it's null the version will be automatically
* detected
* @param dbName
* name of the database
* @param tableName
* name of the table
* @param partitionSpec
* map from each partition column to its value. The map should contain
* exactly all the partition columns and in the order in which the partition columns are
* defined
*/
public static HiveTablePartition ofPartition(HiveConf hiveConf, @Nullable
String hiveVersion,
String dbName, String tableName, LinkedHashMap<String, String> partitionSpec) {
HiveShim hiveShim = getHiveShim(hiveVersion);
try (HiveMetastoreClientWrapper client = new HiveMetastoreClientWrapper(hiveConf, hiveShim)) {
Table hiveTable = client.getTable(dbName, tableName);
Partition hivePartition = client.getPartition(dbName, tableName, new ArrayList<>(partitionSpec.values()));
return new HiveTablePartition(hivePartition.getSd(), partitionSpec, HiveReflectionUtils.getTableMetadata(hiveShim, hiveTable));
} catch (TException e) {
throw new FlinkHiveException(String.format("Failed to create HiveTablePartition for partition %s of hive table %s.%s", partitionSpec, dbName, tableName), e);
}
} | 3.26 |
flink_SetOperationFactory_create_rdh | /**
* Creates a valid algebraic operation.
*
* @param type
* type of operation to create
* @param left
* first relational operation of the operation
* @param right
* second relational operation of the operation
* @param all
* flag defining how duplicates should be handled
* @return creates a valid algebraic operation
*/
QueryOperation create(SetQueryOperationType type, QueryOperation left, QueryOperation right,
boolean all) {
failIfStreaming(type, all);
validateSetOperation(type, left, right);
return new SetQueryOperation(left, right, type, all, createCommonTableSchema(left, right));
} | 3.26 |
flink_AlterSchemaConverter_convertAlterSchema_rdh | /**
* Convert ALTER TABLE DROP WATERMARK to generate an updated {@link Schema}.
*/
public Operation convertAlterSchema(SqlAlterTableDropWatermark dropWatermark, ResolvedCatalogTable oldTable) {
if (oldTable.getResolvedSchema().getWatermarkSpecs().isEmpty()) {
throw new ValidationException(String.format("%sThe base table does not define any watermark strategy.", EX_MSG_PREFIX));
}
Schema.Builder schemaBuilder = Schema.newBuilder();
buildUpdatedColumn(schemaBuilder, oldTable, (builder, column) -> builder.fromColumns(Collections.singletonList(column)));
buildUpdatedPrimaryKey(schemaBuilder, oldTable, Function.identity());
return buildAlterTableChangeOperation(dropWatermark, Collections.singletonList(TableChange.dropWatermark()), schemaBuilder.build(), oldTable);
} | 3.26 |
flink_AlterSchemaConverter_buildUpdatedColumn_rdh | // --------------------------------------------------------------------------------------------
private void buildUpdatedColumn(Schema.Builder builder, ResolvedCatalogTable oldTable, BiConsumer<Schema.Builder, Schema.UnresolvedColumn> columnConsumer) {
// build column
oldTable.getUnresolvedSchema().getColumns().forEach(column -> columnConsumer.accept(builder, column));
} | 3.26 |
flink_MetricAssertions_isCloseTo_rdh | /**
* Verifies that the gauge's value is close to the expected value within a certain deviation.
*
* @param value
* the expected value
* @param epsilon
* the maximum deviation from the expected value
* @return this assertion object
*/
public GaugeAssert<T> isCloseTo(long value, long epsilon) {
assertThat(((Long) (actual.getValue()))).isGreaterThan(value - epsilon).isLessThan(value + epsilon);
return this;
} | 3.26 |
flink_DeltaIterationBase_getInitialWorkset_rdh | /**
* Returns the initial workset input, or null, if none is set.
*
* @return The iteration's workset input.
*/
public Operator<WT> getInitialWorkset() {
return getSecondInput();
} | 3.26 |
flink_DeltaIterationBase_setBroadcastVariables_rdh | /**
* The DeltaIteration meta operator cannot have broadcast inputs. This method always throws an
* exception.
*
* @param inputs
* Ignored
*/
public <X> void setBroadcastVariables(Map<String, Operator<X>> inputs) {
throw new UnsupportedOperationException("The DeltaIteration meta operator cannot have broadcast inputs.");
} | 3.26 |
flink_DeltaIterationBase_setInitialSolutionSet_rdh | /**
* Sets the given input as the initial solution set.
*
* @param input
* The contract to set the initial solution set.
*/
public void setInitialSolutionSet(Operator input) {
setFirstInput(input);
} | 3.26 |
flink_DeltaIterationBase_setBroadcastVariable_rdh | /**
* The DeltaIteration meta operator cannot have broadcast inputs. This method always throws an
* exception.
*
* @param name
* Ignored.
* @param root
* Ignored.
*/
public void setBroadcastVariable(String name, Operator<?> root) {
throw new UnsupportedOperationException("The DeltaIteration meta operator cannot have broadcast inputs.");
} | 3.26 |
flink_DeltaIterationBase_setInitialWorkset_rdh | /**
* Sets the given input as the initial workset.
*
* @param input
* The contract to set as the initial workset.
*/
public void setInitialWorkset(Operator<WT> input) {
setSecondInput(input);
} | 3.26 |
flink_DeltaIterationBase_getBroadcastInputs_rdh | /**
* DeltaIteration meta operator cannot have broadcast inputs.
*
* @return An empty map.
*/
public Map<String, Operator<?>> getBroadcastInputs() {
return Collections.emptyMap();
} | 3.26 |
flink_DeltaIterationBase_getInitialSolutionSet_rdh | // --------------------------------------------------------------------------------------------
// Getting / Setting the Inputs
// --------------------------------------------------------------------------------------------
/**
* Returns the initial solution set input, or null, if none is set.
*
* @return The iteration's initial solution set input.
*/
public Operator getInitialSolutionSet() {
return getFirstInput();
} | 3.26 |
flink_DeltaIterationBase_setNextWorkset_rdh | /**
* Sets the contract of the step function that represents the next workset. This contract is
* considered one of the two sinks of the step function (the other one being the solution set
* delta).
*
* @param result
* The contract representing the next workset.
*/
public void setNextWorkset(Operator<WT> result) {
this.nextWorkset = result;
} | 3.26 |
flink_DeltaIterationBase_setSolutionSetDelta_rdh | /**
* Sets the contract of the step function that represents the solution set delta. This contract
* is considered one of the two sinks of the step function (the other one being the next
* workset).
*
* @param delta
* The contract representing the solution set delta.
*/
public void setSolutionSetDelta(Operator delta) {
this.solutionSetDelta = delta;
} | 3.26 |
flink_DeltaIterationBase_getSolutionSetKeyFields_rdh | // --------------------------------------------------------------------------------------------
public int[] getSolutionSetKeyFields() {
return this.solutionSetKeyFields;
} | 3.26 |
flink_DeltaIterationBase_setSolutionSetUnManaged_rdh | /**
* Sets whether to keep the solution set in managed memory (safe against heap exhaustion) or
* unmanaged memory (objects on heap).
*
* @param solutionSetUnManaged
* True to keep the solution set in unmanaged memory, false to keep
* it in managed memory.
* @see #isSolutionSetUnManaged()
*/
public void setSolutionSetUnManaged(boolean solutionSetUnManaged) {
this.solutionSetUnManaged = solutionSetUnManaged;
} | 3.26 |
flink_BatchTask_initBroadcastInputReaders_rdh | /**
* Creates the record readers for the extra broadcast inputs as configured by {@link TaskConfig#getNumBroadcastInputs()}. This method requires that the task configuration, the
* driver, and the user-code class loader are set.
*/
protected void initBroadcastInputReaders() throws Exception {
final int numBroadcastInputs = this.config.getNumBroadcastInputs();
final MutableReader<?>[] broadcastInputReaders = new MutableReader<?>[numBroadcastInputs];
int currentReaderOffset = config.getNumInputs();
for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
// ---------------- create the input readers ---------------------
// in case where a logical input unions multiple physical inputs, create a union reader
final int groupSize = this.config.getBroadcastGroupSize(i);
if (groupSize == 1) {
// non-union case
broadcastInputReaders[i] = new MutableRecordReader<>(getEnvironment().getInputGate(currentReaderOffset), getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else if (groupSize > 1) {
// union case
IndexedInputGate[] readers = new IndexedInputGate[groupSize];
for (int j = 0; j < groupSize; ++j) {
readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
}
broadcastInputReaders[i] = new MutableRecordReader<>(new UnionInputGate(readers), getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else {
throw new Exception("Illegal input group size in task configuration: " + groupSize);
}
currentReaderOffset += groupSize;
}
this.broadcastInputReaders = broadcastInputReaders;
} | 3.26 |
flink_BatchTask_getOutputCollector_rdh | // --------------------------------------------------------------------------------------------
// Result Shipping and Chained Tasks
// --------------------------------------------------------------------------------------------
/**
* Creates the {@link Collector} for the given task, as described by the given configuration.
* The output collector contains the writers that forward the data to the different tasks that
* the given task is connected to. Each writer applies the partitioning as described in the
* configuration.
*
* @param task
* The task that the output collector is created for.
* @param config
* The configuration describing the output shipping strategies.
* @param cl
* The classloader used to load user defined types.
* @param eventualOutputs
* The output writers that this task forwards to the next task for each
* output.
* @param outputOffset
* The offset to start to get the writers for the outputs
* @param numOutputs
* The number of outputs described in the configuration.
* @return The OutputCollector that data produced in this task is submitted to.
*/
public static <T> Collector<T> getOutputCollector(AbstractInvokable task, TaskConfig config, ClassLoader cl, List<RecordWriter<?>> eventualOutputs, int outputOffset, int numOutputs) throws Exception {
if (numOutputs == 0) {
return null;
}
// get the factory for the serializer
final TypeSerializerFactory<T> serializerFactory = config.getOutputSerializer(cl);
final List<RecordWriter<SerializationDelegate<T>>> writers = new ArrayList<>(numOutputs);
// create a writer for each output
for (int i = 0; i < numOutputs; i++) {
// create the OutputEmitter from output ship strategy
final ShipStrategyType strategy = config.getOutputShipStrategy(i);
final int indexInSubtaskGroup = task.getIndexInSubtaskGroup();
final TypeComparatorFactory<T> compFactory = config.getOutputComparator(i, cl);
final ChannelSelector<SerializationDelegate<T>> oe;
if (compFactory == null) {
oe = new OutputEmitter<>(strategy, indexInSubtaskGroup);
} else {
final DataDistribution dataDist = config.getOutputDataDistribution(i, cl);
final Partitioner<?> partitioner = config.getOutputPartitioner(i, cl);
final TypeComparator<T> comparator = compFactory.createComparator();
oe = new OutputEmitter<>(strategy, indexInSubtaskGroup,
comparator, partitioner, dataDist);
}
final RecordWriter<SerializationDelegate<T>> recordWriter = new RecordWriterBuilder().setChannelSelector(oe).setTaskName(task.getEnvironment().getTaskInfo().getTaskNameWithSubtasks()).build(task.getEnvironment().getWriter(outputOffset + i));
recordWriter.setMetricGroup(task.getEnvironment().getMetricGroup().getIOMetricGroup());
writers.add(recordWriter);
}
if (eventualOutputs != null) {
eventualOutputs.addAll(writers);
}
return new OutputCollector<>(writers, serializerFactory.getSerializer());
} | 3.26 |
flink_BatchTask_initOutputs_rdh | /**
* Creates a writer for each output. Creates an OutputCollector which forwards its input to all
* writers. The output collector applies the configured shipping strategy.
*/
@SuppressWarnings("unchecked")
public static <T> Collector<T> initOutputs(AbstractInvokable containingTask, UserCodeClassLoader cl, TaskConfig config, List<ChainedDriver<?, ?>> chainedTasksTarget, List<RecordWriter<?>> eventualOutputs, ExecutionConfig executionConfig, Map<String, Accumulator<?, ?>> accumulatorMap) throws Exception {
final int numOutputs = config.getNumOutputs();
// check whether we got any chained tasks
final int numChained = config.getNumberOfChainedStubs();
if (numChained > 0) {
// got chained stubs. that means that this one may only have a single forward connection
if ((numOutputs != 1) || (config.getOutputShipStrategy(0) != ShipStrategyType.FORWARD)) {
throw new RuntimeException("Plan Generation Bug: Found a chained stub that is not connected via an only forward connection.");
}
// instantiate each task
@SuppressWarnings("rawtypes")
Collector previous = null;
for (int i = numChained - 1; i >= 0; --i) {
// get the task first
final ChainedDriver<?, ?> ct;
try {
Class<? extends ChainedDriver<?, ?>> chainedTaskClass = config.getChainedTask(i);
ct = chainedTaskClass.newInstance();
} catch (Exception ex) {
throw new RuntimeException("Could not instantiate chained task driver.", ex);
}
// get the configuration for the task
final TaskConfig chainedStubConf = config.getChainedStubConfig(i);
final String taskName = config.getChainedTaskName(i);
if (i == (numChained - 1)) {
// last in chain, instantiate the output collector for this task
previous = getOutputCollector(containingTask, chainedStubConf, cl.asClassLoader(), eventualOutputs, 0, chainedStubConf.getNumOutputs());
}
ct.setup(chainedStubConf, taskName, previous, containingTask, cl, executionConfig, accumulatorMap);
chainedTasksTarget.add(0, ct);
if (i == (numChained - 1)) {
ct.getIOMetrics().reuseOutputMetricsForTask();
}
previous = ct;
}
// the collector of the first in the chain is the collector for the task
return ((Collector<T>) (previous));
}
// else
// instantiate the output collector the default way from this configuration
return getOutputCollector(containingTask, config, cl.asClassLoader(), eventualOutputs, 0, numOutputs);
} | 3.26 |
flink_BatchTask_closeUserCode_rdh | /**
* Closes the given stub using its {@link org.apache.flink.api.common.functions.RichFunction#close()} method. If the close call
* produces an exception, a new exception with a standard error message is created, using the
* encountered exception as its cause.
*
* @param stub
* The user code instance to be closed.
* @throws Exception
* Thrown, if the user code's close method produces an exception.
*/
public static void closeUserCode(Function stub) throws Exception {
try {
FunctionUtils.closeFunction(stub);
} catch (Throwable t) {
throw new Exception("The user defined 'close()' method caused an exception: "
+ t.getMessage(), t);
}
} | 3.26 |
flink_BatchTask_closeChainedTasks_rdh | /**
* Closes all chained tasks, in the order as they are stored in the array. The closing process
* creates a standardized log info message.
*
* @param tasks
* The tasks to be closed.
* @param parent
* The parent task, used to obtain parameters to include in the log message.
* @throws Exception
* Thrown, if the closing encounters an exception.
*/
public static void closeChainedTasks(List<ChainedDriver<?, ?>> tasks, AbstractInvokable parent) throws Exception {
for (ChainedDriver<?, ?> task : tasks) {
task.closeTask();
if (LOG.isDebugEnabled()) {
LOG.debug(constructLogString("Finished task code", task.getTaskName(), parent));
}
}
} | 3.26 |
flink_BatchTask_openChainedTasks_rdh | // --------------------------------------------------------------------------------------------
// Chained Task LifeCycle
// --------------------------------------------------------------------------------------------
/**
* Opens all chained tasks, in the order as they are stored in the array. The opening process
* creates a standardized log info message.
*
* @param tasks
* The tasks to be opened.
* @param parent
* The parent task, used to obtain parameters to include in the log message.
* @throws Exception
* Thrown, if the opening encounters an exception.
*/
public static void openChainedTasks(List<ChainedDriver<?, ?>> tasks, AbstractInvokable parent) throws Exception {
// start all chained tasks
for (ChainedDriver<?, ?> task : tasks) {
if (LOG.isDebugEnabled()) {
LOG.debug(constructLogString("Start task code", task.getTaskName(), parent));
}
task.openTask();
}
} | 3.26 |
flink_BatchTask_getLastOutputCollector_rdh | // --------------------------------------------------------------------------------------------
// Task Setup and Teardown
// --------------------------------------------------------------------------------------------
/**
*
* @return the last output collector in the collector chain
*/
@SuppressWarnings("unchecked")
protected Collector<OT> getLastOutputCollector() {
int numChained = this.chainedTasks.size();
return numChained == 0 ? output : ((Collector<OT>) (chainedTasks.get(numChained - 1).getOutputCollector()));
} | 3.26 |
flink_BatchTask_cancelChainedTasks_rdh | /**
* Cancels all tasks via their {@link ChainedDriver#cancelTask()} method. Any occurring
* exception and error is suppressed, such that the canceling method of every task is invoked in
* all cases.
*
* @param tasks
* The tasks to be canceled.
*/
public static void cancelChainedTasks(List<ChainedDriver<?, ?>> tasks) {
for (ChainedDriver<?, ?> task : tasks) {
try {
task.cancelTask();
} catch (Throwable t) {
// do nothing
}
}
} | 3.26 |
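Taken together, the open/close/cancel helpers shown in this section are typically used around the driver run roughly as sketched below. This is a hedged, condensed sketch; the surrounding fields and the exact error handling of the real run loop are assumptions.

BatchTask.openChainedTasks(chainedTasks, this);
try {
    driver.run();
    BatchTask.closeChainedTasks(chainedTasks, this);
} catch (Exception e) {
    BatchTask.cancelChainedTasks(chainedTasks);
    throw e;
}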
flink_BatchTask_instantiateUserCode_rdh | // --------------------------------------------------------------------------------------------
// Miscellaneous Utilities
// --------------------------------------------------------------------------------------------
/**
* Instantiates a user code class from its definition in the task configuration. The class is
* instantiated without arguments using the null-ary constructor. Instantiation will fail if
* this constructor does not exist or is not public.
*
* @param <T>
* The generic type of the user code class.
* @param config
* The task configuration containing the class description.
* @param cl
* The class loader to be used to load the class.
* @param superClass
* The super class that the user code class extends or implements, for type
* checking.
* @return An instance of the user code class.
*/
public static <T> T instantiateUserCode(TaskConfig config, ClassLoader cl, Class<? super T> superClass) {
try {
T stub = config.<T>getStubWrapper(cl).getUserCodeObject(superClass, cl);
// check if the class is a subclass, if the check is required
if ((superClass != null) && (!superClass.isAssignableFrom(stub.getClass()))) {
throw new RuntimeException(((("The class '" + stub.getClass().getName()) + "' is not a subclass of '") + superClass.getName()) + "' as is required.");
}
return stub;
} catch (ClassCastException ccex) {
throw new RuntimeException("The UDF class is not a proper subclass of " + superClass.getName(), ccex); }
} | 3.26 |
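A hedged sketch of a call site; the concrete function type, the TaskConfig instance, and the class loader are assumptions. The point is that the superClass argument doubles as a runtime type check on the instantiated UDF.

// Load the user's function from the task config and verify it implements MapFunction.
MapFunction<String, Integer> udf =
        BatchTask.instantiateUserCode(taskConfig, userCodeClassLoader, MapFunction.class);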
flink_BatchTask_initBroadcastInputsSerializers_rdh | /**
* Creates all the serializers and iterators for the broadcast inputs.
*/
protected void initBroadcastInputsSerializers(int numBroadcastInputs) {
this.broadcastInputSerializers = new TypeSerializerFactory<?>[numBroadcastInputs];
ClassLoader userCodeClassLoader = getUserCodeClassLoader();
for (int i = 0; i < numBroadcastInputs; i++) {
// ---------------- create the serializer first ---------------------
final TypeSerializerFactory<?> serializerFactory = this.config.getBroadcastInputSerializer(i, userCodeClassLoader);
this.broadcastInputSerializers[i] = serializerFactory;
}
} | 3.26 |
flink_BatchTask_invoke_rdh | // --------------------------------------------------------------------------------------------
// Task Interface
// --------------------------------------------------------------------------------------------
/**
* The main work method.
*/
@Override
public void invoke() throws Exception {
// --------------------------------------------------------------------
// Initialize
// --------------------------------------------------------------------
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Start registering input and output."));
}
// obtain task configuration (including stub parameters)
Configuration taskConf = getTaskConfiguration();
this.config = new TaskConfig(taskConf);
// now get the operator class which drives the operation
final Class<? extends Driver<S, OT>> driverClass = this.config.getDriver();
this.driver = InstantiationUtil.instantiate(driverClass, Driver.class);
String headName = getEnvironment().getTaskInfo().getTaskName().split("->")[0].trim();
this.metrics = getEnvironment().getMetricGroup().getOrAddOperator(headName.startsWith("CHAIN") ? headName.substring(6) : headName);
this.metrics.getIOMetricGroup().reuseInputMetricsForTask();
if (config.getNumberOfChainedStubs() == 0) {
this.metrics.getIOMetricGroup().reuseOutputMetricsForTask();
}
// initialize the readers.
// this does not yet trigger any stream consuming or processing.
initInputReaders();
initBroadcastInputReaders();
// initialize the writers.
initOutputs();
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Finished registering input and output."));
}
// --------------------------------------------------------------------
// Invoke
// --------------------------------------------------------------------
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Start task code."));
}
this.runtimeUdfContext = m2(metrics);
// whatever happens in this scope, make sure that the local strategies are cleaned up!
// note that the initialization of the local strategies is in the try-finally block as well,
// so that the thread that creates them catches its own errors that may happen in that
// process.
// this is especially important, since there may be asynchronous closes (such as through
// canceling).
try {
// initialize the remaining data structures on the input and trigger the local
// processing
// the local processing includes building the dams / caches
try {
int numInputs = driver.getNumberOfInputs();
int numComparators = driver.getNumberOfDriverComparators();
int numBroadcastInputs = this.config.getNumBroadcastInputs();
initInputsSerializersAndComparators(numInputs, numComparators);
initBroadcastInputsSerializers(numBroadcastInputs);
// set the iterative status for inputs and broadcast inputs
{
List<Integer> iterativeInputs = new ArrayList<>();
for (int i = 0; i < numInputs; i++) {
final int numberOfEventsUntilInterrupt = getTaskConfig().getNumberOfEventsUntilInterruptInIterativeGate(i);
if (numberOfEventsUntilInterrupt < 0) {
throw new IllegalArgumentException();
} else if (numberOfEventsUntilInterrupt > 0) {
this.inputReaders[i].setIterativeReader();
iterativeInputs.add(i);
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString(((("Input [" + i)
+ "] reads in supersteps with [") + numberOfEventsUntilInterrupt) + "] event(s) till next superstep."));
}
}
}
this.iterativeInputs = asArray(iterativeInputs);
}
{
List<Integer> iterativeBcInputs = new ArrayList<>();
for (int i = 0; i < numBroadcastInputs; i++) {
final int numberOfEventsUntilInterrupt = getTaskConfig().getNumberOfEventsUntilInterruptInIterativeBroadcastGate(i);
if (numberOfEventsUntilInterrupt < 0) {
throw new IllegalArgumentException();
} else if (numberOfEventsUntilInterrupt > 0) {
this.broadcastInputReaders[i].setIterativeReader();
iterativeBcInputs.add(i);
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString(((("Broadcast input [" + i) + "] reads in supersteps with [") + numberOfEventsUntilInterrupt) + "] event(s) till next superstep."));
}
}
}
this.iterativeBroadcastInputs = asArray(iterativeBcInputs);
}
initLocalStrategies(numInputs);
} catch (Exception e) {
throw new RuntimeException("Initializing the input processing failed" + (e.getMessage() == null ? "." : ": " + e.getMessage()), e);
}
if (!this.running) {
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Task cancelled before task code was started."));
}
return;
}
// pre main-function initialization
initialize();
// read the broadcast variables. they will be released in the finally clause
for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
final String name = this.config.getBroadcastInputName(i);
/* superstep one for the start */
m0(i, name, this.runtimeUdfContext, 1);
}
// the work goes here
run();
} finally {
// clean up in any case!
closeLocalStrategiesAndCaches();
clearReaders(inputReaders);
m3(eventualOutputs);
}
if (this.running) {
if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Finished task code."));
}
} else if (LOG.isDebugEnabled()) {
LOG.debug(formatLogString("Task code cancelled."));
}
} | 3.26 |
flink_BatchTask_constructLogString_rdh | // ============================================================================================
// Static Utilities
//
// Utilities are consolidated here to ensure a uniform way of running,
// logging, exception handling, and error messages.
// ============================================================================================
// --------------------------------------------------------------------------------------------
// Logging
// --------------------------------------------------------------------------------------------
/**
* Utility function that composes a string for logging purposes. The string includes the given
* message, the given name of the task and the index in its subtask group as well as the number
* of instances that exist in its subtask group.
*
* @param message
* The main message for the log.
* @param taskName
* The name of the task.
* @param parent
* The task that contains the code producing the message.
* @return The string for logging.
*/
public static String constructLogString(String message, String taskName, AbstractInvokable parent) {
return message + ": " + taskName + " ("
+ (parent.getEnvironment().getTaskInfo().getIndexOfThisSubtask() + 1) + '/'
+ parent.getEnvironment().getTaskInfo().getNumberOfParallelSubtasks() + ')';
} | 3.26 |
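A hedged example of the resulting message, assuming the parent task runs as subtask index 2 out of 8 parallel subtasks; the task name is illustrative:

// Yields something like: "Start task code: DataSink (3/8)"
String logLine = BatchTask.constructLogString("Start task code", "DataSink", parentTask);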
flink_BatchTask_initInputReaders_rdh | /**
* Creates the record readers for the number of inputs as defined by {@link #getNumTaskInputs()}. This method requires that the task configuration, the driver, and the
* user-code class loader are set.
*/
protected void initInputReaders() throws Exception {
final int numInputs = getNumTaskInputs();
final MutableReader<?>[] inputReaders = new MutableReader<?>[numInputs];
int currentReaderOffset = 0;
for (int i = 0; i < numInputs; i++) {
// ---------------- create the input readers ---------------------
// in case where a logical input unions multiple physical inputs, create a union reader
final int groupSize = this.config.getGroupSize(i);
if (groupSize == 1) {
// non-union case
inputReaders[i] = new MutableRecordReader<>(getEnvironment().getInputGate(currentReaderOffset), getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else if (groupSize > 1) {
// union case
IndexedInputGate[] readers = new IndexedInputGate[groupSize];
for (int j = 0; j < groupSize; ++j) {
readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
}
inputReaders[i] = new MutableRecordReader<>(new UnionInputGate(readers), getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else {
throw new Exception("Illegal input group size in task configuration: " + groupSize);
}
currentReaderOffset += groupSize;
}
this.inputReaders = inputReaders;
// final sanity check
if (currentReaderOffset != this.config.getNumInputs()) {
throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
}
} | 3.26 |
flink_BatchTask_setLastOutputCollector_rdh | /**
* Sets the last output {@link Collector} of the collector chain of this {@link BatchTask}.
*
* <p>In case of chained tasks, the output collector of the last {@link ChainedDriver} is set.
* Otherwise it is the single collector of the {@link BatchTask}.
*
* @param newOutputCollector
* new output collector to set as last collector
*/
protected void setLastOutputCollector(Collector<OT> newOutputCollector) {
int numChained = this.chainedTasks.size();
if (numChained == 0) {
output = newOutputCollector;
return;
}
chainedTasks.get(numChained - 1).setOutputCollector(newOutputCollector);
} | 3.26 |
flink_BatchTask_logAndThrowException_rdh | /**
* Prints an error message and throws the given exception. If the exception is of the type
* {@link ExceptionInChainedStubException} then the chain of contained exceptions is followed
* until an exception of a different type is found.
*
* @param ex
* The exception to be thrown.
* @param parent
* The parent task, whose information is included in the log message.
* @throws Exception
* Always thrown.
*/
public static void logAndThrowException(Exception ex, AbstractInvokable parent) throws Exception {
String taskName;
if (ex instanceof ExceptionInChainedStubException) {
do {
ExceptionInChainedStubException cex = ((ExceptionInChainedStubException) (ex));
taskName = cex.getTaskName();
ex = cex.getWrappedException();
} while (ex instanceof ExceptionInChainedStubException );
} else {
taskName = parent.getEnvironment().getTaskInfo().getTaskName();
}
if (LOG.isErrorEnabled()) {
LOG.error(constructLogString("Error in task code", taskName, parent), ex);
}
throw ex;
} | 3.26 |
flink_BatchTask_initLocalStrategies_rdh | /**
* NOTE: This method must be invoked after the invocation of {@code #initInputReaders()} and
* {@code #initInputSerializersAndComparators(int)}!
*/
protected void initLocalStrategies(int numInputs) throws Exception {
final MemoryManager memMan = getMemoryManager();
final IOManager ioMan = getIOManager();
this.localStrategies = new CloseableInputProvider<?>[numInputs];
this.inputs = new MutableObjectIterator<?>[numInputs];
this.excludeFromReset = new boolean[numInputs];
this.inputIsCached = new boolean[numInputs];
this.inputIsAsyncMaterialized = new boolean[numInputs];
this.materializationMemory = new int[numInputs];
// set up the local strategies first, such that they can work before any temp barrier is
// created
for (int i = 0; i < numInputs; i++) {
initInputLocalStrategy(i);
}
// we do another loop over the inputs, because we want to instantiate all
// sorters, etc before requesting the first input (as this call may block)
// we have two types of materialized inputs, and both are replayable (can act as a cache)
// The first variant materializes in a different thread and hence
// acts as a pipeline breaker. this one should only be there, if a pipeline breaker is
// needed.
// the second variant spills to the side and will not read unless the result is also
// consumed
// in a pipelined fashion.
this.resettableInputs = new SpillingResettableMutableObjectIterator<?>[numInputs];
this.tempBarriers = new TempBarrier<?>[numInputs];
for (int i = 0; i < numInputs; i++) {
final int memoryPages;
final boolean async = this.config.isInputAsynchronouslyMaterialized(i);
final boolean cached = this.config.isInputCached(i);
this.inputIsAsyncMaterialized[i] = async;
this.inputIsCached[i] = cached;
if (async || cached) {
memoryPages = memMan.computeNumberOfPages(this.config.getRelativeInputMaterializationMemory(i));
if (memoryPages <= 0) {
throw new Exception("Input marked as materialized/cached, but no memory for materialization provided.");
}
this.materializationMemory[i] = memoryPages;
} else {
memoryPages = 0;
}
if (async) {
@SuppressWarnings({ "unchecked",
"rawtypes" })
TempBarrier<?> barrier = new TempBarrier(this, getInput(i), this.inputSerializers[i], memMan, ioMan, v55, emptyList());
barrier.startReading();
this.tempBarriers[i] = barrier;
this.inputs[i] = null;
} else if (cached) {
@SuppressWarnings({ "unchecked", "rawtypes" })
SpillingResettableMutableObjectIterator<?> iter = new SpillingResettableMutableObjectIterator(getInput(i), this.inputSerializers[i].getSerializer(), getMemoryManager(), getIOManager(), memoryPages, this);
this.resettableInputs[i] = iter;
this.inputs[i] = iter;
}
}
} | 3.26 |
flink_BatchTask_openUserCode_rdh | // --------------------------------------------------------------------------------------------
// User Code LifeCycle
// --------------------------------------------------------------------------------------------
/**
* Opens the given stub using its {@link org.apache.flink.api.common.functions.RichFunction#open(OpenContext)} method. If the open
* call produces an exception, a new exception with a standard error message is created, using
* the encountered exception as its cause.
*
* @param stub
* The user code instance to be opened.
* @param parameters
* The parameters supplied to the user code.
* @throws Exception
* Thrown, if the user code's open method produces an exception.
*/
public static void openUserCode(Function stub, Configuration parameters) throws Exception {
try {
FunctionUtils.openFunction(stub, DefaultOpenContext.INSTANCE);
} catch (Throwable t) {
throw new Exception((("The user defined 'open(Configuration)' method in " + stub.getClass().toString()) + " caused an exception: ") + t.getMessage(), t);
}
}
flink_BatchTask_initialize_rdh | // --------------------------------------------------------------------------------------------
// Main Work Methods
// --------------------------------------------------------------------------------------------
protected void initialize() throws Exception {
// create the operator
try {
this.driver.setup(this);
} catch (Throwable t) {
throw new Exception((("The driver setup for '" + this.getEnvironment().getTaskInfo().getTaskName()) + "' , caused an error: ") + t.getMessage(), t);
}
// instantiate the UDF
try {
final Class<? super S> userCodeFunctionType = this.driver.getStubType();
// if the class is null, the driver has no user code
if (userCodeFunctionType != null) {
this.stub = initStub(userCodeFunctionType);
}
} catch (Exception e) {
throw new RuntimeException("Initializing the UDF" + (e.getMessage() == null ? "." : ": " + e.getMessage()), e);
}
} | 3.26 |
flink_BatchTask_initInputsSerializersAndComparators_rdh | /**
* Creates all the serializers and comparators.
*/
protected void initInputsSerializersAndComparators(int numInputs, int numComparators) {
this.inputSerializers = new TypeSerializerFactory<?>[numInputs];
this.inputComparators = (numComparators > 0) ? new TypeComparator<?>[numComparators] : null;
this.inputIterators = new MutableObjectIterator<?>[numInputs];
ClassLoader userCodeClassLoader = getUserCodeClassLoader();
for (int i = 0; i < numInputs; i++) {
final TypeSerializerFactory<?> serializerFactory = this.config.getInputSerializer(i, userCodeClassLoader);
this.inputSerializers[i] = serializerFactory;
this.inputIterators[i] = createInputIterator(this.inputReaders[i], this.inputSerializers[i]);
}
// ---------------- create the driver's comparators ---------------------
for (int i = 0; i < numComparators; i++) {
if (this.inputComparators != null) {
final TypeComparatorFactory<?> comparatorFactory = this.config.getDriverComparator(i, userCodeClassLoader);
this.inputComparators[i] = comparatorFactory.createComparator();
}
}
} | 3.26 |
flink_OutputFormatBase_close_rdh | /**
* Close the format waiting for pending writes and reports errors.
*/
@Override
public final void close() throws IOException {
checkAsyncErrors();
flush();
checkAsyncErrors();
postClose();
}
/**
* Tear down the OutputFormat. This method is called at the end of {@link OutputFormatBase#close()} | 3.26 |
flink_OutputFormatBase_writeRecord_rdh | /**
* Asynchronously write a record and deal with {@link OutputFormatBase#maxConcurrentRequests}.
* To specify how a record is written, please override the {@link OutputFormatBase#send(Object)}
* method.
*/
@Override
public final void writeRecord(OUT record) throws IOException {
checkAsyncErrors();
tryAcquire(1);
final CompletionStage<V> completionStage;
try {
completionStage = send(record);
} catch (Throwable e) {
semaphore.release();
throw e;
}
completionStage.whenComplete((result, throwable) -> {
if (throwable == null) {
callback.onSuccess(result);
} else {
callback.onFailure(throwable);
}
});
} | 3.26 |
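A hedged sketch of what a concrete subclass supplies: send() only starts the asynchronous write and returns its CompletionStage, while the base class enforces the concurrency limit through the semaphore and routes the outcome to the success/failure callback. Method visibility, the generic parameters, and the async client are assumptions.

@Override
protected CompletionStage<Void> send(Row record) {
    // 'asyncClient' stands in for whatever non-blocking client the concrete format wraps.
    return asyncClient.write(record);
}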