name
stringlengths 12–178 | code_snippet
stringlengths 8–36.5k | score
float64 3.26–3.68 |
---|---|---|
flink_ResolvedSchema_getColumnNames_rdh | /**
* Returns all column names. It does not distinguish between different kinds of columns.
*/
public List<String> getColumnNames() {
return columns.stream().map(Column::getName).collect(Collectors.toList());
} | 3.26 |
flink_ResolvedSchema_toRowDataType_rdh | // --------------------------------------------------------------------------------------------
private DataType toRowDataType(Predicate<Column> columnPredicate) {
    return columns.stream().filter(columnPredicate).map(ResolvedSchema::columnToField).collect(Collectors.collectingAndThen(Collectors.toList(), DataTypes::ROW)).notNull(); // the row should never be null
} | 3.26 |
flink_ResolvedSchema_getPrimaryKey_rdh | /**
* Returns the primary key if it has been defined.
*/
public Optional<UniqueConstraint> getPrimaryKey() {
    return Optional.ofNullable(primaryKey);
} | 3.26 |
flink_ResolvedSchema_m0_rdh | /**
* Shortcut for a resolved schema of only physical columns.
*/
public static ResolvedSchema m0(List<String> columnNames, List<DataType> columnDataTypes) {
Preconditions.checkArgument(columnNames.size() == columnDataTypes.size(), "Mismatch between number of columns names and data types.");
final List<Column> columns = IntStream.range(0, columnNames.size()).mapToObj(i -> Column.physical(columnNames.get(i), columnDataTypes.get(i))).collect(Collectors.toList());
return new ResolvedSchema(columns, Collections.emptyList(), null);
} | 3.26 |
flink_ResolvedSchema_getColumn_rdh | /**
* Returns the {@link Column} instance for the given column name.
*
* @param columnName
* the name of the column
*/
public Optional<Column> getColumn(String columnName) {
return this.columns.stream().filter(column -> column.getName().equals(columnName)).findFirst();
} | 3.26 |
flink_ResolvedSchema_of_rdh | /**
* Shortcut for a resolved schema of only columns.
*/
public static ResolvedSchema of(Column... columns) {
return ResolvedSchema.of(Arrays.asList(columns));
} | 3.26 |
flink_ResolvedSchema_toSourceRowDataType_rdh | /**
* Converts all columns of this schema into a (possibly nested) row data type.
*
* <p>This method returns the <b>source-to-query schema</b>.
*
* <p>Note: The returned row data type contains physical, computed, and metadata columns. Be
* careful when using this method in a table source or table sink. In many cases, {@link #toPhysicalRowDataType()} might be more appropriate.
*
* @see DataTypes#ROW(DataTypes.Field...)
* @see #toPhysicalRowDataType()
* @see #toSinkRowDataType()
 */
public DataType toSourceRowDataType() {
return toRowDataType(c -> true);
} | 3.26 |
flink_ResolvedSchema_getColumns_rdh | /**
* Returns all {@link Column}s of this schema.
*/
public List<Column> getColumns() {
    return columns;
} | 3.26 |
flink_ResolvedSchema_m1_rdh | /**
* Returns a list of watermark specifications each consisting of a rowtime attribute and
* watermark strategy expression.
*
* <p>Note: Currently, there is at most one {@link WatermarkSpec} in the list, because we don't
* support multiple watermark definitions yet.
*/
public List<WatermarkSpec> m1() {
return watermarkSpecs;
} | 3.26 |
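As a reading aid for the `ResolvedSchema` snippets above, here is a minimal usage sketch. It assumes the standard Flink Table API classes (`ResolvedSchema`, `Column`, `DataTypes`) and made-up column names; it only exercises the accessors shown in the rows above and is not part of the dataset itself.

```java
import java.util.Optional;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class ResolvedSchemaExample {
    public static void main(String[] args) {
        // Build a schema of two physical columns via ResolvedSchema.of(Column...).
        ResolvedSchema schema = ResolvedSchema.of(
                Column.physical("id", DataTypes.BIGINT()),
                Column.physical("name", DataTypes.STRING()));

        // getColumnNames() does not distinguish between column kinds.
        System.out.println(schema.getColumnNames()); // [id, name]

        // getColumn(...) returns an Optional; unknown names yield Optional.empty().
        Optional<Column> idColumn = schema.getColumn("id");
        System.out.println(idColumn.map(Column::getDataType).orElse(null)); // BIGINT

        // No primary key was declared, so getPrimaryKey() is empty.
        System.out.println(schema.getPrimaryKey().isPresent()); // false
    }
}
```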
flink_FileChannelOutputView_closeAndDelete_rdh | /**
* Closes this output, writing pending data and releasing the memory.
*
* @throws IOException
* Thrown, if the pending data could not be written.
*/
public void closeAndDelete() throws IOException {
close(true);
} | 3.26 |
flink_FileChannelOutputView_getBlockCount_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the number of blocks written by this output view.
*
* @return The number of blocks written by this output view.
*/
public int getBlockCount() {
return numBlocksWritten;
} | 3.26 |
flink_FileChannelOutputView_close_rdh | // --------------------------------------------------------------------------------------------
/**
* Closes this output, writing pending data and releasing the memory.
*
* @throws IOException
* Thrown, if the pending data could not be written.
*/
public void close() throws IOException {
close(false);
} | 3.26 |
flink_AbstractBytesMultiMap_writePointer_rdh | /**
* Write value into the output view, and return offset of the value.
*/
private long writePointer(SimpleCollectingOutputView outputView, int value) throws IOException {
    int oldPosition = ((int) (outputView.getCurrentOffset()));
    int skip = checkSkipWriteForPointer(outputView);
outputView.getCurrentSegment().putInt(outputView.getCurrentPositionInSegment(), value);
// advance position in segment
outputView.skipBytesToWrite(ELEMENT_POINT_LENGTH);
return oldPosition + skip;
} | 3.26 |
flink_AbstractBytesMultiMap_checkSkipWriteForPointer_rdh | /**
 * For a pointer that will need updating later, skip the unaligned part (4 bytes) so the update is convenient.
*/
private int checkSkipWriteForPointer(AbstractPagedOutputView outView) throws IOException {
    // skip if there is not enough space left in the current segment.
int available = outView.getSegmentSize() - outView.getCurrentPositionInSegment();
if (available < ELEMENT_POINT_LENGTH) {
outView.advance();
return available;
}
return 0;
} | 3.26 |
flink_AbstractBytesMultiMap_append_rdh | // ----------------------- Public Interface -----------------------
/**
 * Append a value into the hash map's record area.
*/
public void append(LookupInfo<K, Iterator<RowData>> lookupInfo, BinaryRowData value) throws IOException {
try {
if (lookupInfo.found) {
    // append only the value, since the key-value pair already exists.
int newPointer = ((RecordArea) (recordArea)).appendValue(value);
if (pointerToSecondValue == (-1)) {
// this is the second value
((RecordArea) (recordArea)).updateValuePointerInKeyArea(newPointer, endPtr);
} else {
((RecordArea) (recordArea)).updateValuePointerInValueArea(newPointer, endPtr);
}
// update pointer of the tail value under a key
endPtr = newPointer;
((RecordArea) (recordArea)).updateValuePointerInKeyArea(newPointer, endPtrOffset);
} else {
if (numKeys >= growthThreshold) {
growAndRehash();
// update info's bucketSegmentIndex and bucketOffset
lookup(lookupInfo.key);
}
    // append key and value if the key does not exist yet
int pointerToAppended = recordArea.appendRecord(lookupInfo, value);
bucketSegments.get(lookupInfo.bucketSegmentIndex).putInt(lookupInfo.bucketOffset, pointerToAppended);
bucketSegments.get(lookupInfo.bucketSegmentIndex).putInt(lookupInfo.bucketOffset + ELEMENT_POINT_LENGTH, lookupInfo.keyHashCode);
numKeys++;
}
numElements++;
} catch (EOFException e) {
numSpillFiles++;
spillInBytes += recordArea.getSegmentsSize();
throw e;
}
} | 3.26 |
flink_AbstractBytesMultiMap_appendRecord_rdh | /**
 * The key does not exist yet. Add the key and its first value to the key area.
*/
@Override
public int appendRecord(LookupInfo<K, Iterator<RowData>> lookupInfo, BinaryRowData value)
throws IOException {
int lastPosition = ((int) (keyOutView.getCurrentOffset()));
// write key to keyOutView
int skip = keySerializer.serializeToPages(lookupInfo.key, keyOutView);
int keyOffset = lastPosition + skip;
// skip the pointer to the tail value.
endPtrOffset = skipPointer(keyOutView);
// write a value entry: a next-pointer and value data
long pointerOfEndValue = writePointer(keyOutView, -1);
// write first value to keyOutView.
valueSerializer.serializeToPages(value, keyOutView);
if (pointerOfEndValue > Integer.MAX_VALUE) {
    LOG.warn("We can't handle key area with more than Integer.MAX_VALUE bytes," + " because the pointer is an integer.");
    throw new EOFException();
}
endPtr = ((int) (pointerOfEndValue));
// update pointer to the tail value
updateValuePointerInKeyArea(endPtr, endPtrOffset);
return keyOffset;
} | 3.26 |
flink_AbstractBytesMultiMap_free_rdh | /**
*
* @param reservedFixedMemory
* reserved fixed memory or not.
*/
@Override
public void free(boolean reservedFixedMemory) {
recordArea.release();
numKeys = 0;
super.free(reservedFixedMemory);
} | 3.26 |
flink_AbstractBytesMultiMap_getNumKeys_rdh | // ----------------------- Abstract Interface -----------------------
@Override
public long getNumKeys() {
return numKeys;
} | 3.26 |
flink_AbstractBytesMultiMap_reset_rdh | /**
 * Resets the map's record and bucket area memory segments for reuse.
*/
@Override
public void reset() {
super.reset();
// reset the record segments.
recordArea.reset();
numKeys = 0;
} | 3.26 |
flink_AbstractBytesMultiMap_updateValuePointer_rdh | /**
* Update the content from specific offset.
*/
private void updateValuePointer(RandomAccessInputView view, int newPointer, int ptrOffset) throws IOException {
view.setReadPosition(ptrOffset);
int v8 = view.getCurrentPositionInSegment();
view.getCurrentSegment().putInt(v8, newPointer);
} | 3.26 |
flink_MetadataOutputStreamWrapper_closeForCommit_rdh | /**
* The function will check output stream valid. If it has been closed before, it will throw
* {@link IOException}. If not, it will invoke {@code closeForCommitAction()} and mark it
* closed.
 */
final void closeForCommit() throws IOException {
if (closed) {
throw new IOException("The output stream has been closed. This should not happen.");
}
closeForCommitAction();
closed = true;
}
/**
* The function will check output stream valid. If it has been closed before, it will do
* nothing. If not, it will invoke {@code closeAction()} | 3.26 |
flink_ResourceSpec_setExtendedResource_rdh | /**
* Add the given extended resource. The old value with the same resource name will be
* replaced if present.
*/
public Builder setExtendedResource(ExternalResource extendedResource) {
this.extendedResources.put(extendedResource.getName(), extendedResource);
    return this;
} | 3.26 |
flink_ResourceSpec_newBuilder_rdh | // ------------------------------------------------------------------------
// builder
// ------------------------------------------------------------------------
public static Builder newBuilder(double cpuCores, int taskHeapMemoryMB) {
    return new Builder(new CPUResource(cpuCores), MemorySize.ofMebiBytes(taskHeapMemoryMB));
} | 3.26 |
flink_ResourceSpec_subtract_rdh | /**
* Subtracts another resource spec from this one.
*
* @param other
* The other resource spec to subtract.
* @return The subtracted resource spec.
 */
public ResourceSpec subtract(final ResourceSpec other) {
checkNotNull(other, "Cannot subtract null resources");
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
checkArgument(other.lessThanOrEqual(this), "Cannot subtract a larger ResourceSpec from this one.");
Map<String, ExternalResource> resultExtendedResources = new HashMap<>(extendedResources);
for (ExternalResource resource : other.extendedResources.values()) {
resultExtendedResources.merge(resource.getName(), resource, (v1, v2) -> v1.subtract(v2));
}
return new ResourceSpec(this.cpuCores.subtract(other.cpuCores), this.taskHeapMemory.subtract(other.taskHeapMemory), this.taskOffHeapMemory.subtract(other.taskOffHeapMemory), this.managedMemory.subtract(other.managedMemory), resultExtendedResources);
} | 3.26 |
flink_ResourceSpec_setExtendedResources_rdh | /**
* Add the given extended resources. This will discard all the previous added extended
* resources.
*/
public Builder setExtendedResources(Collection<ExternalResource> extendedResources) {
this.extendedResources = extendedResources.stream().collect(Collectors.toMap(ExternalResource::getName, Function.identity()));
return this;
} | 3.26 |
flink_ResourceSpec_merge_rdh | /**
* Used by system internally to merge the other resources of chained operators when generating
* the job graph.
*
* @param other
* Reference to resource to merge in.
* @return The new resource with merged values.
*/
public ResourceSpec merge(final ResourceSpec other) {
checkNotNull(other, "Cannot merge with null resources");
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
Map<String, ExternalResource> resultExtendedResource = new HashMap<>(extendedResources);
other.extendedResources.forEach((String name,ExternalResource resource) -> {
resultExtendedResource.compute(name, (ignored, oldResource) -> oldResource == null ? resource : oldResource.merge(resource));
});
return new ResourceSpec(this.cpuCores.merge(other.cpuCores), this.taskHeapMemory.add(other.taskHeapMemory), this.taskOffHeapMemory.add(other.taskOffHeapMemory), this.managedMemory.add(other.managedMemory), resultExtendedResource);
} | 3.26 |
flink_ResourceSpec_readResolve_rdh | // ------------------------------------------------------------------------
// serialization
// ------------------------------------------------------------------------
private Object readResolve() {
// try to preserve the singleton property for UNKNOWN
return this.equals(UNKNOWN) ? UNKNOWN : this;
} | 3.26 |
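To illustrate how the `ResourceSpec` snippets compose, here is a small, hypothetical sketch of `merge` and `subtract`. The `newBuilder`, `merge`, `subtract` and `setExtendedResource` calls come from the rows above; the `build()` call and the `ExternalResource(String, double)` constructor are assumptions and may differ between Flink versions.

```java
import org.apache.flink.api.common.operators.ResourceSpec;
import org.apache.flink.api.common.resources.ExternalResource;

public class ResourceSpecExample {
    public static void main(String[] args) {
        // Two resource specs, e.g. for two operators that get chained together.
        ResourceSpec a = ResourceSpec.newBuilder(1.0, 100)
                .setExtendedResource(new ExternalResource("gpu", 1)) // assumed constructor
                .build();
        ResourceSpec b = ResourceSpec.newBuilder(0.5, 50).build();

        // merge() adds memory sizes and merges extended resources (used when chaining operators).
        ResourceSpec merged = a.merge(b);

        // subtract() requires the argument to be less than or equal to this spec.
        ResourceSpec remaining = merged.subtract(b); // equal to 'a' again

        System.out.println(merged);
        System.out.println(remaining);
    }
}
```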
flink_CommittableCollector_isFinished_rdh | /**
* Returns whether all {@link CheckpointCommittableManager} currently hold by the collector are
* either committed or failed.
*
* @return state of the {@link CheckpointCommittableManager}
*/
public boolean isFinished() {
return checkpointCommittables.values().stream().allMatch(CheckpointCommittableManagerImpl::isFinished);
} | 3.26 |
flink_CommittableCollector_copy_rdh | /**
* Returns a new committable collector that deep copies all internals.
*
* @return {@link CommittableCollector}
*/
public CommittableCollector<CommT> copy() {
    return new CommittableCollector<>(checkpointCommittables.entrySet().stream().map(e -> Tuple2.of(e.getKey(), e.getValue().copy())).collect(Collectors.toMap(t -> t.f0, t -> t.f1)), subtaskId, numberOfSubtasks, metricGroup);
} | 3.26 |
flink_CommittableCollector_addMessage_rdh | /**
* Adds a {@link CommittableMessage} to the collector to hold it until emission.
*
* @param message
* either {@link CommittableSummary} or {@link CommittableWithLineage}
*/
public void addMessage(CommittableMessage<CommT> message) {
if (message instanceof CommittableSummary) {
addSummary(((CommittableSummary<CommT>) (message)));
} else if (message instanceof CommittableWithLineage) {
addCommittable(((CommittableWithLineage<CommT>) (message)));
}
} | 3.26 |
flink_CommittableCollector_of_rdh | /**
* Creates a {@link CommittableCollector} based on the current runtime information. This method
 * should be used to instantiate a collector for all Sink V2 sinks.
 *
 * @param context
 * holding runtime information
* @param metricGroup
* storing the committable metrics
* @param <CommT>
* type of the committable
* @return {@link CommittableCollector}
*/
public static <CommT> CommittableCollector<CommT> of(RuntimeContext context, SinkCommitterMetricGroup metricGroup) {
return new CommittableCollector<>(context.getIndexOfThisSubtask(), context.getNumberOfParallelSubtasks(), metricGroup);
} | 3.26 |
flink_CommittableCollector_merge_rdh | /**
* Merges all information from an external collector into this collector.
*
* <p>This method is important during recovery from existing state.
*
* @param cc
* other {@link CommittableCollector}
*/
public void merge(CommittableCollector<CommT> cc) {
for (Entry<Long, CheckpointCommittableManagerImpl<CommT>> checkpointEntry : cc.checkpointCommittables.entrySet()) {
checkpointCommittables.merge(checkpointEntry.getKey(), checkpointEntry.getValue(), CheckpointCommittableManagerImpl::merge);
}
} | 3.26 |
flink_CommittableCollector_getSubtaskId_rdh | /**
* Returns subtask id.
*
* @return subtask id.
*/
public int getSubtaskId() {
return subtaskId;
} | 3.26 |
flink_CommittableCollector_m0_rdh | /**
* Returns all {@link CheckpointCommittableManager} until the requested checkpoint id.
*
* @param checkpointId
* counter
* @return collection of {@link CheckpointCommittableManager}
*/
public Collection<? extends CheckpointCommittableManager<CommT>> m0(long checkpointId) {
    // clean up fully committed previous checkpoints
// this wouldn't work with concurrent unaligned checkpoints
Collection<CheckpointCommittableManagerImpl<CommT>> checkpoints = checkpointCommittables.headMap(checkpointId, true).values();
checkpoints.removeIf(CheckpointCommittableManagerImpl::isFinished);
return checkpoints;
} | 3.26 |
flink_CommittableCollector_ofLegacy_rdh | /**
* Creates a {@link CommittableCollector} for a list of committables. This method is mainly used
* to create a collector from the state of Sink V1.
*
* @param committables
* list of committables
* @param metricGroup
* storing the committable metrics
* @param <CommT>
* type of committables
* @return {@link CommittableCollector}
*/
static <CommT> CommittableCollector<CommT> ofLegacy(List<CommT> committables, SinkCommitterMetricGroup metricGroup) {
CommittableCollector<CommT> committableCollector = new CommittableCollector<>(0, 1, metricGroup);
// add a checkpoint with the lowest checkpoint id, this will be merged into the next
// checkpoint data, subtask id is arbitrary
    CommittableSummary<CommT> summary = new CommittableSummary<>(0, 1, InitContext.INITIAL_CHECKPOINT_ID, committables.size(), committables.size(), 0);
    committableCollector.addSummary(summary);
    committables.forEach(c -> {
        final CommittableWithLineage<CommT> committableWithLineage = new CommittableWithLineage<>(c, InitContext.INITIAL_CHECKPOINT_ID, 0);
committableCollector.addCommittable(committableWithLineage);
});
return committableCollector;
} | 3.26 |
flink_TaskStateStats_getSummaryStats_rdh | /**
*
* @return Summary of the subtask stats.
*/
public TaskStateStatsSummary getSummaryStats() {
return summaryStats;
} | 3.26 |
flink_TaskStateStats_getJobVertexId_rdh | /**
*
* @return ID of the operator the statistics belong to.
*/
public JobVertexID getJobVertexId() {
return f0;
} | 3.26 |
flink_TaskStateStats_getStateSize_rdh | /**
*
* @return Total checkpoint state size over all subtasks.
*/
public long getStateSize() {
return summaryStats.m0().getSum();
} | 3.26 |
flink_TaskStateStats_getCheckpointedSize_rdh | /**
*
* @return Total persisted size over all subtasks of this checkpoint.
*/
public long getCheckpointedSize() {
    return summaryStats.getCheckpointedSize().getSum();
} | 3.26 |
flink_TaskStateStats_getSubtaskStats_rdh | /**
* Returns the stats for all subtasks.
*
* <p>Elements of the returned array are <code>null</code> if no stats are available yet for the
* respective subtask.
*
* <p>Note: The returned array must not be modified.
*
* @return Array of subtask stats (elements are <code>null</code> if no stats available yet).
*/
public SubtaskStateStats[] getSubtaskStats() {
return subtaskStats;
} | 3.26 |
flink_StringColumnSummary_getMaxLength_rdh | /**
* Longest String length.
 */
public Integer getMaxLength() {
return maxLength;
} | 3.26 |
flink_StringColumnSummary_m0_rdh | /**
* Number of empty strings e.g. java.lang.String.isEmpty().
*/
public long m0() {
return emptyCount;
} | 3.26 |
flink_StringColumnSummary_getMinLength_rdh | /**
* Shortest String length.
*/
public Integer getMinLength() {
    return minLength;
} | 3.26 |
flink_KeyedStream_getKeySelector_rdh | // ------------------------------------------------------------------------
// properties
// ------------------------------------------------------------------------
/**
 * Gets the key selector that can get the key by which the stream is partitioned from the
* elements.
*
* @return The key selector for the key.
*/
@Internal
public KeySelector<T, KEY> getKeySelector() {
    return this.keySelector;
} | 3.26 |
flink_KeyedStream_between_rdh | /**
* Specifies the time boundaries over which the join operation works, so that
*
* <pre>
* leftElement.timestamp + lowerBound <= rightElement.timestamp <= leftElement.timestamp + upperBound
* </pre>
*
* <p>By default both the lower and the upper bound are inclusive. This can be configured
* with {@link IntervalJoined#lowerBoundExclusive()} and {@link IntervalJoined#upperBoundExclusive()}
*
* @param lowerBound
* The lower bound. Needs to be smaller than or equal to the upperBound
* @param upperBound
* The upper bound. Needs to be bigger than or equal to the lowerBound
*/
@PublicEvolving
public IntervalJoined<T1, T2, KEY> between(Time lowerBound, Time upperBound) {
if (timeBehaviour != TimeBehaviour.EventTime) {
throw new UnsupportedTimeCharacteristicException("Time-bounded stream joins are only supported in event time");
}
checkNotNull(lowerBound, "A lower bound needs to be provided for a time-bounded join");
checkNotNull(upperBound, "An upper bound needs to be provided for a time-bounded join");
return new IntervalJoined<>(streamOne, f0, lowerBound.toMilliseconds(), upperBound.toMilliseconds(), true, true);
} | 3.26 |
flink_KeyedStream_inEventTime_rdh | /**
* Sets the time characteristic to event time.
*/
public IntervalJoin<T1, T2, KEY> inEventTime() {
timeBehaviour =
TimeBehaviour.EventTime;
return this;
} | 3.26 |
flink_KeyedStream_window_rdh | /**
* Windows this data stream to a {@code WindowedStream}, which evaluates windows over a key
* grouped stream. Elements are put into windows by a {@link WindowAssigner}. The grouping of
* elements is done both by key and by window.
*
* <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
* specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger} that is used if a {@code Trigger} is not specified.
*
* @param assigner
* The {@code WindowAssigner} that assigns elements to windows.
* @return The trigger windows data stream.
*/
@PublicEvolving
public <W extends Window> WindowedStream<T, KEY, W> window(WindowAssigner<? super T, W> assigner) {
return new WindowedStream<>(this, assigner);
} | 3.26 |
flink_KeyedStream_maxBy_rdh | /**
* Applies an aggregation that gives the current element with the maximum value at the given
* position by the given key. An independent aggregate is kept per key. If more elements have
* the maximum value at the given position, the operator returns either the first or last one,
* depending on the parameter set.
*
* @param positionToMaxBy
 * The field position in the data points to maximize. This is applicable
* to Tuple types, Scala case classes, and primitive types (which is considered as having
* one field).
* @param first
 * If true, then the operator returns the first element with the maximum value,
* otherwise returns the last
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> maxBy(int positionToMaxBy, boolean first) {
return aggregate(new ComparableAggregator<>(positionToMaxBy, getType(), AggregationType.MAXBY, first, getExecutionConfig()));
} | 3.26 |
flink_KeyedStream_sum_rdh | /**
* Applies an aggregation that gives the current sum of the data stream at the given field by
* the given key. An independent aggregate is kept per key.
*
* @param field
* In case of a POJO, Scala case class, or Tuple type, the name of the (public)
* field on which to perform the aggregation. Additionally, a dot can be used to drill down
* into nested objects, as in {@code "field1.fieldxy"}. Furthermore "*" can be specified in
* case of a basic type (which is considered as having only one field).
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> sum(String field) {
return aggregate(new SumAggregator<>(field, getType(), getExecutionConfig()));
} | 3.26 |
flink_KeyedStream_upperBoundExclusive_rdh | /**
* Set the upper bound to be exclusive.
*/
@PublicEvolving
public IntervalJoined<IN1, IN2, KEY> upperBoundExclusive() {
this.upperBoundInclusive = false;
return this;
} | 3.26 |
flink_KeyedStream_doTransform_rdh | // ------------------------------------------------------------------------
// basic transformations
// ------------------------------------------------------------------------
@Override
protected <R> SingleOutputStreamOperator<R> doTransform(final String operatorName, final TypeInformation<R> outTypeInfo, final StreamOperatorFactory<R> operatorFactory) {
SingleOutputStreamOperator<R> v4 = super.doTransform(operatorName, outTypeInfo, operatorFactory);
// inject the key selector and key type
OneInputTransformation<T, R> transform = ((OneInputTransformation<T, R>) (v4.getTransformation()));
transform.setStateKeySelector(keySelector);
transform.setStateKeyType(keyType);
return v4;
} | 3.26 |
flink_KeyedStream_validateKeyType_rdh | /**
* Validates that a given type of element (as encoded by the provided {@link TypeInformation})
* can be used as a key in the {@code DataStream.keyBy()} operation. This is done by searching
* depth-first the key type and checking if each of the composite types satisfies the required
* conditions (see {@link #validateKeyTypeIsHashable(TypeInformation)}).
*
* @param keyType
* The {@link TypeInformation} of the key.
*/
@SuppressWarnings("rawtypes")
private TypeInformation<KEY> validateKeyType(TypeInformation<KEY> keyType) {
    Stack<TypeInformation<?>> stack = new Stack<>();
stack.push(keyType);
List<TypeInformation<?>> unsupportedTypes = new ArrayList<>();
while (!stack.isEmpty()) {
TypeInformation<?> typeInfo = stack.pop();
if (!validateKeyTypeIsHashable(typeInfo)) {
unsupportedTypes.add(typeInfo);
}
if (typeInfo instanceof TupleTypeInfoBase) {
for (int i = 0; i < typeInfo.getArity(); i++) {
stack.push(((TupleTypeInfoBase) (typeInfo)).getTypeAt(i));
}
}
}
if (!unsupportedTypes.isEmpty()) {
throw new InvalidProgramException((((((("Type " + keyType) + " cannot be used as key. Contained ") + "UNSUPPORTED key types: ") + StringUtils.join(unsupportedTypes, ", ")) + ". Look ") + "at the keyBy() documentation for the conditions a type has to satisfy in order to be ") + "eligible for a key.");
}
return keyType;
}
/**
* Validates that a given type of element (as encoded by the provided {@link TypeInformation})
* can be used as a key in the {@code DataStream.keyBy()} operation.
*
* @param type
* The {@link TypeInformation} of the type to check.
* @return {@code false} if:
<ol>
<li>it is a POJO type but does not override the {@link #hashCode()} method and relies
on the {@link Object#hashCode()} implementation.
<li>it is an array of any type (see {@link PrimitiveArrayTypeInfo}, {@link BasicArrayTypeInfo}, {@link ObjectArrayTypeInfo}).
<li>it is enum type
</ol>
, {@code true} | 3.26 |
flink_KeyedStream_min_rdh | /**
* Applies an aggregation that gives the current minimum of the data stream at the given field
* expression by the given key. An independent aggregate is kept per key. A field expression is
* either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.fieldxy"}.
*
* @param field
* In case of a POJO, Scala case class, or Tuple type, the name of the (public)
* field on which to perform the aggregation. Additionally, a dot can be used to drill down
* into nested objects, as in {@code "field1.fieldxy"}. Furthermore "*" can be specified in
* case of a basic type (which is considered as having only one field).
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> min(String field) {
return aggregate(new ComparableAggregator<>(field, getType(), AggregationType.MIN, false, getExecutionConfig()));
} | 3.26 |
flink_KeyedStream_max_rdh | /**
* Applies an aggregation that gives the current maximum of the data stream at the given field
* expression by the given key. An independent aggregate is kept per key. A field expression is
* either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.fieldxy"}.
*
* @param field
* In case of a POJO, Scala case class, or Tuple type, the name of the (public)
* field on which to perform the aggregation. Additionally, a dot can be used to drill down
* into nested objects, as in {@code "field1.fieldxy"}. Furthermore "*" can be specified in
* case of a basic type (which is considered as having only one field).
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> max(String field) {
return aggregate(new ComparableAggregator<>(field, getType(), AggregationType.MAX, false, getExecutionConfig()));
} | 3.26 |
flink_KeyedStream_countWindow_rdh | /**
* Windows this {@code KeyedStream} into sliding count windows.
*
* @param size
* The size of the windows in number of elements.
* @param slide
* The slide interval in number of elements.
*/
public WindowedStream<T, KEY, GlobalWindow> countWindow(long size, long slide) {
return window(GlobalWindows.create()).evictor(CountEvictor.of(size)).trigger(CountTrigger.of(slide));
} | 3.26 |
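The `keyBy`/aggregation/`countWindow` snippets above are easiest to read next to a short usage sketch. The following is a minimal example with made-up data; it only uses the public DataStream API calls documented in these rows (`keyBy`, `sum`, `countWindow`).

```java
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KeyedAggregationExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> input = env.fromElements(
                Tuple2.of("a", 1), Tuple2.of("b", 2), Tuple2.of("a", 3));

        // Running sum per key (tuple field position 1); an independent aggregate is kept per key.
        input.keyBy(t -> t.f0).sum(1).print();

        // Sliding count windows: evaluate every 5 elements over the last 10 elements per key.
        input.keyBy(t -> t.f0).countWindow(10, 5).sum(1).print();

        env.execute("keyed aggregation example");
    }
}
```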
flink_KeyedStream_inProcessingTime_rdh | /**
* Sets the time characteristic to processing time.
*/
public IntervalJoin<T1, T2, KEY> inProcessingTime() {
timeBehaviour = TimeBehaviour.ProcessingTime;
return this;
} | 3.26 |
flink_KeyedStream_m0_rdh | /**
* Applies the given {@link ProcessFunction} on the input stream, thereby creating a transformed
* output stream.
*
* <p>The function will be called for every element in the input streams and can produce zero or
* more output elements. Contrary to the {@link DataStream#flatMap(FlatMapFunction)} function,
* this function can also query the time and set timers. When reacting to the firing of set
* timers the function can directly emit elements and/or register yet more timers.
*
* @param processFunction
* The {@link ProcessFunction} that is called for each element in the
* stream.
* @param outputType
* {@link TypeInformation} for the result type of the function.
* @param <R>
* The type of elements emitted by the {@code ProcessFunction}.
* @return The transformed {@link DataStream}.
* @deprecated Use {@link KeyedStream#process(KeyedProcessFunction, TypeInformation)}
*/
@Deprecated
@Override
@Internal
public <R> SingleOutputStreamOperator<R> m0(ProcessFunction<T, R> processFunction, TypeInformation<R> outputType) {
LegacyKeyedProcessOperator<KEY, T, R> operator = new LegacyKeyedProcessOperator<>(clean(processFunction));
return transform("Process", outputType, operator);
} | 3.26 |
flink_KeyedStream_lowerBoundExclusive_rdh | /**
* Set the lower bound to be exclusive.
*/
@PublicEvolving
public IntervalJoined<IN1, IN2, KEY> lowerBoundExclusive() {
this.lowerBoundInclusive = false;
return this;
} | 3.26 |
flink_KeyedStream_process_rdh | /**
* Completes the join operation with the given user function that is executed for each
* joined pair of elements. This methods allows for passing explicit type information for
* the output type.
*
* @param processJoinFunction
* The user-defined process join function.
* @param outputType
* The type information for the output type.
* @param <OUT>
* The output type.
* @return The transformed {@link DataStream}.
*/
@PublicEvolving
public <OUT> SingleOutputStreamOperator<OUT> process(ProcessJoinFunction<IN1, IN2, OUT> processJoinFunction, TypeInformation<OUT> outputType) {
Preconditions.checkNotNull(processJoinFunction);
Preconditions.checkNotNull(outputType);
final ProcessJoinFunction<IN1, IN2, OUT> cleanedUdf = left.getExecutionEnvironment().clean(processJoinFunction);
    final IntervalJoinOperator<KEY, IN1, IN2, OUT> operator = new IntervalJoinOperator<>(lowerBound, upperBound, lowerBoundInclusive, upperBoundInclusive, f1, rightLateDataOutputTag, left.getType().createSerializer(left.getExecutionConfig()), right.getType().createSerializer(right.getExecutionConfig()), cleanedUdf);
return left.connect(right).keyBy(keySelector1, keySelector2).transform("Interval Join", outputType, operator);
} | 3.26 |
flink_KeyedStream_sideOutputLeftLateData_rdh | /**
 * Send late arriving left-side data to the side output identified by the given {@link OutputTag}. Data is considered late after the watermark.
 */
@PublicEvolving
public IntervalJoined<IN1, IN2, KEY> sideOutputLeftLateData(OutputTag<IN1> outputTag) {
outputTag = left.getExecutionEnvironment().clean(outputTag);
this.leftLateDataOutputTag = outputTag;
return this;
} | 3.26 |
flink_KeyedStream_intervalJoin_rdh | // ------------------------------------------------------------------------
// Joining
// ------------------------------------------------------------------------
/**
* Join elements of this {@link KeyedStream} with elements of another {@link KeyedStream} over a
* time interval that can be specified with {@link IntervalJoin#between(Time, Time)}.
*
* @param otherStream
* The other keyed stream to join this keyed stream with
* @param <T1>
* Type parameter of elements in the other stream
* @return An instance of {@link IntervalJoin} with this keyed stream and the other keyed stream
*/
@PublicEvolving
public <T1> IntervalJoin<T, T1, KEY> intervalJoin(KeyedStream<T1, KEY> otherStream) {
return new IntervalJoin<>(this, otherStream);
} | 3.26 |
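The interval-join pieces above (`intervalJoin`, `between`, the exclusive-bound setters, and `process`) compose as in the following hedged sketch. The element type and bounds are made up, and the helper method is hypothetical; the call chain itself follows the public API shown in these rows.

```java
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

public class IntervalJoinExample {
    // Joins two keyed streams over the interval [left.ts - 2s, left.ts + 2s] (event time).
    public static <K> void joinStreams(KeyedStream<String, K> left, KeyedStream<String, K> right) {
        left.intervalJoin(right)
                .between(Time.seconds(-2), Time.seconds(2))
                // .lowerBoundExclusive() / .upperBoundExclusive() would make a bound exclusive
                .process(new ProcessJoinFunction<String, String, String>() {
                    @Override
                    public void processElement(String l, String r, Context ctx, Collector<String> out) {
                        out.collect(l + " joined with " + r);
                    }
                })
                .print();
    }
}
```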
flink_KeyedStream_minBy_rdh | /**
* Applies an aggregation that gives the current element with the minimum value at the given
* position by the given key. An independent aggregate is kept per key. If more elements have
* the minimum value at the given position, the operator returns either the first or last one,
* depending on the parameter set.
*
* @param positionToMinBy
* The field position in the data points to minimize. This is applicable
* to Tuple types, Scala case classes, and primitive types (which is considered as having
* one field).
* @param first
 * If true, then the operator returns the first element with the minimum value,
* otherwise returns the last
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> minBy(int positionToMinBy, boolean first) {
    return aggregate(new ComparableAggregator<>(positionToMinBy, getType(), AggregationType.MINBY, first, getExecutionConfig()));
} | 3.26 |
flink_KeyedStream_reduce_rdh | // ------------------------------------------------------------------------
// Non-Windowed aggregation operations
// ------------------------------------------------------------------------
/**
* Applies a reduce transformation on the grouped data stream grouped on by the given key
* position. The {@link ReduceFunction} will receive input values based on the key value. Only
* input values with the same key will go to the same reducer.
*
* @param reducer
* The {@link ReduceFunction} that will be called for every element of the input
* values with the same key.
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> reduce(ReduceFunction<T> reducer) {
    ReduceTransformation<T, KEY> reduce = new ReduceTransformation<>("Keyed Reduce", environment.getParallelism(), transformation, clean(reducer), keySelector, getKeyType(), false);
getExecutionEnvironment().addOperator(reduce);
return new SingleOutputStreamOperator<>(getExecutionEnvironment(), reduce);
} | 3.26 |
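For the non-windowed `reduce` above, a minimal sketch with made-up data shows the rolling per-key reduction it describes; only `keyBy` and `reduce` from these rows are used.

```java
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KeyedReduceExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.fromElements(Tuple2.of("a", 1), Tuple2.of("a", 2), Tuple2.of("b", 3))
                .keyBy(t -> t.f0)
                // rolling reduce per key: emits ("a",1), ("a",3), ("b",3)
                .reduce((t1, t2) -> Tuple2.of(t1.f0, t1.f1 + t2.f1))
                .print();

        env.execute("keyed reduce example");
    }
}
```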
flink_KeyedStream_m1_rdh | /**
* Send late arriving right-side data to the side output identified by the given {@link OutputTag}. Data is considered late after the watermark
*/
@PublicEvolving
public IntervalJoined<IN1, IN2, KEY> m1(OutputTag<IN2> outputTag) {
outputTag = right.getExecutionEnvironment().clean(outputTag);
this.rightLateDataOutputTag = outputTag;
return this;
} | 3.26 |
flink_DefaultExecutionGraphFactory_tryRestoreExecutionGraphFromSavepoint_rdh | /**
* Tries to restore the given {@link ExecutionGraph} from the provided {@link SavepointRestoreSettings}, iff checkpointing is enabled.
*
* @param executionGraphToRestore
* {@link ExecutionGraph} which is supposed to be restored
* @param savepointRestoreSettings
* {@link SavepointRestoreSettings} containing information about
* the savepoint to restore from
* @throws Exception
* if the {@link ExecutionGraph} could not be restored
*/
private void tryRestoreExecutionGraphFromSavepoint(ExecutionGraph executionGraphToRestore, SavepointRestoreSettings savepointRestoreSettings) throws Exception {
if (savepointRestoreSettings.restoreSavepoint()) {
final CheckpointCoordinator checkpointCoordinator = executionGraphToRestore.getCheckpointCoordinator();
if (checkpointCoordinator != null) {
checkpointCoordinator.restoreSavepoint(savepointRestoreSettings, executionGraphToRestore.getAllVertices(), userCodeClassLoader);
}
}
} | 3.26 |
flink_SchedulingPipelinedRegionComputeUtil_mergeRegionsOnCycles_rdh | /**
* Merge the regions base on <a
* href="https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm">
* Tarjan's strongly connected components algorithm</a>. For more details please see <a
* href="https://issues.apache.org/jira/browse/FLINK-17330">FLINK-17330</a>.
*/
private static Set<Set<SchedulingExecutionVertex>> mergeRegionsOnCycles(final Map<SchedulingExecutionVertex, Set<SchedulingExecutionVertex>> vertexToRegion, final Function<ExecutionVertexID, ? extends SchedulingExecutionVertex> executionVertexRetriever) {
final List<Set<SchedulingExecutionVertex>> regionList = new ArrayList<>(uniqueVertexGroups(vertexToRegion));
final List<List<Integer>> outEdges = buildOutEdgesDesc(vertexToRegion, regionList, executionVertexRetriever);
final Set<Set<Integer>> sccs = StronglyConnectedComponentsComputeUtils.computeStronglyConnectedComponents(outEdges.size(), outEdges);
final Set<Set<SchedulingExecutionVertex>> mergedRegions = Collections.newSetFromMap(new IdentityHashMap<>());
for (Set<Integer> scc : sccs) {
checkState(scc.size() > 0);
Set<SchedulingExecutionVertex> mergedRegion = new HashSet<>();
        for (int regionIndex : scc) {
mergedRegion = mergeVertexGroups(mergedRegion, regionList.get(regionIndex), vertexToRegion);
}
mergedRegions.add(mergedRegion);
}
    return mergedRegions;
} | 3.26 |
flink_RocksDBFullRestoreOperation_restore_rdh | /**
* Restores all key-groups data that is referenced by the passed state handles.
 */
@Override
public RocksDBRestoreResult restore() throws IOException, StateMigrationException, RocksDBException {
    rocksHandle.openDB();
try (ThrowingIterator<SavepointRestoreResult> restore = savepointRestoreOperation.restore()) {
while (restore.hasNext()) {
applyRestoreResult(restore.next());
}
}
return new RocksDBRestoreResult(this.rocksHandle.getDb(), this.rocksHandle.getDefaultColumnFamilyHandle(), this.rocksHandle.getNativeMetricMonitor(), -1, null, null);
} | 3.26 |
flink_RocksDBFullRestoreOperation_restoreKVStateData_rdh | /**
* Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state
* handle.
*/
private void restoreKVStateData(ThrowingIterator<KeyGroup> keyGroups, Map<Integer, ColumnFamilyHandle> columnFamilies) throws IOException, RocksDBException, StateMigrationException {
// for all key-groups in the current state handle...
try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize)) {
        ColumnFamilyHandle handle = null;
while (keyGroups.hasNext()) {
KeyGroup v9 = keyGroups.next();
            try (ThrowingIterator<KeyGroupEntry> groupEntries = v9.getKeyGroupEntries()) {
                int oldKvStateId = -1;
while (groupEntries.hasNext()) {
KeyGroupEntry groupEntry = groupEntries.next();
int v13 = groupEntry.getKvStateId();
                    if (v13 != oldKvStateId) {
                        oldKvStateId = v13;
                        handle = columnFamilies.get(v13);
}
writeBatchWrapper.put(handle, groupEntry.getKey(), groupEntry.getValue());
}
}
}
}
} | 3.26 |
flink_IOUtils_closeAll_rdh | /**
* Closes all {@link AutoCloseable} objects in the parameter, suppressing exceptions. Exception
* will be emitted after calling close() on every object.
*
* @param closeables
* iterable with closeables to close.
* @param suppressedException
* class of exceptions which should be suppressed during the closing.
* @throws Exception
* collected exceptions that occurred during closing
*/
public static <T extends Throwable> void closeAll(Iterable<? extends AutoCloseable> closeables, Class<T> suppressedException) throws Exception {
if (null != closeables) {
Exception collectedExceptions = null;
for (AutoCloseable closeable : closeables) {
try {
if (null != closeable) {
closeable.close(); }
} catch (Throwable e) {
if (!suppressedException.isAssignableFrom(e.getClass())) {
throw e;
}
Exception ex = (e instanceof Exception) ? ((Exception) (e)) : new Exception(e);
collectedExceptions = ExceptionUtils.firstOrSuppressed(ex, collectedExceptions);
}
}
if (null != collectedExceptions) {
throw collectedExceptions;
}
}
} | 3.26 |
flink_IOUtils_deleteFilesRecursively_rdh | /**
* Delete the given directory or file recursively.
*/
public static void deleteFilesRecursively(Path path) throws Exception {
    File[] files = path.toFile().listFiles();
if ((files == null) || (files.length == 0)) {
return;
}
for (File file : files) {
if (!file.isDirectory()) {
Files.deleteIfExists(file.toPath());
} else {
deleteFilesRecursively(file.toPath());
}
}
} | 3.26 |
flink_IOUtils_closeSocket_rdh | /**
* Closes the socket ignoring {@link IOException}.
*
* @param sock
* the socket to close
*/
public static void closeSocket(final Socket sock) {
// avoids try { close() } dance
if (sock != null) {
try {
sock.close();
} catch (IOException ignored) {
}
}
} | 3.26 |
flink_IOUtils_skipFully_rdh | /**
* Similar to readFully(). Skips bytes in a loop.
*
* @param in
* The InputStream to skip bytes from
* @param len
* number of bytes to skip
* @throws IOException
* if it could not skip requested number of bytes for any reason (including
* EOF)
*/
public static void skipFully(final InputStream in, long len) throws IOException {
while (len > 0) {
final long ret = in.skip(len);
if (ret < 0) {
            throw new IOException("Premature EOF from inputStream");
        }
        len -= ret;
}
} | 3.26 |
flink_IOUtils_cleanup_rdh | // ------------------------------------------------------------------------
// Silent I/O cleanup / closing
// ------------------------------------------------------------------------
/**
* Close the AutoCloseable objects and <b>ignore</b> any {@link Exception} or null pointers.
* Must only be used for cleanup in exception handlers.
*
* @param log
* the log to record problems to at debug level. Can be <code>null</code>.
* @param closeables
* the objects to close
*/
public static void cleanup(final Logger log, final AutoCloseable... closeables) {
for (AutoCloseable c : closeables) {
if (c != null) {
try {
c.close();
} catch (Exception e) {
if ((log != null) && log.isDebugEnabled()) {
log.debug("Exception in closing " + c, e);
}
}
}
}
} | 3.26 |
flink_IOUtils_tryReadFully_rdh | /**
* Similar to {@link #readFully(InputStream, byte[], int, int)}. Returns the total number of
* bytes read into the buffer.
*
* @param in
* The InputStream to read from
* @param buf
* The buffer to fill
* @return The total number of bytes read into the buffer
* @throws IOException
* If the first byte cannot be read for any reason other than end of file,
* or if the input stream has been closed, or if some other I/O error occurs.
*/
public static int tryReadFully(final InputStream in, final byte[] buf) throws IOException {
int totalRead = 0;
while (totalRead != buf.length) {
int read = in.read(buf, totalRead, buf.length - totalRead);
if (read == (-1)) {
break;
}
totalRead += read;
}
return totalRead;
} | 3.26 |
flink_IOUtils_closeAllQuietly_rdh | /**
* Closes all elements in the iterable with closeQuietly().
*/
public static void closeAllQuietly(Iterable<? extends AutoCloseable> closeables) {
    if (null != closeables) {
for (AutoCloseable closeable : closeables) {
closeQuietly(closeable);
}
}
} | 3.26 |
flink_IOUtils_closeStream_rdh | /**
* Closes the stream ignoring {@link IOException}. Must only be called in cleaning up from
* exception handlers.
*
* @param stream
* the stream to close
*/
public static void closeStream(final Closeable stream) {
cleanup(null, stream);
} | 3.26 |
flink_IOUtils_copyBytes_rdh | /**
* Copies from one stream to another.
*
* @param in
* InputStream to read from
* @param out
* OutputStream to write to
* @param close
* whether or not close the InputStream and OutputStream at the end. The streams
* are closed in the finally clause.
* @throws IOException
* thrown if an I/O error occurs while copying
*/
public static void copyBytes(final InputStream in, final OutputStream out, final boolean close) throws IOException {
copyBytes(in, out, BLOCKSIZE, close);
} | 3.26 |
flink_IOUtils_readFully_rdh | // ------------------------------------------------------------------------
// Stream input skipping
// ------------------------------------------------------------------------
/**
* Reads len bytes in a loop.
*
* @param in
* The InputStream to read from
* @param buf
* The buffer to fill
* @param off
* offset from the buffer
* @param len
* the length of bytes to read
* @throws IOException
* if it could not read requested number of bytes for any reason (including
* EOF)
*/
public static void readFully(final InputStream in, final byte[] buf, int off, final int len) throws IOException {
int toRead = len;
while (toRead > 0) {
final int ret = in.read(buf, off, toRead);
if (ret < 0) {
throw new IOException("Premature EOF from inputStream");
}
toRead -= ret;
off += ret;
}
} | 3.26 |
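The IOUtils helpers above can be exercised with a tiny self-contained sketch. The method signatures (`readFully`, `skipFully`, `tryReadFully`, `closeAllQuietly`) are the ones shown in these rows; the package `org.apache.flink.util.IOUtils` is assumed to match the Flink codebase.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.util.Arrays;

import org.apache.flink.util.IOUtils;

public class IOUtilsExample {
    public static void main(String[] args) throws Exception {
        byte[] data = {1, 2, 3, 4, 5, 6, 7, 8};

        // readFully: throws if fewer than the requested bytes are available.
        InputStream in = new ByteArrayInputStream(data);
        byte[] buf = new byte[4];
        IOUtils.readFully(in, buf, 0, buf.length);
        System.out.println(Arrays.toString(buf)); // [1, 2, 3, 4]

        // skipFully: skip the next two bytes, then tryReadFully reads whatever is left.
        IOUtils.skipFully(in, 2);
        byte[] rest = new byte[8];
        int read = IOUtils.tryReadFully(in, rest); // stops at EOF instead of throwing
        System.out.println(read); // 2

        // closeAllQuietly: best-effort close, swallowing exceptions.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        IOUtils.closeAllQuietly(Arrays.<AutoCloseable>asList(in, out));
    }
}
```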
flink_HiveParserQB_setTabAlias_rdh | /**
* Maintain table alias -> (originTableName, qualifiedName).
*
* @param alias
* table alias
* @param originTableName
* table name that be actually specified, may be "table", "db.table",
* "catalog.db.table"
* @param qualifiedName
* table name with full path, always is "catalog.db.table"
*/
public void setTabAlias(String alias, String originTableName, String qualifiedName) {
    aliasToTabsOriginName.put(alias.toLowerCase(), originTableName.toLowerCase());
aliasToTabs.put(alias.toLowerCase(), qualifiedName);
} | 3.26 |
flink_HiveParserQB_containsQueryWithoutSourceTable_rdh | /**
 * Returns true if the query block contains any query or subquery without a source table, e.g.
 * {@code select current_user()}, {@code select current_database()}.
*
* @return true, if the query block contains any query without a source table
*/
public boolean containsQueryWithoutSourceTable() {
for (HiveParserQBExpr qbexpr : aliasToSubq.values()) {
if (qbexpr.containsQueryWithoutSourceTable()) {
return true;
}
}
return (aliasToTabs.size() == 0) && (aliasToSubq.size() == 0);
} | 3.26 |
flink_HiveParserQB_getSkewedColumnNames_rdh | /**
* Retrieve skewed column name for a table.
*/
public List<String> getSkewedColumnNames(String alias) {
// currently, skew column means nothing for flink, so we just return an empty list.
return Collections.emptyList();
} | 3.26 |
flink_HiveParserQB_getAppendedAliasFromId_rdh | // For sub-queries, the id. and alias should be appended since same aliases can be re-used
// within different sub-queries.
// For a query like:
// select ...
// (select * from T1 a where ...) subq1
// join
// (select * from T2 a where ...) subq2
// ..
// the alias is modified to subq1:a and subq2:a from a, to identify the right sub-query.
private static String getAppendedAliasFromId(String outerId, String alias) {
    return outerId == null ? alias : (outerId + ":") + alias;
} | 3.26 |
flink_HiveParserQB_isSimpleSelectQuery_rdh | // to find target for fetch task conversion optimizer (not allows subqueries)
public boolean isSimpleSelectQuery() {
if (((!qbp.isSimpleSelectQuery()) || isCTAS()) || qbp.isAnalyzeCommand()) {
return false;
}
for (HiveParserQBExpr qbexpr : aliasToSubq.values()) {
if (!qbexpr.isSimpleSelectQuery()) {
return false;
}
}
return true;
} | 3.26 |
flink_TaskMailboxImpl_put_rdh | // ------------------------------------------------------------------------------------------------------------------
@Override
public void put(@Nonnull Mail mail) {
    final ReentrantLock lock = this.lock;
lock.lock();
try {
checkPutStateConditions();
queue.addLast(mail);
hasNewMail = true;
notEmpty.signal();
} finally {
lock.unlock();
}
} | 3.26 |
flink_TaskMailboxImpl_createBatch_rdh | // ------------------------------------------------------------------------------------------------------------------
@Override
public boolean createBatch() {
checkIsMailboxThread();
if (!hasNewMail) {
// batch is usually depleted by previous MailboxProcessor#runMainLoop
// however, putFirst may add a message directly to the batch if called from mailbox
// thread
return !batch.isEmpty();
}
final ReentrantLock lock = this.lock;
lock.lock();
try {
Mail mail;
while ((mail = queue.pollFirst()) != null) {
batch.addLast(mail);
}
hasNewMail = false;
return !batch.isEmpty();
} finally {
lock.unlock();
}
} | 3.26 |
flink_FsJobArchivist_getArchivedJsons_rdh | /**
* Reads the given archive file and returns a {@link Collection} of contained {@link ArchivedJson}.
*
* @param file
* archive to extract
* @return collection of archived jsons
* @throws IOException
* if the file can't be opened, read or doesn't contain valid json
*/
public static Collection<ArchivedJson> getArchivedJsons(Path file) throws IOException {
try (FSDataInputStream input = file.getFileSystem().open(file);ByteArrayOutputStream output = new ByteArrayOutputStream()) {
IOUtils.copyBytes(input, output);
try {
            JsonNode archive = mapper.readTree(output.toByteArray());
            Collection<ArchivedJson> archives = new ArrayList<>();
            for (JsonNode archivePart : archive.get(ARCHIVE)) {
String path = archivePart.get(PATH).asText();
String json = archivePart.get(JSON).asText();
archives.add(new ArchivedJson(path, json));
}
return archives;
        } catch (NullPointerException npe) {
// occurs if the archive is empty or any of the expected fields are not present
throw new IOException(("Job archive (" + file.getPath()) + ") did not conform to expected format.");
}
}
} | 3.26 |
flink_FsJobArchivist_archiveJob_rdh | /**
* Writes the given {@link AccessExecutionGraph} to the {@link FileSystem} pointed to by {@link JobManagerOptions#ARCHIVE_DIR}.
*
* @param rootPath
* directory to which the archive should be written to
* @param jobId
* job id
* @param jsonToArchive
* collection of json-path pairs to that should be archived
* @return path to where the archive was written, or null if no archive was created
* @throws IOException
*/
public static Path archiveJob(Path rootPath, JobID jobId, Collection<ArchivedJson> jsonToArchive) throws IOException {
try {
FileSystem fs = rootPath.getFileSystem();
Path path = new Path(rootPath, jobId.toString());
OutputStream out = fs.create(path, WriteMode.NO_OVERWRITE);
try (JsonGenerator gen = jacksonFactory.createGenerator(out, JsonEncoding.UTF8)) {
gen.writeStartObject();
gen.writeArrayFieldStart(ARCHIVE);
for (ArchivedJson archive : jsonToArchive) {
gen.writeStartObject();
gen.writeStringField(PATH, archive.getPath());
gen.writeStringField(JSON, archive.getJson());
gen.writeEndObject();
}
gen.writeEndArray();
gen.writeEndObject();
} catch (Exception e) {
fs.delete(path, false);
throw e;
}
        LOG.info("Job {} has been archived at {}.", jobId, path);
        return path;
    } catch (IOException e) {
LOG.error("Failed to archive job.", e);
throw e;
}
} | 3.26 |
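Based only on the two FsJobArchivist snippets above, a hypothetical write-then-read round trip could look as follows. The `archiveJob`/`getArchivedJsons` calls and the `ArchivedJson` accessors come from the rows above; the package names, the `ArchivedJson(String, String)` constructor arguments, and the archive directory are assumptions for illustration.

```java
import java.util.Collection;
import java.util.Collections;

import org.apache.flink.api.common.JobID;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.history.FsJobArchivist;
import org.apache.flink.runtime.webmonitor.history.ArchivedJson;

public class ArchiveRoundTrip {
    public static void main(String[] args) throws Exception {
        JobID jobId = new JobID();
        Path archiveDir = new Path("file:///tmp/flink-archive"); // assumed location

        // Write one path/json pair for the job...
        Collection<ArchivedJson> jsons =
                Collections.singletonList(new ArchivedJson("/jobs/overview", "{\"jobs\":[]}"));
        Path written = FsJobArchivist.archiveJob(archiveDir, jobId, jsons);

        // ...and read it back.
        for (ArchivedJson entry : FsJobArchivist.getArchivedJsons(written)) {
            System.out.println(entry.getPath() + " -> " + entry.getJson());
        }
    }
}
```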
flink_OptimizerNode_areBranchCompatible_rdh | /**
 * Checks whether two candidate plans for the sub-plan of this node are comparable. The two
 * alternative plans are comparable, if
 *
 * <p>a) There is no branch in the sub-plan of this node, or b) Both candidates have the same
 * candidate as the child at the last open branch.
*
* @param plan1
* The root node of the first candidate plan.
* @param plan2
* The root node of the second candidate plan.
* @return True if the nodes are branch compatible in the inputs.
*/
protected boolean areBranchCompatible(PlanNode plan1, PlanNode plan2) {
    if ((plan1 == null) || (plan2 == null)) {
throw new NullPointerException();
}
// if there is no open branch, the children are always compatible.
// in most plans, that will be the dominant case
if ((this.hereJoinedBranches == null) || this.hereJoinedBranches.isEmpty()) {
return true;
}
    for (OptimizerNode joinedBrancher : hereJoinedBranches) {
final PlanNode branch1Cand = plan1.getCandidateAtBranchPoint(joinedBrancher);
final PlanNode branch2Cand = plan2.getCandidateAtBranchPoint(joinedBrancher);
if (((branch1Cand != null) && (branch2Cand != null)) && (branch1Cand != branch2Cand)) {
return false;
}
}
return true;
} | 3.26 |
flink_OptimizerNode_initId_rdh | /**
* Sets the ID of this node.
*
* @param id
* The id for this node.
*/
public void initId(int id) {
if (id <= 0) {
throw new IllegalArgumentException();
}
if (this.id == (-1)) {
this.id = id;
} else {
throw new IllegalStateException("Id has already been initialized.");
}
} | 3.26 |
flink_OptimizerNode_readStubAnnotations_rdh | // ------------------------------------------------------------------------
// Reading of stub annotations
// ------------------------------------------------------------------------
/**
* Reads all stub annotations, i.e. which fields remain constant, what cardinality bounds the
* functions have, which fields remain unique.
*/
protected void readStubAnnotations() {
readUniqueFieldsAnnotation();
} | 3.26 |
flink_OptimizerNode_hasUnclosedBranches_rdh | // --------------------------------------------------------------------------------------------
// Handling of branches
// --------------------------------------------------------------------------------------------
public boolean hasUnclosedBranches() {
return (this.openBranches != null) && (!this.openBranches.isEmpty());
} | 3.26 |
flink_OptimizerNode_setParallelism_rdh | /**
* Sets the parallelism for this optimizer node. The parallelism denotes how many parallel
* instances of the operator will be spawned during the execution.
*
* @param parallelism
* The parallelism to set. If this value is {@link ExecutionConfig#PARALLELISM_DEFAULT} then the system will take the default number of
* parallel instances.
* @throws IllegalArgumentException
* If the parallelism is smaller than one.
*/
public void setParallelism(int parallelism) {
    if ((parallelism < 1) && (parallelism != ExecutionConfig.PARALLELISM_DEFAULT)) {
throw new IllegalArgumentException(("Parallelism of " + parallelism) + " is invalid.");
}
this.parallelism = parallelism;
} | 3.26 |
flink_OptimizerNode_getUniqueFields_rdh | // ------------------------------------------------------------------------
// Access of stub annotations
// ------------------------------------------------------------------------
/**
* Gets the FieldSets which are unique in the output of the node.
*/
public Set<FieldSet> getUniqueFields() {
    return this.uniqueFields == null ? Collections.<FieldSet>emptySet() : this.uniqueFields;
} | 3.26 |
flink_OptimizerNode_getBroadcastConnectionNames_rdh | /**
* Return the list of names associated with broadcast inputs for this node.
*/
public List<String> getBroadcastConnectionNames() {
return this.broadcastConnectionNames;
} | 3.26 |
flink_OptimizerNode_setBroadcastInputs_rdh | /**
* This function connects the operators that produce the broadcast inputs to this operator.
*
* @param operatorToNode
* The map from program operators to optimizer nodes.
* @param defaultExchangeMode
* The data exchange mode to use, if the operator does not specify
* one.
* @throws CompilerException
*/
public void setBroadcastInputs(Map<Operator<?>, OptimizerNode> operatorToNode, ExecutionMode defaultExchangeMode) {
// skip for Operators that don't support broadcast variables
if (!(getOperator() instanceof AbstractUdfOperator<?, ?>)) {
return;
    }
    // get all broadcast inputs
AbstractUdfOperator<?, ?> operator = ((AbstractUdfOperator<?, ?>) (getOperator()));
// create connections and add them
for (Map.Entry<String, Operator<?>> input : operator.getBroadcastInputs().entrySet()) {
OptimizerNode predecessor = operatorToNode.get(input.getValue());
        DagConnection connection = new DagConnection(predecessor, this, ShipStrategyType.BROADCAST, defaultExchangeMode);
addBroadcastConnection(input.getKey(), connection);
predecessor.addOutgoingConnection(connection);
}
} | 3.26 |
flink_OptimizerNode_getOperator_rdh | /**
* Gets the operator represented by this optimizer node.
*
* @return This node's operator.
*/
public Operator<?> getOperator() {
return this.operator;
} | 3.26 |
flink_OptimizerNode_prunePlanAlternatives_rdh | // --------------------------------------------------------------------------------------------
// Pruning
// --------------------------------------------------------------------------------------------
protected void prunePlanAlternatives(List<PlanNode> plans) {
if (plans.isEmpty()) {
throw new CompilerException(("No plan meeting the requirements could be created @ " + this) + ". Most likely reason: Too restrictive plan hints.");
}
// shortcut for the simple case
if (plans.size() == 1) {
return;
}
// we can only compare plan candidates that made equal choices
// at the branching points. for each choice at a branching point,
// we need to keep the cheapest (wrt. interesting properties).
// if we do not keep candidates for each branch choice, we might not
// find branch compatible candidates when joining the branches back.
// for pruning, we are quasi AFTER the node, so in the presence of
// branches, we need form the per-branch-choice groups by the choice
// they made at the latest un-joined branching node. Note that this is
// different from the check for branch compatibility of candidates, as
// this happens on the input sub-plans and hence BEFORE the node (therefore
// it is relevant to find the latest (partially) joined branch point.
if ((this.openBranches == null) || this.openBranches.isEmpty()) {
prunePlanAlternativesWithCommonBranching(plans);
} else {
// partition the candidates into groups that made the same sub-plan candidate
// choice at the latest unclosed branch point
final OptimizerNode[] branchDeterminers = new OptimizerNode[this.openBranches.size()];
for (int i = 0; i < branchDeterminers.length; i++) {
branchDeterminers[i] = this.openBranches.get((this.openBranches.size() - 1) - i).getBranchingNode();
}
// this sorter sorts by the candidate choice at the branch point
Comparator<PlanNode> sorter = new Comparator<PlanNode>() {
@Override
public int m2(PlanNode o1, PlanNode o2) {
for (OptimizerNode branchDeterminer : branchDeterminers) {
PlanNode n1 = o1.getCandidateAtBranchPoint(branchDeterminer);
PlanNode n2 = o2.getCandidateAtBranchPoint(branchDeterminer);
int hash1 = System.identityHashCode(n1);
int hash2 = System.identityHashCode(n2);
if (hash1 != hash2) {
return hash1 - hash2;
} }
return 0;
}
};
Collections.sort(plans, sorter);
        List<PlanNode> result = new ArrayList<PlanNode>();
        List<PlanNode> turn = new ArrayList<PlanNode>();
        final PlanNode[] determinerChoice = new PlanNode[branchDeterminers.length];
while (!plans.isEmpty()) {
// take one as the determiner
turn.clear();
PlanNode determiner = plans.remove(plans.size() - 1);
turn.add(determiner);
for (int i = 0; i < determinerChoice.length; i++) {
determinerChoice[i] = determiner.getCandidateAtBranchPoint(branchDeterminers[i]);
}
// go backwards through the plans and find all that are equal
boolean stillEqual = true;
for (int k = plans.size() - 1; (k >= 0) && stillEqual; k--) {
PlanNode toCheck = plans.get(k);
for (int i = 0; i < branchDeterminers.length; i++) {
PlanNode checkerChoice = toCheck.getCandidateAtBranchPoint(branchDeterminers[i]);
if (checkerChoice != determinerChoice[i]) {
// not the same anymore
stillEqual = false;
break;
}
}
if (stillEqual) {
// the same
plans.remove(k);
turn.add(toCheck);
}
} // now that we have only plans with the same branch alternatives, prune!
if (turn.size() > 1) {
prunePlanAlternativesWithCommonBranching(turn);
}
result.addAll(turn);
}
// after all turns are complete
plans.clear();
plans.addAll(result);
}
} | 3.26 |
flink_OptimizerNode_computeOutputEstimates_rdh | /**
* Causes this node to compute its output estimates (such as number of rows, size in bytes)
* based on the inputs and the compiler hints. The compiler hints are instantiated with
* conservative default values which are used if no other values are provided. Nodes may access
* the statistics to determine relevant information.
*
* @param statistics
* The statistics object which may be accessed to get statistical information.
* The parameter may be null, if no statistics are available.
*/
public void computeOutputEstimates(DataStatistics statistics) {
// sanity checking
for (DagConnection c : getIncomingConnections()) {
if (c.getSource() == null) {
throw new CompilerException("Bug: Estimate computation called before inputs have been set.");
}
}
// let every operator do its computation
computeOperatorSpecificDefaultEstimates(statistics);
if (this.estimatedOutputSize < 0) {
this.estimatedOutputSize = -1;
}
if (this.estimatedNumRecords < 0) {
this.estimatedNumRecords = -1;
    }
    // overwrite default estimates with hints, if given
if ((getOperator() == null) || (getOperator().getCompilerHints() == null)) {
return;
}
    CompilerHints hints = getOperator().getCompilerHints();
    if (hints.getOutputSize() >= 0) {
this.estimatedOutputSize = hints.getOutputSize();
}
    if (hints.getOutputCardinality() >= 0) {
this.estimatedNumRecords = hints.getOutputCardinality();
}
    if (hints.getFilterFactor() >= 0.0F) {
        if (this.estimatedNumRecords >= 0) {
            this.estimatedNumRecords = ((long) (this.estimatedNumRecords * hints.getFilterFactor()));
if (this.estimatedOutputSize >= 0) {
this.estimatedOutputSize = ((long) (this.estimatedOutputSize * hints.getFilterFactor()));
}
} else if (this instanceof SingleInputNode) {
OptimizerNode pred = ((SingleInputNode) (this)).getPredecessorNode();
if ((pred != null) && (pred.getEstimatedNumRecords() >= 0)) {
this.estimatedNumRecords = ((long) (pred.getEstimatedNumRecords() * hints.getFilterFactor()));
}
}
}
// use the width to infer the cardinality (given size) and vice versa
if (hints.getAvgOutputRecordSize() >= 1) {
// the estimated number of rows based on size
if ((this.estimatedNumRecords == (-1)) && (this.estimatedOutputSize >= 0)) {
this.estimatedNumRecords = ((long) (this.estimatedOutputSize / hints.getAvgOutputRecordSize()));
        } else if ((this.estimatedOutputSize == (-1)) && (this.estimatedNumRecords >= 0)) {
this.estimatedOutputSize = ((long) (this.estimatedNumRecords * hints.getAvgOutputRecordSize()));
}
}
} | 3.26 |
flink_OptimizerNode_getInterestingProperties_rdh | /**
* Gets the properties that are interesting for this node to produce.
*
* @return The interesting properties for this node, or null, if not yet computed.
*/
public InterestingProperties getInterestingProperties() {
return this.intProps;
} | 3.26 |
flink_OptimizerNode_computeUnionOfInterestingPropertiesFromSuccessors_rdh | /**
* Computes all the interesting properties that are relevant to this node. The interesting
* properties are a union of the interesting properties on each outgoing connection. However, if
* two interesting properties on the outgoing connections overlap, the interesting properties
* will occur only once in this set. For that, this method deduplicates and merges the
* interesting properties. This method returns copies of the original interesting properties
* objects and leaves the original objects, contained by the connections, unchanged.
*/
public void computeUnionOfInterestingPropertiesFromSuccessors() {
    List<DagConnection> conns = getOutgoingConnections();
    if (conns.size() == 0) {
// no incoming, we have none ourselves
this.intProps = new InterestingProperties();
} else {
this.intProps = conns.get(0).getInterestingProperties().clone();
        for (int i = 1; i < conns.size(); i++) {
this.intProps.addInterestingProperties(conns.get(i).getInterestingProperties());
}
}
this.intProps.dropTrivials();
} | 3.26 |