name | code_snippet | score
---|---|---|
flink_ExecEdge_hashShuffle_rdh | /**
* Return hash {@link Shuffle}.
*
* @param keys
* hash keys
*/
public static Shuffle hashShuffle(int[] keys) {
return new HashShuffle(keys);
} | 3.26 |
flink_ExecEdge_translateToPlan_rdh | /**
* Translates this edge into a Flink operator.
*
* @param planner
* The {@link Planner} of the translated Table.
*/
public Transformation<?> translateToPlan(Planner planner) {
return source.translateToPlan(planner);
} | 3.26 |
flink_RowType_validateFields_rdh | // --------------------------------------------------------------------------------------------
private static void validateFields(List<RowField> fields) {
final List<String> fieldNames = fields.stream().map(f -> f.name).collect(Collectors.toList());
if (fieldNames.stream().anyMatch(StringUtils::isNullOrWhitespaceOnly)) {
throw new ValidationException("Field names must contain at least one non-whitespace character.");
}
final Set<String> duplicates = fieldNames.stream().filter(n -> Collections.frequency(fieldNames, n) > 1).collect(Collectors.toSet());
if (!duplicates.isEmpty()) {
throw new ValidationException(String.format("Field names must be unique. Found duplicates: %s", duplicates));
}
} | 3.26 |
flink_BinaryHashBucketArea_startLookup_rdh | /**
* Probe start lookup joined build rows.
*/
void startLookup(int hashCode) {
final int posHashCode = findBucket(hashCode);
// get the bucket for the given hash code
final int bucketArrayPos = posHashCode >> table.bucketsPerSegmentBits;
final int bucketInSegmentOffset = (posHashCode & table.bucketsPerSegmentMask) << BUCKET_SIZE_BITS;
final MemorySegment bucket = this.buckets[bucketArrayPos];
table.bucketIterator.set(bucket, overflowSegments, partition, hashCode, bucketInSegmentOffset);
} | 3.26 |
flink_BinaryHashBucketArea_buildBloomFilterAndFree_rdh | /**
 * Three situations: 1. Bloom filters are not used: just free the memory. 2. A rehash is in
 * progress: free the new memory and let the rehash build the bloom filter from the old memory.
 * 3. Not rehashing and bloom filters are enabled: build the filter and then free the memory.
*/
void buildBloomFilterAndFree() {
if (inReHash || (!table.useBloomFilters)) {
freeMemory();
} else {
buildBloomFilterAndFree(buckets, numBuckets, overflowSegments);
}
} | 3.26 |
flink_BinaryHashBucketArea_insertToBucket_rdh | /**
* Insert into bucket by hashCode and pointer.
*
 * @return false when the own partition has been spilled.
*/
boolean insertToBucket(int hashCode, int pointer, boolean sizeAddAndCheckResize) throws IOException {
final int posHashCode = findBucket(hashCode);
// get the bucket for the given hash code
final int v47 = posHashCode >> table.bucketsPerSegmentBits;
final int bucketInSegmentPos = (posHashCode & table.bucketsPerSegmentMask) << BUCKET_SIZE_BITS;
final MemorySegment bucket = this.buckets[v47];
return insertToBucket(bucket, bucketInSegmentPos, hashCode, pointer, sizeAddAndCheckResize);
} | 3.26 |
flink_BinaryHashBucketArea_appendRecordAndInsert_rdh | /**
* Append record and insert to bucket.
*/
boolean appendRecordAndInsert(BinaryRowData record, int hashCode) throws IOException {
final int posHashCode = findBucket(hashCode);
// get the bucket for the given hash code
final int bucketArrayPos = posHashCode >> table.bucketsPerSegmentBits;
final int bucketInSegmentPos = (posHashCode & table.bucketsPerSegmentMask) << BUCKET_SIZE_BITS;
final MemorySegment v53 = this.buckets[bucketArrayPos];
if (((!table.tryDistinctBuildRow) || (!partition.isInMemory())) || (!findFirstSameBuildRow(v53, hashCode, bucketInSegmentPos, record))) {
int pointer = partition.insertIntoBuildBuffer(record);
if (pointer != (-1)) {
// record was inserted into an in-memory partition. a pointer must be inserted into
// the buckets
insertToBucket(v53, bucketInSegmentPos, hashCode, pointer, true);
return true;
} else {
return false;
}
} else {
// distinct build rows in memory.
return true;
}
} | 3.26 |
flink_BinaryHashBucketArea_findFirstSameBuildRow_rdh | /**
* For distinct build.
*/
private boolean findFirstSameBuildRow(MemorySegment bucket, int searchHashCode, int bucketInSegmentOffset, BinaryRowData buildRowToInsert) {
int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
int countInBucket = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
int numInBucket = 0;
RandomAccessInputView view = partition.getBuildStageInputView();
while (countInBucket != 0) {
while (numInBucket < countInBucket) {
final int thisCode = bucket.getInt(posInSegment);
posInSegment += HASH_CODE_LEN;
if (thisCode == searchHashCode) {
final int pointer = bucket.getInt((bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET) + (numInBucket * POINTER_LEN));
numInBucket++;
try {
view.setReadPosition(pointer);
BinaryRowData row = table.binaryBuildSideSerializer.mapFromPages(table.reuseBuildRow, view);
if (buildRowToInsert.equals(row)) {
return true;
}
} catch (IOException e) {
throw new RuntimeException("Error deserializing key or value from the hashtable: " + e.getMessage(), e);}
} else {
numInBucket++;
}
}
// this segment is done. check if there is another chained bucket
final int forwardPointer = bucket.getInt(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
return false;
}
final int overflowSegIndex = forwardPointer >>> table.segmentSizeBits;
bucket = overflowSegments[overflowSegIndex];
bucketInSegmentOffset = forwardPointer & table.segmentSizeMask;
countInBucket = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
numInBucket = 0;
}
return false;
} | 3.26 |
flink_SimpleCounter_inc_rdh | /**
* Increment the current count by the given value.
*
* @param n
* value to increment the current count by
*/
@Override
public void inc(long n) {
count += n;
} | 3.26 |
flink_SimpleCounter_dec_rdh | /**
* Decrement the current count by the given value.
*
* @param n
* value to decrement the current count by
*/
@Override
public void dec(long n) {
count -= n;
} | 3.26 |
flink_ColumnOperationUtils_addOrReplaceColumns_rdh | /**
* Creates a projection list that adds new or replaces existing (if a column with corresponding
* name already exists) columns.
*
 * <p><b>NOTE:</b> Resulting expressions are still unresolved.
*
* @param inputFields
* names of current columns
* @param newExpressions
* new columns to add
* @return projection expressions
*/
static List<Expression> addOrReplaceColumns(List<String> inputFields, List<Expression> newExpressions) {
LinkedHashMap<String, Expression> finalFields = new LinkedHashMap<>();
inputFields.forEach(field -> finalFields.put(field, unresolvedRef(field)));
newExpressions.forEach(expr -> {
String name = extractName(expr).orElse(expr.toString());
finalFields.put(name, expr);
});
return new ArrayList<>(finalFields.values());
} | 3.26 |
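The add-or-replace behavior above comes entirely from `LinkedHashMap` semantics: existing keys keep their position while their value is overwritten, and new keys are appended at the end. A minimal standalone sketch of that idea with plain strings; the column names and expression strings here are made up for illustration.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;

public class AddOrReplaceDemo {
    public static void main(String[] args) {
        List<String> inputFields = Arrays.asList("a", "b", "c");
        // Keyed by column name: existing names are replaced in place, new names are appended.
        LinkedHashMap<String, String> finalFields = new LinkedHashMap<>();
        inputFields.forEach(f -> finalFields.put(f, "$" + f)); // keep original columns
        finalFields.put("b", "b + 1");                          // replaces "b", keeps its position
        finalFields.put("d", "upper(a)");                       // appended at the end
        List<String> projection = new ArrayList<>(finalFields.values());
        System.out.println(projection); // [$a, b + 1, $c, upper(a)]
    }
}
```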
flink_ColumnOperationUtils_dropFields_rdh | /**
* Creates a projection list that removes given columns.
*
 * <p><b>NOTE:</b> Resulting expressions are still unresolved.
*
* @param inputFields
* names of current columns
* @param dropExpressions
* columns to remove
* @return projection expressions
 */
static List<Expression> dropFields(List<String> inputFields, List<Expression> dropExpressions) {
Set<String> columnsToDrop = dropExpressions.stream().map(expr -> expr.accept(dropColumnsExtractor)).collect(Collectors.toSet());
columnsToDrop.forEach(c -> {
if (!inputFields.contains(c)) {
throw new ValidationException(format("Field %s does not exist in source table",
c));
}
});
return inputFields.stream().filter(oldName -> !columnsToDrop.contains(oldName)).map(ApiExpressionUtils::unresolvedRef).collect(Collectors.toList());
} | 3.26 |
flink_DefaultFileWriterBucketFactory_getNewBucket_rdh | /**
* A factory returning {@link FileWriter writer}.
*/
| 3.26 |
flink_MessageHeaders_getCustomHeaders_rdh | /**
* Returns a collection of custom HTTP headers.
*
* <p>This default implementation returns an empty list. Override this method to provide custom
* headers if needed.
*
* @return a collection of custom {@link HttpHeaders}, empty by default.
*/
default Collection<HttpHeader> getCustomHeaders() {
return Collections.emptyList();
} | 3.26 |
flink_MessageHeaders_operationId_rdh | /**
* Returns a short description for this header suitable for method code generation.
*
* @return short description
*/
default String operationId() {
final String className = getClass().getSimpleName();
if (getHttpMethod() != HttpMethodWrapper.GET) {
throw new UnsupportedOperationException(("The default implementation is only supported for GET calls. Please override 'operationId()' in '" + className) + "'.");
}
final int headersSuffixStart = className.lastIndexOf("Headers");
if (headersSuffixStart == (-1)) {
throw new IllegalStateException(("Expect name of class " + getClass()) + " to end on 'Headers'. Please rename the class or override 'operationId()'.");
}
return getHttpMethod().name().toLowerCase(Locale.ROOT) + className.substring(0, headersSuffixStart);
} | 3.26 |
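The derivation above is pure string manipulation: the lowercased HTTP method name is prefixed to the class name with its `Headers` suffix stripped. A small self-contained sketch of the same convention; the class name `JobDetailsHeaders` and the method string `GET` are illustrative, not taken from the snippet.

```java
import java.util.Locale;

public class OperationIdDemo {
    static String operationId(String httpMethod, String headersClassName) {
        int suffixStart = headersClassName.lastIndexOf("Headers");
        if (suffixStart == -1) {
            throw new IllegalStateException("Expected class name to end with 'Headers': " + headersClassName);
        }
        // e.g. "GET" + "JobDetailsHeaders" -> "getJobDetails"
        return httpMethod.toLowerCase(Locale.ROOT) + headersClassName.substring(0, suffixStart);
    }

    public static void main(String[] args) {
        System.out.println(operationId("GET", "JobDetailsHeaders")); // getJobDetails
    }
}
```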
flink_KvStateInternalRequest_deserializeMessage_rdh | /**
* A {@link MessageDeserializer deserializer} for {@link KvStateInternalRequest}.
 */
public static class KvStateInternalRequestDeserializer implements MessageDeserializer<KvStateInternalRequest> {
@Override
public KvStateInternalRequest deserializeMessage(ByteBuf buf) {
KvStateID kvStateId = new KvStateID(buf.readLong(), buf.readLong());
int length = buf.readInt();
Preconditions.checkArgument(length >= 0, "Negative length for key and namespace. " + "This indicates a serialization error.");
byte[] serializedKeyAndNamespace = new byte[length];
if (length > 0) {
buf.readBytes(serializedKeyAndNamespace);
}
return new KvStateInternalRequest(kvStateId, serializedKeyAndNamespace);
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_getNextOffsetToLoad_rdh | /**
* Returns Long.MAX_VALUE if it shouldn't load.
*/
private long getNextOffsetToLoad() {
int bufferIndex = bufferIndexManager.getNextToLoad();
if (bufferIndex < 0) {return Long.MAX_VALUE;
} else {
return cachedRegionManager.getFileOffset(bufferIndex);
}
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_compareTo_rdh | /**
* Provides priority calculation logic for io scheduler.
*/
@Override
public int compareTo(HsSubpartitionFileReader that) {
checkArgument(that instanceof HsSubpartitionFileReaderImpl);
return Long.compare(getNextOffsetToLoad(), ((HsSubpartitionFileReaderImpl) (that)).getNextOffsetToLoad());
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_prepareForScheduling_rdh | /**
 * Refreshes the downstream consumption progress before another round of read scheduling.
*/
@Override
public void prepareForScheduling() {
// Access the consuming offset with lock, to prevent loading any buffer released from the
// memory data manager that is already consumed.
int consumingOffset = operations.getConsumingOffset(true);
bufferIndexManager.updateLastConsumed(consumingOffset);
cachedRegionManager.updateConsumingOffset(consumingOffset);
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_updateCachedRegionIfNeeded_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
/**
* Points the cursors to the given buffer index, if possible.
*/
private void updateCachedRegionIfNeeded(int bufferIndex) {
if (isInCachedRegion(bufferIndex)) {
int numAdvance = bufferIndex - currentBufferIndex;
numSkip += numAdvance;
numReadable -= numAdvance;
currentBufferIndex = bufferIndex;
return;
}
Optional<HsFileDataIndex.ReadableRegion> lookupResultOpt = dataIndex.getReadableRegion(subpartitionId, bufferIndex, consumingOffset);
if (!lookupResultOpt.isPresent()) {
currentBufferIndex = -1;
numReadable = 0;
numSkip = 0;
offset = -1L;
} else {
HsFileDataIndex.ReadableRegion cachedRegion = lookupResultOpt.get();
currentBufferIndex = bufferIndex;
numSkip = cachedRegion.numSkip;
numReadable = cachedRegion.numReadable;
offset = cachedRegion.offset;
}
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_updateConsumingOffset_rdh | // ------------------------------------------------------------------------
// Called by HsSubpartitionFileReader
// ------------------------------------------------------------------------
public void updateConsumingOffset(int consumingOffset) {
this.consumingOffset = consumingOffset;
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_getFileOffset_rdh | /**
 * Returns Long.MAX_VALUE if the region does not exist, giving it the lowest priority.
 */
private long getFileOffset(int bufferIndex) {
updateCachedRegionIfNeeded(bufferIndex);
return currentBufferIndex == (-1) ? Long.MAX_VALUE : offset;
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_readBuffers_rdh | /**
* Read subpartition data into buffers.
*
* <p>This transfers the ownership of used buffers to this class. It's this class'
* responsibility to release the buffers using the recycler when no longer needed.
*
* <p>Calling this method does not always use up all the provided buffers. It's this class'
 * decision when to stop reading. Currently, it stops reading when: 1) the buffers are used up,
 * 2) it reaches the end of the subpartition data within the region, or 3) enough data has been
 * read ahead of the downstream consuming offset.
*/
@Override
public void readBuffers(Queue<MemorySegment> buffers, BufferRecycler recycler) throws IOException {
synchronized(f0) {
if (isReleased) {
return;
}
if (isFailed) {
throw new IOException("subpartition reader has already failed."); }
int v1 = bufferIndexManager.getNextToLoad();
if (v1 < 0) {
return;
}
// If lookup result is empty, it means that one of the following things has happened:
// 1) The target buffer has not been spilled into disk.
// 2) The target buffer has not been released from memory.
// So, just skip this round reading.
int numRemainingBuffer = cachedRegionManager.getRemainingBuffersInRegion(v1);
if (numRemainingBuffer == 0) {
return;}
moveFileOffsetToBuffer(v1);
int indexToLoad;
int numLoaded = 0;
while ((!buffers.isEmpty()) && ((indexToLoad = bufferIndexManager.getNextToLoad()) >= 0) && ((numRemainingBuffer--) > 0)) {
MemorySegment segment = buffers.poll();
Buffer buffer;
try {
if ((buffer = readFromByteChannel(dataFileChannel, headerBuf, segment, recycler)) == null) {
buffers.add(segment);
break;
} } catch (Throwable throwable) {
buffers.add(segment);
throw throwable;
}
tryIncreaseBacklog(buffer);
loadedBuffers.add(BufferIndexOrError.newBuffer(buffer, indexToLoad));
bufferIndexManager.updateLastLoaded(indexToLoad);
cachedRegionManager.advance(buffer.readableBytes() + BufferReaderWriterUtil.HEADER_LENGTH);
++numLoaded;
}
if (loadedBuffers.size() <= numLoaded) {
operations.notifyDataAvailable();
}
}
} | 3.26 |
flink_HsSubpartitionFileReaderImpl_getNextToLoad_rdh | /**
 * Returns a negative value if it shouldn't load.
*/
private int getNextToLoad() {
int nextToLoad = Math.max(lastLoaded, lastConsumed) + 1;
int maxToLoad = lastConsumed + maxBuffersReadAhead;
return nextToLoad <= maxToLoad ? nextToLoad : -1;
}
}
/**
* Maintains a set of cursors on the last fetched readable region.
*
* <p>The semantics are:
*
* <ol>
* <li>The offset of the buffer with {@code currentBufferIndex} in file can be derived by
* starting from {@code offset} and skipping {@code numSkip} buffers.
* <li>The {@code numReadable} | 3.26 |
flink_QueryableStateStream_getKeySerializer_rdh | /**
* Returns the key serializer for the queryable state instance.
*
* @return Key serializer for the state instance.
*/
public TypeSerializer<K> getKeySerializer() {
return keySerializer;} | 3.26 |
flink_PbSchemaValidationUtils_validateTypeMatch_rdh | /**
* Validate type match of general type.
*
* @param fd
* the {@link Descriptors.Descriptor} of the protobuf object.
* @param logicalType
* the corresponding {@link LogicalType} to the {@link FieldDescriptor}
*/
private static void validateTypeMatch(FieldDescriptor fd, LogicalType logicalType) {
if (!fd.isRepeated()) {
if (fd.getJavaType() != JavaType.MESSAGE) {
// simple type
m0(fd, logicalType.getTypeRoot());
} else {
// message type
if (!(logicalType instanceof RowType)) {
throw new ValidationException(("Unexpected LogicalType: " + logicalType) + ". It should be RowType");
}
validateTypeMatch(fd.getMessageType(),
((RowType) (logicalType)));
}
} else if (fd.isMapField()) { // map type
if (!(logicalType instanceof MapType)) {
throw new ValidationException(("Unexpected LogicalType: " + logicalType) + ". It should be MapType");
}
MapType mapType = ((MapType) (logicalType));
m0(fd.getMessageType().findFieldByName(PbConstant.PB_MAP_KEY_NAME), mapType.getKeyType().getTypeRoot());
validateTypeMatch(fd.getMessageType().findFieldByName(PbConstant.PB_MAP_VALUE_NAME), mapType.getValueType());
} else {
// array type
if (!(logicalType instanceof ArrayType)) {
throw new ValidationException(("Unexpected LogicalType: " + logicalType) + ". It should be ArrayType");
}
ArrayType arrayType = ((ArrayType) (logicalType));
if (fd.getJavaType() == JavaType.MESSAGE) {
// array message type
LogicalType elementType = arrayType.getElementType();
if (!(elementType instanceof RowType)) {
throw new ValidationException(("Unexpected logicalType: " + elementType) + ". It should be RowType");
}
validateTypeMatch(fd.getMessageType(), ((RowType) (elementType)));
} else {
// array simple type
m0(fd, arrayType.getElementType().getTypeRoot());
}
}
} | 3.26 |
flink_PbSchemaValidationUtils_m0_rdh | /**
* Only validate type match for simple type like int, long, string, boolean.
*
* @param fd
* {@link FieldDescriptor} in proto descriptor
* @param logicalTypeRoot
* {@link LogicalTypeRoot} of row element
*/
private static void m0(FieldDescriptor fd, LogicalTypeRoot logicalTypeRoot)
{
if (!TYPE_MATCH_MAP.containsKey(fd.getJavaType())) {
throw new ValidationException("Unsupported protobuf java type: " + fd.getJavaType());
}
if (TYPE_MATCH_MAP.get(fd.getJavaType()).stream().noneMatch(x -> x == logicalTypeRoot)) {
throw new ValidationException((("Protobuf field type does not match column type, " + fd.getJavaType()) + "(protobuf) is not compatible of ") + logicalTypeRoot);
}
} | 3.26 |
flink_FlinkContainerTestEnvironment_getFlinkContainers_rdh | /**
* Get instance of Flink containers for cluster controlling.
*
* @return Flink cluster on Testcontainers
 */
public FlinkContainers getFlinkContainers() {
return this.flinkContainers;
} | 3.26 |
flink_KubernetesJobGraphStoreUtil_jobIDToName_rdh | /**
* Convert a {@link JobID} to config map key. We will add prefix {@link Constants#JOB_GRAPH_STORE_KEY_PREFIX}.
*
* @param jobID
* job id
* @return a key to store job graph in the ConfigMap
*/
public String jobIDToName(JobID jobID) {
return JOB_GRAPH_STORE_KEY_PREFIX + jobID;
} | 3.26 |
flink_KubernetesJobGraphStoreUtil_nameToJobID_rdh | /**
* Convert a key in ConfigMap to {@link JobID}. The key is stored with prefix {@link Constants#JOB_GRAPH_STORE_KEY_PREFIX}.
*
* @param key
* job graph key in ConfigMap.
* @return the parsed {@link JobID}.
*/
public JobID nameToJobID(String key) {
return JobID.fromHexString(key.substring(JOB_GRAPH_STORE_KEY_PREFIX.length()));
} | 3.26 |
flink_DataStream_setConnectionType_rdh | /**
* Internal function for setting the partitioner for the DataStream.
*
* @param partitioner
* Partitioner to set.
* @return The modified DataStream.
*/
protected DataStream<T> setConnectionType(StreamPartitioner<T> partitioner) {
return new DataStream<>(this.getExecutionEnvironment(), new PartitionTransformation<>(this.getTransformation(), partitioner));
} | 3.26 |
flink_DataStream_rescale_rdh | /**
* Sets the partitioning of the {@link DataStream} so that the output elements are distributed
* evenly to a subset of instances of the next operation in a round-robin fashion.
*
* <p>The subset of downstream operations to which the upstream operation sends elements depends
* on the degree of parallelism of both the upstream and downstream operation. For example, if
* the upstream operation has parallelism 2 and the downstream operation has parallelism 4, then
* one upstream operation would distribute elements to two downstream operations while the other
* upstream operation would distribute to the other two downstream operations. If, on the other
* hand, the downstream operation has parallelism 2 while the upstream operation has parallelism
* 4 then two upstream operations will distribute to one downstream operation while the other
* two upstream operations will distribute to the other downstream operations.
*
* <p>In cases where the different parallelisms are not multiples of each other one or several
* downstream operations will have a differing number of inputs from upstream operations.
*
* @return The DataStream with rescale partitioning set.
*/
@PublicEvolving
public DataStream<T> rescale() {
return setConnectionType(new RescalePartitioner<T>());
} | 3.26 |
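As a rough usage sketch of the parallelism relationship described above (2 upstream subtasks feeding 4 downstream subtasks, so each upstream subtask round-robins its output over a fixed subset of two downstream subtasks). The mapping functions and the job name are placeholders, not taken from the snippet.

```java
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RescaleDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> upstream = env
                .fromElements("a", "b", "c", "d")
                .map(new MapFunction<String, String>() {
                    @Override
                    public String map(String value) {
                        return value.toUpperCase();
                    }
                })
                .setParallelism(2); // 2 upstream subtasks
        upstream.rescale()          // each upstream subtask serves a fixed subset downstream
                .map(new MapFunction<String, String>() {
                    @Override
                    public String map(String value) {
                        return value + "!";
                    }
                })
                .setParallelism(4)  // 4 downstream subtasks
                .print();
        env.execute("rescale-demo");
    }
}
```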
flink_DataStream_printToErr_rdh | /**
* Writes a DataStream to the standard error stream (stderr).
*
* <p>For each element of the DataStream the result of {@link Object#toString()} is written.
*
* <p>NOTE: This will print to stderr on the machine where the code is executed, i.e. the Flink
* worker.
*
* @param sinkIdentifier
* The string to prefix the output with.
* @return The closed DataStream.
*/
@PublicEvolving
public DataStreamSink<T> printToErr(String sinkIdentifier) {
PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, true);
return addSink(printFunction).name("Print to Std. Err");
}
/**
* Writes a DataStream to the file specified by path in text format.
*
* <p>For every element of the DataStream the result of {@link Object#toString()} is written.
*
* @param path
* The path pointing to the location the text file is written to.
* @return The closed DataStream.
* @deprecated Please use the {@link org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink} explicitly
using the {@link #addSink(SinkFunction)} | 3.26 |
flink_DataStream_addSink_rdh | /**
* Adds the given sink to this DataStream. Only streams with sinks added will be executed once
* the {@link StreamExecutionEnvironment#execute()} method is called.
*
* @param sinkFunction
* The object containing the sink's invoke function.
* @return The closed DataStream.
*/
public DataStreamSink<T> addSink(SinkFunction<T> sinkFunction) {
// read the output type of the input Transform to coax out errors about MissingTypeInfo
transformation.getOutputType();
// configure the type if needed
if (sinkFunction instanceof InputTypeConfigurable) {
((InputTypeConfigurable) (sinkFunction)).setInputType(getType(), getExecutionConfig());
}
return DataStreamSink.forSinkFunction(this, clean(sinkFunction));
} | 3.26 |
flink_DataStream_map_rdh | /**
* Applies a Map transformation on a {@link DataStream}. The transformation calls a {@link MapFunction} for each element of the DataStream. Each MapFunction call returns exactly one
* element. The user can also extend {@link RichMapFunction} to gain access to other features
* provided by the {@link org.apache.flink.api.common.functions.RichFunction} interface.
*
* @param mapper
* The MapFunction that is called for each element of the DataStream.
* @param outputType
* {@link TypeInformation} for the result type of the function.
* @param <R>
* output type
* @return The transformed {@link DataStream}.
*/
public <R> SingleOutputStreamOperator<R> map(MapFunction<T, R> mapper, TypeInformation<R> outputType) {
return transform("Map", outputType, new StreamMap<>(clean(mapper)));
} | 3.26 |
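A hedged usage sketch for the two-argument variant above, where the result type is supplied explicitly instead of being extracted from the lambda. The input data is made up; `Types.INT` is the shortcut from `org.apache.flink.api.common.typeinfo.Types`.

```java
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class MapWithTypeInfoDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> words = env.fromElements("flink", "datastream", "map");
        // Passing TypeInformation explicitly avoids relying on lambda type extraction.
        DataStream<Integer> lengths = words.map(s -> s.length(), Types.INT);
        lengths.print();
        env.execute("map-with-type-info");
    }
}
```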
flink_DataStream_filter_rdh | /**
* Applies a Filter transformation on a {@link DataStream}. The transformation calls a {@link FilterFunction} for each element of the DataStream and retains only those element for which
* the function returns true. Elements for which the function returns false are filtered. The
* user can also extend {@link RichFilterFunction} to gain access to other features provided by
* the {@link org.apache.flink.api.common.functions.RichFunction} interface.
*
* @param filter
* The FilterFunction that is called for each element of the DataStream.
* @return The filtered DataStream.
*/
public SingleOutputStreamOperator<T> filter(FilterFunction<T> filter) {
return transform("Filter", getType(), new StreamFilter<>(clean(filter)));
} | 3.26 |
flink_DataStream_shuffle_rdh | /**
* Sets the partitioning of the {@link DataStream} so that the output elements are shuffled
* uniformly randomly to the next operation.
*
* @return The DataStream with shuffle partitioning set.
*/
@PublicEvolving
public DataStream<T> shuffle() {
return setConnectionType(new ShufflePartitioner<T>());
} | 3.26 |
flink_DataStream_project_rdh | /**
* Initiates a Project transformation on a {@link Tuple} {@link DataStream}.<br>
* <b>Note: Only Tuple DataStreams can be projected.</b>
*
* <p>The transformation projects each Tuple of the DataSet onto a (sub)set of fields.
*
* @param fieldIndexes
* The field indexes of the input tuples that are retained. The order of
* fields in the output tuple corresponds to the order of field indexes.
* @return The projected DataStream
* @see Tuple
* @see DataStream
*/
@PublicEvolving
public <R extends Tuple> SingleOutputStreamOperator<R> project(int... fieldIndexes) {
return new StreamProjection<>(this, fieldIndexes).projectTupleX();
} | 3.26 |
flink_DataStream_countWindowAll_rdh | /**
* Windows this {@code DataStream} into sliding count windows.
*
* <p>Note: This operation is inherently non-parallel since all elements have to pass through
* the same operator instance.
*
* @param size
* The size of the windows in number of elements.
* @param slide
* The slide interval in number of elements.
*/
public AllWindowedStream<T, GlobalWindow> countWindowAll(long size, long slide) {
return windowAll(GlobalWindows.create()).evictor(CountEvictor.of(size)).trigger(CountTrigger.of(slide));
} | 3.26 |
flink_DataStream_executeAndCollect_rdh | /**
* Triggers the distributed execution of the streaming dataflow and returns an iterator over the
* elements of the given DataStream.
*
* <p>The DataStream application is executed in the regular distributed manner on the target
* environment, and the events from the stream are polled back to this application process and
* thread through Flink's REST API.
*/
public List<T> executeAndCollect(String jobExecutionName, int limit) throws Exception {
Preconditions.checkState(limit > 0, "Limit must be greater than 0");
try (ClientAndIterator<T> clientAndIterator = executeAndCollectWithClient(jobExecutionName)) {
List<T> results = new ArrayList<>(limit);
while ((limit > 0) && clientAndIterator.iterator.hasNext()) {
results.add(clientAndIterator.iterator.next());
limit--;
}
return results;
}
} | 3.26 |
flink_DataStream_partitionCustom_rdh | // private helper method for custom partitioning
private <K> DataStream<T> partitionCustom(Partitioner<K> partitioner,
Keys<T> keys) {
KeySelector<T, K> keySelector = KeySelectorUtil.getSelectorForOneKey(keys, partitioner, getType(), getExecutionConfig());
return setConnectionType(new CustomPartitionerWrapper<>(clean(partitioner), clean(keySelector)));
} | 3.26 |
flink_DataStream_getPreferredResources_rdh | /**
* Gets the preferred resources for this operator.
*
* @return The preferred resources set for this operator.
*/ @PublicEvolving
public ResourceSpec getPreferredResources() {
return transformation.getPreferredResources();
} | 3.26 |
flink_DataStream_m1_rdh | /**
* Windows this {@code DataStream} into tumbling count windows.
*
* <p>Note: This operation is inherently non-parallel since all elements have to pass through
* the same operator instance.
*
* @param size
* The size of the windows in number of elements.
*/
public AllWindowedStream<T, GlobalWindow> m1(long size) {
return windowAll(GlobalWindows.create()).trigger(PurgingTrigger.of(CountTrigger.of(size)));
} | 3.26 |
flink_DataStream_process_rdh | /**
* Applies the given {@link ProcessFunction} on the input stream, thereby creating a transformed
* output stream.
*
* <p>The function will be called for every element in the input streams and can produce zero or
* more output elements.
*
* @param processFunction
* The {@link ProcessFunction} that is called for each element in the
* stream.
* @param outputType
* {@link TypeInformation} for the result type of the function.
* @param <R>
* The type of elements emitted by the {@code ProcessFunction}.
* @return The transformed {@link DataStream}.
*/
@Internal
public <R> SingleOutputStreamOperator<R> process(ProcessFunction<T, R> processFunction, TypeInformation<R> outputType) {
ProcessOperator<T, R> operator = new ProcessOperator<>(clean(processFunction));
return transform("Process", outputType, operator);} | 3.26 |
flink_DataStream_flatMap_rdh | /**
* Applies a FlatMap transformation on a {@link DataStream}. The transformation calls a {@link FlatMapFunction} for each element of the DataStream. Each FlatMapFunction call can return any
* number of elements including none. The user can also extend {@link RichFlatMapFunction} to
* gain access to other features provided by the {@link org.apache.flink.api.common.functions.RichFunction} interface.
*
* @param flatMapper
* The FlatMapFunction that is called for each element of the DataStream
* @param outputType
* {@link TypeInformation} for the result type of the function.
* @param <R>
* output type
* @return The transformed {@link DataStream}.
*/
public <R> SingleOutputStreamOperator<R> flatMap(FlatMapFunction<T, R> flatMapper, TypeInformation<R> outputType) {
return transform("Flat Map", outputType, new StreamFlatMap<>(clean(flatMapper)));
} | 3.26 |
flink_DataStream_union_rdh | /**
* Creates a new {@link DataStream} by merging {@link DataStream} outputs of the same type with
* each other. The DataStreams merged using this operator will be transformed simultaneously.
*
* @param streams
* The DataStreams to union output with.
* @return The {@link DataStream}.
*/
@SafeVarargs
public final DataStream<T> union(DataStream<T>... streams) {
List<Transformation<T>> unionedTransforms = new ArrayList<>();
unionedTransforms.add(this.transformation);
for (DataStream<T> newStream : streams) {
if (!getType().equals(newStream.getType())) {
throw new IllegalArgumentException((("Cannot union streams of different types: " + getType()) + " and ") + newStream.getType());
}
unionedTransforms.add(newStream.getTransformation());
}
return new DataStream<>(this.environment, new UnionTransformation<>(unionedTransforms));
} | 3.26 |
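A quick usage sketch of union, merging three streams of the same element type; the element values and job name are illustrative only.

```java
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class UnionDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Long> a = env.fromElements(1L, 2L);
        DataStream<Long> b = env.fromElements(3L, 4L);
        DataStream<Long> c = env.fromElements(5L, 6L);
        // All three streams carry the same type, so they can be merged into one stream.
        DataStream<Long> all = a.union(b, c);
        all.print();
        env.execute("union-demo");
    }
}
```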
flink_DataStream_getExecutionEnvironment_rdh | /**
* Returns the {@link StreamExecutionEnvironment} that was used to create this {@link DataStream}.
*
* @return The Execution Environment
*/
public StreamExecutionEnvironment getExecutionEnvironment() {
return environment;
} | 3.26 |
flink_DataStream_collectAsync_rdh | /**
* Sets up the collection of the elements in this {@link DataStream}, which can be retrieved
* later via the given {@link Collector}.
*
* <p>Caution: When multiple streams are being collected it is recommended to consume all
* streams in parallel to not back-pressure the job.
*
* <p>Caution: Closing the iterator from the collector cancels the job! It is recommended to
* close all iterators once you are no longer interested in any of the collected streams.
*
* <p>This method is functionally equivalent to {@link #collectAsync()}.
*
* <p>This method is meant to support use-cases where the application of a sink is done via a
* {@code Consumer<DataStream<T>>}, where it wouldn't be possible (or inconvenient) to return an
* iterator.
*
* @param collector
* a collector that can be used to retrieve the elements
*/
@Experimental
public void collectAsync(Collector<T> collector) {
TypeSerializer<T> serializer = getType().createSerializer(getExecutionEnvironment().getConfig());
String accumulatorName = "dataStreamCollect_" + UUID.randomUUID().toString();
StreamExecutionEnvironment env = getExecutionEnvironment();
CollectSinkOperatorFactory<T> factory = new CollectSinkOperatorFactory<>(serializer, accumulatorName);
CollectSinkOperator<T> operator = ((CollectSinkOperator<T>) (factory.getOperator()));
long resultFetchTimeout = env.getConfiguration().get(AkkaOptions.ASK_TIMEOUT_DURATION).toMillis();
CollectResultIterator<T> iterator = new CollectResultIterator<>(operator.getOperatorIdFuture(), serializer, accumulatorName, env.getCheckpointConfig(), resultFetchTimeout);
CollectStreamSink<T> sink = new CollectStreamSink<>(this, factory);
sink.name("Data stream collect sink");
env.addOperator(sink.getTransformation());
env.registerCollectIterator(iterator);
collector.setIterator(iterator);
}
/**
* This class acts as an accessor to elements collected via {@link #collectAsync(Collector)} | 3.26 |
flink_DataStream_windowAll_rdh | /**
* Windows this data stream to a {@code AllWindowedStream}, which evaluates windows over a non
* key grouped stream. Elements are put into windows by a {@link org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}. The grouping of elements
* is done by window.
*
* <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
* specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger} that is used if a {@code Trigger} is not specified.
*
* <p>Note: This operation is inherently non-parallel since all elements have to pass through
* the same operator instance.
*
* @param assigner
* The {@code WindowAssigner} that assigns elements to windows.
* @return The trigger windows data stream.
*/
@PublicEvolving
public <W extends Window> AllWindowedStream<T, W> windowAll(WindowAssigner<? super T, W> assigner) {
return new AllWindowedStream<>(this,
assigner);
} | 3.26 |
flink_DataStream_print_rdh | /**
* Writes a DataStream to the standard output stream (stdout).
*
* <p>For each element of the DataStream the result of {@link Object#toString()} is written.
*
* <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
* worker.
*
* @param sinkIdentifier
* The string to prefix the output with.
* @return The closed DataStream.
*/
@PublicEvolving
public DataStreamSink<T> print(String sinkIdentifier) {
PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, false);
return addSink(printFunction).name("Print to Std. Out");
} | 3.26 |
flink_DataStream_writeToSocket_rdh | /**
* Writes the DataStream to a socket as a byte array. The format of the output is specified by a
* {@link SerializationSchema}.
*
* @param hostName
* host of the socket
* @param port
* port of the socket
* @param schema
* schema for serialization
* @return the closed DataStream
*/
@PublicEvolving
public DataStreamSink<T> writeToSocket(String hostName, int port, SerializationSchema<T> schema) {
DataStreamSink<T> returnStream = addSink(new SocketClientSink<>(hostName, port, schema, 0));
returnStream.setParallelism(1); // It would not work if multiple instances would connect to the same port
return returnStream;
}
/**
* Writes the dataStream into an output, described by an OutputFormat.
*
* <p>The output is not participating in Flink's checkpointing!
*
* <p>For writing to a file system periodically, the use of the {@link org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink} is recommended.
*
* @param format
* The output format
* @return The closed DataStream
* @deprecated Please use the {@link org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink} explicitly
using the {@link #addSink(SinkFunction)} | 3.26 |
flink_DataStream_transform_rdh | /**
* Method for passing user defined operators created by the given factory along with the type
* information that will transform the DataStream.
*
* <p>This method uses the rather new operator factories and should only be used when custom
* factories are needed.
*
* @param operatorName
* name of the operator, for logging purposes
* @param outTypeInfo
* the output type of the operator
* @param operatorFactory
* the factory for the operator.
* @param <R>
* type of the return stream
* @return the data stream constructed.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> transform(String operatorName, TypeInformation<R> outTypeInfo, OneInputStreamOperatorFactory<T, R> operatorFactory) {
return m2(operatorName, outTypeInfo, operatorFactory);
} | 3.26 |
flink_DataStream_getType_rdh | /**
* Gets the type of the stream.
*
* @return The type of the datastream.
*/
public TypeInformation<T> getType() {
return
transformation.getOutputType();
} | 3.26 |
flink_DataStream_broadcast_rdh | /**
* Sets the partitioning of the {@link DataStream} so that the output elements are broadcasted
* to every parallel instance of the next operation.
*
* @return The DataStream with broadcast partitioning set.
*/
public DataStream<T> broadcast() {
return setConnectionType(new BroadcastPartitioner<T>());
}
/**
* Sets the partitioning of the {@link DataStream} so that the output elements are broadcasted
 * to every parallel instance of the next operation. In addition, it implicitly creates as many {@link org.apache.flink.api.common.state.BroadcastState broadcast states} as the specified
* descriptors which can be used to store the element of the stream.
*
* @param broadcastStateDescriptors
* the descriptors of the broadcast states to create.
* @return A {@link BroadcastStream} which can be used in the {@link #connect(BroadcastStream)}
to create a {@link BroadcastConnectedStream} | 3.26 |
flink_DataStream_assignTimestampsAndWatermarks_rdh | /**
* Assigns timestamps to the elements in the data stream and creates watermarks based on events,
* to signal event time progress.
*
* <p>This method uses the deprecated watermark generator interfaces. Please switch to {@link #assignTimestampsAndWatermarks(WatermarkStrategy)} to use the new interfaces instead. The new
* interfaces support watermark idleness and no longer need to differentiate between "periodic"
* and "punctuated" watermarks.
*
* @deprecated Please use {@link #assignTimestampsAndWatermarks(WatermarkStrategy)} instead.
*/
@Deprecated
public SingleOutputStreamOperator<T> assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks<T> timestampAndWatermarkAssigner) {
final AssignerWithPunctuatedWatermarks<T> cleanedAssigner = clean(timestampAndWatermarkAssigner);
final WatermarkStrategy<T> wms = new AssignerWithPunctuatedWatermarksAdapter.Strategy<>(cleanedAssigner);
return assignTimestampsAndWatermarks(wms);
} | 3.26 |
flink_DataStream_rebalance_rdh | /**
* Sets the partitioning of the {@link DataStream} so that the output elements are distributed
* evenly to instances of the next operation in a round-robin fashion.
*
* @return The DataStream with rebalance partitioning set.
*/
public DataStream<T> rebalance() {
return setConnectionType(new RebalancePartitioner<T>());
} | 3.26 |
flink_DataStream_getOutput_rdh | /**
* Returns an iterator over the collected elements. The returned iterator must only be used
* once the job execution was triggered.
*
* <p>This method will always return the same iterator instance.
*
* @return iterator over collected elements
*/public CloseableIterator<T> getOutput() {
// we intentionally fail here instead of waiting, because it indicates a
// misunderstanding on the user and would usually just block the application
Preconditions.checkNotNull(iterator, "The job execution was not yet started.");
return iterator;
} | 3.26 |
flink_DataStream_connect_rdh | /**
* Creates a new {@link BroadcastConnectedStream} by connecting the current {@link DataStream}
* or {@link KeyedStream} with a {@link BroadcastStream}.
*
* <p>The latter can be created using the {@link #broadcast(MapStateDescriptor[])} method.
*
* <p>The resulting stream can be further processed using the {@code BroadcastConnectedStream.process(MyFunction)} method, where {@code MyFunction} can be either
* a {@link org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction
* KeyedBroadcastProcessFunction} or a {@link org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction
* BroadcastProcessFunction} depending on the current stream being a {@link KeyedStream} or not.
*
* @param broadcastStream
* The broadcast stream with the broadcast state to be connected with
* this stream.
* @return The {@link BroadcastConnectedStream}.
*/
@PublicEvolving
public <R> BroadcastConnectedStream<T, R> connect(BroadcastStream<R> broadcastStream) {
return new BroadcastConnectedStream<>(environment, this, Preconditions.checkNotNull(broadcastStream), broadcastStream.getBroadcastStateDescriptors());
} | 3.26 |
flink_DataStream_global_rdh | /**
* Sets the partitioning of the {@link DataStream} so that the output values all go to the first
* instance of the next processing operator. Use this setting with care since it might cause a
* serious performance bottleneck in the application.
*
 * @return The DataStream with global partitioning set.
*/
@PublicEvolving
public DataStream<T> global() {
return setConnectionType(new GlobalPartitioner<T>());
}
/**
* Initiates an iterative part of the program that feeds back data streams. The iterative part
* needs to be closed by calling {@link IterativeStream#closeWith(DataStream)}. The
* transformation of this IterativeStream will be the iteration head. The data stream given to
* the {@link IterativeStream#closeWith(DataStream)} method is the data stream that will be fed
* back and used as the input for the iteration head. The user can also use different feedback
 * type than the input of the iteration and treat the input and feedback streams as a {@link ConnectedStreams} by calling {@link IterativeStream#withFeedbackType(TypeInformation)}
*
* <p>A common usage pattern for streaming iterations is to use output splitting to send a part
* of the closing data stream to the head. Refer to {@link ProcessFunction.Context#output(OutputTag, Object)} for more information.
*
* <p>The iteration edge will be partitioned the same way as the first input of the iteration
* head unless it is changed in the {@link IterativeStream#closeWith(DataStream)} | 3.26 |
flink_DataStream_join_rdh | /**
* Creates a join operation. See {@link JoinedStreams} for an example of how the keys and window
* can be specified.
*/
public <T2> JoinedStreams<T, T2> join(DataStream<T2> otherStream) {
return new JoinedStreams<>(this, otherStream);
}
/**
* Windows this {@code DataStream} into tumbling time windows.
*
* <p>This is a shortcut for either {@code .window(TumblingEventTimeWindows.of(size))} or {@code .window(TumblingProcessingTimeWindows.of(size))} depending on the time characteristic set
* using
*
* <p>Note: This operation is inherently non-parallel since all elements have to pass through
* the same operator instance.
*
* <p>{@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}
*
* @param size
* The size of the window.
* @deprecated Please use {@link #windowAll(WindowAssigner)} with either {@link TumblingEventTimeWindows} or {@link TumblingProcessingTimeWindows}. For more information,
see the deprecation notice on {@link TimeCharacteristic} | 3.26 |
flink_DataStream_keyBy_rdh | /**
* Partitions the operator state of a {@link DataStream} using field expressions. A field
* expression is either the name of a public field or a getter method with parentheses of the
* {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in
* {@code "field1.getInnerField2()"}.
*
* @deprecated Use {@link DataStream#keyBy(KeySelector)}.
* @param fields
* One or more field expressions on which the state of the {@link DataStream}
* operators will be partitioned.
* @return The {@link DataStream} with partitioned state (i.e. KeyedStream)
*/
@Deprecated
public KeyedStream<T, Tuple> keyBy(String... fields) {
return keyBy(new Keys.ExpressionKeys<>(fields, getType()));
} | 3.26 |
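Since the field-expression variant above is deprecated, here is a hedged sketch of the recommended `keyBy(KeySelector)` form; the `Tuple2` data and job name are made up for illustration.

```java
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KeyByDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Tuple2<String, Integer>> counts =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("b", 2), Tuple2.of("a", 3));
        // Key by the first tuple field using a KeySelector instead of the deprecated field expression.
        KeyedStream<Tuple2<String, Integer>, String> keyed = counts.keyBy(t -> t.f0);
        keyed.sum(1).print();
        env.execute("key-by-demo");
    }
}
```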
flink_DataStream_clean_rdh | /**
* Invokes the {@link org.apache.flink.api.java.ClosureCleaner} on the given function if closure
* cleaning is enabled in the {@link ExecutionConfig}.
*
* @return The cleaned Function
*/
protected <F> F clean(F f) {
return getExecutionEnvironment().clean(f);
} | 3.26 |
flink_DataStream_getId_rdh | /**
* Returns the ID of the {@link DataStream} in the current {@link StreamExecutionEnvironment}.
*
* @return ID of the DataStream
*/
@Internal
public int getId() {
return transformation.getId();} | 3.26 |
flink_DataStream_sinkTo_rdh | /**
* Adds the given {@link Sink} to this DataStream. Only streams with sinks added will be
* executed once the {@link StreamExecutionEnvironment#execute()} method is called.
*
* <p>This method is intended to be used only to recover a snapshot where no uids have been set
* before taking the snapshot.
*
* @param customSinkOperatorUidHashes
* operator hashes to support state binding
* @param sink
* The user defined sink.
* @return The closed DataStream.
*/
@PublicEvolving
public DataStreamSink<T> sinkTo(Sink<T> sink, CustomSinkOperatorUidHashes customSinkOperatorUidHashes) {
// read the output type of the input Transform to coax out errors about MissingTypeInfo
transformation.getOutputType();
return DataStreamSink.forSink(this, sink, customSinkOperatorUidHashes);
} | 3.26 |
flink_HiveParserIntervalDayTime_clone_rdh | /**
* Return a copy of this object.
*/
public Object clone() {
return new HiveParserIntervalDayTime(f0, nanos);
} | 3.26 |
flink_HiveParserIntervalDayTime_getDouble_rdh | /**
*
* @return double representation of the interval day time, accurate to nanoseconds
*/
public double getDouble() {
// floating-point division keeps the fractional (nanosecond) part, as documented above
return f0 + (nanos / 1000000000.0);
} | 3.26 |
flink_HiveParserIntervalDayTime_normalizeSecondsAndNanos_rdh | // Ensures that the seconds and nanoseconds fields have consistent sign
protected void normalizeSecondsAndNanos() {
if ((f0 > 0) && (nanos < 0)) {
--f0;
nanos += HiveParserIntervalUtils.NANOS_PER_SEC;
} else if ((f0 < 0) && (nanos > 0)) {
++f0;
nanos -= HiveParserIntervalUtils.NANOS_PER_SEC;
}
} | 3.26 |
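The normalization above simply forces the seconds and nanoseconds components to share a sign by moving one whole second between them. A tiny standalone sketch of the same arithmetic; the `NANOS_PER_SEC` constant is written out explicitly here rather than taken from the snippet.

```java
public class IntervalNormalizeDemo {
    private static final long NANOS_PER_SEC = 1_000_000_000L;

    // Returns {seconds, nanos} with both components carrying the same sign.
    static long[] normalize(long seconds, long nanos) {
        if (seconds > 0 && nanos < 0) {
            --seconds;
            nanos += NANOS_PER_SEC;
        } else if (seconds < 0 && nanos > 0) {
            ++seconds;
            nanos -= NANOS_PER_SEC;
        }
        return new long[] {seconds, nanos};
    }

    public static void main(String[] args) {
        long[] r = normalize(5, -400_000_000); // 5 s minus 0.4 s
        System.out.println(r[0] + " s, " + r[1] + " ns"); // 4 s, 600000000 ns
    }
}
```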
flink_MemoryLogger_getDirectMemoryStatsAsString_rdh | /**
* Returns a String with the <strong>direct</strong> memory footprint.
*
* <p>These stats are not part of the other memory beans.
*
* @param bufferPoolMxBean
* The direct buffer pool bean or <code>null</code> if none available.
* @return A string with the count, total capacity, and used direct memory.
*/
public static String getDirectMemoryStatsAsString(BufferPoolMXBean bufferPoolMxBean) {
if (bufferPoolMxBean == null) {
return "Direct memory stats: unavailable";
} else {
return String.format("Direct memory stats: Count: %d, Total Capacity: %d, Used Memory: %d", bufferPoolMxBean.getCount(), bufferPoolMxBean.getTotalCapacity(), bufferPoolMxBean.getMemoryUsed());
}
} | 3.26 |
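A hedged sketch of where such a bean could come from: the direct buffer pool is exposed through the standard `java.lang.management` platform beans. The pool name `"direct"` is the conventional HotSpot name, and the formatting below mirrors the helper above rather than calling anything Flink-specific.

```java
import java.lang.management.BufferPoolMXBean;
import java.lang.management.ManagementFactory;
import java.util.List;

public class DirectMemoryStatsDemo {
    public static void main(String[] args) {
        List<BufferPoolMXBean> pools = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class);
        // The "direct" pool tracks ByteBuffer.allocateDirect allocations; "mapped" tracks memory-mapped files.
        BufferPoolMXBean direct = pools.stream()
                .filter(p -> "direct".equals(p.getName()))
                .findFirst()
                .orElse(null);
        if (direct == null) {
            System.out.println("Direct memory stats: unavailable");
        } else {
            System.out.printf(
                    "Direct memory stats: Count: %d, Total Capacity: %d, Used Memory: %d%n",
                    direct.getCount(), direct.getTotalCapacity(), direct.getMemoryUsed());
        }
    }
}
```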
flink_MemoryLogger_getMemoryPoolStatsAsString_rdh | /**
* Gets the memory pool statistics from the JVM.
*
* @param poolBeans
* The collection of memory pool beans.
* @return A string denoting the names and sizes of the memory pools.
*/
public static String getMemoryPoolStatsAsString(List<MemoryPoolMXBean> poolBeans) {
StringBuilder bld = new StringBuilder("Off-heap pool stats: ");
int count = 0;
for (MemoryPoolMXBean bean :
poolBeans) {
if (bean.getType() == MemoryType.NON_HEAP) {
if (count > 0) {
bld.append(", ");
}
count++;
MemoryUsage usage = bean.getUsage();
long used = usage.getUsed() >> 20;
long committed = usage.getCommitted() >> 20;
long max = usage.getMax() >> 20;
bld.append('[').append(bean.getName()).append(": ");
bld.append(used).append('/').append(committed).append('/').append(max);
bld.append(" MB (used/committed/max)]");
}
}
return bld.toString();
} | 3.26 |
flink_MemoryLogger_getGarbageCollectorStatsAsString_rdh | /**
* Gets the garbage collection statistics from the JVM.
*
* @param gcMXBeans
* The collection of garbage collector beans.
* @return A string denoting the number of times and total elapsed time in garbage collection.
*/
public static String getGarbageCollectorStatsAsString(List<GarbageCollectorMXBean> gcMXBeans) {
StringBuilder bld = new StringBuilder("Garbage collector stats: ");
for (GarbageCollectorMXBean bean : gcMXBeans) {
bld.append('[').append(bean.getName()).append(", GC TIME (ms): ").append(bean.getCollectionTime());
bld.append(", GC COUNT: ").append(bean.getCollectionCount()).append(']');
bld.append(", ");
}
if (!gcMXBeans.isEmpty()) {
bld.setLength(bld.length() - 2);
}
return bld.toString();
} | 3.26 |
flink_MemoryLogger_getMemoryUsageStatsAsString_rdh | /**
* Gets the memory footprint of the JVM in a string representation.
*
* @return A string describing how much heap memory and direct memory are allocated and used.
*/
public static String getMemoryUsageStatsAsString(MemoryMXBean memoryMXBean) {
MemoryUsage heap = memoryMXBean.getHeapMemoryUsage();
MemoryUsage nonHeap = memoryMXBean.getNonHeapMemoryUsage();
long heapUsed = heap.getUsed() >> 20;
long heapCommitted = heap.getCommitted() >> 20;
long heapMax = heap.getMax() >> 20;
long v7 = nonHeap.getUsed() >> 20;
long nonHeapCommitted = nonHeap.getCommitted() >> 20;
long nonHeapMax = nonHeap.getMax() >> 20;
return String.format("Memory usage stats: [HEAP: %d/%d/%d MB, " + "NON HEAP: %d/%d/%d MB (used/committed/max)]", heapUsed, heapCommitted, heapMax, v7, nonHeapCommitted, nonHeapMax);
} | 3.26 |
flink_MemoryLogger_run_rdh | // ------------------------------------------------------------------------
@Override
public void run() {
try {
while (running && ((monitored == null) || (!monitored.isDone()))) {
logger.info(getMemoryUsageStatsAsString(memoryBean));
logger.info(getDirectMemoryStatsAsString(directBufferBean));
logger.info(getMemoryPoolStatsAsString(poolBeans));
logger.info(getGarbageCollectorStatsAsString(gcBeans));
try {
Thread.sleep(interval);
} catch (InterruptedException e) {
if (running) {
throw e;
}
}
} } catch (Throwable t) {
logger.error("Memory logger terminated with exception", t);
}
} | 3.26 |
flink_OpFusionCodegenSpecGenerator_setup_rdh | /**
 * Initializes the information needed by the operator spec generator. This method must be called
 * before the produce- and consume-related methods.
*/
public void setup(Context context) {
this.managedMemoryFraction = context.getManagedMemoryFraction();
this.opFusionCodegenSpec.setup(opFusionContext);
} | 3.26 |
flink_DistributedRandomSampler_sample_rdh | /**
* Combine the first phase and second phase in sequence, implemented for test purpose only.
*
* @param input
* Source data.
* @return Sample result in sequence.
*/
@Override
public Iterator<T> sample(Iterator<T> input) {
return sampleInCoordinator(sampleInPartition(input));
} | 3.26 |
flink_DistributedRandomSampler_sampleInCoordinator_rdh | /**
* Sample algorithm for the second phase. This operation should be executed as the UDF of an all
* reduce operation.
*
* @param input
* The intermediate sample output generated in the first phase.
* @return The sampled output.
*/
public Iterator<T> sampleInCoordinator(Iterator<IntermediateSampleData<T>> input) {
if (f0 == 0) {
return emptyIterable;
}
// This queue holds fixed number elements with the top K weight for the coordinator.
PriorityQueue<IntermediateSampleData<T>> reservoir = new PriorityQueue<IntermediateSampleData<T>>(f0);
int index = 0;
IntermediateSampleData<T> smallest = null;
while (input.hasNext()) {
IntermediateSampleData<T> element =
input.next();
if (index < f0) {
// Fill the queue with first K elements from input.
reservoir.add(element);
smallest = reservoir.peek();
} else if (element.getWeight() > smallest.getWeight()) {
// If current element weight is larger than the smallest one in queue, remove the element
// with the smallest weight, and append current element into the queue.
reservoir.remove();
reservoir.add(element);
smallest = reservoir.peek();
}
index++;
}
final Iterator<IntermediateSampleData<T>> itr =
reservoir.iterator();
return new Iterator<T>() {
@Override
public boolean hasNext() {
return itr.hasNext();
}
@Override
public T m0() {
return itr.next().getElement();
}
@Override
public void remove() {
itr.remove();
}
};
} | 3.26 |
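The coordinator phase above is essentially a "keep the K largest weights" merge over a min-oriented `PriorityQueue`. A standalone sketch of that core idea with plain doubles; the value of K and the weights are made up.

```java
import java.util.PriorityQueue;

public class TopKWeightsDemo {
    public static void main(String[] args) {
        int k = 3;
        double[] weights = {0.42, 0.91, 0.10, 0.77, 0.65, 0.03};
        // Min-heap of size k: the head is always the smallest retained weight.
        PriorityQueue<Double> reservoir = new PriorityQueue<>(k);
        for (double w : weights) {
            if (reservoir.size() < k) {
                reservoir.add(w);          // fill the heap with the first k weights
            } else if (w > reservoir.peek()) {
                reservoir.poll();          // evict the current smallest
                reservoir.add(w);          // keep the larger weight instead
            }
        }
        System.out.println(reservoir);     // the three largest weights, in heap order
    }
}
```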
flink_AbstractPagedOutputView_seekOutput_rdh | /**
* Sets the internal state to the given memory segment and the given position within the
* segment.
*
* @param seg
* The memory segment to write the next bytes to.
* @param position
* The position to start writing the next bytes to.
*/
protected void seekOutput(MemorySegment seg, int position) {
this.currentSegment = seg;
this.positionInSegment = position;
} | 3.26 |
flink_AbstractPagedOutputView_m0_rdh | // --------------------------------------------------------------------------------------------
// Data Output Specific methods
// --------------------------------------------------------------------------------------------
@Override
public void m0(int b) throws IOException {
writeByte(b); } | 3.26 |
flink_AbstractPagedOutputView_clear_rdh | /**
* Clears the internal state. Any successive write calls will fail until either {@link #advance()} or {@link #seekOutput(MemorySegment, int)} is called.
*
* @see #advance()
* @see #seekOutput(MemorySegment, int)
*/
protected void clear() {
this.currentSegment = null;
this.positionInSegment = this.headerLength;} | 3.26 |
flink_AbstractPagedOutputView_advance_rdh | /**
* Moves the output view to the next page. This method invokes internally the {@link #nextSegment(MemorySegment, int)} method to give the current memory segment to the concrete
* subclass' implementation and obtain the next segment to write to. Writing will continue
* inside the new segment after the header.
*
* @throws IOException
* Thrown, if the current segment could not be processed or a new segment
* could not be obtained.
*/
public void advance() throws IOException {
this.currentSegment = nextSegment(this.currentSegment, this.positionInSegment);
this.positionInSegment = this.headerLength;
} | 3.26 |
flink_AbstractPagedOutputView_getSegmentSize_rdh | /**
* Gets the size of the segments used by this view.
*
* @return The memory segment size.
*/
public int getSegmentSize() {
return this.segmentSize;
} | 3.26 |
flink_AbstractPagedOutputView_getCurrentPositionInSegment_rdh | /**
* Gets the current write position (the position where the next bytes will be written) in the
* current memory segment.
*
* @return The current write offset in the current memory segment.
*/
public int getCurrentPositionInSegment() {
return this.positionInSegment;
} | 3.26 |
flink_AbstractPagedOutputView_getHeaderLength_rdh | /**
*
* @return header length.
*/
public int getHeaderLength() {
return headerLength;
} | 3.26 |
flink_CompositeTypeSerializerSnapshot_internalWriteOuterSnapshot_rdh | // ------------------------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------------------------
private void internalWriteOuterSnapshot(DataOutputView out) throws IOException {
out.writeInt(MAGIC_NUMBER);
out.writeInt(getCurrentOuterSnapshotVersion());
writeOuterSnapshot(out);
} | 3.26 |
flink_CompositeTypeSerializerSnapshot_writeOuterSnapshot_rdh | // ------------------------------------------------------------------------------------------
// Outer snapshot methods; need to be overridden if outer snapshot is not empty,
// or in other words, the outer serializer has extra configuration beyond its nested
// serializers.
// ------------------------------------------------------------------------------------------
/**
* Writes the outer snapshot, i.e. any information beyond the nested serializers of the outer
* serializer.
*
* <p>The base implementation of this methods writes nothing, i.e. it assumes that the outer
* serializer only has nested serializers and no extra information. Otherwise, if the outer
* serializer contains some extra information that needs to be persisted as part of the
* serializer snapshot, this must be overridden. Note that this method and the corresponding
* methods {@link #readOuterSnapshot(int, DataInputView, ClassLoader)}, {@link #resolveOuterSchemaCompatibility(TypeSerializer)} needs to be implemented.
*
* @param out
* the {@link DataOutputView} to write the outer snapshot to.
*/
protected void writeOuterSnapshot(DataOutputView out) throws IOException {
} | 3.26 |
flink_CompositeTypeSerializerSnapshot_readOuterSnapshot_rdh | /**
* Reads the outer snapshot, i.e. any information beyond the nested serializers of the outer
* serializer.
*
* <p>The base implementation of this methods reads nothing, i.e. it assumes that the outer
* serializer only has nested serializers and no extra information. Otherwise, if the outer
* serializer contains some extra information that has been persisted as part of the serializer
* snapshot, this must be overridden. Note that this method and the corresponding methods {@link #writeOuterSnapshot(DataOutputView)}, {@link #resolveOuterSchemaCompatibility(TypeSerializer)} needs to be implemented.
*
* @param readOuterSnapshotVersion
* the read version of the outer snapshot.
* @param in
* the {@link DataInputView} to read the outer snapshot from.
* @param userCodeClassLoader
* the user code class loader.
*/
protected void readOuterSnapshot(int readOuterSnapshotVersion, DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
}
/**
* Checks whether the outer snapshot is compatible with a given new serializer.
*
* <p>The base implementation of this method just returns {@code true}, i.e. it assumes that the
* outer serializer only has nested serializers and no extra information, and therefore the
* result of the check must always be true. Otherwise, if the outer serializer contains some
* extra information that has been persisted as part of the serializer snapshot, this must be
 * overridden. Note that this method and the corresponding methods {@link #writeOuterSnapshot(DataOutputView)} and {@link #readOuterSnapshot(int, DataInputView,
 * ClassLoader)} need to be implemented together.
*
* @param newSerializer
* the new serializer, which contains the new outer information to check
* against.
* @return a flag indicating whether or not the new serializer's outer information is compatible
with the one written in this snapshot.
* @deprecated this method is deprecated, and will be removed in the future. Please implement
{@link #resolveOuterSchemaCompatibility(TypeSerializer)} | 3.26 |
flink_CommonExecSink_getFieldInfoForLengthEnforcer_rdh | /**
* Returns a List of {@link ConstraintEnforcer.FieldInfo}, each containing the info needed to
* determine whether a string or binary value needs trimming and/or padding.
*/
private List<ConstraintEnforcer.FieldInfo> getFieldInfoForLengthEnforcer(RowType physicalType, LengthEnforcerType enforcerType) {
LogicalTypeRoot staticType = null;
LogicalTypeRoot variableType = null;
int maxLength = 0;
switch (enforcerType) {
case CHAR :
staticType = LogicalTypeRoot.CHAR;
variableType = LogicalTypeRoot.VARCHAR;
maxLength = CharType.MAX_LENGTH;
break;
case BINARY :
staticType = LogicalTypeRoot.BINARY;
variableType = LogicalTypeRoot.VARBINARY;
maxLength = BinaryType.MAX_LENGTH;
}
final List<ConstraintEnforcer.FieldInfo> fieldsAndLengths = new ArrayList<>();
for (int i = 0; i < physicalType.getFieldCount(); i++) {
LogicalType type = physicalType.getTypeAt(i);
boolean isStatic = type.is(staticType);
// Should trim and possibly pad
if ((isStatic && (LogicalTypeChecks.getLength(type) < maxLength)) || (type.is(variableType) && (LogicalTypeChecks.getLength(type) < maxLength))) {
fieldsAndLengths.add(new ConstraintEnforcer.FieldInfo(i, LogicalTypeChecks.getLength(type), isStatic));
} else if (isStatic) {
// Should pad
fieldsAndLengths.add(new ConstraintEnforcer.FieldInfo(i, null, isStatic));
}
}
return fieldsAndLengths;
} | 3.26 |
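As a usage-level illustration (a hedged sketch, not part of this class): the field info computed above only has an effect when the length enforcer is switched on via configuration. Table names and connectors below are illustrative, and imports are omitted.

TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
// trim values that exceed the declared length and pad CHAR/BINARY values that are too short
tEnv.getConfig().set("table.exec.sink.type-length-enforcer", "TRIM_PAD");
tEnv.executeSql("CREATE TABLE src (s STRING) WITH ('connector' = 'datagen')");
tEnv.executeSql("CREATE TABLE sink_t (s VARCHAR(5)) WITH ('connector' = 'blackhole')");
// without the enforcer, strings longer than 5 characters would be written unchanged
tEnv.executeSql("INSERT INTO sink_t SELECT s FROM src");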
flink_CommonExecSink_applyConstraintValidations_rdh | /**
 * Applies an operator that enforces NOT NULL constraints and CHAR/BINARY length constraints on
 * the rows written to the sink, either adjusting values, dropping rows, or raising errors,
 * depending on the configured enforcers.
 */
private Transformation<RowData> applyConstraintValidations(Transformation<RowData> inputTransform, ExecNodeConfig config, RowType physicalRowType) {
final ConstraintEnforcer.Builder validatorBuilder = ConstraintEnforcer.newBuilder();
final String[] fieldNames = physicalRowType.getFieldNames().toArray(new String[0]);
// Build NOT NULL enforcer
final int[] notNullFieldIndices = getNotNullFieldIndices(physicalRowType);
if (notNullFieldIndices.length > 0) {
final ExecutionConfigOptions.NotNullEnforcer notNullEnforcer = config.get(ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER);
final List<String> notNullFieldNames = Arrays.stream(notNullFieldIndices).mapToObj(idx -> fieldNames[idx]).collect(Collectors.toList());
validatorBuilder.addNotNullConstraint(notNullEnforcer, notNullFieldIndices, notNullFieldNames, fieldNames);
}
final ExecutionConfigOptions.TypeLengthEnforcer typeLengthEnforcer = config.get(ExecutionConfigOptions.TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER);
// Build CHAR/VARCHAR length enforcer
final List<ConstraintEnforcer.FieldInfo> charFieldInfo = getFieldInfoForLengthEnforcer(physicalRowType, LengthEnforcerType.CHAR);
if (!charFieldInfo.isEmpty()) {
final List<String> charFieldNames = charFieldInfo.stream().map(cfi -> fieldNames[cfi.fieldIdx()]).collect(Collectors.toList());
validatorBuilder.addCharLengthConstraint(typeLengthEnforcer, charFieldInfo, charFieldNames, fieldNames);
}
// Build BINARY/VARBINARY length enforcer
final List<ConstraintEnforcer.FieldInfo> binaryFieldInfo = getFieldInfoForLengthEnforcer(physicalRowType, LengthEnforcerType.BINARY);
if (!binaryFieldInfo.isEmpty()) {
final List<String> binaryFieldNames = binaryFieldInfo.stream().map(cfi -> fieldNames[cfi.fieldIdx()]).collect(Collectors.toList());
validatorBuilder.addBinaryLengthConstraint(typeLengthEnforcer, binaryFieldInfo, binaryFieldNames, fieldNames);
}
ConstraintEnforcer constraintEnforcer = validatorBuilder.build();
if (constraintEnforcer != null) {
return ExecNodeUtil.createOneInputTransformation(inputTransform, createTransformationMeta(CONSTRAINT_VALIDATOR_TRANSFORMATION, constraintEnforcer.getOperatorName(), "ConstraintEnforcer", config), constraintEnforcer, getInputTypeInfo(),
inputTransform.getParallelism(), false);
} else {
// there are no not-null fields, just skip adding the enforcer operator
return inputTransform;
}
} | 3.26 |
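A hedged configuration sketch for the NOT NULL part of this enforcer, given a TableEnvironment tEnv (imports omitted):

// drop rows that contain null values in NOT NULL columns instead of failing the job
// (the default is ERROR, which raises an exception on the first violation)
tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER, ExecutionConfigOptions.NotNullEnforcer.DROP);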
flink_CommonExecSink_deriveSinkParallelism_rdh | /**
* Returns the parallelism of sink operator, it assumes the sink runtime provider implements
* {@link ParallelismProvider}. It returns parallelism defined in {@link ParallelismProvider} if
* the parallelism is provided, otherwise it uses parallelism of input transformation.
*/
private int deriveSinkParallelism(Transformation<RowData> inputTransform, SinkRuntimeProvider runtimeProvider) {
final int inputParallelism = inputTransform.getParallelism();
if (isParallelismConfigured(runtimeProvider)) {
int sinkParallelism = ((ParallelismProvider) runtimeProvider).getParallelism().get();
if (sinkParallelism <= 0) {
throw new TableException(String.format("Invalid configured parallelism %s for table '%s'.", sinkParallelism, tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString()));
}
return sinkParallelism;
} else {
return inputParallelism;
}
} | 3.26 |
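A hedged sketch of how a connector ends up on this code path: inside a DynamicTableSink implementation, its runtime provider implements ParallelismProvider and declares a parallelism, here via SinkV2Provider. MySink is a placeholder for a real Sink<RowData> implementation.

@Override
public SinkRuntimeProvider getSinkRuntimeProvider(DynamicTableSink.Context context) {
    // deriveSinkParallelism(...) will use the declared value 4 instead of the input parallelism
    return SinkV2Provider.of(new MySink(), 4);
}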
flink_CommonExecSink_applyKeyBy_rdh | /**
* Apply a primary key partition transformation to guarantee the strict ordering of changelog
* messages.
*/
private Transformation<RowData> applyKeyBy(ExecNodeConfig config, ClassLoader classLoader, Transformation<RowData> inputTransform, int[] primaryKeys, int sinkParallelism, int inputParallelism, boolean needMaterialize) {
final ExecutionConfigOptions.SinkKeyedShuffle sinkShuffleByPk = config.get(ExecutionConfigOptions.TABLE_EXEC_SINK_KEYED_SHUFFLE);
boolean sinkKeyBy = false;
switch (sinkShuffleByPk) {
case NONE :
break;
case AUTO :
// should cover both insert-only and changelog input
sinkKeyBy = (sinkParallelism != inputParallelism) && (sinkParallelism != 1);
break;
case FORCE :
// a sink with parallelism 1 needs no keyed shuffle (a non-keyed partitioner cannot
// introduce additional disorder when there is only a single sink task)
sinkKeyBy = sinkParallelism != 1;
break;
}
if ((!sinkKeyBy) && (!needMaterialize)) {
return inputTransform;
}
final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(classLoader, primaryKeys, getInputTypeInfo());
final KeyGroupStreamPartitioner<RowData, RowData> partitioner = new KeyGroupStreamPartitioner<>(selector, KeyGroupRangeAssignment.DEFAULT_LOWER_BOUND_MAX_PARALLELISM);
Transformation<RowData> partitionedTransform = new PartitionTransformation<>(inputTransform, partitioner);
createTransformationMeta(PARTITIONER_TRANSFORMATION, "Partitioner", "Partitioner", config).fill(partitionedTransform);
partitionedTransform.setParallelism(sinkParallelism, sinkParallelismConfigured);
return partitionedTransform;
} | 3.26 |
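The shuffle behavior above is driven by configuration; a hedged sketch, again given a TableEnvironment tEnv:

// always hash-partition by primary key in front of the sink (unless the sink parallelism is 1),
// so that all changelog entries of a key are processed by the same sink subtask
tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_SINK_KEYED_SHUFFLE, ExecutionConfigOptions.SinkKeyedShuffle.FORCE);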
flink_CommonExecSink_getTargetRowKind_rdh | /**
* Get the target row-kind that the row data should change to, assuming the current row kind is
* RowKind.INSERT. Return Optional.empty() if it doesn't need to change. Currently, it'll only
* consider row-level delete/update.
*/
private Optional<RowKind> getTargetRowKind() {
if (tableSinkSpec.getSinkAbilities() != null) {
for (SinkAbilitySpec sinkAbilitySpec : tableSinkSpec.getSinkAbilities()) {
if (sinkAbilitySpec instanceof RowLevelDeleteSpec) {
RowLevelDeleteSpec deleteSpec = ((RowLevelDeleteSpec) (sinkAbilitySpec));
if (deleteSpec.getRowLevelDeleteMode() == RowLevelDeleteMode.DELETED_ROWS) {
return Optional.of(RowKind.DELETE);
}
} else if (sinkAbilitySpec instanceof RowLevelUpdateSpec) {
RowLevelUpdateSpec updateSpec = ((RowLevelUpdateSpec) (sinkAbilitySpec));
if (updateSpec.getRowLevelUpdateMode() == RowLevelUpdateMode.UPDATED_ROWS) {
return Optional.of(RowKind.UPDATE_AFTER);
}
}
}
}
return Optional.empty();
} | 3.26 |
flink_TypeInfoLogicalTypeConverter_fromLogicalTypeToTypeInfo_rdh | /**
* Use {@link BigDecimalTypeInfo} to retain precision and scale of decimal.
*/
public static TypeInformation fromLogicalTypeToTypeInfo(LogicalType type) {
DataType dataType = fromLogicalTypeToDataType(type).nullable().bridgedTo(ClassLogicalTypeConverter.getDefaultExternalClassForType(type));
return TypeInfoDataTypeConverter.fromDataTypeToTypeInfo(dataType);
} | 3.26 |
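A hedged usage sketch of the conversion above (imports omitted); the comment states the expected result rather than a guaranteed one.

// DECIMAL(10, 2) keeps its precision and scale by mapping to BigDecimalTypeInfo
TypeInformation<?> typeInfo = TypeInfoLogicalTypeConverter.fromLogicalTypeToTypeInfo(new DecimalType(10, 2));
// typeInfo is expected to be a BigDecimalTypeInfo with precision 10 and scale 2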
flink_TypeInfoLogicalTypeConverter_fromTypeInfoToLogicalType_rdh | /**
 * This conversion is lossy (e.g. a {@link PojoTypeInfo} is converted to a {@link RowType}).
 * Consequently, it and {@link TypeInfoLogicalTypeConverter#fromLogicalTypeToTypeInfo} do not
 * support lossless back-and-forth conversion.
*/
public static LogicalType fromTypeInfoToLogicalType(TypeInformation typeInfo) {
DataType dataType = TypeConversions.fromLegacyInfoToDataType(typeInfo);
return LogicalTypeDataTypeConverter.fromDataTypeToLogicalType(dataType);
} | 3.26 |
flink_ReusingBuildSecondReOpenableHashJoinIterator_reopenProbe_rdh | /**
 * Sets a new input for the probe side.
*
* @throws IOException
*/
public void reopenProbe(MutableObjectIterator<V1> probeInput) throws IOException {
reopenHashTable.reopenProbe(probeInput);
} | 3.26 |
flink_AbstractRowTimeUnboundedPrecedingOver_processElement_rdh | /**
* Puts an element from the input stream into state if it is not late. Registers a timer for the
* next watermark.
*
* @param input
* The input value.
* @param ctx
* A {@link Context} that allows querying the timestamp of the element and getting
* TimerService for registering timers and querying the time. The context is only valid
* during the invocation of this method, do not store it.
* @param out
* The collector for returning result values.
* @throws Exception
*/
@Override
public void processElement(RowData input, KeyedProcessFunction<K, RowData, RowData>.Context ctx, Collector<RowData> out) throws Exception {
// register state-cleanup timer
registerProcessingCleanupTimer(ctx, ctx.timerService().currentProcessingTime());
long timestamp = input.getLong(rowTimeIdx);
long curWatermark = ctx.timerService().currentWatermark();
if (timestamp > curWatermark) {
// ensure every key just registers one timer
// the default watermark is Long.MIN_VALUE; to avoid overflow, use zero when the watermark is negative
long triggerTs = (curWatermark < 0) ? 0 : curWatermark + 1;
ctx.timerService().registerEventTimeTimer(triggerTs);
// put row into state
List<RowData> rowList = inputState.get(timestamp);
if (rowList == null) {
rowList = new ArrayList<RowData>();
}
rowList.add(input);
inputState.put(timestamp, rowList);
} else {
// discard late record
numLateRecordsDropped.inc();
}
} | 3.26 |
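The same buffer-ahead-of-the-watermark pattern can be written as a stand-alone KeyedProcessFunction. The following is a simplified, hedged sketch: imports are omitted, String payloads stand in for RowData, and it assumes event-time timestamps have been assigned upstream.

public class BufferUntilWatermark extends KeyedProcessFunction<String, String, String> {

    private transient MapState<Long, List<String>> buffer;

    @Override
    public void open(Configuration parameters) {
        buffer = getRuntimeContext().getMapState(
                new MapStateDescriptor<>("buffer", Types.LONG, Types.LIST(Types.STRING)));
    }

    @Override
    public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
        long ts = ctx.timestamp();
        long watermark = ctx.timerService().currentWatermark();
        if (ts > watermark) {
            // buffer the record under its timestamp and ask to be called back at the next watermark
            List<String> rows = buffer.get(ts);
            if (rows == null) {
                rows = new ArrayList<>();
            }
            rows.add(value);
            buffer.put(ts, rows);
            ctx.timerService().registerEventTimeTimer(watermark < 0 ? 0 : watermark + 1);
        }
        // otherwise the record is late and is dropped
    }

    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) throws Exception {
        // emit everything that the watermark has passed (emission order across timestamps is not sorted here)
        Iterator<Map.Entry<Long, List<String>>> it = buffer.iterator();
        while (it.hasNext()) {
            Map.Entry<Long, List<String>> entry = it.next();
            if (entry.getKey() <= ctx.timerService().currentWatermark()) {
                entry.getValue().forEach(out::collect);
                it.remove();
            }
        }
    }
}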
flink_AbstractRowTimeUnboundedPrecedingOver_insertToSortedList_rdh | /**
* Inserts timestamps in order into a linked list. If timestamps arrive in order (as in case of
* using the RocksDB state backend) this is just an append with O(1).
*/
private void insertToSortedList(Long recordTimestamp) {
ListIterator<Long> listIterator = sortedTimestamps.listIterator(sortedTimestamps.size());
boolean isContinue = true;
while (listIterator.hasPrevious() && isContinue) {
Long timestamp = listIterator.previous();
if (recordTimestamp >= timestamp) {
listIterator.next();
listIterator.add(recordTimestamp);
isContinue = false;
}
}
if (isContinue) {
sortedTimestamps.addFirst(recordTimestamp);
}
} | 3.26 |
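A self-contained illustration of the insertion logic above using only java.util classes: timestamps that arrive in order are appended in O(1), while an out-of-order timestamp walks backwards to its position.

LinkedList<Long> sortedTimestamps = new LinkedList<>();
for (long ts : new long[] {1L, 3L, 5L, 4L}) {
    ListIterator<Long> it = sortedTimestamps.listIterator(sortedTimestamps.size());
    boolean inserted = false;
    while (it.hasPrevious() && !inserted) {
        if (ts >= it.previous()) {
            it.next();
            it.add(ts);
            inserted = true;
        }
    }
    if (!inserted) {
        sortedTimestamps.addFirst(ts);
    }
}
// sortedTimestamps is now [1, 3, 4, 5]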
flink_Tumble_over_rdh | /**
* Creates a tumbling window. Tumbling windows are fixed-size, consecutive, non-overlapping
* windows of a specified fixed length. For example, a tumbling window of 5 minutes size groups
* elements in 5 minutes intervals.
*
* @param size
* the size of the window as time or row-count interval.
* @return a partially defined tumbling window
*/
public static TumbleWithSize over(Expression size) {
return new TumbleWithSize(size);
} | 3.26 |
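A hedged Table API usage sketch: it assumes static imports of Expressions.$ and Expressions.lit, an input table named input with an event-time attribute rowtime, and columns user and amount.

Table result = input
        .window(Tumble.over(lit(5).minutes()).on($("rowtime")).as("w"))
        .groupBy($("w"), $("user"))
        .select($("user"), $("w").start(), $("amount").sum());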
flink_StateBackend_useManagedMemory_rdh | /**
* Whether the state backend uses Flink's managed memory.
*/
default boolean useManagedMemory() {
return false;
} | 3.26 |
flink_StateBackend_getName_rdh | /**
 * Returns the name of this backend; by default this is the backend's simple class name. A
 * {@link org.apache.flink.runtime.state.delegate.DelegatingStateBackend} may return the simple
 * class name of the delegated backend instead.
*/
default String getName() {
return this.getClass().getSimpleName();
}
/**
* Creates a new {@link CheckpointableKeyedStateBackend} | 3.26 |
flink_StateBackend_createKeyedStateBackend_rdh | /**
* Creates a new {@link CheckpointableKeyedStateBackend} with the given managed memory fraction.
* Backends that use managed memory are required to implement this interface.
*/
default <K> CheckpointableKeyedStateBackend<K> createKeyedStateBackend(Environment env, JobID jobID, String operatorIdentifier, TypeSerializer<K> keySerializer, int numberOfKeyGroups, KeyGroupRange keyGroupRange, TaskKvStateRegistry kvStateRegistry, TtlTimeProvider ttlTimeProvider, MetricGroup metricGroup, @Nonnull Collection<KeyedStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry, double managedMemoryFraction) throws Exception {
// ignore managed memory fraction by default
return createKeyedStateBackend(env, jobID, operatorIdentifier, keySerializer, numberOfKeyGroups, keyGroupRange, kvStateRegistry, ttlTimeProvider, metricGroup, stateHandles, cancelStreamRegistry);
} | 3.26 |
flink_StateBackend_supportsNoClaimRestoreMode_rdh | /**
* Tells if a state backend supports the {@link RestoreMode#NO_CLAIM} mode.
*
* <p>If a state backend supports {@code NO_CLAIM} mode, it should create an independent
* snapshot when it receives {@link CheckpointType#FULL_CHECKPOINT} in {@link Snapshotable#snapshot(long, long, CheckpointStreamFactory, CheckpointOptions)}.
*
* @return If the state backend supports {@link RestoreMode#NO_CLAIM} mode.
*/
default boolean supportsNoClaimRestoreMode() {
return false;
} | 3.26 |
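A hedged sketch of where these hooks matter in practice: the chosen state backend determines whether managed memory is used and whether NO_CLAIM restore mode is supported. Imports are omitted, and EmbeddedRocksDBStateBackend requires the RocksDB state backend dependency.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// by default, the RocksDB backend reports useManagedMemory() == true
env.setStateBackend(new EmbeddedRocksDBStateBackend());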