name | code_snippet | score |
---|---|---|
flink_GenericArraySerializer_snapshotConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public GenericArraySerializerSnapshot<C> snapshotConfiguration() {
return new GenericArraySerializerSnapshot<>(this);
} | 3.26 |
flink_GenericArraySerializer_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return (31 * componentClass.hashCode()) + componentSerializer.hashCode();
} | 3.26 |
flink_ArrayColumnReader_readPrimitiveTypedRow_rdh | // Needs to be consistent with VectorizedPrimitiveColumnReader#readBatchHelper
// TODO Reduce the duplicated code
private Object readPrimitiveTypedRow(LogicalType type) {
switch (type.getTypeRoot()) {
case CHAR :
        case VARCHAR :
case BINARY :
case VARBINARY :
return dataColumn.readBytes();
case BOOLEAN :
return dataColumn.readBoolean();
case TIME_WITHOUT_TIME_ZONE :
        case DATE :
case INTEGER :
return dataColumn.readInteger();
case TINYINT :
return dataColumn.readTinyInt();
case SMALLINT :
return dataColumn.readSmallInt();
case BIGINT :
return dataColumn.readLong();
case FLOAT :
return dataColumn.readFloat();
case DOUBLE :
return dataColumn.readDouble();
        case DECIMAL :
switch (descriptor.getPrimitiveType().getPrimitiveTypeName()) {
case INT32 :
return dataColumn.readInteger();
case INT64 :
return dataColumn.readLong();
case BINARY :
case FIXED_LEN_BYTE_ARRAY :
return dataColumn.readBytes();
}
case TIMESTAMP_WITHOUT_TIME_ZONE :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return dataColumn.readTimestamp();
default :
throw new RuntimeException("Unsupported type in the list: " + type);
}
} | 3.26 |
flink_ArrayColumnReader_setChildrenInfo_rdh | /**
* The lengths & offsets will be initialized as default size (1024), it should be set to the
* actual size according to the element number.
*/
private void setChildrenInfo(HeapArrayVector lcv, int itemNum, int elementNum) {
lcv.setSize(itemNum);
long[] lcvLength = new long[elementNum];
long[] lcvOffset = new long[elementNum];
System.arraycopy(lcv.getLengths(), 0, lcvLength, 0, elementNum);
System.arraycopy(lcv.getOffsets(), 0, lcvOffset, 0, elementNum);
lcv.setLengths(lcvLength);
lcv.setOffsets(lcvOffset);
} | 3.26 |
flink_ArrayColumnReader_collectDataFromParquetPage_rdh | /**
* Collects data from a parquet page and returns the final row index where it stopped. The
* returned index can be equal to or less than total.
*
* @param total
* maximum number of rows to collect
* @param lcv
* column vector to do initial setup in data collection time
* @param valueList
* collection of values that will be fed into the vector later
* @param type
* the element type of array
* @return int
*/
private int collectDataFromParquetPage(int total, HeapArrayVector lcv, List<Object> valueList, LogicalType type) {
        int index = 0;
/* Here is a nested loop for collecting all values from a parquet page.
A column of array type can be considered as a list of lists, so the two loops are as below:
1. The outer loop iterates on rows (index is a row index, so points to a row in the batch), e.g.:
[0, 2, 3] <- index: 0
[NULL, 3, 4] <- index: 1
2. The inner loop iterates on values within a row (sets all data from parquet data page
for an element in ListColumnVector), so fetchNextValue returns values one-by-one:
0, 2, 3, NULL, 3, 4
As described below, the repetition level (repetitionLevel != 0)
can be used to decide when we'll start to read values for the next list.
*/
while ((!eof) && (index < total)) {
// add element to ListColumnVector one by one
lcv.getOffsets()[index] = valueList.size();
/* Let's collect all values for a single list.
Repetition level = 0 means that a new list started there in the parquet page,
            in that case, let's exit from the loop and start to collect values for a new list.
*/
do {
/* Definition level = 0 when a NULL value was returned instead of a list
            (this is not the same as a NULL value in a list).
*/
if (definitionLevel == 0) {
lcv.setNullAt(index);
}
valueList.add(isCurrentPageDictionaryEncoded ? dictionaryDecodeValue(type, ((Integer) (lastValue))) : lastValue);
} while (fetchNextValue(type) && (repetitionLevel != 0) );
lcv.getLengths()[index] = valueList.size() - lcv.getOffsets()[index];
index++;
}
        return index;
    } | 3.26 |
flink_ArrayColumnReader_fetchNextValue_rdh | /**
* Reads a single value from parquet page, puts it into lastValue. Returns a boolean indicating
 * if there are more values to read (true).
*
* @param type
* the element type of array
* @return boolean
*/
private boolean fetchNextValue(LogicalType type) {
int left = readPageIfNeed();
if (left > 0) {
// get the values of repetition and definitionLevel
readRepetitionAndDefinitionLevels();
// read the data if it isn't null
if (definitionLevel == maxDefLevel) {
if (isCurrentPageDictionaryEncoded) {
lastValue = dataColumn.readValueDictionaryId();
} else {
lastValue = readPrimitiveTypedRow(type);
}
            } else {
lastValue = null;
}
return true;
} else {
            eof = true;
            return false;
}
} | 3.26 |
flink_RemoteStorageScanner_start_rdh | /**
* Start the executor.
*/
public void start() {
synchronized(scannerExecutor) {
if (!scannerExecutor.isShutdown()) {
scannerExecutor.schedule(this, lastInterval, TimeUnit.MILLISECONDS);
}
}
} | 3.26 |
flink_RemoteStorageScanner_close_rdh | /**
* Close the executor.
*/
public void close() {
synchronized(scannerExecutor) {
scannerExecutor.shutdownNow();
}
try {
if (!scannerExecutor.awaitTermination(5L, TimeUnit.MINUTES)) {
throw new TimeoutException("Timeout to shutdown the flush thread.");
}
} catch (InterruptedException | TimeoutException e) {
ExceptionUtils.rethrow(e);
}
} | 3.26 |
flink_RemoteStorageScanner_watchSegment_rdh | /**
* Watch the segment for a specific subpartition in the {@link RemoteStorageScanner}.
*
* <p>If a segment with a larger or equal id already exists, the current segment won't be
* watched.
*
* <p>If a segment with a smaller segment id is still being watched, the current segment will
* replace it because the smaller segment should have been consumed. This method ensures that
* only one segment file can be watched for each subpartition.
*
* @param partitionId
* is the id of partition.
* @param subpartitionId
* is the id of subpartition.
* @param segmentId
* is the id of segment.
*/
public void watchSegment(TieredStoragePartitionId partitionId, TieredStorageSubpartitionId subpartitionId, int segmentId) {
Tuple2<TieredStoragePartitionId, TieredStorageSubpartitionId> key = Tuple2.of(partitionId, subpartitionId);
scannedMaxSegmentIds.compute(key, (segmentKey, maxSegmentId) -> {
            if ((maxSegmentId == null) || (maxSegmentId < segmentId)) {
requiredSegmentIds.put(segmentKey, segmentId);
}
return maxSegmentId;
});
} | 3.26 |
flink_RemoteStorageScanner_run_rdh | /**
* Iterate the watched segment ids and check related file status.
*/
@Override
public void run() {
try {
Iterator<Map.Entry<Tuple2<TieredStoragePartitionId, TieredStorageSubpartitionId>, Integer>> iterator = requiredSegmentIds.entrySet().iterator();
            boolean scanned = false;
            while (iterator.hasNext()) {
                Map.Entry<Tuple2<TieredStoragePartitionId, TieredStorageSubpartitionId>, Integer> ids = iterator.next();
TieredStoragePartitionId partitionId = ids.getKey().f0;
TieredStorageSubpartitionId subpartitionId = ids.getKey().f1;
int requiredSegmentId = ids.getValue();
int maxSegmentId = scannedMaxSegmentIds.getOrDefault(ids.getKey(), -1);
if ((maxSegmentId >= requiredSegmentId) && checkSegmentExist(partitionId, subpartitionId, requiredSegmentId)) {
scanned = true;
iterator.remove();
checkNotNull(notifier).notifyAvailable(partitionId, subpartitionId);
} else {
// The segment should be watched again because it's not found.
// If the segment belongs to other tiers and has been consumed, the segment will
// be replaced by newly watched segment with larger segment id. This logic is
// ensured by the method {@code watchSegment}.
scanMaxSegmentId(partitionId, subpartitionId);
}
}
lastInterval = (scanned) ? INITIAL_SCAN_INTERVAL_MS : scanStrategy.getInterval(lastInterval);
start();
        } catch (Throwable throwable) {
            // handle unexpected exceptions, as the unhandledExceptionHandler does not
            // work for ScheduledExecutorService.
            FatalExitExceptionHandler.INSTANCE.uncaughtException(Thread.currentThread(), throwable);
}
} | 3.26 |
flink_RemoteStorageScanner_scanMaxSegmentId_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
/**
* Scan the max segment id of segment files for the specific partition and subpartition. The max
* segment id can be obtained from a file named by max segment id.
*
* @param partitionId
* the partition id.
* @param subpartitionId
* the subpartition id.
*/
    private void scanMaxSegmentId(TieredStoragePartitionId partitionId, TieredStorageSubpartitionId subpartitionId) {
Path segmentFinishDir = getSegmentFinishDirPath(baseRemoteStoragePath, partitionId, subpartitionId.getSubpartitionId());
FileStatus[] fileStatuses = new FileStatus[0];
try {
if (!remoteFileSystem.exists(segmentFinishDir)) {
return;
}
fileStatuses = remoteFileSystem.listStatus(segmentFinishDir);
currentRetryTime = 0;
} catch (Throwable t) {
if (t instanceof FileNotFoundException) {
return;
}
currentRetryTime++;
tryThrowException(t, "Failed to list the segment finish file.");
}
if (fileStatuses.length != 1) {
return;
}
scannedMaxSegmentIds.put(Tuple2.of(partitionId, subpartitionId), Integer.parseInt(fileStatuses[0].getPath().getName()));
} | 3.26 |
flink_TieredStorageProducerClient_canBeCompressed_rdh | /**
 * Whether the buffer can be compressed or not. Note that an event is not compressed because it is
 * usually small and its size can become even larger after compression.
*/
private boolean canBeCompressed(Buffer buffer) {
return ((bufferCompressor != null) && buffer.isBuffer()) && (buffer.readableBytes() > 0);
} | 3.26 |
flink_TieredStorageProducerClient_write_rdh | /**
* Write records to the producer client. The {@link BufferAccumulator} will accumulate the
* records into buffers.
*
* <p>Note that isBroadcast indicates whether the record is broadcast, while isBroadcastOnly
* indicates whether the result partition is broadcast-only. When the result partition is not
* broadcast-only and the record is a broadcast record, the record will be written to all the
* subpartitions.
*
* @param record
* the written record data
* @param subpartitionId
* the subpartition identifier
* @param dataType
* the data type of the record
* @param isBroadcast
* whether the record is a broadcast record
*/
public void write(ByteBuffer record, TieredStorageSubpartitionId subpartitionId, Buffer.DataType dataType, boolean isBroadcast) throws IOException {
if (isBroadcast && (!isBroadcastOnly)) {
for (int i = 0; i < numSubpartitions; ++i) {
// As the tiered storage subpartition ID is created only for broadcast records,
// which are fewer than normal records, the performance impact of generating new
// TieredStorageSubpartitionId objects is expected to be manageable. If the
// performance is significantly affected, this logic will be optimized accordingly.
bufferAccumulator.receive(record.duplicate(), new TieredStorageSubpartitionId(i), dataType, isBroadcast);
            }
        } else {
bufferAccumulator.receive(record, subpartitionId, dataType, isBroadcast);
}
} | 3.26 |
flink_TieredStorageProducerClient_writeAccumulatedBuffers_rdh | /**
* Write the accumulated buffers of this subpartitionId to the appropriate tiers.
*
* @param subpartitionId
* the subpartition identifier
* @param accumulatedBuffers
* the accumulated buffers of this subpartition
*/
private void writeAccumulatedBuffers(TieredStorageSubpartitionId subpartitionId, List<Buffer> accumulatedBuffers) {
Iterator<Buffer> bufferIterator = accumulatedBuffers.iterator();
int numWriteBytes = 0;
int numWriteBuffers = 0;
while (bufferIterator.hasNext()) {
Buffer buffer = bufferIterator.next();
numWriteBuffers++;
numWriteBytes += buffer.readableBytes();
try {
writeAccumulatedBuffer(subpartitionId, buffer);
} catch (IOException ioe) {
buffer.recycleBuffer();
while (bufferIterator.hasNext()) {
bufferIterator.next().recycleBuffer();
}
ExceptionUtils.rethrow(ioe);
}
}
updateMetricStatistics(numWriteBuffers, numWriteBytes);
} | 3.26 |
flink_TieredStorageProducerClient_writeAccumulatedBuffer_rdh | /**
* Write the accumulated buffer of this subpartitionId to an appropriate tier. After the tier is
* decided, the buffer will be written to the selected tier.
*
* <p>Note that the method only throws an exception when choosing a storage tier, so the caller
* should ensure that the buffer is recycled when throwing an exception.
*
* @param subpartitionId
* the subpartition identifier
* @param accumulatedBuffer
* one accumulated buffer of this subpartition
*/
    private void writeAccumulatedBuffer(TieredStorageSubpartitionId subpartitionId, Buffer accumulatedBuffer) throws IOException {
Buffer compressedBuffer = compressBufferIfPossible(accumulatedBuffer);
if (currentSubpartitionTierAgent[subpartitionId.getSubpartitionId()] == null) {
chooseStorageTierToStartSegment(subpartitionId);
}
if (!currentSubpartitionTierAgent[subpartitionId.getSubpartitionId()].tryWrite(subpartitionId, compressedBuffer, bufferAccumulator)) {
chooseStorageTierToStartSegment(subpartitionId);
            checkState(currentSubpartitionTierAgent[subpartitionId.getSubpartitionId()].tryWrite(subpartitionId, compressedBuffer, bufferAccumulator), "Failed to write the first buffer to the new segment");
}
} | 3.26 |
flink_WatermarkSpec_getRowtimeAttribute_rdh | /**
* Returns the name of a rowtime attribute.
*
* <p>The referenced attribute must be present in the {@link ResolvedSchema} and must be of
* {@link TimestampType}.
 */
public String getRowtimeAttribute() {
return rowtimeAttribute;
} | 3.26 |
flink_WatermarkSpec_getWatermarkExpression_rdh | /**
* Returns the {@link ResolvedExpression} for watermark generation.
*/
public ResolvedExpression getWatermarkExpression() {
return watermarkExpression;
} | 3.26 |
flink_SemanticPropUtil_areFieldsCompatible_rdh | // //////////////////// UTIL METHODS ///////////////////////////////
private static boolean areFieldsCompatible(String sourceField, TypeInformation<?> inType, String targetField, TypeInformation<?> outType, boolean throwException) {
try {
// get source type information
            TypeInformation<?> sourceType = getExpressionTypeInformation(sourceField, inType);
// get target type information
TypeInformation<?> targetType = getExpressionTypeInformation(targetField, outType);
return sourceType.equals(targetType);
} catch (InvalidFieldReferenceException e) {
if (throwException) {
throw e;
            } else {
                return false;
}
}
} | 3.26 |
flink_SemanticPropUtil_addSourceFieldOffset_rdh | /**
* Creates SemanticProperties by adding an offset to each input field index of the given
* SemanticProperties.
*
* @param props
* The SemanticProperties to which the offset is added.
* @param numInputFields
* The original number of fields of the input.
* @param offset
* The offset that is added to each input field index.
* @return New SemanticProperties with added offset.
*/
public static SingleInputSemanticProperties addSourceFieldOffset(SingleInputSemanticProperties props, int numInputFields, int offset) {
    SingleInputSemanticProperties offsetProps = new SingleInputSemanticProperties();
    if (props.getReadFields(0) != null) {
        FieldSet offsetReadFields = new FieldSet();
        for (int r : props.getReadFields(0)) {
            offsetReadFields = offsetReadFields.addField(r + offset);
        }
        offsetProps.addReadFields(offsetReadFields);
    }
    for (int s = 0; s < numInputFields; s++) {
        FieldSet targetFields = props.getForwardingTargetFields(0, s);
        for (int t : targetFields) {
            offsetProps.addForwardedField(s + offset, t);
        }
    }
    return offsetProps;
} | 3.26 |
flink_MetricListener_getHistogram_rdh | /**
* Get registered {@link Histogram} with identifier relative to the root metric group.
*
* @param identifier
* identifier relative to the root metric group
* @return Optional registered histogram
*/
public Optional<Histogram> getHistogram(String... identifier) {
return getMetric(Histogram.class, identifier);
} | 3.26 |
flink_MetricListener_getGauge_rdh | /**
* Get registered {@link Gauge} with identifier relative to the root metric group.
*
* @param identifier
* identifier relative to the root metric group
* @return Optional registered gauge
 */
@SuppressWarnings("unchecked")
public <T> Optional<Gauge<T>> getGauge(String... identifier) {
if (!metrics.containsKey(getActualIdentifier(identifier))) {
            return Optional.empty();
        } else {
return Optional.of(((Gauge<T>) (metrics.get(getActualIdentifier(identifier)))));
}
} | 3.26 |
flink_MetricListener_getCounter_rdh | /**
* Get registered {@link Counter} with identifier relative to the root metric group.
*
* @param identifier
* identifier relative to the root metric group
* @return Optional registered counter
*/
    public Optional<Counter> getCounter(String... identifier) {
return getMetric(Counter.class, identifier);
} | 3.26 |
flink_MetricListener_getMetric_rdh | /**
* Get registered {@link Metric} with identifier relative to the root metric group.
*
* <p>For example, identifier of metric "myMetric" registered in group "myGroup" under root
* metric group can be reached by identifier ("myGroup", "myMetric")
*
* @param identifier
* identifier relative to the root metric group
* @return Optional registered metric
*/
public <T extends Metric> Optional<T> getMetric(Class<T> metricType, String... identifier) {
if (!metrics.containsKey(getActualIdentifier(identifier))) {
return Optional.empty();
}
return Optional.of(metricType.cast(metrics.get(getActualIdentifier(identifier))));
} | 3.26 |
flink_MetricListener_getMeter_rdh | /**
* Get registered {@link Meter} with identifier relative to the root metric group.
*
* @param identifier
* identifier relative to the root metric group
* @return Optional registered meter
*/
public Optional<Meter> getMeter(String... identifier) {
return getMetric(Meter.class, identifier);
} | 3.26 |
flink_JoinHintsResolver_matchIdentifier_rdh | /**
* Check whether the given hint option matches the table qualified names. For convenience, we
* follow a simple rule: the matching is successful if the option is the suffix of the table
* qualified names.
*/
private boolean matchIdentifier(String option, String tableIdentifier) {
String[] optionNames = option.split("\\.");
int optionNameLength = optionNames.length;
String[] tableNames = tableIdentifier.split("\\.");
int tableNameLength = tableNames.length;
for (int i = 0; i < Math.min(optionNameLength, tableNameLength); i++) {
String currOptionName = optionNames[(optionNameLength - 1) - i];
String currTableName = tableNames[(tableNameLength - 1) - i];
if (!currOptionName.equals(currTableName)) {
                return false;
            }
}
return true;
} | 3.26 |
flink_JoinHintsResolver_resolve_rdh | /**
 * Resolves and validates join hints in the given {@link RelNode} list; a {@link ValidationException} will be raised for invalid hints.
*
* <p>After resolving join hints, the options of the join hints (declared table name or query
 * block name) will be replaced with {@link JoinStrategy#LEFT_INPUT} or {@link JoinStrategy#RIGHT_INPUT}
*
 * <p>If the declared table name or query name in a join hint matches neither the left side nor the
 * right side of this join, the join hint is invalid and a {@link ValidationException} will be thrown.
*/
final List<RelNode> resolve(List<RelNode> roots) {
List<RelNode> resolvedRoots = roots.stream().map(node -> node.accept(this)).collect(Collectors.toList());
validateHints();
return resolvedRoots;
} | 3.26 |
flink_AvgAggFunction_getValueExpression_rdh | /**
 * If all inputs are null, count will be 0 and we will get null after the division.
*/
@Override
public Expression getValueExpression() {
Expression ifTrue = nullOf(getResultType());
Expression ifFalse = cast(div(sum, count), typeLiteral(getResultType()));
return ifThenElse(equalTo(count, literal(0L)), ifTrue, ifFalse);
} | 3.26 |
flink_ChannelStateWriteRequestExecutorImpl_m0_rdh | /**
* Retrieves and removes the head request of the {@link #deque}, waiting if necessary until an
* element becomes available.
*
 * @return The head request; it can be null when the executor is closed.
*/
@Nullable
private ChannelStateWriteRequest m0() throws InterruptedException {
ChannelStateWriteRequest request;
while (!wasClosed) {
request = deque.pollFirst();
if (request == null) {
lock.wait();
            } else {
                return request;
}
}
return null;
} | 3.26 |
flink_Sum0AggFunction_getResultType_rdh | /**
* Built-in Float Sum0 aggregate function.
 */
public static class FloatSum0AggFunction extends Sum0AggFunction {
    @Override
    public DataType getResultType() {
        return DataTypes.FLOAT();
} | 3.26 |
flink_DataStreamScanProvider_produceDataStream_rdh | /**
* Creates a scan Java {@link DataStream} from a {@link StreamExecutionEnvironment}.
*/
@Deprecated
    default DataStream<RowData> produceDataStream(StreamExecutionEnvironment execEnv) {
throw new UnsupportedOperationException("This method is deprecated. " + "Use produceDataStream(ProviderContext, StreamExecutionEnvironment) instead");
} | 3.26 |
flink_KeyedProcessFunction_onTimer_rdh | /**
* Called when a timer set using {@link TimerService} fires.
*
* @param timestamp
* The timestamp of the firing timer.
* @param ctx
* An {@link OnTimerContext} that allows querying the timestamp, the {@link TimeDomain}, and the key of the firing timer and getting a {@link TimerService} for
* registering timers and querying the time. The context is only valid during the invocation
 * of this method; do not store it.
* @param out
* The collector for returning result values.
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
public void onTimer(long timestamp, OnTimerContext ctx, Collector<O> out) throws Exception {
} | 3.26 |
flink_ParquetProtoWriters_forType_rdh | /**
* Creates a {@link ParquetWriterFactory} for the given type. The type should represent a
* Protobuf message.
*
* @param type
* The class of the type to write.
*/
public static <T extends Message> ParquetWriterFactory<T> forType(Class<T> type) {
ParquetBuilder<T> builder = out -> new ParquetProtoWriterBuilder<>(out, type).build();
return new ParquetWriterFactory<>(builder);
} | 3.26 |
flink_SessionWithGap_on_rdh | /**
* Specifies the time attribute on which rows are grouped.
*
 * <p>For streaming tables you can specify grouping by an event-time or processing-time
* attribute.
*
* <p>For batch tables you can specify grouping on a timestamp or long attribute.
*
* @param timeField
* time attribute for streaming and batch tables
 * @return a session window on the given time attribute
*/
public SessionWithGapOnTime on(Expression timeField) {
return new SessionWithGapOnTime(timeField, gap);
} | 3.26 |
flink_UserDefinedFunction_close_rdh | /**
* Tear-down method for user-defined function. It can be used for clean up work. By default,
* this method does nothing.
*/
public void close() throws Exception {
// do nothing
} | 3.26 |
flink_UserDefinedFunction_toString_rdh | /**
* Returns the name of the UDF that is used for plan explanation and logging.
*/
@Override
public String toString() {
return getClass().getSimpleName();
} | 3.26 |
flink_UserDefinedFunction_functionIdentifier_rdh | /**
* Returns a unique, serialized representation for this function.
*/
public final String functionIdentifier() {
final String className = getClass().getName();
if (isClassNameSerializable(this)) {
        return className;
    }
    final String md5 = EncodingUtils.hex(EncodingUtils.md5(EncodingUtils.encodeObjectToString(this)));
return className.concat("$").concat(md5);
} | 3.26 |
flink_UserDefinedFunction_open_rdh | /**
* Setup method for user-defined function. It can be used for initialization work. By default,
* this method does nothing.
*/
public void open(FunctionContext context) throws Exception {
// do nothing
} | 3.26 |
flink_ProcessingTimeSessionWindows_mergeWindows_rdh | /**
* Merge overlapping {@link TimeWindow}s.
*/
@Override
public void mergeWindows(Collection<TimeWindow> windows, MergeCallback<TimeWindow> c) {
TimeWindow.mergeWindows(windows, c);
} | 3.26 |
flink_ProcessingTimeSessionWindows_m0_rdh | /**
* Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
* based on the element timestamp.
*
* @param size
* The session timeout, i.e. the time gap between sessions
* @return The policy.
*/
public static ProcessingTimeSessionWindows m0(Time size) {
return new ProcessingTimeSessionWindows(size.toMilliseconds());
}
/**
* Creates a new {@code SessionWindows} {@link WindowAssigner} | 3.26 |
flink_AbstractBlobCache_createTemporaryFilename_rdh | /**
* Returns a temporary file inside the BLOB server's incoming directory.
*
* @return a temporary file inside the BLOB server's incoming directory
* @throws IOException
* if creating the directory fails
 */
File createTemporaryFilename() throws IOException {
return new File(BlobUtils.getIncomingDirectory(storageDir.deref()), String.format("temp-%08d", tempFileCounter.getAndIncrement()));
} | 3.26 |
flink_AbstractBlobCache_setBlobServerAddress_rdh | /**
* Sets the address of the {@link BlobServer}.
*
* @param blobServerAddress
* address of the {@link BlobServer}.
*/
public void setBlobServerAddress(InetSocketAddress blobServerAddress) {
    serverAddress = checkNotNull(blobServerAddress);
} | 3.26 |
flink_AbstractBlobCache_getPort_rdh | /**
* Returns the port the BLOB server is listening on.
*
* @return BLOB server port or {@code -1} if no server address
*/
public int getPort() {
final InetSocketAddress currentServerAddress = serverAddress;
if (currentServerAddress != null) {
return currentServerAddress.getPort();
        } else {
            return -1;
        }
    } | 3.26 |
flink_AbstractBlobCache_m0_rdh | /**
* Returns local copy of the file for the BLOB with the given key.
*
* <p>The method will first attempt to serve the BLOB from its local cache. If the BLOB is not
* in the cache, the method will try to download it from this cache's BLOB server via a
* distributed BLOB store (if available) or direct end-to-end download.
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey
* The key of the desired BLOB.
* @return file referring to the local storage location of the BLOB.
* @throws IOException
* Thrown if an I/O error occurs while downloading the BLOBs from the BLOB
* server.
*/
protected File m0(@Nullable JobID jobId, BlobKey blobKey) throws IOException {
checkArgument(blobKey != null, "BLOB key cannot be null.");
final File localFile = BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey);
readWriteLock.readLock().lock();
    try {
        if (localFile.exists()) {
return localFile;
}
} finally {
readWriteLock.readLock().unlock();
}
// first try the distributed blob store (if available)
// use a temporary file (thread-safe without locking)
File incomingFile = createTemporaryFilename();
try {
        try {
            if (blobView.get(jobId, blobKey, incomingFile)) {
// now move the temp file to our local cache atomically
readWriteLock.writeLock().lock();
try {
BlobUtils.moveTempFileToStore(incomingFile, jobId, blobKey, localFile, log, null);
} finally {
readWriteLock.writeLock().unlock();
}
return localFile;
}
} catch (Exception e) {
log.info("Failed to copy from blob store. Downloading from BLOB server instead.", e);
        }
        final InetSocketAddress currentServerAddress = serverAddress;
if (currentServerAddress != null) {
// fallback: download from the BlobServer
            BlobClient.downloadFromBlobServer(jobId, blobKey, incomingFile, currentServerAddress, blobClientConfig, numFetchRetries);
            readWriteLock.writeLock().lock();
            try {
BlobUtils.moveTempFileToStore(incomingFile, jobId, blobKey, localFile, log, null);
} finally {
readWriteLock.writeLock().unlock();
}
} else {
throw new IOException("Cannot download from BlobServer, because the server address is unknown.");
        }
        return localFile;
} finally {
// delete incomingFile from a failed download
if ((!incomingFile.delete()) && incomingFile.exists()) {
log.warn("Could not delete the staging file {} for blob key {} and job {}.", incomingFile, blobKey, jobId);
}
}
} | 3.26 |
flink_VarCharType_ofEmptyLiteral_rdh | /**
* The SQL standard defines that character string literals are allowed to be zero-length strings
 * (i.e., to contain no characters) even though it is not permitted to declare a type of length
 * zero. This also has implications on variable-length character strings during type inference
* because any fixed-length character string should be convertible to a variable-length one.
*
* <p>This method enables this special kind of character string.
*
* <p>Zero-length character strings have no serializable string representation.
*/
public static VarCharType ofEmptyLiteral() {
return new VarCharType(EMPTY_LITERAL_LENGTH, false);
} | 3.26 |
flink_AsyncSnapshotCallable_logAsyncSnapshotComplete_rdh | /**
* This method is invoked after completion of the snapshot and can be overridden to output a
 * log message about the duration of the async part.
 */
protected void logAsyncSnapshotComplete(long startTime) {} | 3.26 |
flink_AsyncSnapshotCallable_toAsyncSnapshotFutureTask_rdh | /**
* Creates a future task from this and registers it with the given {@link CloseableRegistry}.
* The task is unregistered again in {@link FutureTask#done()}.
*/
public AsyncSnapshotTask toAsyncSnapshotFutureTask(@Nonnull CloseableRegistry taskRegistry) throws IOException {
return new AsyncSnapshotTask(taskRegistry);
} | 3.26 |
flink_SplitFetcher_shutdown_rdh | /**
* Shutdown the split fetcher.
*/
public void shutdown() {
lock.lock();
try {
if (!closed) {
closed = true;
paused = false;
LOG.info("Shutting down split fetcher {}", id);
wakeUpUnsafe(false);
}
        } finally {
lock.unlock();
}
} | 3.26 |
flink_SplitFetcher_addSplits_rdh | /**
* Add splits to the split fetcher. This operation is asynchronous.
*
* @param splitsToAdd
* the splits to add.
*/
public void addSplits(List<SplitT> splitsToAdd) {
lock.lock();
try {
enqueueTaskUnsafe(new AddSplitsTask<>(splitReader, splitsToAdd, assignedSplits));
wakeUpUnsafe(true);
} finally {
            lock.unlock();
        }
} | 3.26 |
flink_SplitFetcher_isIdle_rdh | /**
* Package private for unit test.
*
* @return true if task queue is empty, false otherwise.
*/
boolean isIdle() {
lock.lock();
try {
return (assignedSplits.isEmpty() && taskQueue.isEmpty()) && (runningTask == null);
} finally {
lock.unlock();
}
} | 3.26 |
flink_SplitFetcher_runOnce_rdh | /**
* Package private method to help unit test.
*/
boolean runOnce() {
// first blocking call = get next task. blocks only if there are no active splits and queued
// tasks.
SplitFetcherTask task;
lock.lock();
try {
            if (closed) {
                return false;
}
task = getNextTaskUnsafe();
            if (task == null) {
// (spurious) wakeup, so just repeat
return true;
}
LOG.debug("Prepare to run {}", task);
// store task for #wakeUp
this.runningTask = task;
} finally {
lock.unlock();
}
// execute the task outside of lock, so that it can be woken up
boolean taskFinished;
        try {
            taskFinished = task.run();
        } catch (Exception e) {
            throw new RuntimeException(String.format("SplitFetcher thread %d received unexpected exception while polling the records", id), e);
}
        // re-acquire lock as all post-processing steps need it
lock.lock();
try {
this.runningTask = null;
processTaskResultUnsafe(task, taskFinished);
} finally {
            lock.unlock();
        }
return true;
} | 3.26 |
flink_SplitFetcher_removeSplits_rdh | /**
 * Notifies the split fetcher that some splits have finished. This operation is asynchronous.
*
* @param splitsToRemove
 * the splits to be removed.
*/
public void removeSplits(List<SplitT> splitsToRemove) {
lock.lock();
try {
enqueueTaskUnsafe(new RemoveSplitsTask<>(splitReader, splitsToRemove, assignedSplits, splitFinishedHook));
wakeUpUnsafe(true);
} finally {
lock.unlock();
}
} | 3.26 |
flink_SplitFetcher_wakeUp_rdh | /**
* Wake up the fetcher thread. There are only two blocking points in a running fetcher. 1.
* Waiting for the next task in an idle fetcher. 2. Running a task.
*
 * <p>They need to be woken up differently. If the fetcher is blocked waiting on the next task
* in the task queue, we should just notify that a task is available. If the fetcher is running
* the user split reader, we should call SplitReader.wakeUp() instead.
*
* <p>The correctness can be thought of in the following way. The purpose of wake up is to let
* the fetcher thread go to the very beginning of the running loop.
 */
void wakeUp(boolean taskOnly) {
// Synchronize to make sure the wake up only works for the current invocation of runOnce().
lock.lock();
try {
wakeUpUnsafe(taskOnly);
} finally {
lock.unlock();
}
} | 3.26 |
flink_SplitFetcher_pauseOrResumeSplits_rdh | /**
* Called when some splits of this source instance progressed too much beyond the global
* watermark of all subtasks. If the split reader implements {@link SplitReader}, it will relay
* the information asynchronously through the split fetcher thread.
*
* @param splitsToPause
* the splits to pause
* @param splitsToResume
* the splits to resume
*/
public void pauseOrResumeSplits(Collection<SplitT> splitsToPause, Collection<SplitT> splitsToResume) {
lock.lock();
try {
enqueueTaskUnsafe(new PauseOrResumeSplitsTask<>(splitReader, splitsToPause, splitsToResume, allowUnalignedSourceSplits));
wakeUpUnsafe(true);
} finally {
lock.unlock();
}
} | 3.26 |
flink_TieredStorageMemoryManagerImpl_recycleBuffer_rdh | /**
* Note that this method may be called by the netty thread.
*/
private void recycleBuffer(Object owner, MemorySegment buffer) {
bufferPool.recycle(buffer);
decNumRequestedBuffer(owner);
} | 3.26 |
flink_JobMetricGroup_putVariables_rdh | // ------------------------------------------------------------------------
// Component Metric Group Specifics
// ------------------------------------------------------------------------
@Override
protected void putVariables(Map<String, String> variables) {
variables.put(ScopeFormat.SCOPE_JOB_ID, jobId.toString());
variables.put(ScopeFormat.SCOPE_JOB_NAME, jobName);
} | 3.26 |
flink_ClusterClientFactory_m0_rdh | /**
* Returns the option to be used when trying to execute an application in Application Mode using
* this cluster client factory, or an {@link Optional#empty()} if the environment of this
* cluster client factory does not support Application Mode.
*/
default Optional<String> m0() {
return Optional.empty();
} | 3.26 |
flink_SourceBuilder_fromFormat_rdh | /**
* Creates a new source that is bounded.
*
* @param env
* The stream execution environment.
* @param inputFormat
* The input source to consume.
* @param typeInfo
* The type of the output.
* @param <OUT>
* The output type.
* @return A source that is bounded.
*/
public static <OUT> DataStreamSource<OUT> fromFormat(StreamExecutionEnvironment env, InputFormat<OUT, ?> inputFormat, TypeInformation<OUT> typeInfo) {
InputFormatSourceFunction<OUT> function = new InputFormatSourceFunction<>(inputFormat, typeInfo);
env.clean(function);
final StreamSource<OUT, ?> sourceOperator = new StreamSource<>(function);
return new DataStreamSource<>(env, typeInfo, sourceOperator, true, SOURCE_NAME, Boundedness.BOUNDED);
} | 3.26 |
flink_DataSink_getPreferredResources_rdh | /**
* Returns the preferred resources of this data sink. If no preferred resources have been set,
* this returns the default resource profile.
*
* @return The preferred resources of this data sink.
*/
@PublicEvolving
public ResourceSpec getPreferredResources() {
    return this.preferredResources;
} | 3.26 |
flink_DataSink_setParallelism_rdh | /**
* Sets the parallelism for this data sink. The degree must be 1 or more.
*
* @param parallelism
* The parallelism for this data sink. A value equal to {@link ExecutionConfig#PARALLELISM_DEFAULT} will use the system default.
* @return This data sink with set parallelism.
*/
public DataSink<T> setParallelism(int parallelism) {
OperatorValidationUtils.validateParallelism(parallelism);
this.parallelism = parallelism;
return this;
} | 3.26 |
flink_DataSink_withParameters_rdh | /**
* Pass a configuration to the OutputFormat.
*
* @param parameters
* Configuration parameters
*/
public DataSink<T> withParameters(Configuration parameters) {
    this.parameters = parameters;
    return this;
}
/**
* Sorts each local partition of a {@link org.apache.flink.api.java.tuple.Tuple} data set on the
* specified field in the specified {@link Order} before it is emitted by the output format.
*
* <p><b>Note: Only tuple data sets can be sorted using integer field indices.</b>
*
* <p>The tuple data set can be sorted on multiple fields in different orders by chaining {@link #sortLocalOutput(int, Order)} calls.
*
* @param field
* The Tuple field on which the data set is locally sorted.
* @param order
* The Order in which the specified Tuple field is locally sorted.
* @return This data sink operator with specified output order.
* @see org.apache.flink.api.java.tuple.Tuple
* @see Order
* @deprecated Use {@link DataSet#sortPartition(int, Order)} | 3.26 |
flink_DataSink_getParameters_rdh | /**
*
* @return Configuration for the OutputFormat.
*/
public Configuration getParameters() {
return this.parameters;
} | 3.26 |
flink_DataSink_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return ((("DataSink '" + (this.name == null
? "<unnamed>" : this.name)) + "' (") + this.format.toString()) +
")";
} | 3.26 |
flink_DataSink_translateToDataFlow_rdh | // --------------------------------------------------------------------------------------------
protected GenericDataSinkBase<T> translateToDataFlow(Operator<T> input) {
// select the name (or create a default one)
String name = (this.name != null) ? this.name : this.format.toString();
GenericDataSinkBase<T> sink = new GenericDataSinkBase<>(this.format, new UnaryOperatorInformation<>(this.type, new NothingTypeInfo()), name);
// set input
sink.setInput(input);
// set parameters
if (this.parameters != null) {
sink.getParameters().addAll(this.parameters);
}
// set parallelism
if (this.parallelism > 0) {
// use specified parallelism
sink.setParallelism(this.parallelism);
    } else {
// if no parallelism has been specified, use parallelism of input operator to enable
// chaining
sink.setParallelism(input.getParallelism());
}
if (this.sortKeyPositions != null) {
// configure output sorting
Ordering ordering = new Ordering();
        for (int i = 0; i < this.sortKeyPositions.length; i++) {
            ordering.appendOrdering(this.sortKeyPositions[i], null, this.sortOrders[i]);
        }
sink.setLocalOrder(ordering);
}
return sink;
} | 3.26 |
flink_DataSink_setResources_rdh | /**
* Sets the resources for this data sink, and the minimum and preferred resources are the same
* by default.
*
* @param resources
* The resources for this data sink.
* @return The data sink with set minimum and preferred resources.
*/
private DataSink<T> setResources(ResourceSpec resources) {
OperatorValidationUtils.validateResources(resources);
this.minResources = resources;
    this.preferredResources = resources;
return this;
} | 3.26 |
flink_DataSink_name_rdh | // --------------------------------------------------------------------------------------------
public DataSink<T> name(String name) {
this.name = name;
return this;
} | 3.26 |
flink_HiveServer2Endpoint_GetQueryId_rdh | // CHECKSTYLE.OFF: MethodName
/**
* To be compatible with Hive3, add a default implementation.
*/
public TGetQueryIdResp GetQueryId(TGetQueryIdReq tGetQueryIdReq) throws TException {
throw new TException(new UnsupportedOperationException(String.format(UNSUPPORTED_ERROR_MESSAGE, "GetQueryId")));
} | 3.26 |
flink_HiveServer2Endpoint_waitUntilOperationIsTerminated_rdh | /**
* Similar solution comparing to the {@code org.apache.hive.jdbc.HiveStatement#waitForOperationToComplete}.
*
* <p>The better solution is to introduce an interface similar to {@link TableResult#await()}.
*/
private void waitUntilOperationIsTerminated(SessionHandle sessionHandle, OperationHandle operationHandle) throws Exception {
OperationInfo v63;
do {
v63 = service.getOperationInfo(sessionHandle, operationHandle);
switch (v63.getStatus()) {
case INITIALIZED :
case PENDING :
case RUNNING :
Thread.sleep(CHECK_INTERVAL_MS);
break;
case CANCELED :
case TIMEOUT :
throw new SqlGatewayException(String.format("The operation %s's status is %s for the session %s.", operationHandle, v63.getStatus(), sessionHandle));
case ERROR :
throw new SqlGatewayException(String.format("The operation %s's status is %s for the session %s.", operationHandle, v63.getStatus(), sessionHandle), v63.getException().orElseThrow(() -> new SqlGatewayException("Impossible! ERROR status should contains the error.")));
case FINISHED :
return;
default :
throw new SqlGatewayException(String.format("Unknown status: %s.", v63.getStatus()));
}
} while (true );
} | 3.26 |
flink_HiveServer2Endpoint_SetClientInfo_rdh | /**
* To be compatible with Hive3, add a default implementation.
*/
public TSetClientInfoResp SetClientInfo(TSetClientInfoReq tSetClientInfoReq) throws TException {
return new TSetClientInfoResp(buildErrorStatus("SetClientInfo"));
} | 3.26 |
flink_HiveServer2Endpoint_equals_rdh | // CHECKSTYLE.ON: MethodName
@Override
public boolean equals(Object o) {
    if (this == o) {
return true;
}
if (!(o instanceof HiveServer2Endpoint)) {
return false;
}
HiveServer2Endpoint that = ((HiveServer2Endpoint) (o));
return ((((((((((Objects.equals(socketAddress, that.socketAddress) && (minWorkerThreads == that.minWorkerThreads)) && (maxWorkerThreads == that.maxWorkerThreads)) && (requestTimeoutMs == that.requestTimeoutMs)) && (backOffSlotLengthMs == that.backOffSlotLengthMs)) && (maxMessageSize == that.maxMessageSize)) && Objects.equals(workerKeepAliveTime, that.workerKeepAliveTime)) && Objects.equals(catalogName, that.catalogName)) && Objects.equals(defaultDatabase, that.defaultDatabase))
&& Objects.equals(allowEmbedded, that.allowEmbedded)) && Objects.equals(isVerbose, that.isVerbose)) && Objects.equals(moduleName, that.moduleName);
} | 3.26 |
flink_TaskEventDispatcher_m0_rdh | /**
* Removes all registered event handlers.
*/
public void m0() {
synchronized(registeredHandlers) {
registeredHandlers.clear();
}
} | 3.26 |
flink_TaskEventDispatcher_publish_rdh | /**
* Publishes the event to the registered {@link EventListener} instances.
*
* <p>This method is either called directly from a {@link LocalInputChannel} or the network I/O
* thread on behalf of a {@link RemoteInputChannel}.
*
* @return whether the event was published to a registered event handler (initiated via {@link #registerPartition(ResultPartitionID)}) or not
*/
@Override
public boolean publish(ResultPartitionID partitionId, TaskEvent event) {
    checkNotNull(partitionId);
    checkNotNull(event);
TaskEventHandler taskEventHandler;
synchronized(registeredHandlers) {
taskEventHandler = registeredHandlers.get(partitionId);
    }
    if (taskEventHandler != null) {
taskEventHandler.publish(event);
return true;
}
return false;
} | 3.26 |
flink_TaskEventDispatcher_subscribeToEvent_rdh | /**
* Subscribes a listener to this dispatcher for events on a partition.
*
* @param partitionId
* ID of the partition to subscribe for (must be registered via {@link #registerPartition(ResultPartitionID)} first!)
* @param eventListener
* the event listener to subscribe
* @param eventType
* event type to subscribe to
*/
public void subscribeToEvent(ResultPartitionID partitionId, EventListener<TaskEvent> eventListener, Class<? extends TaskEvent> eventType) {
    checkNotNull(partitionId);
    checkNotNull(eventListener);
checkNotNull(eventType);
TaskEventHandler taskEventHandler;
    synchronized(registeredHandlers) {
taskEventHandler = registeredHandlers.get(partitionId);
}
if (taskEventHandler == null) {
throw new IllegalStateException(("Partition " + partitionId) + " not registered at task event dispatcher.");}
taskEventHandler.subscribe(eventListener, eventType);
} | 3.26 |
flink_TaskEventDispatcher_registerPartition_rdh | /**
* Registers the given partition for incoming task events allowing calls to {@link #subscribeToEvent(ResultPartitionID, EventListener, Class)}.
*
* @param partitionId
* the partition ID
*/
public void registerPartition(ResultPartitionID partitionId) {
checkNotNull(partitionId);
synchronized(registeredHandlers) {
LOG.debug("registering {}", partitionId);
if (registeredHandlers.put(partitionId, new TaskEventHandler()) != null) {
throw new IllegalStateException(("Partition " + partitionId) + " already registered at task event dispatcher.");
}
}
} | 3.26 |
flink_TaskEventDispatcher_unregisterPartition_rdh | /**
* Removes the given partition from listening to incoming task events, thus forbidding calls to
* {@link #subscribeToEvent(ResultPartitionID, EventListener, Class)}.
*
* @param partitionId
* the partition ID
*/
public void unregisterPartition(ResultPartitionID partitionId) {
    checkNotNull(partitionId);
    synchronized(registeredHandlers) {
LOG.debug("unregistering {}", partitionId);
// NOTE: tolerate un-registration of non-registered task (unregister is always called
// in the cleanup phase of a task even if it never came to the registration - see
// Task.java)
registeredHandlers.remove(partitionId);
}
} | 3.26 |
flink_SlidingWindowAssigner_of_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Creates a new {@code SlidingEventTimeWindows} {@link org.apache.flink.streaming.api.windowing.assigners.WindowAssigner} that assigns elements to
* sliding time windows based on the element timestamp.
*
* @param size
* The size of the generated windows.
* @param slide
* The slide interval of the generated windows.
* @return The time policy.
*/
public static SlidingWindowAssigner of(Duration size, Duration slide) {
return new SlidingWindowAssigner(size.toMillis(), slide.toMillis(), 0, true);
} | 3.26 |
flink_HttpRequestHandler_m0_rdh | /**
* Checks whether the given directory exists and is writable. If it doesn't exist this method
* will attempt to create it.
*
* @param uploadDir
* directory to check
* @throws IOException
* if the directory does not exist and cannot be created, or if the
* directory isn't writable
*/
public static synchronized void m0(File uploadDir) throws IOException {
if (uploadDir.exists() && uploadDir.canWrite()) {
LOG.info("Using directory {} for web frontend JAR file uploads.", uploadDir);
} else if (uploadDir.mkdirs() && uploadDir.canWrite()) {
LOG.info("Created directory {} for web frontend JAR file uploads.", uploadDir);
} else {
LOG.warn("Jar upload directory {} cannot be created or is not writable.", uploadDir.getAbsolutePath());
throw new IOException(String.format("Jar upload directory %s cannot be created or is not writable.", uploadDir.getAbsolutePath()));
}
} | 3.26 |
flink_JobSubmissionResult_m0_rdh | /**
* Checks if this JobSubmissionResult is also a JobExecutionResult. See {@code getJobExecutionResult} to retrieve the JobExecutionResult.
*
* @return True if this is a JobExecutionResult, false otherwise
*/
public boolean m0() {
return false;
} | 3.26 |
flink_CrossOperator_projectTuple10_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> ProjectCross<I1, I2, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> projectTuple10() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> tType = new TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fTypes);
return new ProjectCross<I1, I2, Tuple10<T0, T1, T2, T3,
T4, T5, T6, T7, T8, T9>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple2_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1> ProjectCross<I1, I2, Tuple2<T0, T1>> projectTuple2() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple2<T0, T1>> tType = new TupleTypeInfo<Tuple2<T0, T1>>(fTypes);
return new ProjectCross<I1, I2, Tuple2<T0, T1>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple21_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> ProjectCross<I1, I2, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> projectTuple21() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
    TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> tType = new TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(fTypes);
    return new ProjectCross<I1, I2, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple6_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5> ProjectCross<I1, I2, Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType = new TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes);
return new ProjectCross<I1, I2, Tuple6<T0, T1, T2, T3, T4, T5>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple25_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16,
T17, T18, T19, T20, T21, T22, T23, T24> ProjectCross<I1, I2, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> projectTuple25() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple25<T0, T1,
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> tType = new TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5,
T6, T7, T8, T9, T10, T11,
T12, T13, T14, T15, T16, T17,
T18, T19, T20, T21, T22, T23, T24>>(fTypes);
return new ProjectCross<I1, I2, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple14_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> ProjectCross<I1, I2, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> projectTuple14() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7,
T8, T9, T10, T11, T12, T13>> tType = new TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(fTypes);
return new ProjectCross<I1, I2, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple18_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> ProjectCross<I1, I2, Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> projectTuple18() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
    TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> v57 = new TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(fTypes);
return new ProjectCross<I1, I2, Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, v57, this, hint);
} | 3.26 |
flink_CrossOperator_projectFirst_rdh | /**
* Continues a ProjectCross transformation and adds fields of the first cross input.
*
* <p>If the first cross input is a {@link Tuple} {@link DataSet}, fields can be selected by
* their index. If the first cross input is not a Tuple DataSet, no parameters should be
* passed.
*
* <p>Fields of the first and second input can be added by chaining the method calls of
* {@link org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectFirst(int...)}
* and {@link org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectSecond(int...)}.
*
* @param firstFieldIndexes
* If the first input is a Tuple DataSet, the indexes of the
* selected fields. For a non-Tuple DataSet, do not provide parameters. The order of
* fields in the output tuple is defined by to the order of field indexes.
* @return An extended CrossProjection.
* @see Tuple
* @see DataSet
* @see org.apache.flink.api.java.operators.CrossOperator.CrossProjection
* @see org.apache.flink.api.java.operators.CrossOperator.ProjectCross
*/
protected CrossProjection<I1, I2> projectFirst(int... firstFieldIndexes) {
boolean isFirstTuple;
    if ((ds1.getType() instanceof TupleTypeInfo) && (firstFieldIndexes.length > 0)) {
isFirstTuple = true;
} else {
isFirstTuple = false;
}
if ((!isFirstTuple) && (firstFieldIndexes.length != 0)) {
// field index provided for non-Tuple input
throw new IllegalArgumentException("Input is not a Tuple. Call projectFirst() without arguments to include it.");
} else if (firstFieldIndexes.length > (22 - this.fieldIndexes.length)) {
        // too many field indexes provided
throw new IllegalArgumentException("You may select only up to twenty-two (22) fields in total.");
}
int offset = this.fieldIndexes.length;
if (isFirstTuple) {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + firstFieldIndexes.length);
this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + firstFieldIndexes.length);
// copy field indexes
int maxFieldIndex = numFieldsDs1;
for (int i = 0; i < firstFieldIndexes.length; i++) {
// check if indexes in range
Preconditions.checkElementIndex(firstFieldIndexes[i], maxFieldIndex);
this.isFieldInFirst[offset + i] = true;
            this.fieldIndexes[offset + i] = firstFieldIndexes[i];
}
} else {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + 1);
this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
// add input object to output tuple
this.isFieldInFirst[offset] = true;
        this.fieldIndexes[offset] = -1;
    }
return this;
} | 3.26 |
flink_CrossOperator_m1_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
T13, T14, T15, T16, T17,
T18> ProjectCross<I1, I2, Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>> m1() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple19<T0, T1, T2, T3, T4,
T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>> tType = new TupleTypeInfo<Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>>(fTypes);
return new ProjectCross<I1, I2, Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>>(this.ds1, this.ds2, this.fieldIndexes,
this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple11_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ProjectCross<I1, I2, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> projectTuple11() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> tType = new TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(fTypes);
return new ProjectCross<I1, I2, Tuple11<T0, T1, T2, T3,
T4, T5, T6, T7, T8, T9, T10>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple3_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
        public <T0, T1, T2> ProjectCross<I1, I2, Tuple3<T0, T1, T2>> projectTuple3() {
            TypeInformation<?>[] fTypes = m3(fieldIndexes);
            TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes);
            return new ProjectCross<I1, I2, Tuple3<T0, T1, T2>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
        } | 3.26 |
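For context, this projection machinery is normally reached from user code through the `cross(...).projectFirst(...).projectSecond(...)` chain of the DataSet API; the arity of the accumulated field list (three in the case below) is what selects `projectTuple3()` internally. A minimal usage sketch, assuming the standard flink-java `ExecutionEnvironment`/`DataSet` entry points and the `Tuple2.of(...)` helpers:

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;

public class CrossProjectionExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Tuple2<String, Integer>> first = env.fromElements(
                Tuple2.of("a", 1), Tuple2.of("b", 2));
        DataSet<Tuple2<Long, Double>> second = env.fromElements(
                Tuple2.of(10L, 0.5), Tuple2.of(20L, 1.5));

        // Each projectFirst/projectSecond call appends field indexes;
        // the resulting arity (3) picks projectTuple3() under the hood.
        DataSet<Tuple3<String, Long, Double>> projected = first
                .cross(second)
                .projectFirst(0)
                .projectSecond(0, 1);

        projected.print();
    }
}
```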
flink_CrossOperator_projectTuple24_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> ProjectCross<I1, I2, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
T16, T17, T18, T19, T20, T21, T22, T23>> projectTuple24() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17,
T18, T19, T20, T21, T22, T23>> tType = new TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22,
T23>>(fTypes);
return new ProjectCross<I1, I2, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21,
T22, T23>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple7_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6> ProjectCross<I1, I2, Tuple7<T0, T1, T2, T3, T4, T5, T6>> projectTuple7() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>> tType = new TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>>(fTypes);
return new ProjectCross<I1, I2, Tuple7<T0, T1, T2, T3, T4, T5, T6>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple15_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> ProjectCross<I1, I2, Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> projectTuple15() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
T10, T11, T12, T13, T14>> tType = new TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4,
T5, T6, T7, T8,
T9, T10, T11, T12, T13, T14>>(fTypes);
return new ProjectCross<I1, I2, Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectSecond_rdh | /**
* Continues a ProjectCross transformation and adds fields of the second cross input.
*
* <p>If the second cross input is a {@link Tuple} {@link DataSet}, fields can be selected
* by their index. If the second cross input is not a Tuple DataSet, no parameters should be
* passed.
*
* <p>Fields of the first and second input can be added by chaining the method calls of
* {@link org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectFirst(int...)}
* and {@link org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectSecond(int...)}.
*
* @param secondFieldIndexes
* If the second input is a Tuple DataSet, the indexes of the
* selected fields. For a non-Tuple DataSet, do not provide parameters. The order of
     *            fields in the output tuple is defined by the order of field indexes.
* @return An extended CrossProjection.
* @see Tuple
* @see DataSet
* @see org.apache.flink.api.java.operators.CrossOperator.CrossProjection
* @see org.apache.flink.api.java.operators.CrossOperator.ProjectCross
*/
protected CrossProjection<I1, I2> projectSecond(int... secondFieldIndexes) {
boolean v17;
if ((ds2.getType() instanceof TupleTypeInfo) && (secondFieldIndexes.length > 0)) {
v17 = true;
} else {
v17 = false;
}
if ((!v17) && (secondFieldIndexes.length != 0)) {
// field index provided for non-Tuple input
                throw new IllegalArgumentException("Input is not a Tuple. Call projectSecond() without arguments to include it.");
} else if (secondFieldIndexes.length > (22 - this.fieldIndexes.length)) {
                // too many field indexes provided
throw new IllegalArgumentException("You may select only up to twenty-two (22) fields in total.");
}
int offset = this.fieldIndexes.length;
if (v17) {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + secondFieldIndexes.length);
this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + secondFieldIndexes.length);
// copy field indexes
int maxFieldIndex = f0;
for (int i = 0; i < secondFieldIndexes.length; i++) {
// check if indexes in range
Preconditions.checkElementIndex(secondFieldIndexes[i], maxFieldIndex);
this.isFieldInFirst[offset + i] = false;
this.fieldIndexes[offset + i] = secondFieldIndexes[i];
}
} else {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + 1);
this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
// add input object to output tuple
this.isFieldInFirst[offset] = false;
this.fieldIndexes[offset] = -1;
}
return this;
} | 3.26 |
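When the second cross input is not a Tuple DataSet, `projectSecond()` is called with no arguments and the whole element is appended (the `-1` marker above). A hedged usage sketch, under the same API assumptions as the previous example:

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;

public class NonTupleProjectionExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Tuple2<String, Integer>> tuples = env.fromElements(
                Tuple2.of("a", 1), Tuple2.of("b", 2));
        DataSet<String> plainStrings = env.fromElements("x", "y");

        // The second input is not a Tuple, so projectSecond() takes no indexes:
        // the whole String element becomes the last field of the output tuple.
        DataSet<Tuple3<String, Integer, String>> result = tuples
                .cross(plainStrings)
                .projectFirst(0, 1)
                .projectSecond();

        result.print();
    }
}
```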
flink_CrossOperator_projectTuple9_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8> ProjectCross<I1, I2, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> projectTuple9() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> tType = new TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fTypes);
return new ProjectCross<I1, I2, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_m2_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
        public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> ProjectCross<I1, I2, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> m2() {
            TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> tType = new TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(fTypes);
return new ProjectCross<I1, I2, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTupleX_rdh | // --------------------------------------------------------------------------------------------
// The following lines are generated.
// --------------------------------------------------------------------------------------------
// BEGIN_OF_TUPLE_DEPENDENT_CODE
// GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator.
/**
* Chooses a projectTupleX according to the length of {@link org.apache.flink.api.java.operators.CrossOperator.CrossProjection#fieldIndexes}.
*
* @return The projected DataSet.
*/
@SuppressWarnings("unchecked")
public <OUT extends Tuple> ProjectCross<I1, I2, OUT> projectTupleX() {
ProjectCross<I1, I2, OUT> projectionCross = null;
switch (fieldIndexes.length) {
case 1 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple1()));
break;
                case 2 :
                    projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple2()));
break;
case 3 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple3()));
break;
case 4 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple4()));
break;
case 5 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple5()));
break;
case 6 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple6()));
break;
case 7 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple7()));
break;
case 8 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple8()));
break;
case 9 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple9()));
break;
case 10 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple10()));
break;
case 11 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple11()));
break;
case 12 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple12()));
break;
                case 13 :
                    projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple13()));
                    break;
                case 14 :
                    projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple14()));
break;
case 15 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple15()));
break;
case 16 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple16()));
break;
                case 17 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple17()));
break;
                case 18 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple18()));
break;
case 19 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (m1()));
break;
case 20 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (m2()));
break;
case 21 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple21()));
break;
case 22 :
                    projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple22()));
break;
case 23 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple23()));
break;
case 24 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple24()));
break;
case 25 :
projectionCross = ((ProjectCross<I1, I2, OUT>) (projectTuple25()));
break;
default :
throw new IllegalStateException("Excessive arity in tuple.");
}
return projectionCross;
}
/**
* Projects a pair of crossed elements to a {@link Tuple} | 3.26 |
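The generated `projectTupleX` switch simply dispatches on the number of collected field indexes and delegates to the matching `projectTupleN()` builder. A simplified, self-contained sketch of the same arity-based dispatch idea (hypothetical, not the Flink implementation):

```java
/** Simplified sketch of arity-based dispatch, analogous to projectTupleX (hypothetical names). */
public class ArityDispatchSketch {

    /** Picks a concrete "tuple" builder by the number of fields, as projectTupleX picks projectTupleN. */
    static Object[] buildTuple(Object... fields) {
        switch (fields.length) {
            case 1 :
                return new Object[] { fields[0] };
            case 2 :
                return new Object[] { fields[0], fields[1] };
            case 3 :
                return new Object[] { fields[0], fields[1], fields[2] };
            default :
                // mirrors the "unsupported arity" failure branch of the generated switch
                throw new IllegalStateException("Unsupported arity: " + fields.length);
        }
    }

    public static void main(String[] args) {
        Object[] t = buildTuple("a", 1, 2.0);
        System.out.println(t.length); // 3
    }
}
```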
flink_CrossOperator_projectTuple13_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> ProjectCross<I1, I2, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> projectTuple13() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple13<T0, T1,
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> tType = new TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(fTypes);
return new ProjectCross<I1, I2, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7,
T8, T9, T10, T11, T12>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_types_rdh | /**
*
* @deprecated Deprecated method only kept for compatibility.
*/
@SuppressWarnings({ "hiding", "unchecked" })
@Deprecated
    @PublicEvolving
    public <OUT extends Tuple> CrossOperator<I1, I2, OUT> types(Class<?>... types) {
        TupleTypeInfo<OUT> typeInfo = ((TupleTypeInfo<OUT>) (this.getResultType()));
        if (types.length != typeInfo.getArity()) {
            throw new InvalidProgramException("Provided types do not match projection.");
        }
        for (int i = 0; i < types.length; i++) {
Class<?> typeClass = types[i];
if (!typeClass.equals(typeInfo.getTypeAt(i).getTypeClass())) {
throw new InvalidProgramException(((("Provided type " + typeClass.getSimpleName()) + " at position ") + i) + " does not match projection");
}
}
return ((CrossOperator<I1, I2, OUT>) (this));
} | 3.26 |
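The deprecated `types(...)` method does not change the result type; it only verifies, position by position, that the supplied classes match the `TupleTypeInfo` that was already inferred. A standalone sketch of that verification pattern, using plain `Class` equality instead of `TypeInformation` and hypothetical names:

```java
/** Standalone sketch of the check performed by the deprecated types(...) call (hypothetical names). */
public class TypeVerificationSketch {

    /** Fails if the provided classes do not match the already-known field types, position by position. */
    static void verify(Class<?>[] declaredFieldTypes, Class<?>... provided) {
        if (provided.length != declaredFieldTypes.length) {
            throw new IllegalArgumentException("Provided types do not match projection.");
        }
        for (int i = 0; i < provided.length; i++) {
            if (!provided[i].equals(declaredFieldTypes[i])) {
                throw new IllegalArgumentException(
                        "Provided type " + provided[i].getSimpleName()
                                + " at position " + i + " does not match projection");
            }
        }
    }

    public static void main(String[] args) {
        Class<?>[] declared = { String.class, Long.class };
        verify(declared, String.class, Long.class);        // passes silently
        try {
            verify(declared, String.class, Integer.class); // mismatch at position 1
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```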
flink_CrossOperator_projectTuple22_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
T16, T17, T18, T19, T20, T21> ProjectCross<I1, I2, Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>>
projectTuple22() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
T13, T14, T15, T16, T17, T18, T19, T20, T21>> tType = new TupleTypeInfo<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>>(fTypes);
return new ProjectCross<I1, I2, Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple5_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4> ProjectCross<I1, I2, Tuple5<T0, T1, T2, T3, T4>>
projectTuple5() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> tType = new TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>>(fTypes);
return new ProjectCross<I1, I2, Tuple5<T0, T1, T2, T3, T4>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple17_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> ProjectCross<I1, I2, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
T10, T11, T12, T13, T14, T15, T16>> projectTuple17() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6,
T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> v55 = new TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(fTypes);
return new ProjectCross<I1, I2, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8,
T9, T10, T11, T12, T13, T14, T15, T16>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst,
v55, this, hint);
} | 3.26 |