name | code_snippet | score
---|---|---|
flink_StructuredOptionsSplitter_escapeWithSingleQuote_rdh | /**
 * Escapes the given string with single quotes if the input string contains a single quote, a
 * double quote, or any of the given {@code charsToEscape}. Any single quotes in the input
 * string are escaped by doubling them.
*
 * <p>Given that the escape char is {@code ;}
*
* <p>Examples:
*
* <ul>
* <li>A,B,C,D => A,B,C,D
* <li>A'B'C'D => 'A''B''C''D'
* <li>A;BCD => 'A;BCD'
* <li>AB"C"D => 'AB"C"D'
* <li>AB'"D:B => 'AB''"D:B'
* </ul>
*
* @param string
* a string which needs to be escaped
* @param charsToEscape
* escape chars for the escape conditions
 * @return the string escaped with single quotes, or the original string if no escaping was needed
 */
static String escapeWithSingleQuote(String string, String... charsToEscape) {
boolean escape = (Arrays.stream(charsToEscape).anyMatch(string::contains) || string.contains("\"")) || string.contains("'");
if (escape) {
return ("'" + string.replaceAll("'", "''")) + "'";
}
return string;
} | 3.26 |
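To make the escaping rules above easy to verify, here is a small self-contained sketch that mirrors the snippet's logic and prints the Javadoc examples; the class name and `main` harness are illustrative, not part of Flink.

```java
// A minimal, standalone sketch of the escaping rules documented above.
public class EscapeSketch {
    static String escapeWithSingleQuote(String s, String... charsToEscape) {
        boolean needsEscaping = s.contains("\"") || s.contains("'");
        for (String c : charsToEscape) {
            needsEscaping |= s.contains(c);
        }
        // Single quotes are doubled, then the whole value is wrapped in single quotes.
        return needsEscaping ? "'" + s.replace("'", "''") + "'" : s;
    }

    public static void main(String[] args) {
        // With ";" as the char to escape, these match the Javadoc examples.
        System.out.println(escapeWithSingleQuote("A,B,C,D", ";"));   // A,B,C,D
        System.out.println(escapeWithSingleQuote("A'B'C'D", ";"));   // 'A''B''C''D'
        System.out.println(escapeWithSingleQuote("A;BCD", ";"));     // 'A;BCD'
        System.out.println(escapeWithSingleQuote("AB\"C\"D", ";"));  // 'AB"C"D'
    }
}
```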
flink_BucketWriter_openNewCompactingFile_rdh | /**
 * Used to create a new {@link CompactingFileWriter} of the requested type. Requesting a writer
 * of an unsupported type results in an UnsupportedOperationException. By default, only the
 * RECORD_WISE type is supported, for which an {@link InProgressFileWriter} will be created.
*
* @param type
* the type of this writer.
* @param bucketID
* the id of the bucket this writer is writing to.
* @param path
* the path this writer will write to.
* @param creationTime
* the creation time of the file.
* @return the new {@link InProgressFileWriter}
* @throws IOException
* Thrown if creating a writer fails.
 * @throws UnsupportedOperationException
 * Thrown if the bucket writer doesn't support the requested type.
 */
default CompactingFileWriter openNewCompactingFile(final CompactingFileWriter.Type type, final BucketID bucketID, final Path path, final long creationTime) throws IOException {
if (type == Type.RECORD_WISE) {
return openNewInProgressFile(bucketID, path, creationTime);
}
throw new UnsupportedOperationException();
} | 3.26 |
flink_JsonRowSerializationSchema_build_rdh | /**
* Finalizes the configuration and checks validity.
*
* @return Configured {@link JsonRowSerializationSchema}
*/
public JsonRowSerializationSchema build() {
checkArgument(typeInfo != null, "typeInfo should be set.");
return new JsonRowSerializationSchema(typeInfo);
}
} | 3.26 |
flink_JsonRowSerializationSchema_withTypeInfo_rdh | /**
* Sets type information for JSON serialization schema.
*
* @param typeInfo
* Type information describing the result type. The field names of {@link Row} are used to parse the JSON properties.
*/
public Builder withTypeInfo(TypeInformation<Row> typeInfo) {
checkArgument(typeInfo instanceof RowTypeInfo, "Only RowTypeInfo is supported");
this.typeInfo = ((RowTypeInfo) (typeInfo));
return this;
} | 3.26 |
flink_SlidingWindowCheckMapper_verifyPreviousOccurences_rdh | /**
 * Verifies that all values from previous windows appear in the new one. Returns the union of all
 * events seen so far that have not yet been seen <b>slideFactor</b> times.
*/
private List<Tuple2<Event, Integer>> verifyPreviousOccurences(List<Tuple2<Event, Integer>> previousWindowValues, List<Event> newValues, Long lastSequenceNumberSeenSoFar, Collector<String> out) {
List<Tuple2<Event, Integer>> newEventsSeenSoFar = new ArrayList<>();
List<Event> seenEvents = new ArrayList<>();
for (Tuple2<Event, Integer> windowValue : previousWindowValues) {
if (!newValues.contains(windowValue.f0)) {
failWithEventNotSeenAlertMessage(windowValue, newValues, out);
} else {
seenEvents.add(windowValue.f0);
preserveOrDiscardIfSeenSlideFactorTimes(newEventsSeenSoFar, windowValue);
}
}
addNotSeenValues(newEventsSeenSoFar, newValues, seenEvents, lastSequenceNumberSeenSoFar, out);
return newEventsSeenSoFar;
} | 3.26 |
flink_BuildSideIterator_setBucket_rdh | // update current bucket status.
private void setBucket(MemorySegment bucket, MemorySegment[] overflowSegments, int bucketInSegmentOffset) {
this.bucketSegment = bucket;
this.overflowSegments = overflowSegments;
this.bucketInSegmentOffset = bucketInSegmentOffset;
this.pointerOffset = bucketInSegmentOffset + BinaryHashBucketArea.BUCKET_POINTER_START_OFFSET;
this.countInBucket = bucket.getShort(bucketInSegmentOffset + BinaryHashBucketArea.HEADER_COUNT_OFFSET);
this.numInBucket = 0;
// reset probedSet with probedFlags offset in this bucket.
this.probedSet.setMemorySegment(bucketSegment, this.bucketInSegmentOffset + BinaryHashBucketArea.PROBED_FLAG_OFFSET);
} | 3.26 |
flink_BuildSideIterator_m0_rdh | /**
 * Moves to the next bucket. Returns true if it moved to an on-heap bucket, false if it moved to
 * a spilled bucket or there are no more buckets.
*/
private boolean m0() {
scanCount++;
if (scanCount >= area.numBuckets) {
return false;
}
// move to next bucket, update all the current bucket status with new bucket
// information.
final int bucketArrayPos = scanCount >> area.table.bucketsPerSegmentBits;
final int currentBucketInSegmentOffset = (scanCount & area.table.bucketsPerSegmentMask) << BinaryHashBucketArea.BUCKET_SIZE_BITS;
MemorySegment currentBucket = area.buckets[bucketArrayPos];
setBucket(currentBucket, area.overflowSegments, currentBucketInSegmentOffset);
return true;
} | 3.26 |
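The shift-and-mask arithmetic in `m0` (and in `setBucket` above it) maps a flat bucket index to a memory segment and a byte offset. Below is a tiny sketch of that addressing; the constants are illustrative stand-ins, not the real values from `BinaryHashBucketArea`.

```java
// Flat bucket index -> (segment index, in-segment byte offset), assuming power-of-two sizes.
public class BucketAddressingSketch {
    static final int BUCKET_SIZE_BITS = 7;              // assume a 128-byte bucket
    static final int BUCKETS_PER_SEGMENT_BITS = 8;      // assume 256 buckets per segment
    static final int BUCKETS_PER_SEGMENT_MASK = (1 << BUCKETS_PER_SEGMENT_BITS) - 1;

    public static void main(String[] args) {
        int bucketIndex = 1000;
        int segmentIndex = bucketIndex >> BUCKETS_PER_SEGMENT_BITS;                          // which MemorySegment
        int inSegmentOffset = (bucketIndex & BUCKETS_PER_SEGMENT_MASK) << BUCKET_SIZE_BITS;  // byte offset within it
        System.out.println("segment " + segmentIndex + ", offset " + inSegmentOffset);
        // bucket 1000 -> segment 3, offset (1000 % 256) * 128 = 29696
    }
}
```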
flink_IntermediateDataSet_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return ("Intermediate Data Set (" + id) + ")";
} | 3.26 |
flink_IntermediateDataSet_getId_rdh | // --------------------------------------------------------------------------------------------
public IntermediateDataSetID getId() {
return id;
} | 3.26 |
flink_IntermediateDataSet_addConsumer_rdh | // --------------------------------------------------------------------------------------------
public void addConsumer(JobEdge edge) {
// sanity check
checkState(id.equals(edge.getSourceId()), "Incompatible dataset id.");
if (consumers.isEmpty()) {
distributionPattern = edge.getDistributionPattern();
isBroadcast = edge.isBroadcast();
} else {
checkState(distributionPattern == edge.getDistributionPattern(), "Incompatible distribution pattern.");
checkState(isBroadcast == edge.isBroadcast(), "Incompatible broadcast type.");
}
consumers.add(edge);
} | 3.26 |
flink_SourceCoordinatorContext_getAndRemoveUncheckpointedAssignment_rdh | /**
* Get the split to put back. This only happens when a source reader subtask has failed.
*
* @param subtaskId
* the failed subtask id.
* @param restoredCheckpointId
* the checkpoint that the task is recovered to.
* @return A list of splits that needs to be added back to the {@link SplitEnumerator}.
*/
List<SplitT> getAndRemoveUncheckpointedAssignment(int subtaskId, long restoredCheckpointId) {
return assignmentTracker.getAndRemoveUncheckpointedAssignment(subtaskId, restoredCheckpointId);
} | 3.26 |
flink_SourceCoordinatorContext_schedulePeriodTask_rdh | /**
 * To avoid losing the periodic task, we handle any exception thrown by the task.
*/
ScheduledFuture<?> schedulePeriodTask(Runnable command, long initDelay, long period, TimeUnit unit) {
return coordinatorExecutor.scheduleAtFixedRate(() -> {
try {
command.run();
} catch (Throwable t) {
handleUncaughtExceptionFromAsyncCall(t);
}
}, initDelay, period, unit);
} | 3.26 |
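The wrapping in `schedulePeriodTask` matters because `scheduleAtFixedRate` silently suppresses all further executions once a run throws. A self-contained sketch of the same guard with a plain `ScheduledExecutorService`; names and timings are illustrative.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SafePeriodicTaskSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        Runnable command = () -> {
            if (Math.random() < 0.3) {
                throw new IllegalStateException("simulated failure");
            }
            System.out.println("tick");
        };
        executor.scheduleAtFixedRate(() -> {
            try {
                command.run();
            } catch (Throwable t) {
                // handle the failure instead of letting the periodic schedule die silently
                System.err.println("periodic task failed: " + t.getMessage());
            }
        }, 0, 200, TimeUnit.MILLISECONDS);

        Thread.sleep(1000);
        executor.shutdownNow();
    }
}
```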
flink_SourceCoordinatorContext_m3_rdh | /**
* Unregister a source reader.
*
* @param subtaskId
* the subtask id of the source reader.
* @param attemptNumber
* the attempt number of the source reader.
*/
void m3(int subtaskId, int attemptNumber) {
final Map<Integer, ReaderInfo> attemptReaders = registeredReaders.get(subtaskId);
if (attemptReaders != null) {
attemptReaders.remove(attemptNumber);
if (attemptReaders.isEmpty()) {
registeredReaders.remove(subtaskId);
}
}
} | 3.26 |
flink_SourceCoordinatorContext_checkSubtaskIndex_rdh | // ---------------- private helper methods -----------------
private void checkSubtaskIndex(int subtaskIndex) {
if ((subtaskIndex < 0) || (subtaskIndex >= getCoordinatorContext().currentParallelism())) {
throw new IllegalArgumentException(String.format("Subtask index %d is out of bounds [0, %s)", subtaskIndex, getCoordinatorContext().currentParallelism()));
}
} | 3.26 |
flink_SourceCoordinatorContext_submitTask_rdh | // ---------------- Executor methods to avoid use coordinatorExecutor directly -----------------
Future<?> submitTask(Runnable task) {
return coordinatorExecutor.submit(task);
} | 3.26 |
flink_SourceCoordinatorContext_onCheckpoint_rdh | /**
* Behavior of SourceCoordinatorContext on checkpoint.
*
* @param checkpointId
* The id of the ongoing checkpoint.
*/
void onCheckpoint(long checkpointId) throws Exception {
assignmentTracker.onCheckpoint(checkpointId);
} | 3.26 |
flink_SourceCoordinatorContext_attemptReady_rdh | // --------- Package private additional methods for the SourceCoordinator ------------
void attemptReady(OperatorCoordinator.SubtaskGateway gateway) {
checkState(coordinatorThreadFactory.isCurrentThreadCoordinatorThread());
subtaskGateways.registerSubtaskGateway(gateway);
} | 3.26 |
flink_SourceCoordinatorContext_callInCoordinatorThread_rdh | /**
* A helper method that delegates the callable to the coordinator thread if the current thread
* is not the coordinator thread, otherwise call the callable right away.
*
* @param callable
* the callable to delegate.
*/
private <V> V callInCoordinatorThread(Callable<V> callable, String errorMessage) {
// Ensure the split assignment is done by the coordinator executor.
if (!coordinatorThreadFactory.isCurrentThreadCoordinatorThread()) {
try {
final Callable<V> guardedCallable = () -> {
try {
return callable.call();
} catch (Throwable t) {
LOG.error("Uncaught Exception in Source Coordinator Executor", t);
ExceptionUtils.rethrowException(t);
return null;
}
};
return coordinatorExecutor.submit(guardedCallable).get();
} catch (InterruptedException | ExecutionException e) {
throw new FlinkRuntimeException(errorMessage, e);
}
}
try {
return callable.call();
} catch (Throwable t) {
LOG.error("Uncaught Exception in Source Coordinator Executor", t);
throw new FlinkRuntimeException(errorMessage, t);
}
} | 3.26 |
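The `callInCoordinatorThread` helper follows a common "run on the owning thread, or submit and block" pattern. A minimal sketch of that pattern with a single-thread executor follows; the class and method names are made up for illustration and are not the Flink API.

```java
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

public class OwnerThreadExecutorSketch {
    private final AtomicReference<Thread> ownerThread = new AtomicReference<>();
    private final ExecutorService executor = Executors.newSingleThreadExecutor(r -> {
        Thread t = new Thread(r, "owner-thread");
        ownerThread.set(t); // remember the executor's single thread
        return t;
    });

    <V> V callInOwnerThread(Callable<V> callable, String errorMessage) {
        if (Thread.currentThread() != ownerThread.get()) {
            try {
                return executor.submit(callable).get(); // delegate and block for the result
            } catch (InterruptedException | ExecutionException e) {
                throw new RuntimeException(errorMessage, e);
            }
        }
        try {
            return callable.call(); // already on the owner thread, call directly
        } catch (Exception e) {
            throw new RuntimeException(errorMessage, e);
        }
    }

    public static void main(String[] args) {
        OwnerThreadExecutorSketch sketch = new OwnerThreadExecutorSketch();
        String result = sketch.callInOwnerThread(() -> Thread.currentThread().getName(), "call failed");
        System.out.println("executed on: " + result);
        sketch.executor.shutdown();
    }
}
```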
flink_SourceCoordinatorContext_failJob_rdh | /**
* Fail the job with the given cause.
*
* @param cause
* the cause of the job failure.
*/
void failJob(Throwable cause) {
operatorCoordinatorContext.failJob(cause);
} | 3.26 |
flink_SourceCoordinatorContext_registerSourceReader_rdh | /**
* Register a source reader.
*
* @param subtaskId
* the subtask id of the source reader.
* @param attemptNumber
* the attempt number of the source reader.
* @param location
* the location of the source reader.
*/
void registerSourceReader(int subtaskId, int attemptNumber, String location) {
final Map<Integer, ReaderInfo> attemptReaders = registeredReaders.computeIfAbsent(subtaskId, k -> new ConcurrentHashMap<>());
checkState(!attemptReaders.containsKey(attemptNumber),
"ReaderInfo of subtask %s (#%s) already exists.", subtaskId, attemptNumber);
attemptReaders.put(attemptNumber, new ReaderInfo(subtaskId, location));
sendCachedSplitsToNewlyRegisteredReader(subtaskId, attemptNumber);
} | 3.26 |
flink_SourceCoordinatorContext_runInCoordinatorThread_rdh | /**
* {@inheritDoc } If the runnable throws an Exception, the corresponding job is failed.
*/
@Override
public void runInCoordinatorThread(Runnable runnable) {
// when using a ScheduledThreadPool, uncaught exception handler catches only
// exceptions thrown by the threadPool, so manually call it when the exception is
// thrown by the runnable
coordinatorExecutor.execute(new ThrowableCatchingRunnable(throwable -> coordinatorThreadFactory.uncaughtException(Thread.currentThread(), throwable), runnable));
} | 3.26 |
flink_SourceCoordinatorContext_onCheckpointComplete_rdh | /**
* Invoked when a successful checkpoint has been taken.
*
* @param checkpointId
* the id of the successful checkpoint.
*/
void onCheckpointComplete(long checkpointId) {
assignmentTracker.onCheckpointComplete(checkpointId);
} | 3.26 |
flink_SegmentPartitionFile_getTieredStoragePath_rdh | // ------------------------------------------------------------------------
// File-related utilities
// ------------------------------------------------------------------------
public static String getTieredStoragePath(String basePath) {
return String.format("%s/%s", basePath, TIERED_STORAGE_DIR);
} | 3.26 |
flink_MapView_contains_rdh | /**
* Checks if the map view contains a value for a given key.
*
* @param key
* The key to check.
* @return True if there exists a value for the given key, false otherwise.
* @throws Exception
* Thrown if the system cannot access the map.
 */
public boolean contains(K key) throws Exception {
return map.containsKey(key);
} | 3.26 |
flink_MapView_isEmpty_rdh | /**
* Returns true if the map view contains no key-value mappings, otherwise false.
*
* @return True if the map view contains no key-value mappings, otherwise false.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public boolean isEmpty() throws Exception {
return map.isEmpty();
} | 3.26 |
flink_MapView_entries_rdh | /**
* Returns all entries of the map view.
*
* @return An iterable of all the key-value pairs in the map view.
* @throws Exception
* Thrown if the system cannot access the map.
*/
public Iterable<Map.Entry<K, V>> entries() throws Exception {
return map.entrySet();
} | 3.26 |
flink_MapView_newMapViewDataType_rdh | // --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
* Utility method for creating a {@link DataType} of {@link MapView} explicitly.
*/
public static DataType newMapViewDataType(DataType keyDataType, DataType valueDataType) {
return DataTypes.STRUCTURED(MapView.class, DataTypes.FIELD("map", DataTypes.MAP(keyDataType, valueDataType).bridgedTo(Map.class)));
} | 3.26 |
flink_MapView_put_rdh | /**
* Inserts a value for the given key into the map view. If the map view already contains a value
* for the key, the existing value is overwritten.
*
* @param key
* The key for which the value is inserted.
* @param value
* The value that is inserted for the key.
* @throws Exception
* Thrown if the system cannot put data.
*/
public void put(K key, V value) throws Exception {
map.put(key, value);
} | 3.26 |
flink_MapView_remove_rdh | /**
* Deletes the value for the given key.
*
* @param key
* The key for which the value is deleted.
* @throws Exception
* Thrown if the system cannot access the map.
*/
public void remove(K key) throws Exception {
map.remove(key);
} | 3.26 |
flink_MapView_putAll_rdh | /**
* Inserts all mappings from the specified map to this map view.
*
* @param map
* The map whose entries are inserted into this map view.
* @throws Exception
* Thrown if the system cannot access the map.
*/
public void putAll(Map<K, V> map) throws Exception {
this.map.putAll(map);
} | 3.26 |
flink_MapView_values_rdh | /**
* Returns all the values in the map view.
*
* @return An iterable of all the values in the map.
* @throws Exception
* Thrown if the system cannot access the map.
*/
public Iterable<V> values() throws Exception {
return map.values();
} | 3.26 |
flink_MapView_iterator_rdh | /**
* Returns an iterator over all entries of the map view.
*
* @return An iterator over all the mappings in the map.
* @throws Exception
* Thrown if the system cannot access the map.
*/
public Iterator<Map.Entry<K, V>> iterator() throws Exception {
return map.entrySet().iterator(); } | 3.26 |
flink_MapView_setMap_rdh | /**
* Replaces the entire view's content with the content of the given {@link Map}.
*/
public void setMap(Map<K, V> map) {
this.map = map;
} | 3.26 |
flink_MapView_clear_rdh | /**
* Removes all entries of this map.
*/
@Override
public void clear() {
map.clear();
} | 3.26 |
flink_MapView_keys_rdh | /**
* Returns all the keys in the map view.
*
* @return An iterable of all the keys in the map.
* @throws Exception
* Thrown if the system cannot access the map.
*/
public Iterable<K> keys() throws Exception {
return map.keySet();
} | 3.26 |
flink_MapView_get_rdh | /**
* Return the value for the specified key or {@code null} if the key is not in the map view.
*
* @param key
* The look up key.
* @return The value for the specified key.
* @throws Exception
* Thrown if the system cannot get data.
*/
public V get(K key) throws Exception {
return map.get(key);
} | 3.26 |
flink_JoinedRowData_replace_rdh | /**
* Replaces the {@link RowData} backing this {@link JoinedRowData}.
*
* <p>This method replaces the backing rows in place and does not return a new object. This is
* done for performance reasons.
*/
public JoinedRowData replace(RowData row1,
RowData row2) {
this.row1 = row1;
this.row2 = row2;
return this;
} | 3.26 |
flink_MutableHashTable_ensureNumBuffersReturned_rdh | /**
* This method makes sure that at least a certain number of memory segments is in the list of
* free segments. Free memory can be in the list of free segments, or in the return-queue where
* segments used to write behind are put. The number of segments that are in that return-queue,
* but are actually reclaimable is tracked. This method makes sure at least a certain number of
* buffers is reclaimed.
*
* @param minRequiredAvailable
* The minimum number of buffers that needs to be reclaimed.
*/
final void ensureNumBuffersReturned(final int minRequiredAvailable) {
if (minRequiredAvailable > (this.availableMemory.size() + this.writeBehindBuffersAvailable)) {
throw new IllegalArgumentException("More buffers requested available than totally available.");
}
try {
while (this.availableMemory.size() < minRequiredAvailable) {
this.availableMemory.add(this.writeBehindBuffers.take());
this.writeBehindBuffersAvailable--;
}
} catch (InterruptedException iex) {
throw new RuntimeException("Hash Join was interrupted.");
}
} | 3.26 |
flink_MutableHashTable_buildBloomFilterForBucket_rdh | /**
* Set all the bucket memory except bucket header as the bit set of bloom filter, and use hash
* code of build records to build bloom filter.
*/
final void buildBloomFilterForBucket(int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) {
final int count = bucket.getShort(bucketInSegmentPos + HEADER_COUNT_OFFSET);
if (count <= 0) {
return;
}
int[] hashCodes = new int[count];
// As the hash codes and the bloom filter occupy the same bytes, we first read all hash codes
// out and then write the bloom filter back over them.
for (int i = 0; i < count; i++) {
hashCodes[i] = bucket.getInt((bucketInSegmentPos + BUCKET_HEADER_LENGTH) + (i * HASH_CODE_LEN));
}
this.bloomFilter.setBitsLocation(bucket, bucketInSegmentPos + BUCKET_HEADER_LENGTH);
for (int hashCode : hashCodes) {
this.bloomFilter.addHash(hashCode);
}
buildBloomFilterForExtraOverflowSegments(bucketInSegmentPos, bucket, p);
} | 3.26 |
flink_MutableHashTable_getPartitioningFanOutNoEstimates_rdh | /**
* Gets the number of partitions to be used for an initial hash-table, when no estimates are
* available.
*
 * <p>The current logic makes sure that there are always between 10 and 127 partitions, and that
 * the number is close to one tenth of the number of buffers.
*
* @param numBuffers
* The number of buffers available.
* @return The number of partitions to use.
*/
public static int getPartitioningFanOutNoEstimates(int numBuffers) {
return Math.max(10, Math.min(numBuffers / 10, MAX_NUM_PARTITIONS));} | 3.26 |
flink_MutableHashTable_releaseTable_rdh | /**
* Releases the table (the array of buckets) and returns the occupied memory segments to the
* list of free segments.
*/
protected void releaseTable() {
// set the counters back
this.numBuckets = 0;
if (this.buckets != null) {
for (MemorySegment bucket : this.buckets) {
this.availableMemory.add(bucket);
}
this.buckets = null;
}
} | 3.26 |
flink_MutableHashTable_hash_rdh | /**
* The level parameter is needed so that we can have different hash functions when we
* recursively apply the partitioning, so that the working set eventually fits into memory.
*/
public static int hash(int code, int level) {
final int rotation = level * 11;
code = Integer.rotateLeft(code, rotation);
return MathUtils.jenkinsHash(code);
} | 3.26 |
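The level-dependent rotation is what lets the hybrid hash join re-partition a spilled partition: keys that collided at level 0 spread out again at level 1. Below is a sketch of that effect, using a generic avalanche mix as a stand-in for `MathUtils.jenkinsHash`.

```java
public class LevelHashSketch {
    static int mix(int h) { // simple avalanche finalizer (illustrative, not Flink's MathUtils)
        h ^= h >>> 16;
        h *= 0x85ebca6b;
        h ^= h >>> 13;
        h *= 0xc2b2ae35;
        h ^= h >>> 16;
        return h;
    }

    static int hash(int code, int level) {
        return mix(Integer.rotateLeft(code, level * 11));
    }

    public static void main(String[] args) {
        int numPartitions = 8;
        // keys that land in partition 0 at level 0 spread across partitions at level 1
        for (int key = 0; key < 200; key++) {
            int p0 = Math.floorMod(hash(key, 0), numPartitions);
            if (p0 == 0) {
                int p1 = Math.floorMod(hash(key, 1), numPartitions);
                System.out.println("key " + key + ": level0 -> 0, level1 -> " + p1);
            }
        }
    }
}
```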
flink_MutableHashTable_getNumWriteBehindBuffers_rdh | // --------------------------------------------------------------------------------------------
// Utility Computational Functions
// --------------------------------------------------------------------------------------------
/**
* Determines the number of buffers to be used for asynchronous write behind. It is currently
* computed as the logarithm of the number of buffers to the base 4, rounded up, minus 2. The
* upper limit for the number of write behind buffers is however set to six.
*
* @param numBuffers
* The number of available buffers.
 * @return The number of write-behind buffers to use.
*/
public static int getNumWriteBehindBuffers(int numBuffers) {
int numIOBufs = ((int) ((Math.log(numBuffers) / Math.log(4)) - 1.5));
return numIOBufs > 6 ? 6 : numIOBufs;
} | 3.26 |
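For intuition, a tiny sketch that evaluates the same formula for a few buffer counts; the cap of six write-behind buffers kicks in for large memory budgets.

```java
public class WriteBehindBuffersSketch {
    static int getNumWriteBehindBuffers(int numBuffers) {
        // same formula as above: roughly log base 4 of the buffer count, minus 1.5, capped at 6
        int numIOBufs = (int) (Math.log(numBuffers) / Math.log(4) - 1.5);
        return numIOBufs > 6 ? 6 : numIOBufs;
    }

    public static void main(String[] args) {
        for (int n : new int[] {32, 64, 256, 1024, 4096, 65536, 1 << 20}) {
            System.out.println(n + " buffers -> " + getNumWriteBehindBuffers(n) + " write-behind buffers");
        }
    }
}
```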
flink_MutableHashTable_getNewInMemoryPartition_rdh | // --------------------------------------------------------------------------------------------
// Setup and Tear Down of Structures
// --------------------------------------------------------------------------------------------
/**
* Returns a new inMemoryPartition object. This is required as a plug for
* ReOpenableMutableHashTable.
*/
protected HashPartition<BT, PT> getNewInMemoryPartition(int number, int recursionLevel) {
return new HashPartition<BT, PT>(this.buildSideSerializer, this.probeSideSerializer, number, recursionLevel, this.availableMemory.remove(this.availableMemory.size() - 1), this, this.segmentSize);
} | 3.26 |
flink_MutableHashTable_moveToNextBucket_rdh | /**
 * Moves to the next bucket. Returns true if it moved to an on-heap bucket, false if it moved to
 * a spilled bucket or there are no more buckets.
*/
private boolean moveToNextBucket() {
scanCount++;
if (scanCount > (f3 - 1)) {
return false;
}
// move to next bucket, update all the current bucket status with new bucket
// information.
final int bucketArrayPos = scanCount >> this.bucketsPerSegmentBits;
final int currentBucketInSegmentOffset = (scanCount & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
MemorySegment currentBucket = this.buckets[bucketArrayPos];
final int partitionNumber = currentBucket.get(currentBucketInSegmentOffset + HEADER_PARTITION_OFFSET);
final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
if (p.isInMemory()) {
setBucket(currentBucket, p.overflowSegments, p, currentBucketInSegmentOffset);
return true;
} else {
return false;}
} | 3.26 |
flink_MutableHashTable_m0_rdh | /**
 * Gets the next buffer to be used with the hash-table, either for an in-memory partition or
 * for the table buckets. This method returns <tt>null</tt> if no more buffers are available;
 * spilling a partition may then free new buffers.
*
* @return The next buffer to be used by the hash-table, or null, if no buffer remains.
*/
final MemorySegment m0() {
// check if the list directly offers memory
int s = this.availableMemory.size();
if (s > 0) {
return this.availableMemory.remove(s - 1);
}
// check if there are write behind buffers that actually are to be used for the hash table
if (this.writeBehindBuffersAvailable > 0) {
// grab at least one, no matter what
MemorySegment toReturn;
try {
toReturn = this.writeBehindBuffers.take();
} catch (InterruptedException iex) {
throw new RuntimeException("Hybrid Hash Join was interrupted while taking a buffer.");
}
this.writeBehindBuffersAvailable--;
// grab as many more buffers as are available directly
MemorySegment currBuff;
while ((this.writeBehindBuffersAvailable > 0) && ((currBuff = this.writeBehindBuffers.poll()) != null)) {
this.availableMemory.add(currBuff);
this.writeBehindBuffersAvailable--;
}
return toReturn;
} else {
// no memory available
return null;
}
} | 3.26 |
flink_MutableHashTable_spillPartition_rdh | // --------------------------------------------------------------------------------------------
// Memory Handling
// --------------------------------------------------------------------------------------------
/**
* Selects a partition and spills it. The number of the spilled partition is returned.
*
* @return The number of the spilled partition.
*/
protected int spillPartition() throws IOException {
// find the largest partition
ArrayList<HashPartition<BT, PT>> partitions = this.partitionsBeingBuilt;
int largestNumBlocks = 0;
int largestPartNum = -1;
for (int i = 0; i < partitions.size(); i++) {
HashPartition<BT, PT> p = partitions.get(i);
if (p.isInMemory() && (p.getNumOccupiedMemorySegments() > largestNumBlocks)) {
largestNumBlocks = p.getNumOccupiedMemorySegments();
largestPartNum = i;
}
}
final HashPartition<BT, PT> p = partitions.get(largestPartNum);
if (useBloomFilters) {
buildBloomFilterForBucketsInPartition(largestPartNum, p);
}
// spill the partition
int numBuffersFreed = p.spillPartition(this.availableMemory, this.ioManager, this.currentEnumerator.next(), this.writeBehindBuffers);
this.writeBehindBuffersAvailable += numBuffersFreed;
// grab as many buffers as are available directly
MemorySegment currBuff;
while ((this.writeBehindBuffersAvailable > 0) && ((currBuff = this.writeBehindBuffers.poll()) != null)) {
this.availableMemory.add(currBuff);
this.writeBehindBuffersAvailable--;
}
return largestPartNum;
} | 3.26 |
flink_MutableHashTable_open_rdh | /**
* Opens the hash join. This method reads the build-side input and constructs the initial hash
* table, gradually spilling partitions that do not fit into memory.
*
* @param buildSide
* Build side input.
* @param probeSide
* Probe side input.
* @param buildOuterJoin
* Whether outer join on build side.
* @throws IOException
* Thrown, if an I/O problem occurs while spilling a partition.
*/
public void open(final MutableObjectIterator<BT> buildSide, final MutableObjectIterator<PT> probeSide, boolean buildOuterJoin) throws IOException {
this.buildSideOuterJoin = buildOuterJoin;
// sanity checks
if (!this.closed.compareAndSet(true, false)) {
throw new IllegalStateException("Hash Join cannot be opened, because it is currently not closed.");
}
// grab the write behind buffers first
for (int i = this.numWriteBehindBuffers; i > 0; --i) {
this.writeBehindBuffers.add(this.availableMemory.remove(this.availableMemory.size() - 1));
}
// open builds the initial table by consuming the build-side input
this.currentRecursionDepth = 0;
buildInitialTable(buildSide);
// the first prober is the probe-side input
this.probeIterator = new ProbeIterator<PT>(probeSide, this.probeSideSerializer.createInstance());
// the bucket iterator can remain constant over the time
this.bucketIterator = new HashBucketIterator<BT, PT>(this.buildSideSerializer, this.recordComparator, f1, buildOuterJoin);
} | 3.26 |
flink_MutableHashTable_close_rdh | /**
* Closes the hash table. This effectively releases all internal structures and closes all open
* files and removes them. The call to this method is valid both as a cleanup after the complete
 * inputs were properly processed, and as a cancellation call, which cleans up all resources
 * that are currently held by the hash join.
 */
public void close() {
// make sure that we close only once
if (!this.closed.compareAndSet(false, true)) {
return;
}
// clear the iterators, so the next call to next() will notice
this.bucketIterator = null;
this.probeIterator = null;
// release the table structure
releaseTable();
// clear the memory in the partitions
clearPartitions();
// clear the current probe side channel, if there is one
if (this.currentSpilledProbeSide != null) {
try {
this.currentSpilledProbeSide.closeAndDelete();
} catch (Throwable t) {
LOG.warn("Could not close and delete the temp file for the current spilled partition probe side.", t);
}
}
// clear the partitions that are still to be done (that have files on disk)
for (int i = 0; i < this.partitionsPending.size(); i++) {
final HashPartition<BT, PT> p = this.partitionsPending.get(i);
p.clearAllMemory(this.availableMemory);
}
// return the write-behind buffers
for (int i = 0; i < (this.numWriteBehindBuffers + this.writeBehindBuffersAvailable); i++) {
try {
this.availableMemory.add(this.writeBehindBuffers.take());
} catch (InterruptedException iex) {
throw new RuntimeException("Hashtable closing was interrupted");
}
}
this.writeBehindBuffersAvailable = 0;
} | 3.26 |
flink_MutableHashTable_assignPartition_rdh | /**
* Assigns a partition to a bucket.
*
* @param bucket
* The bucket to get the partition for.
* @param numPartitions
* The number of partitions.
* @return The partition for the bucket.
*/
public static byte assignPartition(int bucket, byte numPartitions) {
return ((byte) (bucket % numPartitions));
} | 3.26 |
flink_MutableHashTable_setBucket_rdh | // update current bucket status.
private void setBucket(MemorySegment bucket, MemorySegment[] overflowSegments, HashPartition<BT, PT> partition, int bucketInSegmentOffset) {
this.bucketSegment = bucket;
this.overflowSegments = overflowSegments;
this.partition = partition;
this.bucketInSegmentOffset = bucketInSegmentOffset;
this.countInSegment = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
this.numInSegment = 0;
// reset probedSet with probedFlags offset in this bucket.
this.probedSet.setMemorySegment(bucketSegment, this.bucketInSegmentOffset + HEADER_PROBED_FLAGS_OFFSET);
} | 3.26 |
flink_MutableHashTable_buildInitialTable_rdh | // ------------------------------------------------------------------------
// Hash Table Building
// ------------------------------------------------------------------------
/**
* Creates the initial hash table. This method sets up partitions, hash index, and inserts the
* data from the given iterator.
*
* @param input
* The iterator with the build side data.
* @throws IOException
* Thrown, if an element could not be fetched and deserialized from the
* iterator, or if serialization fails.
*/
protected void buildInitialTable(final MutableObjectIterator<BT> input) throws IOException {
// create the partitions
final int partitionFanOut = getPartitioningFanOutNoEstimates(this.availableMemory.size());
if (partitionFanOut > MAX_NUM_PARTITIONS) {
throw new RuntimeException("Hash join partitions estimate exeeds maximum number of partitions.");
}
createPartitions(partitionFanOut, 0);
// set up the table structure. the write behind buffers are taken away, as are one buffer
// per partition
final int numBuckets = getInitialTableSize(this.availableMemory.size(), this.segmentSize, partitionFanOut, this.avgRecordLen);
initTable(numBuckets, ((byte) (partitionFanOut)));
final TypeComparator<BT> buildTypeComparator = this.buildSideComparator;
BT record = this.buildSideSerializer.createInstance();
// go over the complete input and insert every element into the hash table
while (this.running && ((record = input.next(record)) != null)) {
final int hashCode = hash(buildTypeComparator.hash(record), 0);
insertIntoTable(record, hashCode);
}
if (!this.running) {
return;
}
// finalize the partitions
for (int i = 0; i < this.partitionsBeingBuilt.size(); i++) {
HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i);
p.finalizeBuildPhase(this.ioManager, this.currentEnumerator, this.writeBehindBuffers);
}
} | 3.26 |
flink_MutableHashTable_clearPartitions_rdh | /**
* This method clears all partitions currently residing (partially) in memory. It releases all
* memory and deletes all spilled partitions.
*
* <p>This method is intended for a hard cleanup in the case that the join is aborted.
*/
protected void clearPartitions() {
for (int v104 = this.partitionsBeingBuilt.size() - 1; v104 >= 0; --v104) {
final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(v104);
try {
p.clearAllMemory(this.availableMemory);
} catch (Exception e) {
LOG.error("Error during partition cleanup.", e);
}
}
this.partitionsBeingBuilt.clear();
} | 3.26 |
flink_MutableHashTable_nextSegment_rdh | /**
* This is the method called by the partitions to request memory to serialize records. It
* automatically spills partitions, if memory runs out.
*
* @return The next available memory segment.
*/
@Override
public MemorySegment nextSegment() {
final MemorySegment seg = m0();
if (seg != null) {
return seg;
} else {
try {
spillPartition();
} catch (IOException ioex) {
throw new RuntimeException("Error spilling Hash Join Partition" + (ioex.getMessage() == null ? "." : ": " + ioex.getMessage()), ioex);
}
MemorySegment fromSpill = m0();
if (fromSpill == null) {
throw new RuntimeException("BUG in Hybrid Hash Join: Spilling did not free a buffer.");
} else {
return fromSpill;
}
}
} | 3.26 |
flink_BaseVectorizedColumnReader_nextInt_rdh | /**
 * Always reads zero.
 */
protected static final class NullIntIterator implements IntIterator {
@Override
public int nextInt() {
return 0;
} | 3.26 |
flink_FreeingBufferRecycler_recycle_rdh | /**
* Frees the given memory segment.
*
* @param memorySegment
* The memory segment to be recycled.
*/
@Override
public void recycle(MemorySegment memorySegment) {
memorySegment.free();
} | 3.26 |
flink_FlinkSemiAntiJoinFilterTransposeRule_onMatch_rdh | // implement RelOptRule
public void onMatch(RelOptRuleCall call) {
LogicalJoin join = call.rel(0);
LogicalFilter filter = call.rel(1);
RelNode newJoin = LogicalJoin.create(filter.getInput(), join.getRight(), join.getHints(), join.getCondition(), join.getVariablesSet(), join.getJoinType());
final RelFactories.FilterFactory factory = RelFactories.DEFAULT_FILTER_FACTORY;
RelNode newFilter = factory.createFilter(newJoin, filter.getCondition(), ImmutableSet.of());
call.transformTo(newFilter);
} | 3.26 |
flink_FlinkSemiAntiJoinFilterTransposeRule_matches_rdh | // ~ Methods ----------------------------------------------------------------
@Override
public boolean matches(RelOptRuleCall call) {
LogicalJoin join = call.rel(0);
return (join.getJoinType() == JoinRelType.SEMI) || (join.getJoinType() == JoinRelType.ANTI);
} | 3.26 |
flink_ExternalResourceUtils_getExternalResourceAmountMap_rdh | /**
 * Gets the map of resource name to amount for all enabled external resources.
*/
@VisibleForTesting
static Map<String, Long> getExternalResourceAmountMap(Configuration config) {
final Set<String> resourceSet = getExternalResourceSet(config);
if (resourceSet.isEmpty()) {
return Collections.emptyMap();
}
final Map<String, Long> externalResourceAmountMap = new HashMap<>();
for (String resourceName : resourceSet) {
final ConfigOption<Long> amountOption = key(ExternalResourceOptions.getAmountConfigOptionForResource(resourceName)).longType().noDefaultValue();
final Optional<Long> amountOpt = config.getOptional(amountOption);
if (!amountOpt.isPresent()) {
LOG.warn("The amount of the {} should be configured. Will ignore that resource.", resourceName);
} else if (amountOpt.get() <= 0) {
LOG.warn("The amount of the {} should be positive while finding {}. Will ignore that resource.", amountOpt.get(), resourceName);
} else {
externalResourceAmountMap.put(resourceName, amountOpt.get());
}
}
return externalResourceAmountMap;
} | 3.26 |
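A simplified sketch of the same validation using a plain `Map<String, String>` in place of the Flink `Configuration`; the key format and warning texts here are assumptions for illustration only.

```java
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class ResourceAmountSketch {
    static Map<String, Long> parseAmounts(Map<String, String> config, Iterable<String> resourceNames) {
        Map<String, Long> amounts = new LinkedHashMap<>();
        for (String name : resourceNames) {
            String raw = config.get("external-resource." + name + ".amount"); // illustrative key format
            if (raw == null) {
                System.err.println("The amount of " + name + " should be configured. Ignoring it.");
                continue;
            }
            long amount = Long.parseLong(raw);
            if (amount <= 0) {
                System.err.println("The amount of " + name + " should be positive. Ignoring it.");
                continue;
            }
            amounts.put(name, amount);
        }
        return amounts;
    }

    public static void main(String[] args) {
        Map<String, String> config = new HashMap<>();
        config.put("external-resource.gpu.amount", "2");
        config.put("external-resource.fpga.amount", "0");
        System.out.println(parseAmounts(config, java.util.Arrays.asList("gpu", "fpga", "ssd")));
        // prints {gpu=2}; fpga and ssd are skipped with warnings
    }
}
```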
flink_ExternalResourceUtils_createStaticExternalResourceInfoProvider_rdh | /**
 * Instantiates a {@link StaticExternalResourceInfoProvider} for all enabled external resources.
*/
@VisibleForTesting
static ExternalResourceInfoProvider createStaticExternalResourceInfoProvider(Map<String, Long> externalResourceAmountMap, Map<String, ExternalResourceDriver> externalResourceDrivers) {
final Map<String, Set<? extends ExternalResourceInfo>> v21 = new HashMap<>();
for (Map.Entry<String, ExternalResourceDriver> v22 : externalResourceDrivers.entrySet()) {
final String resourceName = v22.getKey();
final ExternalResourceDriver externalResourceDriver = v22.getValue();
if (externalResourceAmountMap.containsKey(resourceName)) {
try {
final Set<? extends ExternalResourceInfo> externalResourceInfos;
externalResourceInfos = externalResourceDriver.retrieveResourceInfo(externalResourceAmountMap.get(resourceName));
v21.put(resourceName, externalResourceInfos);
} catch (Exception e) {
LOG.warn("Failed to retrieve information of external resource {}.", resourceName, e);
}
} else {
LOG.warn("Could not found legal amount configuration for {}.", resourceName);
}
} return new StaticExternalResourceInfoProvider(v21);
} | 3.26 |
flink_ExternalResourceUtils_getExternalResourceSet_rdh | /**
* Get the enabled external resource list from configuration.
*/
private static Set<String> getExternalResourceSet(Configuration config) {
if (config.getValue(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST).equals(ExternalResourceOptions.NONE)) {
return Collections.emptySet();
}
return new HashSet<>(config.get(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST));
} | 3.26 |
flink_ExternalResourceUtils_externalResourceDriversFromConfig_rdh | /**
 * Instantiates the {@link ExternalResourceDriver ExternalResourceDrivers} for all enabled
 * external resources. Each {@link ExternalResourceDriver} is mapped to its resource name.
*/
@VisibleForTesting
static Map<String, ExternalResourceDriver> externalResourceDriversFromConfig(Configuration config, PluginManager pluginManager) {
final Set<String> resourceSet = getExternalResourceSet(config);
if (resourceSet.isEmpty()) {
return Collections.emptyMap();
}
final Iterator<ExternalResourceDriverFactory> factoryIterator = pluginManager.load(ExternalResourceDriverFactory.class);
final Map<String, ExternalResourceDriverFactory> externalResourceFactories = new HashMap<>();
factoryIterator.forEachRemaining(externalResourceDriverFactory -> externalResourceFactories.put(externalResourceDriverFactory.getClass().getName(), externalResourceDriverFactory));
final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>();
for (String resourceName : resourceSet) {
final ConfigOption<String> driverClassOption = key(ExternalResourceOptions.getExternalResourceDriverFactoryConfigOptionForResource(resourceName)).stringType().noDefaultValue();
final String driverFactoryClassName = config.getString(driverClassOption);
if (StringUtils.isNullOrWhitespaceOnly(driverFactoryClassName)) {
LOG.warn("Could not find driver class name for {}. Please make sure {} is configured.", resourceName, driverClassOption.key());
continue;
}
ExternalResourceDriverFactory externalResourceDriverFactory = externalResourceFactories.get(driverFactoryClassName);
if (externalResourceDriverFactory != null) {
DelegatingConfiguration delegatingConfiguration = new DelegatingConfiguration(config, ExternalResourceOptions.getExternalResourceParamConfigPrefixForResource(resourceName));
try {
externalResourceDrivers.put(resourceName, externalResourceDriverFactory.createExternalResourceDriver(delegatingConfiguration));
LOG.info("Add external resources driver for {}.", resourceName);
} catch (Exception e) {
LOG.warn("Could not instantiate driver with factory {} for {}. {}", driverFactoryClassName, resourceName, e);
}
} else {
LOG.warn("Could not find factory class {} for {}.", driverFactoryClassName, resourceName);
}
}
return externalResourceDrivers;
} | 3.26 |
flink_ExternalResourceUtils_createStaticExternalResourceInfoProviderFromConfig_rdh | /**
 * Instantiates a {@link StaticExternalResourceInfoProvider} for all enabled external resources.
 */
public static ExternalResourceInfoProvider createStaticExternalResourceInfoProviderFromConfig(Configuration configuration, PluginManager pluginManager) {
final Map<String, Long> externalResourceAmountMap = getExternalResourceAmountMap(configuration);
LOG.info("Enabled external resources: {}", externalResourceAmountMap.keySet());
return createStaticExternalResourceInfoProvider(externalResourceAmountMap, externalResourceDriversFromConfig(configuration, pluginManager));
} | 3.26 |
flink_ExternalResourceUtils_m0_rdh | /**
* Get the collection of all enabled external resources.
*/
public static Collection<ExternalResource> m0(Configuration config) {
return getExternalResourceAmountMap(config).entrySet().stream().map(entry -> new ExternalResource(entry.getKey(), entry.getValue())).collect(Collectors.toList());
} | 3.26 |
flink_ExternalResourceUtils_generateExternalResourcesString_rdh | /**
* Generate the string expression of the given external resources.
*/
public static String generateExternalResourcesString(Collection<ExternalResource> extendedResources) {
return extendedResources.stream().map(resource -> (resource.getName() + "=") + resource.getValue()).collect(Collectors.joining(", "));
} | 3.26 |
flink_ExternalSorterBuilder_build_rdh | /**
* Creates a push-based {@link PushSorter}. The {@link PushSorter#getIterator()} will return
* when the {@link PushSorter#finishReading()} is called. Will spawn two threads: sort, spill.
*/
public PushSorter<T> build() throws MemoryAllocationException {
PushFactory<T> pushFactory = new PushFactory<>();
ExternalSorter<T> tExternalSorter = doBuild(pushFactory);
return new PushSorter<T>() {
private final SorterInputGateway<T> recordProducer = pushFactory.sorterInputGateway;
@Override
public void writeRecord(T record) throws IOException, InterruptedException {
recordProducer.writeRecord(record);
}
@Override
public void finishReading() {
recordProducer.finishReading();
}
@Override
public MutableObjectIterator<T> getIterator() throws InterruptedException {
return tExternalSorter.getIterator();
}
@Override
public void close() {
tExternalSorter.close();
}
};
} | 3.26 |
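Conceptually, `build()` adapts a push-style producer to the pull-style `MutableObjectIterator`. Below is a bare-bones sketch of that push-to-pull adaptation with a blocking queue and an end-of-input sentinel; it omits sorting and spilling entirely and is not the Flink implementation.

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PushToPullSketch<T> {
    private final BlockingQueue<Object> queue = new LinkedBlockingQueue<>();
    private static final Object END = new Object(); // end-of-input sentinel

    public void writeRecord(T record) throws InterruptedException {
        queue.put(record); // push side
    }

    public void finishReading() throws InterruptedException {
        queue.put(END); // signal that no more records will arrive
    }

    @SuppressWarnings("unchecked")
    public T next() throws InterruptedException {
        Object o = queue.take(); // pull side
        return o == END ? null : (T) o;
    }

    public static void main(String[] args) throws InterruptedException {
        PushToPullSketch<String> sorter = new PushToPullSketch<>();
        sorter.writeRecord("b");
        sorter.writeRecord("a");
        sorter.finishReading();
        for (String s; (s = sorter.next()) != null; ) {
            System.out.println(s);
        }
    }
}
```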
flink_FileSystemBlobStore_put_rdh | // - Put ------------------------------------------------------------------
@Override
public boolean put(File localFile, JobID jobId, BlobKey blobKey) throws IOException {
createBasePathIfNeeded();
String toBlobPath = BlobUtils.getStorageLocationPath(basePath, jobId, blobKey);
try (FSDataOutputStream os = fileSystem.create(new Path(toBlobPath), WriteMode.OVERWRITE)) {
LOG.debug("Copying from {} to {}.", localFile, toBlobPath);
Files.copy(localFile, os);
os.sync();
}
return true;
} | 3.26 |
flink_FileSystemBlobStore_delete_rdh | // - Delete ---------------------------------------------------------------
@Override
public boolean delete(JobID jobId, BlobKey blobKey) {
return delete(BlobUtils.getStorageLocationPath(basePath, jobId, blobKey));
} | 3.26 |
flink_FileSystemBlobStore_get_rdh | // - Get ------------------------------------------------------------------
@Override
public boolean get(JobID jobId, BlobKey blobKey, File localFile) throws IOException {return get(BlobUtils.getStorageLocationPath(basePath, jobId, blobKey), localFile, blobKey);} | 3.26 |
flink_KryoSerializer_getKryoInstance_rdh | // --------------------------------------------------------------------------------------------
/**
* Returns the Chill Kryo Serializer which is implicitly added to the classpath via
* flink-runtime. Falls back to the default Kryo serializer if it can't be found.
*
* @return The Kryo serializer instance.
*/
private Kryo getKryoInstance() {
try {
// check if ScalaKryoInstantiator is in class path (coming from Twitter's Chill
// library).
// This will be true if Flink's Scala API is used.
Class<?> chillInstantiatorClazz = Class.forName("org.apache.flink.runtime.types.FlinkScalaKryoInstantiator");
Object chillInstantiator = chillInstantiatorClazz.newInstance();
// obtain a Kryo instance through Twitter Chill
Method m = chillInstantiatorClazz.getMethod("newKryo");
return ((Kryo) (m.invoke(chillInstantiator)));
} catch (ClassNotFoundException | InstantiationException | NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
if (LOG.isDebugEnabled()) {
LOG.info("Kryo serializer scala extensions are not available.", e);
} else {
LOG.info("Kryo serializer scala extensions are not available.");
}
Kryo.DefaultInstantiatorStrategy initStrategy = new Kryo.DefaultInstantiatorStrategy();
initStrategy.setFallbackInstantiatorStrategy(new StdInstantiatorStrategy());
Kryo kryo = new Kryo();
kryo.setInstantiatorStrategy(initStrategy);
if (flinkChillPackageRegistrar != null) {
flinkChillPackageRegistrar.registerSerializers(kryo);
}
return kryo;
}} | 3.26 |
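The method above is an instance of the "optional dependency via reflection" pattern: probe for a class that may not be on the classpath and fall back to a default when it is missing. A generic sketch follows, with a deliberately non-existent class name so the fallback path runs.

```java
import java.lang.reflect.Method;

public class OptionalDependencySketch {
    static Object newInstanceOrFallback() {
        try {
            // hypothetical extension class that may or may not be on the classpath
            Class<?> clazz = Class.forName("com.example.optional.FancyFactory");
            Object factory = clazz.getDeclaredConstructor().newInstance();
            Method m = clazz.getMethod("create");
            return m.invoke(factory);
        } catch (ReflectiveOperationException e) {
            System.out.println("Optional extension not available, using default.");
            return new StringBuilder("default instance");
        }
    }

    public static void main(String[] args) {
        System.out.println(newInstanceOrFallback());
    }
}
```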
flink_KryoSerializer_readObject_rdh | // --------------------------------------------------------------------------------------------
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
// kryoRegistrations may be null if this Kryo serializer is deserialized from an old version
if (kryoRegistrations == null) {
this.kryoRegistrations = buildKryoRegistrations(type, registeredTypes, registeredTypesWithSerializerClasses, registeredTypesWithSerializers);
}
} | 3.26 |
flink_KryoSerializer_isImmutableType_rdh | // ------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
} | 3.26 |
flink_KryoSerializer_buildKryoRegistrations_rdh | // --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
* Utility method that takes lists of registered types and their serializers, and resolve them
* into a single list such that the result will resemble the final registration result in Kryo.
*/
private static LinkedHashMap<String, KryoRegistration> buildKryoRegistrations(Class<?> serializedType, LinkedHashSet<Class<?>> registeredTypes, LinkedHashMap<Class<?>, Class<? extends Serializer<?>>> registeredTypesWithSerializerClasses, LinkedHashMap<Class<?>, ExecutionConfig.SerializableSerializer<?>> registeredTypesWithSerializers) {
final LinkedHashMap<String, KryoRegistration> kryoRegistrations = new LinkedHashMap<>();
kryoRegistrations.put(serializedType.getName(), new KryoRegistration(serializedType));
for (Class<?> registeredType : checkNotNull(registeredTypes)) {
kryoRegistrations.put(registeredType.getName(), new KryoRegistration(registeredType));
}
for (Map.Entry<Class<?>, Class<? extends Serializer<?>>> registeredTypeWithSerializerClassEntry : checkNotNull(registeredTypesWithSerializerClasses).entrySet()) {
kryoRegistrations.put(registeredTypeWithSerializerClassEntry.getKey().getName(), new KryoRegistration(registeredTypeWithSerializerClassEntry.getKey(), registeredTypeWithSerializerClassEntry.getValue()));
}
for (Map.Entry<Class<?>, ExecutionConfig.SerializableSerializer<?>> registeredTypeWithSerializerEntry : checkNotNull(registeredTypesWithSerializers).entrySet()) {
kryoRegistrations.put(registeredTypeWithSerializerEntry.getKey().getName(), new KryoRegistration(registeredTypeWithSerializerEntry.getKey(), registeredTypeWithSerializerEntry.getValue()));
}
// add Avro support if flink-avro is available; a dummy otherwise
AvroUtils.getAvroUtils().addAvroGenericDataArrayRegistration(kryoRegistrations);
return kryoRegistrations;
} | 3.26 |
flink_KryoSerializer_m1_rdh | // --------------------------------------------------------------------------------------------
@Override
public int m1() {
int result = type.hashCode();
result = (31 * result) + kryoRegistrations.hashCode();
result = (31 * result) + defaultSerializers.hashCode();
result = (31 * result) + defaultSerializerClasses.hashCode();
return result;
} | 3.26 |
flink_KryoSerializer_enterExclusiveThread_rdh | // --------------------------------------------------------------------------------------------
// For testing
// --------------------------------------------------------------------------------------------
private void enterExclusiveThread() {
// we use simple get, check, set here, rather than CAS
// we don't need lock-style correctness, this is only a sanity-check and we thus
// favor speed at the cost of some false negatives in this check
Thread previous = currentThread;
Thread thisThread = Thread.currentThread();
if (previous == null) {
currentThread = thisThread;
} else if (previous != thisThread) {
throw new IllegalStateException((("Concurrent access to KryoSerializer. Thread 1: " + thisThread.getName()) + " , Thread 2: ") + previous.getName());
}
} | 3.26 |
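A sketch of the same lightweight exclusive-access check in isolation: a plain read/compare/write rather than a CAS, which can miss some races but cheaply catches most accidental cross-thread use of a non-thread-safe object. The enter/exit names are illustrative.

```java
public class ExclusiveThreadCheckSketch {
    private Thread currentThread;

    void enter() {
        Thread previous = currentThread;
        Thread thisThread = Thread.currentThread();
        if (previous == null) {
            currentThread = thisThread; // first user claims the object
        } else if (previous != thisThread) {
            throw new IllegalStateException(
                    "Concurrent access detected. Thread 1: " + thisThread.getName()
                            + ", Thread 2: " + previous.getName());
        }
    }

    void exit() {
        currentThread = null; // release the claim
    }

    public static void main(String[] args) {
        ExclusiveThreadCheckSketch check = new ExclusiveThreadCheckSketch();
        check.enter();
        // ... use the non-thread-safe object ...
        check.exit();
    }
}
```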
flink_KryoSerializer_snapshotConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<T> snapshotConfiguration() {
return new KryoSerializerSnapshot<>(type, defaultSerializers, defaultSerializerClasses, kryoRegistrations);
} | 3.26 |
flink_TtlUtils_expired_rdh | /**
* Common functions related to State TTL.
 */
public class TtlUtils {
static <V> boolean expired(@Nullable TtlValue<V> ttlValue, long ttl, TtlTimeProvider timeProvider) {
return expired(ttlValue, ttl, timeProvider.currentTimestamp());
} | 3.26 |
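A minimal sketch of a TTL expiry check in the spirit of this helper: a value is expired once its stored timestamp plus the TTL is not after "now". The overflow guard and the exact comparison are assumptions of this sketch, not necessarily Flink's implementation.

```java
public class TtlCheckSketch {
    static boolean expired(long lastAccessTimestamp, long ttl, long currentTimestamp) {
        long expirationTime = lastAccessTimestamp + ttl;
        boolean overflowed = expirationTime < lastAccessTimestamp; // ttl pushed past Long.MAX_VALUE
        return !overflowed && expirationTime <= currentTimestamp;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(expired(now - 10_000, 5_000, now));  // true: 10s old, 5s TTL
        System.out.println(expired(now - 1_000, 5_000, now));   // false: still fresh
        System.out.println(expired(now, Long.MAX_VALUE, now));  // false: effectively never expires
    }
}
```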
flink_ProjectOperator_projectTuple14_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> ProjectOperator<T, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> projectTuple14() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> tType = new TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(fTypes);
return new ProjectOperator<T, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple6_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5> ProjectOperator<T, Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType = new
TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes);
return new ProjectOperator<T, Tuple6<T0, T1, T2, T3, T4, T5>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple25_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> ProjectOperator<T, Tuple25<T0, T1, T2, T3,
T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> projectTuple25() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> tType = new TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(fTypes);
return new ProjectOperator<T, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
T16, T17, T18, T19, T20, T21, T22, T23, T24>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple21_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
T16, T17, T18, T19, T20> ProjectOperator<T, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> projectTuple21() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> tType = new TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(fTypes);
return new ProjectOperator<T,
Tuple21<T0, T1,
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple2_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1> ProjectOperator<T, Tuple2<T0, T1>> projectTuple2() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple2<T0, T1>> tType
= new TupleTypeInfo<Tuple2<T0, T1>>(fTypes);
return new ProjectOperator<T,
Tuple2<T0, T1>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple18_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> ProjectOperator<T, Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> projectTuple18() {
TypeInformation<?>[] v42 = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
T13, T14, T15, T16, T17>> tType = new TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(v42);
return new ProjectOperator<T, Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple19_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> ProjectOperator<T, Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>> projectTuple19() {TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
T10, T11, T12, T13, T14, T15, T16, T17, T18>> tType = new TupleTypeInfo<Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>>(fTypes);
return new ProjectOperator<T, Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>>(this.ds, this.fieldIndexes, tType);} | 3.26 |
flink_ProjectOperator_projectTuple15_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> ProjectOperator<T, Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> projectTuple15() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> tType = new TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(fTypes);
return new ProjectOperator<T,
Tuple15<T0, T1, T2, T3, T4,
T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple24_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> ProjectOperator<T, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> projectTuple24() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple24<T0,
T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> tType = new TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>(fTypes);
return new ProjectOperator<T, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple7_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6> ProjectOperator<T, Tuple7<T0, T1, T2, T3, T4, T5, T6>> projectTuple7() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes,
ds.getType());
TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>> tType = new
TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>>(fTypes);
return new
ProjectOperator<T, Tuple7<T0, T1, T2, T3, T4, T5, T6>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple3_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2> ProjectOperator<T, Tuple3<T0, T1, T2>> projectTuple3() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes);
    return new ProjectOperator<T, Tuple3<T0, T1, T2>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple20_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> ProjectOperator<T, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> projectTuple20() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> tType = new TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(fTypes);
    return new ProjectOperator<T, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple11_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ProjectOperator<T, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> projectTuple11() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> tType = new TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(fTypes);
    return new ProjectOperator<T, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTupleX_rdh | // --------------------------------------------------------------------------------------------
// The following lines are generated.
// --------------------------------------------------------------------------------------------
// BEGIN_OF_TUPLE_DEPENDENT_CODE
// GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator.
/**
* Chooses a projectTupleX according to the length of {@link org.apache.flink.api.java.operators.ProjectOperator.Projection#fieldIndexes}.
*
* @return The projected DataSet.
* @see org.apache.flink.api.java.operators.ProjectOperator.Projection
*/
@SuppressWarnings("unchecked")
public <OUT extends Tuple> ProjectOperator<T, OUT> projectTupleX() {
ProjectOperator<T, OUT> projOperator;
        switch (fieldIndexes.length) {
            case 1 :
                projOperator = ((ProjectOperator<T, OUT>) (projectTuple1()));
break;
case 2 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple2()));
break;
case 3 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple3()));
break;
case 4 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple4()));
break;
case 5 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple5()));
break;
case 6 :
                projOperator = ((ProjectOperator<T, OUT>) (projectTuple6()));
break;
case 7 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple7()));
break;
case 8 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple8()));
break;
case 9 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple9()));
break;
case 10 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple10()));
break;
case 11 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple11()));
break;
case 12 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple12()));
break;
            case 13 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple13()));
break;
case 14 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple14()));
break;
case 15 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple15()));
break;
case 16 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple16()));
break;
case 17 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple17()));
break;
case 18 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple18()));
break;
case 19 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple19()));
break;
case 20 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple20()));
break;
case 21 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple21()));
break;
case 22 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple22()));
break;
            case 23 :
                projOperator = ((ProjectOperator<T, OUT>) (projectTuple23()));
break;
case 24 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple24()));
break;
case 25 :
projOperator = ((ProjectOperator<T, OUT>) (projectTuple25()));
break;
default :
throw new IllegalStateException("Excessive arity in tuple.");
}
return projOperator;
}
/**
* Projects a {@link Tuple} {@link DataSet} | 3.26 |
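Note: the projectTupleX() dispatcher above is normally reached through DataSet#project(int...), which picks the matching arity from the number of field indexes and infers the result tuple type from the assignment target. The following is a minimal, hypothetical usage sketch, not taken from the dataset row itself; the input elements and their field types are assumptions made purely for illustration.

// Illustrative usage sketch (assumed example data): projecting two of three fields,
// so the projection resolves to a Tuple2 result type.
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;

public class ProjectionUsageSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Tuple3<Integer, String, Double>> input =
                env.fromElements(Tuple3.of(1, "a", 1.5), Tuple3.of(2, "b", 2.5));

        // Two field indexes are selected (field 1 first, then field 0),
        // so the result is a DataSet of Tuple2<String, Integer>.
        DataSet<Tuple2<String, Integer>> projected = input.project(1, 0);

        projected.print();
    }
}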
flink_ProjectOperator_projectTuple9_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8> ProjectOperator<T, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> projectTuple9() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> tType = new TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fTypes);
    return new ProjectOperator<T, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(this.ds, this.fieldIndexes, tType);
}
/**
* Projects a {@link Tuple} {@link DataSet} | 3.26 |
flink_ProjectOperator_types_rdh | /**
*
* @deprecated Deprecated method only kept for compatibility.
*/
@SuppressWarnings("unchecked")
@Deprecated
@PublicEvolving
public <R extends Tuple> ProjectOperator<IN, R> types(Class<?>... types) {
TupleTypeInfo<R> typeInfo = ((TupleTypeInfo<R>) (this.getResultType()));
    if (types.length != typeInfo.getArity()) {
        throw new InvalidProgramException("Provided types do not match projection.");
    }
for (int i = 0; i < types.length; i++) {
Class<?> typeClass = types[i];
if (!typeClass.equals(typeInfo.getTypeAt(i).getTypeClass())) {
            throw new InvalidProgramException(((("Provided type " + typeClass.getSimpleName()) + " at position ") + i) + " does not match projection");
}
    }
    return ((ProjectOperator<IN, R>) (this));
}
/**
* A projection of {@link DataSet} | 3.26 |
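For context, the deprecated types(...) method shown above comes from the legacy projection style, where the result classes were declared explicitly after project(...); today it only re-asserts the already inferred result type and throws InvalidProgramException on a mismatch. A hedged compatibility sketch, reusing the env and input dataset from the earlier usage sketch (the field classes below are assumptions for illustration):

// Fragment continuing the ProjectionUsageSketch above: the declared classes match the
// projected fields (String at index 1, Integer at index 0), so the arity and
// per-position checks in types(...) pass and the operator is returned unchanged.
DataSet<Tuple2<String, Integer>> out =
        input.project(1, 0).types(String.class, Integer.class);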
flink_ProjectOperator_projectTuple17_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> ProjectOperator<T, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> projectTuple17() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> tType = new TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(fTypes);
return new ProjectOperator<T, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple5_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4> ProjectOperator<T, Tuple5<T0, T1, T2, T3, T4>> projectTuple5() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> tType = new TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>>(fTypes);
return new ProjectOperator<T, Tuple5<T0, T1, T2, T3, T4>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple22_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> ProjectOperator<T, Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>> projectTuple22() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>> tType = new TupleTypeInfo<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>>(fTypes);
return new ProjectOperator<T, Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple13_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> ProjectOperator<T, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> projectTuple13() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> tType = new TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(fTypes);
    return new ProjectOperator<T, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple16_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> ProjectOperator<T, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> projectTuple16() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> tType = new TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(fTypes);
return new ProjectOperator<T, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple4_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3> ProjectOperator<T, Tuple4<T0, T1, T2, T3>> projectTuple4() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType = new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes);
return new ProjectOperator<T, Tuple4<T0, T1, T2, T3>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_ProjectOperator_projectTuple23_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> ProjectOperator<T, Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> projectTuple23() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> tType = new TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(fTypes);
return new ProjectOperator<T, Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
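Taken together, every projectTupleX variant in these rows follows the same recipe: extract the per-field TypeInformation for the selected indexes, wrap it in a TupleTypeInfo of the matching arity, and construct the ProjectOperator with it. A minimal, standalone sketch of that type-construction step (the concrete field types are assumptions chosen for illustration):

// Standalone sketch of the shared pattern: build a TupleTypeInfo from per-field
// TypeInformation, as the projectTupleX methods do with extractFieldTypes(...).
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.TupleTypeInfo;

public class TupleTypeInfoSketch {
    public static void main(String[] args) {
        // Stand-in for the result of extractFieldTypes(fieldIndexes, ds.getType()).
        TypeInformation<?>[] fieldTypes = {
                BasicTypeInfo.STRING_TYPE_INFO,
                BasicTypeInfo.INT_TYPE_INFO
        };

        TupleTypeInfo<Tuple2<String, Integer>> tType = new TupleTypeInfo<>(fieldTypes);

        System.out.println(tType.getArity());   // prints the arity: 2
        System.out.println(tType.getTypeAt(0)); // prints the first field's type info
    }
}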