name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
flink_ResultPartitionFactory_createBufferPoolFactory_rdh | /**
* The minimum pool size should be <code>numberOfSubpartitions + 1</code> for two
* considerations:
*
 * <p>1. A StreamTask can only process input if at least one buffer is available on the output
 * side, so the task could get stuck if the minimum pool size were exactly equal to the number
 * of subpartitions, because every subpartition might hold a partially filled buffer.
 *
 * <p>2. One extra buffer per output LocalBufferPool avoids a performance regression when
 * processing input depends on at least one buffer being available on the output side.
*/
@VisibleForTesting
SupplierWithException<BufferPool, IOException> createBufferPoolFactory(int numberOfSubpartitions, ResultPartitionType type) {
return () -> {
Pair<Integer, Integer> pair = NettyShuffleUtils.getMinMaxNetworkBuffersPerResultPartition(configuredNetworkBuffersPerChannel, floatingNetworkBuffersPerGate, sortShuffleMinParallelism, sortShuffleMinBuffers, numberOfSubpartitions, tieredStorage.isPresent(), tieredStorage.map(storage -> storage.getTieredStorageConfiguration().getTotalExclusiveBufferNum()).orElse(0), type);
return bufferPoolFactory.createBufferPool(pair.getLeft(), pair.getRight(), numberOfSubpartitions, maxBuffersPerChannel, isOverdraftBufferNeeded(type) ? maxOverdraftBuffersPerGate : 0);
};
} | 3.26 |
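As a quick illustration of the sizing rule described in the Javadoc above, the following standalone sketch computes the minimum pool size. The class and method names are hypothetical; it does not reproduce the max-size formula of NettyShuffleUtils.

```java
// Hypothetical, minimal sketch of the "numberOfSubpartitions + 1" rule described above.
// It is not Flink's NettyShuffleUtils; names and the demo values are assumptions.
final class BufferPoolSizingSketch {
    static int minBuffersForResultPartition(int numberOfSubpartitions) {
        // One buffer per subpartition (each may hold a partially filled buffer),
        // plus one extra buffer so the task can keep processing input.
        return numberOfSubpartitions + 1;
    }

    public static void main(String[] args) {
        System.out.println(minBuffersForResultPartition(4)); // prints 5
    }
}
```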
flink_SchedulerBase_startScheduling_rdh | // ------------------------------------------------------------------------
// SchedulerNG
// ------------------------------------------------------------------------
@Override
public final void startScheduling() {
mainThreadExecutor.assertRunningInMainThread();
registerJobMetrics(jobManagerJobMetricGroup, executionGraph, this::getNumberOfRestarts, deploymentStateTimeMetrics, executionGraph::registerJobStatusListener, executionGraph.getStatusTimestamp(JobStatus.INITIALIZING), jobStatusMetricsSettings);
operatorCoordinatorHandler.startAllOperatorCoordinators();
startSchedulingInternal();
} | 3.26 |
flink_SchedulerBase_getExecutionGraph_rdh | /**
* ExecutionGraph is exposed to make it easier to rework tests to be based on the new scheduler.
 * ExecutionGraph is expected to be used only for state checks. Yet at the moment, before all the
 * actions are factored out from ExecutionGraph and its sub-components, some actions may still
 * be performed directly on it.
*/
@VisibleForTesting
public ExecutionGraph getExecutionGraph() {
return executionGraph;
} | 3.26 |
flink_SchedulerBase_computeVertexParallelismStore_rdh | /**
* Compute the {@link VertexParallelismStore} for all vertices of a given job graph, which will
* set defaults and ensure that the returned store contains valid parallelisms.
*
* @param jobGraph
* the job graph to retrieve vertices from
* @return the computed parallelism store
*/
public static VertexParallelismStore computeVertexParallelismStore(JobGraph jobGraph) {
return computeVertexParallelismStore(jobGraph.getVertices());
} | 3.26 |
flink_SchedulerBase_getJobId_rdh | // ------------------------------------------------------------------------
// access utils for testing
// ------------------------------------------------------------------------
@VisibleForTesting
JobID getJobId() {
return f0.getJobID();
} | 3.26 |
flink_SchedulerBase_getDefaultMaxParallelism_rdh | /**
* Get a default value to use for a given vertex's max parallelism if none was specified.
*
* @param vertex
* the vertex to compute a default max parallelism for
* @return the computed max parallelism
*/
public static int getDefaultMaxParallelism(JobVertex vertex) {
return KeyGroupRangeAssignment.computeDefaultMaxParallelism(normalizeParallelism(vertex.getParallelism()));
} | 3.26 |
flink_SchedulerBase_deliverOperatorEventToCoordinator_rdh | // Note: It may be worthwhile to move the OperatorCoordinators out
// of the scheduler (have them owned by the JobMaster directly).
// Then we could avoid routing these events through the scheduler and
// doing this lazy initialization dance. However, this would require
// that the Scheduler does not eagerly construct the CheckpointCoordinator
// in the ExecutionGraph and does not eagerly restore the savepoint while
// doing that. Because during savepoint restore, the OperatorCoordinators
// (or at least their holders) already need to exist, to accept the restored
// state. But some components they depend on (Scheduler and MainThreadExecutor)
// are not fully usable and accessible at that point.
// ------------------------------------------------------------------------
@Override
public void deliverOperatorEventToCoordinator(final ExecutionAttemptID taskExecutionId, final OperatorID operatorId, final OperatorEvent evt) throws FlinkException {
operatorCoordinatorHandler.deliverOperatorEventToCoordinator(taskExecutionId, operatorId, evt);
} | 3.26 |
flink_StreamElement_asWatermarkStatus_rdh | /**
* Casts this element into a WatermarkStatus.
*
* @return This element as a WatermarkStatus.
* @throws java.lang.ClassCastException
* Thrown, if this element is actually not a Watermark
* Status.
*/
public final WatermarkStatus asWatermarkStatus() {
return ((WatermarkStatus) (this));
} | 3.26 |
flink_StreamElement_asWatermark_rdh | /**
* Casts this element into a Watermark.
*
* @return This element as a Watermark.
* @throws java.lang.ClassCastException
* Thrown, if this element is actually not a Watermark.
 */
public final Watermark asWatermark() {
return ((Watermark) (this));
} | 3.26 |
flink_StreamElement_isLatencyMarker_rdh | /**
* Checks whether this element is a latency marker.
*
* @return True, if this element is a latency marker, false otherwise.
*/
public final boolean isLatencyMarker() {
return getClass() == LatencyMarker.class;
} | 3.26 |
flink_StreamElement_asRecord_rdh | /**
* Casts this element into a StreamRecord.
*
* @return This element as a stream record.
* @throws java.lang.ClassCastException
* Thrown, if this element is actually not a stream record.
*/
@SuppressWarnings("unchecked")
public final <E> StreamRecord<E> asRecord() {
return ((StreamRecord<E>) (this));
} | 3.26 |
flink_StreamElement_isWatermarkStatus_rdh | /**
* Checks whether this element is a watermark status.
*
* @return True, if this element is a watermark status, false otherwise.
*/
public final boolean isWatermarkStatus() {
return getClass() == WatermarkStatus.class;
} | 3.26 |
flink_StreamElement_asLatencyMarker_rdh | /**
* Casts this element into a LatencyMarker.
*
* @return This element as a LatencyMarker.
* @throws java.lang.ClassCastException
* Thrown, if this element is actually not a LatencyMarker.
*/
public final LatencyMarker asLatencyMarker() {
return ((LatencyMarker) (this));
} | 3.26 |
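The cast and check helpers quoted in the StreamElement rows above are typically combined into a dispatch over element kinds. The sketch below is illustrative only; the imports assume the usual Flink package locations, and the isWatermark() companion check is assumed to exist alongside the methods shown.

```java
// Illustrative dispatch over StreamElement using the helpers shown above.
// Package locations are assumed; the handling logic is a placeholder.
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.runtime.streamrecord.StreamElement;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

final class StreamElementDispatch {
    static String describe(StreamElement element) {
        if (element.isLatencyMarker()) {
            return "latency marker: " + element.asLatencyMarker();
        } else if (element.isWatermarkStatus()) {
            return "watermark status: " + element.asWatermarkStatus();
        } else if (element.isWatermark()) { // companion check, assumed to exist on StreamElement
            Watermark watermark = element.asWatermark();
            return "watermark at " + watermark.getTimestamp();
        } else {
            StreamRecord<Object> record = element.asRecord();
            return "record: " + record.getValue();
        }
    }
}
```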
flink_FileChannelManagerImpl_close_rdh | /**
* Remove all the temp directories.
*/
@Override
public void close() throws Exception {
// Mark as shut down, and return if it has already been shut down.
if (!isShutdown.compareAndSet(false, true)) {
return;
}
IOUtils.closeAll(Arrays.stream(paths).filter(File::exists).map(FileChannelManagerImpl::getFileCloser).collect(Collectors.toList()));
ShutdownHookUtil.removeShutdownHook(shutdownHook, String.format("%s-%s", getClass().getSimpleName(), f0), LOG);
} | 3.26 |
flink_ClusterClient_reportHeartbeat_rdh | /**
 * Reports a heartbeat from the client to the dispatcher to signal that the client is alive.
*
* @param jobId
* The jobId for the client and the job.
 * @return A future that completes once the heartbeat has been reported.
 */
default CompletableFuture<Void> reportHeartbeat(JobID jobId, long expiredTimestamp) {
return FutureUtils.completedVoidFuture();
} | 3.26 |
flink_ClusterClient_getAccumulators_rdh | /**
 * Requests and returns the accumulators for the given job identifier. Accumulators can be
 * requested while a job is running or after it has finished. The default class loader is used
 * to deserialize the incoming accumulator results.
*
* @param jobID
* The job identifier of a job.
* @return A Map containing the accumulator's name and its value.
*/
default CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID) {
return getAccumulators(jobID, ClassLoader.getSystemClassLoader());
} | 3.26 |
flink_ClusterClient_listCompletedClusterDatasetIds_rdh | /**
 * Returns a set of ids of the completed cluster datasets.
 *
 * @return A set of ids of the fully cached intermediate datasets.
 */
default CompletableFuture<Set<AbstractID>> listCompletedClusterDatasetIds() {
return CompletableFuture.completedFuture(Collections.emptySet());
} | 3.26 |
flink_NFA_extractCurrentMatches_rdh | /**
* Extracts all the sequences of events from the start to the given computation state. An event
* sequence is returned as a map which contains the events and the names of the states to which
* the events were mapped.
*
* @param sharedBufferAccessor
* The accessor to {@link SharedBuffer} from which to extract the
* matches
* @param computationState
* The end computation state of the extracted event sequences
* @return Collection of event sequences which end in the given computation state
* @throws Exception
* Thrown if the system cannot access the state.
*/
private Map<String, List<EventId>> extractCurrentMatches(final SharedBufferAccessor<T> sharedBufferAccessor, final ComputationState computationState) throws Exception {
if (computationState.getPreviousBufferEntry() == null) {
return new HashMap<>();
}
List<Map<String, List<EventId>>> paths = sharedBufferAccessor.extractPatterns(computationState.getPreviousBufferEntry(), computationState.getVersion());
if (paths.isEmpty()) {
return new HashMap<>();
}
// For a given computation state, there cannot be more than one matching pattern.
Preconditions.checkState(paths.size() == 1);
return paths.get(0);
} | 3.26 |
flink_NFA_open_rdh | /**
 * Initialization method for the NFA. It is called before any element is passed in and is thus
 * suitable for one-time setup work.
*
* @param cepRuntimeContext
* runtime context of the enclosing operator
* @param conf
* The configuration containing the parameters attached to the contract.
*/
public void open(RuntimeContext cepRuntimeContext, Configuration conf) throws Exception {
for (State<T> state : getStates()) {
for (StateTransition<T> transition : state.getStateTransitions()) {
IterativeCondition condition = transition.getCondition();
FunctionUtils.setFunctionRuntimeContext(condition, cepRuntimeContext);
FunctionUtils.openFunction(condition, DefaultOpenContext.INSTANCE);
}
}
} | 3.26 |
flink_NFA_computeNextStates_rdh | /**
* Computes the next computation states based on the given computation state, the current event,
* its timestamp and the internal state machine. The algorithm is:
*
* <ol>
* <li>Decide on valid transitions and number of branching paths. See {@link OutgoingEdges}
* <li>Perform transitions:
* <ol>
* <li>IGNORE (links in {@link SharedBuffer} will still point to the previous event)
* <ul>
* <li>do not perform for Start State - special case
 * <li>if it stays in the same state, increase the current stage for future use with
 * the number of outgoing edges
 * <li>if after PROCEED, increase the current stage and add a new stage (as we change
 * the state)
* <li>lock the entry in {@link SharedBuffer} as it is needed in the created
* branch
* </ul>
* <li>TAKE (links in {@link SharedBuffer} will point to the current event)
* <ul>
* <li>add entry to the shared buffer with version of the current computation
* state
* <li>add stage and then increase with number of takes for the future computation
* states
* <li>peek to the next state if it has PROCEED path to a Final State, if true
* create Final ComputationState to emit results
* </ul>
* </ol>
 * <li>Handle the Start State, as it always has to remain
* <li>Release the corresponding entries in {@link SharedBuffer}.
* </ol>
*
* @param sharedBufferAccessor
* The accessor to shared buffer that we need to change
* @param computationState
* Current computation state
* @param event
* Current event which is processed
* @param timerService
* timer service which provides access to time related features
* @return Collection of computation states which result from the current one
* @throws Exception
* Thrown if the system cannot access the state.
*/
private Collection<ComputationState> computeNextStates(final SharedBufferAccessor<T> sharedBufferAccessor, final ComputationState computationState, final EventWrapper event, final TimerService timerService) throws Exception {
final ConditionContext context = new ConditionContext(sharedBufferAccessor, computationState, timerService, event.getTimestamp());
final OutgoingEdges<T> outgoingEdges = createDecisionGraph(context, computationState, event.getEvent());
// Create the computing version based on the previously computed edges
// We need to defer the creation of computation states until we know how many edges start
// at this computation state so that we can assign proper version
final List<StateTransition<T>> v37 = outgoingEdges.getEdges();
int takeBranchesToVisit = Math.max(0, outgoingEdges.getTotalTakeBranches() - 1);
int ignoreBranchesToVisit = outgoingEdges.getTotalIgnoreBranches();
int totalTakeToSkip = Math.max(0, outgoingEdges.getTotalTakeBranches() - 1);
final List<ComputationState> resultingComputationStates = new ArrayList<>();
for (StateTransition<T> edge : v37) {
switch (edge.getAction()) {
case IGNORE :
{
if (!isStartState(computationState)) {
final DeweyNumber version;
if (isEquivalentState(edge.getTargetState(), getState(computationState))) {
// Stay in the same state (it can be either looping one or
// singleton)
final int toIncrease = calculateIncreasingSelfState(outgoingEdges.getTotalIgnoreBranches(), outgoingEdges.getTotalTakeBranches());
version = computationState.getVersion().increase(toIncrease);
} else {
// IGNORE after PROCEED
version = computationState.getVersion().increase(totalTakeToSkip + ignoreBranchesToVisit).addStage();
ignoreBranchesToVisit--;
}
addComputationState(sharedBufferAccessor, resultingComputationStates, edge.getTargetState(), computationState.getPreviousBufferEntry(), version, computationState.getStartTimestamp(), computationState.getPreviousTimestamp(), computationState.getStartEventID());
}
}
break;
case TAKE :
final State<T> nextState = edge.getTargetState();
final State<T> v46 = edge.getSourceState();
final NodeId previousEntry = computationState.getPreviousBufferEntry();
final DeweyNumber currentVersion = computationState.getVersion().increase(takeBranchesToVisit);
final DeweyNumber nextVersion = new DeweyNumber(currentVersion).addStage();
takeBranchesToVisit--;
final NodeId newEntry = sharedBufferAccessor.put(v46.getName(), event.getEventId(), previousEntry, currentVersion);
final long startTimestamp;
final EventId startEventId;
if (isStartState(computationState)) {
startTimestamp = event.getTimestamp();
startEventId = event.getEventId();
} else {
startTimestamp = computationState.getStartTimestamp();
startEventId = computationState.getStartEventID();
}
final long previousTimestamp = event.getTimestamp();
addComputationState(sharedBufferAccessor, resultingComputationStates, nextState, newEntry, nextVersion, startTimestamp, previousTimestamp, startEventId);
// check if newly created state is optional (have a PROCEED path to Final state)
final State<T> finalState = findFinalStateAfterProceed(context, nextState, event.getEvent());
if (finalState != null) {
addComputationState(sharedBufferAccessor, resultingComputationStates, finalState, newEntry, nextVersion, startTimestamp, previousTimestamp, startEventId);
}
break;
}
}
if (isStartState(computationState)) {
int v55 = calculateIncreasingSelfState(outgoingEdges.getTotalIgnoreBranches(), outgoingEdges.getTotalTakeBranches());
DeweyNumber startVersion = computationState.getVersion().increase(v55);
ComputationState startState = ComputationState.createStartState(computationState.getCurrentStateName(), startVersion);
resultingComputationStates.add(startState);
}
if (computationState.getPreviousBufferEntry() != null) {
// release the shared entry referenced by the current computation state.
sharedBufferAccessor.releaseNode(computationState.getPreviousBufferEntry(), computationState.getVersion());
}
return resultingComputationStates;
} | 3.26 |
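The TAKE/IGNORE handling above hinges on Dewey-number versions: increase() bumps the version within the current stage, and addStage() opens a new stage when the computation moves to a new state. The following is a minimal standalone sketch of that versioning idea under those assumptions; it is not Flink's DeweyNumber class.

```java
// Illustrative only: a minimal Dewey-number style version, mirroring the increase()/addStage()
// semantics used by the branching logic above. Not Flink's org.apache.flink.cep.nfa.DeweyNumber.
import java.util.Arrays;

final class MiniDeweyNumber {
    private final int[] stages;

    MiniDeweyNumber(int start) { this.stages = new int[] { start }; }
    private MiniDeweyNumber(int[] stages) { this.stages = stages; }

    // Bump the most recent stage, e.g. 1 -> 3 for an increase of 2.
    MiniDeweyNumber increase(int by) {
        int[] copy = Arrays.copyOf(stages, stages.length);
        copy[copy.length - 1] += by;
        return new MiniDeweyNumber(copy);
    }

    // Append a new stage, e.g. 2 -> 2.0, when the computation moves to a new state.
    MiniDeweyNumber addStage() {
        return new MiniDeweyNumber(Arrays.copyOf(stages, stages.length + 1));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < stages.length; i++) {
            if (i > 0) sb.append('.');
            sb.append(stages[i]);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        MiniDeweyNumber v = new MiniDeweyNumber(1);        // version of the current state: 1
        MiniDeweyNumber taken = v.increase(1).addStage();  // TAKE branch: 2.0
        MiniDeweyNumber stayed = v.increase(2);            // IGNORE in the same state: 3
        System.out.println(v + " " + taken + " " + stayed);
    }
}
```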
flink_NFA_close_rdh | /**
* Tear-down method for the NFA.
*/
public void close() throws Exception {
for (State<T> v10 : getStates()) {
for (StateTransition<T> transition : v10.getStateTransitions()) {
IterativeCondition condition = transition.getCondition();
FunctionUtils.closeFunction(condition);
}
}
} | 3.26 |
flink_NFA_advanceTime_rdh | /**
* Prunes states assuming there will be no events with timestamp <b>lower</b> than the given
* one. It clears the sharedBuffer and also emits all timed out partial matches.
*
* @param sharedBufferAccessor
* the accessor to SharedBuffer object that we need to work upon
* while processing
* @param nfaState
* The NFAState object that we need to affect while processing
* @param timestamp
* timestamp that indicates that there will be no more events with lower
* timestamp
 * @return all pending matches and timed-out partial matches
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Tuple2<Collection<Map<String, List<T>>>, Collection<Tuple2<Map<String, List<T>>, Long>>> advanceTime(final SharedBufferAccessor<T> sharedBufferAccessor, final NFAState nfaState, final long timestamp, final AfterMatchSkipStrategy afterMatchSkipStrategy) throws Exception {
final List<Map<String, List<T>>> result = new ArrayList<>();
final Collection<Tuple2<Map<String, List<T>>, Long>> timeoutResult = new ArrayList<>();
final PriorityQueue<ComputationState> newPartialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);
final PriorityQueue<ComputationState> potentialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);
for (ComputationState computationState : nfaState.getPartialMatches()) {
String currentStateName = computationState.getCurrentStateName();
boolean isTimeoutForPreviousEvent = windowTimes.containsKey(currentStateName) && isStateTimedOut(computationState, timestamp, computationState.getPreviousTimestamp(), windowTimes.get(currentStateName));
boolean isTimeoutForFirstEvent = isStateTimedOut(computationState, timestamp, computationState.getStartTimestamp(), windowTime);
if (isTimeoutForPreviousEvent || isTimeoutForFirstEvent) {
nfaState.setStateChanged();
if (getState(computationState).isPending()) {
// save pending states for after-match pruning, where those states will be
// released
potentialMatches.add(computationState);
continue;
}
if (handleTimeout) {
// extract the timed out event pattern
Map<String, List<T>> timedOutPattern = sharedBufferAccessor.materializeMatch(extractCurrentMatches(sharedBufferAccessor, computationState));
timeoutResult.add(Tuple2.of(timedOutPattern, isTimeoutForPreviousEvent ? computationState.getPreviousTimestamp() + windowTimes.get(computationState.getCurrentStateName()) : computationState.getStartTimestamp() + windowTime));
}
// release timeout states
sharedBufferAccessor.releaseNode(computationState.getPreviousBufferEntry(), computationState.getVersion());
} else {
newPartialMatches.add(computationState);
}
}
// If a timeout partial match "frees" some completed matches
// Or if completed not-followed-by matches need pruning
processMatchesAccordingToSkipStrategy(sharedBufferAccessor, nfaState, afterMatchSkipStrategy, potentialMatches, newPartialMatches, result);
nfaState.setNewPartialMatches(newPartialMatches);
sharedBufferAccessor.advanceTime(timestamp);
return Tuple2.of(result, timeoutResult);
} | 3.26 |
flink_JoinOperationFactory_create_rdh | /**
* Creates a valid {@link JoinQueryOperation} operation.
*
* <p>It performs validations such as:
*
* <ul>
* <li>condition returns boolean
* <li>the condition is either always true or contains equi join
* <li>left and right side of the join do not contain ambiguous column names
* <li>that correlated join is an INNER join
* </ul>
*
* @param left
* left side of the relational operation
* @param right
* right side of the relational operation
* @param joinType
* what sort of join to create
* @param condition
* join condition to apply
* @param correlated
* if the join should be a correlated join
* @return valid join operation
*/
QueryOperation create(QueryOperation left, QueryOperation right, JoinType joinType, ResolvedExpression condition, boolean correlated) {
verifyConditionType(condition);
validateNamesAmbiguity(left, right);
validateCondition(right, joinType, condition, correlated);
return new JoinQueryOperation(left, right, joinType, condition, correlated);
} | 3.26 |
flink_AnotherDummyFSFileSystem_getUri_rdh | // ------------------------------------------------------------------------
@Override
public URI getUri() {
return FS_URI;
} | 3.26 |
flink_RocksDBHandle_restoreInstanceDirectoryFromPath_rdh | /**
* This recreates the new working directory of the recovered RocksDB instance and links/copies
* the contents from a local state.
*/
private void restoreInstanceDirectoryFromPath(Path source) throws IOException {
final Path instanceRocksDBDirectory = Paths.get(f0);
final Path[] files = FileUtils.listDirectory(source);
if (!new File(f0).mkdirs()) {
String errMsg = "Could not create RocksDB data directory: " + f0;
logger.error(errMsg);
throw new IOException(errMsg);
}
for (Path file : files) {
final String v7 = file.getFileName().toString();
final Path targetFile = instanceRocksDBDirectory.resolve(v7);
if (v7.endsWith(SST_FILE_SUFFIX)) {
try {
// hardlink'ing the immutable sst-files.
Files.createLink(targetFile, file);
continue;
} catch (IOException ioe) {
final String logMessage = String.format(("Could not hard link sst file %s. Trying to copy it over. This might " + "increase the recovery time. In order to avoid this, configure ") + "RocksDB's working directory and the local state directory to be on the same volume.", v7);
if (logger.isDebugEnabled()) {
logger.debug(logMessage, ioe);
} else {
logger.info(logMessage);
}
}
}
// true copy for all other files and files that could not be hard linked.
Files.copy(file, targetFile, StandardCopyOption.REPLACE_EXISTING);
}
} | 3.26 |
flink_BridgingSqlFunction_of_rdh | /**
* Creates an instance of a scalar or table built-in function during translation.
*/
public static BridgingSqlFunction of(RelOptCluster cluster, BuiltInFunctionDefinition functionDefinition) {
return m0(cluster, ContextResolvedFunction.permanent(FunctionIdentifier.of(functionDefinition.getName()), functionDefinition));
} | 3.26 |
flink_BridgingSqlFunction_getRowTypeInference_rdh | /**
 * The conversion to a row type is handled on the caller side. This allows us to perform it in a
 * SQL/Table API-specific way. This is in particular important to set the aliases of fields
 * correctly (see {@link FlinkRelBuilder#pushFunctionScan(RelBuilder, SqlOperator, int,
 * Iterable, List)}).
 */
@Override
public SqlReturnTypeInference getRowTypeInference() {
return getReturnTypeInference();
} | 3.26 |
flink_FileStateHandle_discardState_rdh | /**
* Discard the state by deleting the file that stores the state. If the parent directory of the
* state is empty after deleting the state file, it is also deleted.
*
* @throws Exception
* Thrown, if the file deletion (not the directory deletion) fails.
*/
@Override
public void discardState() throws Exception {
final FileSystem fs = getFileSystem();
IOException actualException = null;
boolean success = true;
try {
success = fs.delete(filePath, false);
} catch (IOException e) {
actualException = e;
}
if ((!success) || (actualException != null)) {
if (fs.exists(filePath)) {
throw Optional.ofNullable(actualException).orElse(new IOException(("Unknown error caused the file '" + filePath) + "' to not be deleted."));
}
}
} | 3.26 |
flink_FileStateHandle_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof FileStateHandle)) {
return false;
}
FileStateHandle that = ((FileStateHandle) (o));
return filePath.equals(that.filePath);
} | 3.26 |
flink_FileStateHandle_getFilePath_rdh | /**
* Gets the path where this handle's state is stored.
*
* @return The path where this handle's state is stored.
*/
public Path getFilePath() {
return filePath;
} | 3.26 |
flink_FileStateHandle_getStateSize_rdh | /**
* Returns the file size in bytes.
*
* @return The file size in bytes.
*/
@Override
public long getStateSize() {
return stateSize;
} | 3.26 |
flink_FileStateHandle_getFileSystem_rdh | /**
* Gets the file system that stores the file state.
*
* @return The file system that stores the file state.
* @throws IOException
* Thrown if the file system cannot be accessed.
*/
private FileSystem getFileSystem() throws IOException {
return FileSystem.get(filePath.toUri());
} | 3.26 |
flink_ThreadInfoRequestCoordinator_triggerThreadInfoRequest_rdh | /**
* Triggers collection of thread info stats of a job vertex by combining thread info responses
* from given subtasks. A thread info response of a subtask in turn consists of {@code numSamples}, collected with {@code delayBetweenSamples} milliseconds delay between them.
*
* @param executionsWithGateways
* Execution attempts together with TaskExecutors running them.
* @param numSamples
* Number of thread info samples to collect from each subtask.
* @param delayBetweenSamples
* Delay between consecutive samples (ms).
* @param maxStackTraceDepth
* Maximum depth of the stack traces collected within thread info
* samples.
* @return A future of the completed thread info stats.
*/
public CompletableFuture<VertexThreadInfoStats> triggerThreadInfoRequest(Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>> executionsWithGateways, int numSamples, Duration delayBetweenSamples, int maxStackTraceDepth) {
checkNotNull(executionsWithGateways, "Tasks to sample");
checkArgument(executionsWithGateways.size() > 0, "No tasks to sample");
checkArgument(numSamples >= 1, "No number of samples");
checkArgument(maxStackTraceDepth >= 0, "Negative maximum stack trace depth");
// Execution IDs of running tasks grouped by the task manager
Collection<ImmutableSet<ExecutionAttemptID>> runningSubtasksIds = executionsWithGateways.keySet();
synchronized(lock) {
if (isShutDown) {
return FutureUtils.completedExceptionally(new IllegalStateException("Shut down"));
}
final int requestId = requestIdCounter++;
log.debug("Triggering thread info request {}", requestId);
final PendingThreadInfoRequest pending = new PendingThreadInfoRequest(requestId, runningSubtasksIds);
// requestTimeout is treated as the time on top of the expected sampling duration.
// Discard the request if it takes too long. We don't send cancel
// messages to the task managers, but only wait for the responses
// and then ignore them.
long expectedDuration = numSamples * delayBetweenSamples.toMillis();
Time timeout = Time.milliseconds(expectedDuration + requestTimeout.toMillis());
// Add the pending request before scheduling the discard task to
// prevent races with removing it again.
pendingRequests.put(requestId, pending);
ThreadInfoSamplesRequest requestParams = new ThreadInfoSamplesRequest(requestId, numSamples, delayBetweenSamples, maxStackTraceDepth);
requestThreadInfo(executionsWithGateways, requestParams, timeout);
return pending.getStatsFuture();
}
} | 3.26 |
flink_ThreadInfoRequestCoordinator_requestThreadInfo_rdh | /**
* Requests thread infos from given subtasks. The response would be ignored if it does not
* return within timeout.
*/
private void requestThreadInfo(Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>> executionWithGateways, ThreadInfoSamplesRequest requestParams, Time timeout) {
// Trigger samples collection from all subtasks
for (Map.Entry<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>> executionWithGateway : executionWithGateways.entrySet()) {
CompletableFuture<TaskExecutorThreadInfoGateway> executorGatewayFuture = executionWithGateway.getValue();
CompletableFuture<TaskThreadInfoResponse> threadInfo = executorGatewayFuture.thenCompose(executorGateway -> executorGateway.requestThreadInfoSamples(executionWithGateway.getKey(), requestParams, timeout));
threadInfo.whenCompleteAsync((TaskThreadInfoResponse threadInfoSamplesResponse, Throwable throwable) -> {
if (threadInfoSamplesResponse != null) {
handleSuccessfulResponse(requestParams.getRequestId(), executionWithGateway.getKey(), threadInfoSamplesResponse.getSamples());
} else {
handleFailedResponse(requestParams.getRequestId(), throwable);
}
}, executor);
}
} | 3.26 |
flink_SortPartitionOperatorBase_executeOnCollections_rdh | // --------------------------------------------------------------------------------------------
@Override
protected List<IN> executeOnCollections(List<IN> inputData, RuntimeContext runtimeContext, ExecutionConfig executionConfig) {
TypeInformation<IN> inputType = getInput().getOperatorInfo().getOutputType();
int[] sortColumns = this.partitionOrdering.getFieldPositions();
boolean[] sortOrderings = this.partitionOrdering.getFieldSortDirections();
final TypeComparator<IN> sortComparator;
if (inputType instanceof CompositeType) {
sortComparator = ((CompositeType<IN>) (inputType)).createComparator(sortColumns, sortOrderings, 0, executionConfig);
} else if (inputType instanceof AtomicType) {
sortComparator = ((AtomicType) (inputType)).createComparator(sortOrderings[0], executionConfig);
} else {
throw new UnsupportedOperationException(("Partition sorting does not support type " + inputType) + " yet.");
}
Collections.sort(inputData, new Comparator<IN>() {
@Override
public int compare(IN o1, IN o2) {
return sortComparator.compare(o1, o2);
}
});
return inputData;
} | 3.26 |
flink_BroadcastPartitioner_selectChannel_rdh | /**
 * Note: Broadcast mode can be handled directly for all the output channels in the record writer,
 * so there is no need to select a channel via this method.
*/
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
throw new UnsupportedOperationException("Broadcast partitioner does not support select channels.");
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_getTaskHeap_rdh | /**
* Returns the configured heap size used by the tasks.
*/
public Long getTaskHeap() {
return taskHeap;
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_getFrameworkHeap_rdh | /**
* Returns the configured heap size used by the framework.
*/
public Long getFrameworkHeap() {
return frameworkHeap;
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_getFrameworkOffHeap_rdh | /**
* Returns the configured off-heap size used by the framework.
*/
public Long getFrameworkOffHeap() {
return frameworkOffHeap;
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_getNetworkMemory_rdh | /**
* Returns the configured maximum network memory.
*/
public Long getNetworkMemory() {
return networkMemory;
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_m0_rdh | /**
* Factory method for initializing a TaskExecutorMemoryConfiguration based on the passed
* Configuration.
*
* @param config
* The Configuration used for initializing the TaskExecutorMemoryConfiguration.
* @return The newly instantiated TaskExecutorMemoryConfiguration.
*/
public static TaskExecutorMemoryConfiguration m0(Configuration config) {
return new TaskExecutorMemoryConfiguration(getConfigurationValue(config, FRAMEWORK_HEAP_MEMORY), getConfigurationValue(config, TASK_HEAP_MEMORY), getConfigurationValue(config, FRAMEWORK_OFF_HEAP_MEMORY), getConfigurationValue(config, TASK_OFF_HEAP_MEMORY), getConfigurationValue(config, NETWORK_MEMORY_MAX), getConfigurationValue(config, MANAGED_MEMORY_SIZE), getConfigurationValue(config, JVM_METASPACE), getConfigurationValue(config, JVM_OVERHEAD_MAX), calculateTotalFlinkMemoryFromComponents(config), calculateTotalProcessMemoryFromComponents(config));
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_getJvmMetaspace_rdh | /**
* Returns the maximum Metaspace size allowed for the task manager.
*/
public Long getJvmMetaspace() {
return jvmMetaspace;
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_getTaskOffHeap_rdh | /**
* Returns the configured off-heap size used by the tasks.
*/
public Long getTaskOffHeap() {
return taskOffHeap;
} | 3.26 |
flink_TaskExecutorMemoryConfiguration_getManagedMemoryTotal_rdh | /**
 * Returns the total amount of memory reserved for use by the MemoryManager.
*/
public Long getManagedMemoryTotal() {
return managedMemoryTotal;
} | 3.26 |
flink_SubQueryInputTypeStrategy_getArgumentCount_rdh | /**
* {@link InputTypeStrategy} for {@link BuiltInFunctionDefinitions#IN}.
*/
@Internal
public class SubQueryInputTypeStrategy implements InputTypeStrategy {
@Override
public ArgumentCount getArgumentCount() {
return ConstantArgumentCount.from(2);
} | 3.26 |
flink_SourceCoordinatorSerdeUtils_readAndVerifyCoordinatorSerdeVersion_rdh | /**
* Read and verify the serde version.
*/
static int readAndVerifyCoordinatorSerdeVersion(DataInputStream in) throws IOException {
int version = in.readInt();
if (version > CURRENT_VERSION) {
throw new IOException("Unsupported source coordinator serde version " + version);
}
return version;
} | 3.26 |
flink_SourceCoordinatorSerdeUtils_writeCoordinatorSerdeVersion_rdh | /**
* Write the current serde version.
*/
static void writeCoordinatorSerdeVersion(DataOutputStream out) throws IOException {
out.writeInt(CURRENT_VERSION);
} | 3.26 |
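Together with readAndVerifyCoordinatorSerdeVersion shown two rows above, this forms a simple write-then-verify protocol. A standalone round-trip sketch using plain java.io streams follows; the CURRENT_VERSION value used here is an assumption for the example.

```java
// Illustrative only: a standalone round trip of the version write/verify pattern above,
// using plain java.io streams. The CURRENT_VERSION value chosen here is an assumption.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class SerdeVersionRoundTrip {
    static final int CURRENT_VERSION = 1; // assumed value for this sketch

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(CURRENT_VERSION); // like writeCoordinatorSerdeVersion
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            int version = in.readInt(); // like readAndVerifyCoordinatorSerdeVersion
            if (version > CURRENT_VERSION) {
                throw new IOException("Unsupported source coordinator serde version " + version);
            }
            System.out.println("verified version " + version);
        }
    }
}
```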
flink_DataSinkTask_getLogString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Utility function that composes a string for logging purposes. The string includes the given
* message and the index of the task in its task group together with the number of tasks in the
* task group.
*
* @param message
* The main message for the log.
* @return The string ready for logging.
*/
private String getLogString(String message) {
return BatchTask.constructLogString(message, this.getEnvironment().getTaskInfo().getTaskName(), this);
} | 3.26 |
flink_DataSinkTask_initOutputFormat_rdh | /**
* Initializes the OutputFormat implementation and configuration.
*
* @throws RuntimeException
* Throws if instance of OutputFormat implementation can not be
* obtained.
*/
private void initOutputFormat() {
ClassLoader userCodeClassLoader = getUserCodeClassLoader();
// obtain task configuration (including stub parameters)
Configuration taskConf = getTaskConfiguration();
this.config = new TaskConfig(taskConf);
final Pair<OperatorID, OutputFormat<IT>> operatorIDAndOutputFormat;
InputOutputFormatContainer formatContainer = new InputOutputFormatContainer(config, userCodeClassLoader);
try {
operatorIDAndOutputFormat = formatContainer.getUniqueOutputFormat();
this.format = operatorIDAndOutputFormat.getValue();
// check if the class is a subclass, if the check is required
if (!OutputFormat.class.isAssignableFrom(this.format.getClass())) {
throw new RuntimeException(((("The class '" + this.format.getClass().getName()) + "' is not a subclass of '") + OutputFormat.class.getName()) + "' as is required.");
}
} catch (ClassCastException ccex) {
throw new RuntimeException("The stub class is not a proper subclass of " + OutputFormat.class.getName(), ccex);
}
Thread thread = Thread.currentThread();
ClassLoader original = thread.getContextClassLoader();
// configure the stub. catch exceptions here extra, to report them as originating from the
// user code
try {
thread.setContextClassLoader(userCodeClassLoader);
this.format.configure(formatContainer.getParameters(operatorIDAndOutputFormat.getKey()));
} catch (Throwable t) {
throw new RuntimeException("The user defined 'configure()' method in the Output Format caused an error: " + t.getMessage(), t);
} finally {
thread.setContextClassLoader(original);
}
} | 3.26 |
flink_DataSinkTask_initInputReaders_rdh | /**
* Initializes the input readers of the DataSinkTask.
*
* @throws RuntimeException
* Thrown in case of invalid task input configuration.
*/
@SuppressWarnings("unchecked")
private void initInputReaders() throws Exception {
int numGates = 0;
// ---------------- create the input readers ---------------------
// in case where a logical input unions multiple physical inputs, create a union reader
final int groupSize = this.config.getGroupSize(0);
numGates += groupSize;
if (groupSize == 1) {
// non-union case
inputReader = new MutableRecordReader<DeserializationDelegate<IT>>(getEnvironment().getInputGate(0), getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else if (groupSize > 1) {
// union case
inputReader = new MutableRecordReader<IOReadableWritable>(new UnionInputGate(getEnvironment().getAllInputGates()), getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else {
throw new Exception("Illegal input group size in task configuration: " + groupSize);
}
this.inputTypeSerializerFactory = this.config.getInputSerializer(0, getUserCodeClassLoader());
@SuppressWarnings({ "rawtypes" })
final MutableObjectIterator<?> iter = new ReaderIterator(inputReader, this.inputTypeSerializerFactory.getSerializer());
this.reader = ((MutableObjectIterator<IT>) (iter));
// final sanity check
if (numGates != this.config.getNumInputs()) {
throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
}
} | 3.26 |
flink_TransientBlobCache_m0_rdh | /**
* Returns the blob expiry times - for testing purposes only!
*
* @return blob expiry times (internal state!)
*/
@VisibleForTesting
ConcurrentMap<Tuple2<JobID, TransientBlobKey>, Long> m0() {
return blobExpiryTimes;
} | 3.26 |
flink_TransientBlobCache_getStorageLocation_rdh | /**
* Returns a file handle to the file associated with the given blob key on the blob server.
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param key
* identifying the file
* @return file handle to the file
* @throws IOException
* if creating the directory fails
*/
@VisibleForTesting
public File getStorageLocation(@Nullable JobID jobId, BlobKey key) throws IOException {
return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key);
} | 3.26 |
flink_DataOutputEncoder_writeFixed_rdh | // --------------------------------------------------------------------------------------------
// bytes
// --------------------------------------------------------------------------------------------
@Override
public void writeFixed(byte[] bytes, int start, int len) throws IOException {
out.write(bytes, start, len);
} | 3.26 |
flink_DataOutputEncoder_writeString_rdh | // --------------------------------------------------------------------------------------------
// strings
// --------------------------------------------------------------------------------------------
@Override
public void writeString(String str) throws IOException {
byte[] bytes = Utf8.getBytesFor(str);
writeBytes(bytes, 0, bytes.length);
} | 3.26 |
flink_DataOutputEncoder_writeNull_rdh | // --------------------------------------------------------------------------------------------
// primitives
// --------------------------------------------------------------------------------------------
@Override
public void writeNull() {
} | 3.26 |
flink_DataOutputEncoder_writeVarLongCount_rdh | // --------------------------------------------------------------------------------------------
// utils
// --------------------------------------------------------------------------------------------
public static void writeVarLongCount(DataOutput out, long val) throws IOException {
if (val < 0) {
throw new IOException("Illegal count (must be non-negative): " + val);
}
while ((val & (~0x7fL)) != 0) {
out.write(((int) (val)) | 0x80);
val >>>= 7;
}
out.write(((int) (val)));
} | 3.26 |
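writeVarLongCount above emits the count in little-endian base-128: seven data bits per byte, with the high bit set on every byte except the last. A matching reader would reassemble the value as sketched below; this helper is hypothetical and not part of DataOutputEncoder.

```java
// Illustrative only: a decoder matching the variable-length encoding produced by
// writeVarLongCount above (7 data bits per byte, high bit set on all but the last byte).
// This helper is hypothetical, not part of DataOutputEncoder.
import java.io.DataInput;
import java.io.IOException;

final class VarLongCountReader {
    static long readVarLongCount(DataInput in) throws IOException {
        long value = 0;
        int shift = 0;
        int b;
        do {
            b = in.readUnsignedByte();
            value |= ((long) (b & 0x7f)) << shift; // low-order groups come first
            shift += 7;
        } while ((b & 0x80) != 0);                 // high bit signals a continuation byte
        return value;
    }
}
```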
flink_DataOutputEncoder_writeIndex_rdh | // --------------------------------------------------------------------------------------------
// union
// --------------------------------------------------------------------------------------------
@Override
public void writeIndex(int unionIndex) throws IOException {
out.writeInt(unionIndex);
} | 3.26 |
flink_DataOutputEncoder_writeArrayStart_rdh | // --------------------------------------------------------------------------------------------
// collection types
// --------------------------------------------------------------------------------------------
@Override
public void writeArrayStart() {
} | 3.26 |
flink_AvroRowSerializationSchema_convertRowToAvroRecord_rdh | // --------------------------------------------------------------------------------------------
private GenericRecord convertRowToAvroRecord(Schema schema, Row row) {
final List<Schema.Field> fields = schema.getFields();
final int length = fields.size();
final GenericRecord record = new GenericData.Record(schema);
for (int i = 0; i < length; i++) {
final Schema.Field field = fields.get(i);
record.put(i, convertFlinkType(field.schema(), row.getField(i)));
}
return record;
} | 3.26 |
flink_KeyedStateTransformation_window_rdh | /**
* Windows this transformation into a {@code WindowedOperatorTransformation}, which bootstraps
* state that can be restored by a {@code WindowOperator}. Elements are put into windows by a
* {@link WindowAssigner}. The grouping of elements is done both by key and by window.
*
* <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
* specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger} that is used if a {@code Trigger} is not specified.
*
* @param assigner
* The {@code WindowAssigner} that assigns elements to windows.
*/
public <W extends Window> WindowedStateTransformation<T, K, W> window(WindowAssigner<? super T, W> assigner) {
return new WindowedStateTransformation<>(stream, operatorMaxParallelism, keySelector, keyType, assigner);
} | 3.26 |
flink_KeyedStateTransformation_transform_rdh | /**
 * Method for passing user-defined operators, along with the type information, that will transform
 * the OperatorTransformation.
*
* <p><b>IMPORTANT:</b> Any output from this operator will be discarded.
*
* @param factory
* A factory returning transformation logic type of the return stream
* @return An {@link StateBootstrapTransformation} that can be added to a {@link Savepoint}.
*/
public StateBootstrapTransformation<T> transform(SavepointWriterOperatorFactory factory) {
return new StateBootstrapTransformation<>(stream, operatorMaxParallelism, factory, keySelector, keyType);
} | 3.26 |
flink_StreamProjection_projectTuple17_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> SingleOutputStreamOperator<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> projectTuple17() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> tType = new TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple4_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
 */
public <T0, T1, T2, T3> SingleOutputStreamOperator<Tuple4<T0, T1, T2, T3>> projectTuple4() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType = new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple4<T0, T1, T2, T3>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple13_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> SingleOutputStreamOperator<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> projectTuple13() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> tType = new TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple22_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> SingleOutputStreamOperator<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>> projectTuple22() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>> tType = new TupleTypeInfo<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple8_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7> SingleOutputStreamOperator<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType = new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTupleX_rdh | /**
* Chooses a projectTupleX according to the length of {@link org.apache.flink.streaming.api.datastream.StreamProjection#fieldIndexes}.
*
* @return The projected DataStream.
* @see org.apache.flink.api.java.operators.ProjectOperator.Projection
*/
@SuppressWarnings("unchecked")
public <OUT extends Tuple> SingleOutputStreamOperator<OUT> projectTupleX() {
SingleOutputStreamOperator<OUT> projOperator = null;
switch (fieldIndexes.length) {
case 1 :
projOperator = ((SingleOutputStreamOperator<OUT>) (m0()));
break;
case 2 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple2()));
break;
case 3 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple3()));
break;
case 4 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple4()));
break;
case 5 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple5()));
break;
case 6 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple6()));
break;
case 7 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple7()));
break;
case 8 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple8()));
break;
case 9 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple9()));
break;
case 10 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple10()));
break;
case 11 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple11()));
break;
case 12 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple12()));
break;
case 13 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple13()));
break;
case 14 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple14()));
break;
case 15 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple15()));
break;
case 16 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple16()));
break;
case 17 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple17()));
break;
case 18 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple18()));
break;
case 19 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple19()));
break;
case 20 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple20()));
break;
case 21 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple21()));
break;
case 22 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple22()));
break;
case 23 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple23()));
break;
case 24 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple24()));
break;
case 25 :
projOperator = ((SingleOutputStreamOperator<OUT>) (projectTuple25()));
break;
default :
throw new IllegalStateException("Excessive arity in tuple.");
}
return projOperator;
}
/**
* Projects a {@link Tuple} {@link DataStream} | 3.26 |
flink_StreamProjection_projectTuple9_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8> SingleOutputStreamOperator<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> projectTuple9() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> tType = new TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple5_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4> SingleOutputStreamOperator<Tuple5<T0, T1, T2, T3, T4>> projectTuple5() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> tType = new TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple5<T0, T1, T2, T3, T4>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple16_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> SingleOutputStreamOperator<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> projectTuple16() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> tType = new TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple12_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> SingleOutputStreamOperator<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> projectTuple12() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> tType = new TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple23_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> SingleOutputStreamOperator<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> projectTuple23() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> tType = new TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple18_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> SingleOutputStreamOperator<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> projectTuple18() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> tType = new TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple25_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> SingleOutputStreamOperator<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> projectTuple25() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> tType = new TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple14_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> SingleOutputStreamOperator<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> projectTuple14() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> tType = new TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple7_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6> SingleOutputStreamOperator<Tuple7<T0, T1, T2, T3, T4, T5, T6>> projectTuple7() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>> tType = new TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple7<T0, T1, T2, T3, T4, T5, T6>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple10_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> SingleOutputStreamOperator<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> projectTuple10() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> tType = new TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
}
/**
* Projects a {@link Tuple} {@link DataStream} | 3.26 |
flink_StreamProjection_projectTuple3_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2> SingleOutputStreamOperator<Tuple3<T0, T1, T2>> projectTuple3() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple3<T0, T1, T2>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple21_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> SingleOutputStreamOperator<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> projectTuple21() {
TypeInformation<?>[] v43 = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> tType = new TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(v43);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple24_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> SingleOutputStreamOperator<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> projectTuple24() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> tType = new TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple6_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5> SingleOutputStreamOperator<Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType = new TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple6<T0, T1, T2, T3, T4, T5>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple15_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> SingleOutputStreamOperator<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> projectTuple15() {
TypeInformation<?>[] v31 = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> tType = new TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(v31);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple2_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1> SingleOutputStreamOperator<Tuple2<T0, T1>> projectTuple2() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple2<T0, T1>> tType = new TupleTypeInfo<Tuple2<T0, T1>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple2<T0, T1>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
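The projectTupleN methods above are normally reached through DataStream#project(int... fieldIndexes), which picks the matching arity internally. A minimal usage sketch, assuming a StreamExecutionEnvironment named env, a Tuple4 input and the usual flink-streaming-java imports; the field choice is illustrative only:

// Keep fields 3 and 0 of each input tuple, in that order; the projected
// arity (2) corresponds to projectTuple2() above.
DataStream<Tuple4<Integer, String, Double, Long>> in =
        env.fromElements(Tuple4.of(1, "a", 2.0, 10L), Tuple4.of(2, "b", 3.0, 20L));
SingleOutputStreamOperator<Tuple2<Long, Integer>> out = in.project(3, 0);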
flink_StreamProjection_projectTuple20_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> SingleOutputStreamOperator<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> projectTuple20() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> tType = new TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_StreamProjection_projectTuple19_rdh | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> SingleOutputStreamOperator<Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>> projectTuple19() {
TypeInformation<?>[] fTypes = m1(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>> tType = new TupleTypeInfo<Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>>(fTypes);
return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.26 |
flink_OptimizedPlan_getJobName_rdh | /**
* Returns the name of the program.
*
* @return The name of the program.
*/
public String getJobName() {
return this.jobName;
} | 3.26 |
flink_OptimizedPlan_accept_rdh | // ------------------------------------------------------------------------
/**
* Applies the given visitor top down to all nodes, starting at the sinks.
*
* @param visitor
* The visitor to apply to the nodes in this plan.
* @see org.apache.flink.util.Visitable#accept(org.apache.flink.util.Visitor)
*/
@Override
public void accept(Visitor<PlanNode> visitor) {
for (SinkPlanNode node : this.dataSinks) {
node.accept(visitor);
}
} | 3.26 |
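The accept method above drives a top-down traversal that starts at the sink nodes. A minimal sketch of a visitor that could be handed to it, assuming the standard org.apache.flink.util.Visitor contract in which preVisit returns true to descend into a node's inputs:

// Counts every PlanNode reachable from the data sinks of an OptimizedPlan.
final class NodeCounter implements Visitor<PlanNode> {
    int count;

    @Override
    public boolean preVisit(PlanNode node) {
        count++;
        return true; // keep descending into the node's inputs
    }

    @Override
    public void postVisit(PlanNode node) {
        // nothing to do on the way back up
    }
}

// usage: optimizedPlan.accept(new NodeCounter());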
flink_OptimizedPlan_getAllNodes_rdh | /**
* Gets all the nodes from this OptimizedPlan.
*
* @return All nodes.
*/
public Collection<PlanNode> getAllNodes() {
return allNodes;
} | 3.26 |
flink_CoGroupedStreams_equalTo_rdh | /**
* Specifies a {@link KeySelector} for elements from the second input.
*
* @param keySelector
* The KeySelector to be used for extracting the second input's key for
* partitioning.
*/
public EqualTo equalTo(KeySelector<T2, KEY> keySelector) {
Preconditions.checkNotNull(keySelector);
final TypeInformation<KEY> otherKey = TypeExtractor.getKeySelectorTypes(keySelector, input2.getType());
return equalTo(keySelector, otherKey);
} | 3.26 |
flink_CoGroupedStreams_where_rdh | /**
* Specifies a {@link KeySelector} for elements from the first input with explicit type
* information.
*
* @param keySelector
* The KeySelector to be used for extracting the first input's key for
* partitioning.
* @param keyType
* The type information describing the key type.
*/
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector, TypeInformation<KEY> keyType) {
Preconditions.checkNotNull(keySelector);
Preconditions.checkNotNull(keyType);
return new Where<>(input1.clean(keySelector), keyType);
} | 3.26 |
flink_CoGroupedStreams_m0_rdh | /**
* Sets the {@code Trigger} that should be used to trigger window emission.
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> m0(Trigger<? super TaggedUnion<T1, T2>, ? super W> newTrigger) {
return new WithWindow<>(input1, input2, keySelector1, keySelector2, keyType, windowAssigner, newTrigger, evictor, allowedLateness);
}
/**
* Sets the {@code Evictor} | 3.26 |
flink_CoGroupedStreams_apply_rdh | /**
* Completes the co-group operation with the user function that is executed for windowed
* groups.
*
* <p>Note: This method's return type does not support setting an operator-specific
* parallelism. Due to binary backwards compatibility, this cannot be altered. Use the
* {@link #with(CoGroupFunction, TypeInformation)} method to set an operator-specific
* parallelism.
*/
public <T> DataStream<T> apply(CoGroupFunction<T1, T2, T> function, TypeInformation<T> resultType) {
// clean the closure
function = input1.getExecutionEnvironment().clean(function);
UnionTypeInfo<T1, T2> unionType = new UnionTypeInfo<>(input1.getType(), input2.getType());
UnionKeySelector<T1, T2, KEY> unionKeySelector = new UnionKeySelector<>(keySelector1, keySelector2);
SingleOutputStreamOperator<TaggedUnion<T1, T2>> taggedInput1 = input1.map(new Input1Tagger<T1, T2>());
taggedInput1.getTransformation().setParallelism(input1.getParallelism(), false);
taggedInput1.returns(unionType);
SingleOutputStreamOperator<TaggedUnion<T1, T2>> taggedInput2 = input2.map(new Input2Tagger<T1, T2>());
taggedInput2.getTransformation().setParallelism(input2.getParallelism(), false);
taggedInput2.returns(unionType);
DataStream<TaggedUnion<T1, T2>> unionStream = taggedInput1.union(taggedInput2);
// we explicitly create the keyed stream to manually pass the key type information in
windowedStream = new KeyedStream<TaggedUnion<T1, T2>, KEY>(unionStream, unionKeySelector, keyType).window(windowAssigner);
if (trigger != null) {
windowedStream.trigger(trigger);
}
if (evictor != null) {
windowedStream.evictor(evictor);
}
if (allowedLateness != null) {
windowedStream.allowedLateness(allowedLateness);
}
return windowedStream.apply(new CoGroupWindowFunction<T1, T2, T, KEY, W>(function), resultType);
}
/**
* Completes the co-group operation with the user function that is executed for windowed
* groups.
*
* <p><b>Note:</b> This is a temporary workaround while the {@link #apply(CoGroupFunction,
* TypeInformation)} method has the wrong return type and hence does not allow one to set an
* operator-specific parallelism.
*
* @deprecated This method will be removed once the {@link #apply(CoGroupFunction,
TypeInformation)} | 3.26 |
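For context, a typical end-to-end use of the co-group API whose implementation is shown above: the two inputs are tagged, unioned, keyed and windowed before the CoGroupFunction runs. The input streams first and second (both DataStream<Tuple2<String, Integer>>), the key choice and the 10-second tumbling window are illustrative assumptions, not taken from the snippet:

// Counts how many elements each side contributes per key and window.
DataStream<String> result = first
        .coGroup(second)
        .where(new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> value) { return value.f0; }
        })
        .equalTo(new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> value) { return value.f0; }
        })
        .window(TumblingEventTimeWindows.of(Time.seconds(10)))
        .apply(new CoGroupFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, String>() {
            @Override
            public void coGroup(
                    Iterable<Tuple2<String, Integer>> left,
                    Iterable<Tuple2<String, Integer>> right,
                    Collector<String> out) {
                int leftCount = 0;
                for (Tuple2<String, Integer> ignored : left) { leftCount++; }
                int rightCount = 0;
                for (Tuple2<String, Integer> ignored : right) { rightCount++; }
                out.collect(leftCount + " vs " + rightCount);
            }
        });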
flink_CoGroupedStreams_allowedLateness_rdh | /**
* Sets the time by which elements are allowed to be late.
*
* @see WindowedStream#allowedLateness(Time)
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> allowedLateness(Time newLateness) {
return new WithWindow<>(input1, input2, keySelector1, keySelector2, keyType, windowAssigner, trigger, evictor, newLateness);
} | 3.26 |
flink_MessageParameter_getKey_rdh | /**
* Returns the key of this parameter, e.g. "jobid".
*
* @return key of this parameter
*/
public final String getKey() {
return f0;
} | 3.26 |
flink_MessageParameter_resolve_rdh | /**
* Resolves this parameter for the given value.
*
* @param value
* value to resolve this parameter with
*/
public final void resolve(X value) {
Preconditions.checkState(!resolved, "This parameter was already resolved.");
this.value = Preconditions.checkNotNull(value);
this.resolved = true;
} | 3.26 |
flink_MessageParameter_m0_rdh | /**
* Returns the resolved value of this parameter as a string, or {@code null} if it isn't
* resolved yet.
*
* @return resolved value, or null if it wasn't resolved yet
*/
final String m0() {
return value == null ? null : convertToString(value);
} | 3.26 |
flink_MessageParameter_getValue_rdh | /**
* Returns the resolved value of this parameter, or {@code null} if it isn't resolved yet.
*
* @return resolved value, or null if it wasn't resolved yet
*/
public final X getValue() {
return value;
} | 3.26 |
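Taken together, getKey, resolve and getValue define a resolve-once contract. A minimal sketch that exercises only what is shown in these snippets; the generic helper and its name are assumptions for illustration:

// A parameter starts unresolved; resolve(...) may be called exactly once,
// after which getValue() returns the non-null value for the parameter's key.
static <X> X resolveOnce(MessageParameter<X> parameter, X value) {
    parameter.resolve(value);    // a second call would fail the checkState above
    return parameter.getValue(); // non-null after a successful resolve
}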
flink_NotDuplicatingCheckpointStateToolset_canFastDuplicate_rdh | /**
* An empty implementation of {@link CheckpointStateToolset}.
*/
public final class NotDuplicatingCheckpointStateToolset implements CheckpointStateToolset {
@Override
public boolean canFastDuplicate(StreamStateHandle stateHandle) throws IOException {
return false;
} | 3.26 |
flink_LegacySinkTransformation_setStateKeySelector_rdh | /**
* Sets the {@link KeySelector} that must be used for partitioning keyed state of this Sink.
*
* @param stateKeySelector
* The {@code KeySelector} to set
*/
public void setStateKeySelector(KeySelector<T, ?> stateKeySelector) {
this.stateKeySelector = stateKeySelector;
updateManagedMemoryStateBackendUseCase(stateKeySelector != null);
} | 3.26 |
flink_LegacySinkTransformation_getOperatorFactory_rdh | /**
* Returns the {@link StreamOperatorFactory} of this {@code LegacySinkTransformation}.
*/
public StreamOperatorFactory<Object> getOperatorFactory() {
return operatorFactory;
} | 3.26 |