@VisibleForTesting public int numKeyValueStateEntries(Object namespace) { int sum = 0; for (StateTable<?, ?, ?> state : registeredKVStates.values()) { sum += state.sizeOfNamespace(namespace); } return sum; }
Returns the total number of state entries across all keys for the given namespace.
public static TumblingProcessingTimeWindows of(Time size, Time offset) { return new TumblingProcessingTimeWindows(size.toMilliseconds(), offset.toMilliseconds()); }
Creates a new {@code TumblingProcessingTimeWindows} {@link WindowAssigner} that assigns elements to time windows based on the element timestamp and offset. <p>For example, if you want to window a stream by hour, but the windows should begin at the 15th minute of each hour, you can use {@code of(Time.hours(1), Time.minutes(15))}; you will then get time windows that start at 0:15:00, 1:15:00, 2:15:00, etc. <p>Moreover, if you live somewhere that does not use UTC±00:00 time, such as China, which uses UTC+08:00, and you want a time window with a size of one day whose windows begin at every 00:00:00 of local time, you may use {@code of(Time.days(1), Time.hours(-8))}. The offset parameter is {@code Time.hours(-8)} since UTC+08:00 is 8 hours ahead of UTC time. @param size The size of the generated windows. @param offset The offset by which the window start is shifted. @return The time policy.
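A minimal usage sketch of the offset overload follows; the Tuple2 input stream and the key position are assumptions for illustration, not part of the method above.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class OffsetWindowExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Tuple2<String, Long>> input =
                env.fromElements(Tuple2.of("a", 1L), Tuple2.of("a", 2L));

        input
            .keyBy(0)
            // daily windows aligned to local midnight in UTC+08:00
            .window(TumblingProcessingTimeWindows.of(Time.days(1), Time.hours(-8)))
            .sum(1)
            .print();

        env.execute("offset-window-example");
    }
}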
public static <T> PatternStream<T> pattern(DataStream<T> input, Pattern<T, ?> pattern) { return new PatternStream<>(input, pattern); }
Creates a {@link PatternStream} from an input data stream and a pattern. @param input DataStream containing the input events @param pattern Pattern specification which shall be detected @param <T> Type of the input events @return Resulting pattern stream
public static <T> PatternStream<T> pattern( DataStream<T> input, Pattern<T, ?> pattern, EventComparator<T> comparator) { final PatternStream<T> stream = new PatternStream<>(input, pattern); return stream.withComparator(comparator); }
Creates a {@link PatternStream} from an input data stream and a pattern. @param input DataStream containing the input events @param pattern Pattern specification which shall be detected @param comparator Comparator to sort events with equal timestamps @param <T> Type of the input events @return Resulting pattern stream
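A short sketch of building a pattern and obtaining a PatternStream through these factory methods; the String event stream and the pattern definition are illustrative assumptions.

import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CepPatternExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> events = env.fromElements("login", "login", "alert");

        // a "login" event directly followed by an "alert" event
        Pattern<String, ?> pattern = Pattern.<String>begin("start")
            .where(new SimpleCondition<String>() {
                @Override
                public boolean filter(String value) {
                    return value.equals("login");
                }
            })
            .next("end")
            .where(new SimpleCondition<String>() {
                @Override
                public boolean filter(String value) {
                    return value.equals("alert");
                }
            });

        PatternStream<String> patternStream = CEP.pattern(events, pattern);
    }
}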
static void skipSerializedStates(DataInputView in) throws IOException { TypeSerializer<String> nameSerializer = StringSerializer.INSTANCE; TypeSerializer<State.StateType> stateTypeSerializer = new EnumSerializer<>(State.StateType.class); TypeSerializer<StateTransitionAction> actionSerializer = new EnumSerializer<>(StateTransitionAction.class); final int noOfStates = in.readInt(); for (int i = 0; i < noOfStates; i++) { nameSerializer.deserialize(in); stateTypeSerializer.deserialize(in); } for (int i = 0; i < noOfStates; i++) { String srcName = nameSerializer.deserialize(in); int noOfTransitions = in.readInt(); for (int j = 0; j < noOfTransitions; j++) { String src = nameSerializer.deserialize(in); Preconditions.checkState(src.equals(srcName), "Source Edge names do not match (" + srcName + " - " + src + ")."); nameSerializer.deserialize(in); actionSerializer.deserialize(in); try { skipCondition(in); } catch (ClassNotFoundException e) { e.printStackTrace(); } } } }
Skips bytes corresponding to serialized states. In Flink 1.6+ the states are no longer kept in state.
private static <T, ET> boolean hasNullValueEdges(DataSet<Edge<T, ET>> edges) { TypeInformation<?> genericTypeInfo = edges.getType(); @SuppressWarnings("unchecked") TupleTypeInfo<Tuple3<T, T, ET>> tupleTypeInfo = (TupleTypeInfo<Tuple3<T, T, ET>>) genericTypeInfo; return tupleTypeInfo.getTypeAt(2).equals(ValueTypeInfo.NULL_VALUE_TYPE_INFO); }
Check whether the edge type of the {@link DataSet} is {@link NullValue}. @param edges data set for introspection @param <T> graph ID type @param <ET> edge value type @return whether the edge type of the {@link DataSet} is {@link NullValue}
@Override public void start(LeaderRetrievalListener listener) { checkNotNull(listener, "Listener must not be null."); synchronized (startStopLock) { checkState(!started, "StandaloneLeaderRetrievalService can only be started once."); started = true; // directly notify the listener, because we already know the leading JobManager's address listener.notifyLeaderAddress(leaderAddress, leaderId); } }
------------------------------------------------------------------------
public static boolean isInFixedLengthPart(InternalType type) { if (type instanceof DecimalType) { return ((DecimalType) type).precision() <= DecimalType.MAX_COMPACT_PRECISION; } else { return MUTABLE_FIELD_TYPES.contains(type); } }
If it is a fixed-length field, we can call this BinaryRow's setXX methods for in-place updates. If it is a variable-length field, this method cannot be used, because the underlying data is stored contiguously.
public boolean anyNull() { // Skip the header. if ((segments[0].getLong(0) & FIRST_BYTE_ZERO) != 0) { return true; } for (int i = 8; i < nullBitsSizeInBytes; i += 8) { if (segments[0].getLong(i) != 0) { return true; } } return false; }
The bit is 1 when the field is null. Default is 0.
private static <R extends JarRequestBody, M extends MessageParameters> List<String> getProgramArgs( HandlerRequest<R, M> request, Logger log) throws RestHandlerException { JarRequestBody requestBody = request.getRequestBody(); @SuppressWarnings("deprecation") List<String> programArgs = tokenizeArguments( fromRequestBodyOrQueryParameter( emptyToNull(requestBody.getProgramArguments()), () -> getQueryParameter(request, ProgramArgsQueryParameter.class), null, log)); List<String> programArgsList = fromRequestBodyOrQueryParameter( requestBody.getProgramArgumentsList(), () -> request.getQueryParameter(ProgramArgQueryParameter.class), null, log); if (!programArgsList.isEmpty()) { if (!programArgs.isEmpty()) { throw new RestHandlerException( "Confusing request: programArgs and programArgsList are specified, please, use only programArgsList", HttpResponseStatus.BAD_REQUEST); } return programArgsList; } else { return programArgs; } }
Parses program arguments in a jar run or plan request.
@VisibleForTesting static List<String> tokenizeArguments(@Nullable final String args) { if (args == null) { return Collections.emptyList(); } final Matcher matcher = ARGUMENTS_TOKENIZE_PATTERN.matcher(args); final List<String> tokens = new ArrayList<>(); while (matcher.find()) { tokens.add(matcher.group() .trim() .replace("\"", "") .replace("\'", "")); } return tokens; }
Takes program arguments as a single string, and splits them into a list of strings. <pre> tokenizeArguments("--foo bar") = ["--foo" "bar"] tokenizeArguments("--foo \"bar baz\"") = ["--foo" "bar baz"] tokenizeArguments("--foo 'bar baz'") = ["--foo" "bar baz"] tokenizeArguments(null) = [] </pre> <strong>WARNING: </strong>This method does not respect escaped quotes.
private void serializeComputationStates(Queue<ComputationState> states, DataOutputView target) throws IOException { target.writeInt(states.size()); for (ComputationState computationState : states) { serializeSingleComputationState(computationState, target); } }
/* De/serialization methods
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); // the nested serializer will be null if this was read from a savepoint taken with versions // lower than Flink 1.7; in this case, we explicitly create instance for the nested serializer. if (versionSerializer == null || nodeIdSerializer == null || eventIdSerializer == null) { this.versionSerializer = DeweyNumber.DeweyNumberSerializer.INSTANCE; this.eventIdSerializer = EventId.EventIdSerializer.INSTANCE; this.nodeIdSerializer = new NodeId.NodeIdSerializer(); } }
/* Backwards-compatible deserialization of NFAStateSerializer.
public <R> SingleOutputStreamOperator<R> process(final PatternProcessFunction<T, R> patternProcessFunction) { final TypeInformation<R> returnType = TypeExtractor.getUnaryOperatorReturnType( patternProcessFunction, PatternProcessFunction.class, 0, 1, TypeExtractor.NO_INDEX, builder.getInputType(), null, false); return process(patternProcessFunction, returnType); }
Applies a process function to the detected pattern sequence. For each pattern sequence the provided {@link PatternProcessFunction} is called. In order to process timed out partial matches as well one can use {@link TimedOutPartialMatchHandler} as additional interface. @param patternProcessFunction The pattern process function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements from the pattern process function.
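A hedged sketch of a PatternProcessFunction, reusing the hypothetical patternStream of String events from the earlier sketch (imports for Map, List, Collector and the CEP classes are omitted here).

SingleOutputStreamOperator<String> matches = patternStream.process(
    new PatternProcessFunction<String, String>() {
        @Override
        public void processMatch(Map<String, List<String>> match,
                                 Context ctx,
                                 Collector<String> out) {
            // emit the first event bound to the "start" part of the match
            out.collect(match.get("start").get(0));
        }
    });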
public <R> SingleOutputStreamOperator<R> process( final PatternProcessFunction<T, R> patternProcessFunction, final TypeInformation<R> outTypeInfo) { return builder.build( outTypeInfo, builder.clean(patternProcessFunction)); }
Applies a process function to the detected pattern sequence. For each pattern sequence the provided {@link PatternProcessFunction} is called. In order to process timed out partial matches as well one can use {@link TimedOutPartialMatchHandler} as additional interface. @param patternProcessFunction The pattern process function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @param outTypeInfo Explicit specification of output type. @return {@link DataStream} which contains the resulting elements from the pattern process function.
public <R> SingleOutputStreamOperator<R> select(final PatternSelectFunction<T, R> patternSelectFunction) { // we have to extract the output type from the provided pattern selection function manually // because the TypeExtractor cannot do that if the method is wrapped in a MapFunction final TypeInformation<R> returnType = TypeExtractor.getUnaryOperatorReturnType( patternSelectFunction, PatternSelectFunction.class, 0, 1, TypeExtractor.NO_INDEX, builder.getInputType(), null, false); return select(patternSelectFunction, returnType); }
Applies a select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternSelectFunction} is called. The pattern select function can produce exactly one resulting element. @param patternSelectFunction The pattern select function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements from the pattern select function.
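For comparison, a minimal PatternSelectFunction over the same hypothetical String patternStream; it returns exactly one element per detected match.

DataStream<String> selected = patternStream.select(
    new PatternSelectFunction<String, String>() {
        @Override
        public String select(Map<String, List<String>> pattern) {
            // concatenate all events bound to the "start" part of the match
            return String.join(",", pattern.get("start"));
        }
    });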
public <R> SingleOutputStreamOperator<R> select( final PatternSelectFunction<T, R> patternSelectFunction, final TypeInformation<R> outTypeInfo) { final PatternProcessFunction<T, R> processFunction = fromSelect(builder.clean(patternSelectFunction)).build(); return process(processFunction, outTypeInfo); }
Applies a select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternSelectFunction} is called. The pattern select function can produce exactly one resulting element. @param patternSelectFunction The pattern select function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @param outTypeInfo Explicit specification of output type. @return {@link DataStream} which contains the resulting elements from the pattern select function.
public <L, R> SingleOutputStreamOperator<R> select( final OutputTag<L> timedOutPartialMatchesTag, final PatternTimeoutFunction<T, L> patternTimeoutFunction, final PatternSelectFunction<T, R> patternSelectFunction) { final TypeInformation<R> rightTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternSelectFunction, PatternSelectFunction.class, 0, 1, TypeExtractor.NO_INDEX, builder.getInputType(), null, false); return select( timedOutPartialMatchesTag, patternTimeoutFunction, rightTypeInfo, patternSelectFunction); }
Applies a select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternSelectFunction} is called. The pattern select function can produce exactly one resulting element. <p>Applies a timeout function to a partial pattern sequence which has timed out. For each partial pattern sequence the provided {@link PatternTimeoutFunction} is called. The pattern timeout function can produce exactly one resulting element. <p>You can get the stream of timed-out data by calling {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the select operation, using the same {@link OutputTag}. @param timedOutPartialMatchesTag {@link OutputTag} that identifies side output with timed out patterns @param patternTimeoutFunction The pattern timeout function which is called for each partial pattern sequence which has timed out. @param patternSelectFunction The pattern select function which is called for each detected pattern sequence. @param <L> Type of the resulting timeout elements @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements with the resulting timeout elements in a side output.
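A sketch of retrieving timed-out partial matches via the side output; it assumes the pattern carries a time constraint (for example within(Time.seconds(10))) and again uses the hypothetical String patternStream.

// the anonymous subclass ({}) is needed so the OutputTag keeps its type information
OutputTag<String> timedOutTag = new OutputTag<String>("timed-out"){};

SingleOutputStreamOperator<String> full = patternStream.select(
    timedOutTag,
    new PatternTimeoutFunction<String, String>() {
        @Override
        public String timeout(Map<String, List<String>> partialMatch, long timeoutTimestamp) {
            return "timed out at " + timeoutTimestamp;
        }
    },
    new PatternSelectFunction<String, String>() {
        @Override
        public String select(Map<String, List<String>> match) {
            return match.get("start").get(0);
        }
    });

DataStream<String> timedOut = full.getSideOutput(timedOutTag);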
public <L, R> SingleOutputStreamOperator<R> select( final OutputTag<L> timedOutPartialMatchesTag, final PatternTimeoutFunction<T, L> patternTimeoutFunction, final TypeInformation<R> outTypeInfo, final PatternSelectFunction<T, R> patternSelectFunction) { final PatternProcessFunction<T, R> processFunction = fromSelect(builder.clean(patternSelectFunction)) .withTimeoutHandler(timedOutPartialMatchesTag, builder.clean(patternTimeoutFunction)) .build(); return process(processFunction, outTypeInfo); }
Applies a select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternSelectFunction} is called. The pattern select function can produce exactly one resulting element. <p>Applies a timeout function to a partial pattern sequence which has timed out. For each partial pattern sequence the provided {@link PatternTimeoutFunction} is called. The pattern timeout function can produce exactly one resulting element. <p>You can get the stream of timed-out data by calling {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the select operation, using the same {@link OutputTag}. @param timedOutPartialMatchesTag {@link OutputTag} that identifies side output with timed out patterns @param patternTimeoutFunction The pattern timeout function which is called for each partial pattern sequence which has timed out. @param outTypeInfo Explicit specification of output type. @param patternSelectFunction The pattern select function which is called for each detected pattern sequence. @param <L> Type of the resulting timeout elements @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements with the resulting timeout elements in a side output.
@Deprecated public <L, R> SingleOutputStreamOperator<Either<L, R>> select( final PatternTimeoutFunction<T, L> patternTimeoutFunction, final PatternSelectFunction<T, R> patternSelectFunction) { final TypeInformation<R> mainTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternSelectFunction, PatternSelectFunction.class, 0, 1, TypeExtractor.NO_INDEX, builder.getInputType(), null, false); final TypeInformation<L> timeoutTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternTimeoutFunction, PatternTimeoutFunction.class, 0, 1, TypeExtractor.NO_INDEX, builder.getInputType(), null, false); final TypeInformation<Either<L, R>> outTypeInfo = new EitherTypeInfo<>(timeoutTypeInfo, mainTypeInfo); final OutputTag<L> outputTag = new OutputTag<>(UUID.randomUUID().toString(), timeoutTypeInfo); final PatternProcessFunction<T, R> processFunction = fromSelect(builder.clean(patternSelectFunction)) .withTimeoutHandler(outputTag, builder.clean(patternTimeoutFunction)) .build(); final SingleOutputStreamOperator<R> mainStream = process(processFunction, mainTypeInfo); final DataStream<L> timedOutStream = mainStream.getSideOutput(outputTag); return mainStream .connect(timedOutStream) .map(new CoMapTimeout<>()) .returns(outTypeInfo); }
Applies a select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternSelectFunction} is called. The pattern select function can produce exactly one resulting element. <p>Applies a timeout function to a partial pattern sequence which has timed out. For each partial pattern sequence the provided {@link PatternTimeoutFunction} is called. The pattern timeout function can produce exactly one resulting element. @param patternTimeoutFunction The pattern timeout function which is called for each partial pattern sequence which has timed out. @param patternSelectFunction The pattern select function which is called for each detected pattern sequence. @param <L> Type of the resulting timeout elements @param <R> Type of the resulting elements @deprecated Use {@link PatternStream#select(OutputTag, PatternTimeoutFunction, PatternSelectFunction)} that returns timed out events as a side-output @return {@link DataStream} which contains the resulting elements or the resulting timeout elements wrapped in an {@link Either} type.
public <R> SingleOutputStreamOperator<R> flatSelect(final PatternFlatSelectFunction<T, R> patternFlatSelectFunction) { // we have to extract the output type from the provided pattern selection function manually // because the TypeExtractor cannot do that if the method is wrapped in a MapFunction final TypeInformation<R> outTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternFlatSelectFunction, PatternFlatSelectFunction.class, 0, 1, new int[]{1, 0}, builder.getInputType(), null, false); return flatSelect(patternFlatSelectFunction, outTypeInfo); }
Applies a flat select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternFlatSelectFunction} is called. The pattern flat select function can produce an arbitrary number of resulting elements. @param patternFlatSelectFunction The pattern flat select function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements from the pattern flat select function.
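A corresponding sketch of a PatternFlatSelectFunction, which can emit any number of elements per match through the Collector; the hypothetical String patternStream is assumed again.

DataStream<String> flat = patternStream.flatSelect(
    new PatternFlatSelectFunction<String, String>() {
        @Override
        public void flatSelect(Map<String, List<String>> pattern, Collector<String> out) {
            // one output element per event bound to the "start" part of the match
            for (String event : pattern.get("start")) {
                out.collect(event);
            }
        }
    });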
public <R> SingleOutputStreamOperator<R> flatSelect( final PatternFlatSelectFunction<T, R> patternFlatSelectFunction, final TypeInformation<R> outTypeInfo) { final PatternProcessFunction<T, R> processFunction = fromFlatSelect(builder.clean(patternFlatSelectFunction)) .build(); return process(processFunction, outTypeInfo); }
Applies a flat select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternFlatSelectFunction} is called. The pattern flat select function can produce an arbitrary number of resulting elements. @param patternFlatSelectFunction The pattern flat select function which is called for each detected pattern sequence. @param <R> Type of the resulting elements @param outTypeInfo Explicit specification of output type. @return {@link DataStream} which contains the resulting elements from the pattern flat select function.
public <L, R> SingleOutputStreamOperator<R> flatSelect( final OutputTag<L> timedOutPartialMatchesTag, final PatternFlatTimeoutFunction<T, L> patternFlatTimeoutFunction, final PatternFlatSelectFunction<T, R> patternFlatSelectFunction) { final TypeInformation<R> rightTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternFlatSelectFunction, PatternFlatSelectFunction.class, 0, 1, new int[]{1, 0}, builder.getInputType(), null, false); return flatSelect( timedOutPartialMatchesTag, patternFlatTimeoutFunction, rightTypeInfo, patternFlatSelectFunction); }
Applies a flat select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternFlatSelectFunction} is called. The pattern flat select function can produce an arbitrary number of resulting elements. <p>Applies a timeout function to a partial pattern sequence which has timed out. For each partial pattern sequence the provided {@link PatternFlatTimeoutFunction} is called. The pattern flat timeout function can produce an arbitrary number of resulting elements. <p>You can get the stream of timed-out data by calling {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the flat select operation, using the same {@link OutputTag}. @param timedOutPartialMatchesTag {@link OutputTag} that identifies side output with timed out patterns @param patternFlatTimeoutFunction The pattern flat timeout function which is called for each partial pattern sequence which has timed out. @param patternFlatSelectFunction The pattern flat select function which is called for each detected pattern sequence. @param <L> Type of the resulting timeout elements @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements with the resulting timeout elements in a side output.
public <L, R> SingleOutputStreamOperator<R> flatSelect( final OutputTag<L> timedOutPartialMatchesTag, final PatternFlatTimeoutFunction<T, L> patternFlatTimeoutFunction, final TypeInformation<R> outTypeInfo, final PatternFlatSelectFunction<T, R> patternFlatSelectFunction) { final PatternProcessFunction<T, R> processFunction = fromFlatSelect(builder.clean(patternFlatSelectFunction)) .withTimeoutHandler(timedOutPartialMatchesTag, builder.clean(patternFlatTimeoutFunction)) .build(); return process(processFunction, outTypeInfo); }
Applies a flat select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternFlatSelectFunction} is called. The pattern flat select function can produce an arbitrary number of resulting elements. <p>Applies a timeout function to a partial pattern sequence which has timed out. For each partial pattern sequence the provided {@link PatternFlatTimeoutFunction} is called. The pattern flat timeout function can produce an arbitrary number of resulting elements. <p>You can get the stream of timed-out data by calling {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the flat select operation, using the same {@link OutputTag}. @param timedOutPartialMatchesTag {@link OutputTag} that identifies side output with timed out patterns @param patternFlatTimeoutFunction The pattern flat timeout function which is called for each partial pattern sequence which has timed out. @param patternFlatSelectFunction The pattern flat select function which is called for each detected pattern sequence. @param outTypeInfo Explicit specification of output type. @param <L> Type of the resulting timeout elements @param <R> Type of the resulting elements @return {@link DataStream} which contains the resulting elements with the resulting timeout elements in a side output.
@Deprecated public <L, R> SingleOutputStreamOperator<Either<L, R>> flatSelect( final PatternFlatTimeoutFunction<T, L> patternFlatTimeoutFunction, final PatternFlatSelectFunction<T, R> patternFlatSelectFunction) { final TypeInformation<L> timedOutTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternFlatTimeoutFunction, PatternFlatTimeoutFunction.class, 0, 1, new int[]{2, 0}, builder.getInputType(), null, false); final TypeInformation<R> mainTypeInfo = TypeExtractor.getUnaryOperatorReturnType( patternFlatSelectFunction, PatternFlatSelectFunction.class, 0, 1, new int[]{1, 0}, builder.getInputType(), null, false); final OutputTag<L> outputTag = new OutputTag<>(UUID.randomUUID().toString(), timedOutTypeInfo); final PatternProcessFunction<T, R> processFunction = fromFlatSelect(builder.clean(patternFlatSelectFunction)) .withTimeoutHandler(outputTag, builder.clean(patternFlatTimeoutFunction)) .build(); final SingleOutputStreamOperator<R> mainStream = process(processFunction, mainTypeInfo); final DataStream<L> timedOutStream = mainStream.getSideOutput(outputTag); final TypeInformation<Either<L, R>> outTypeInfo = new EitherTypeInfo<>(timedOutTypeInfo, mainTypeInfo); return mainStream .connect(timedOutStream) .map(new CoMapTimeout<>()) .returns(outTypeInfo); }
Applies a flat select function to the detected pattern sequence. For each pattern sequence the provided {@link PatternFlatSelectFunction} is called. The pattern flat select function can produce an arbitrary number of resulting elements. <p>Applies a timeout function to a partial pattern sequence which has timed out. For each partial pattern sequence the provided {@link PatternFlatTimeoutFunction} is called. The pattern timeout function can produce an arbitrary number of resulting elements. @param patternFlatTimeoutFunction The pattern flat timeout function which is called for each partial pattern sequence which has timed out. @param patternFlatSelectFunction The pattern flat select function which is called for each detected pattern sequence. @param <L> Type of the resulting timeout events @param <R> Type of the resulting events @deprecated Use {@link PatternStream#flatSelect(OutputTag, PatternFlatTimeoutFunction, PatternFlatSelectFunction)} that returns timed out events as a side-output @return {@link DataStream} which contains the resulting events from the pattern flat select function or the resulting timeout events from the pattern flat timeout function wrapped in an {@link Either} type.
public Map<String, OptionalFailure<Accumulator<?, ?>>> aggregateUserAccumulators() { Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(); for (ExecutionVertex vertex : getAllExecutionVertices()) { Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators(); if (next != null) { AccumulatorHelper.mergeInto(userAccumulators, next); } } return userAccumulators; }
Merges all accumulator results from the tasks previously executed in the Executions. @return The accumulator map
@Override public Map<String, SerializedValue<OptionalFailure<Object>>> getAccumulatorsSerialized() { return aggregateUserAccumulators() .entrySet() .stream() .collect(Collectors.toMap( Map.Entry::getKey, entry -> serializeAccumulator(entry.getKey(), entry.getValue()))); }
Gets a serialized accumulator map. @return The accumulator map with serialized accumulator values.
@Override public StringifiedAccumulatorResult[] getAccumulatorResultsStringified() { Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = aggregateUserAccumulators(); return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap); }
Returns a stringified version of the user-defined accumulators. @return an array containing the StringifiedAccumulatorResult objects
public void attachJobGraph(List<JobVertex> topologiallySorted) throws JobException { assertRunningInJobMasterMainThread(); LOG.debug("Attaching {} topologically sorted vertices to existing job graph with {} " + "vertices and {} intermediate results.", topologiallySorted.size(), tasks.size(), intermediateResults.size()); final ArrayList<ExecutionJobVertex> newExecJobVertices = new ArrayList<>(topologiallySorted.size()); final long createTimestamp = System.currentTimeMillis(); for (JobVertex jobVertex : topologiallySorted) { if (jobVertex.isInputVertex() && !jobVertex.isStoppable()) { this.isStoppable = false; } // create the execution job vertex and attach it to the graph ExecutionJobVertex ejv = new ExecutionJobVertex( this, jobVertex, 1, rpcTimeout, globalModVersion, createTimestamp); ejv.connectToPredecessors(this.intermediateResults); ExecutionJobVertex previousTask = this.tasks.putIfAbsent(jobVertex.getID(), ejv); if (previousTask != null) { throw new JobException(String.format("Encountered two job vertices with ID %s : previous=[%s] / new=[%s]", jobVertex.getID(), ejv, previousTask)); } for (IntermediateResult res : ejv.getProducedDataSets()) { IntermediateResult previousDataSet = this.intermediateResults.putIfAbsent(res.getId(), res); if (previousDataSet != null) { throw new JobException(String.format("Encountered two intermediate data set with ID %s : previous=[%s] / new=[%s]", res.getId(), res, previousDataSet)); } } this.verticesInCreationOrder.add(ejv); this.numVerticesTotal += ejv.getParallelism(); newExecJobVertices.add(ejv); } terminationFuture = new CompletableFuture<>(); failoverStrategy.notifyNewVertices(newExecJobVertices); }
--------------------------------------------------------------------------------------------
public void suspend(Throwable suspensionCause) { assertRunningInJobMasterMainThread(); if (state.isTerminalState()) { // stay in a terminal state return; } else if (transitionState(state, JobStatus.SUSPENDED, suspensionCause)) { initFailureCause(suspensionCause); // make sure no concurrent local actions interfere with the cancellation incrementGlobalModVersion(); // cancel ongoing scheduling action if (schedulingFuture != null) { schedulingFuture.cancel(false); } final ArrayList<CompletableFuture<Void>> executionJobVertexTerminationFutures = new ArrayList<>(verticesInCreationOrder.size()); for (ExecutionJobVertex ejv: verticesInCreationOrder) { executionJobVertexTerminationFutures.add(ejv.suspend()); } final ConjunctFuture<Void> jobVerticesTerminationFuture = FutureUtils.waitForAll(executionJobVertexTerminationFutures); checkState(jobVerticesTerminationFuture.isDone(), "Suspend needs to happen atomically"); jobVerticesTerminationFuture.whenComplete( (Void ignored, Throwable throwable) -> { if (throwable != null) { LOG.debug("Could not properly suspend the execution graph.", throwable); } onTerminalState(state); LOG.info("Job {} has been suspended.", getJobID()); }); } else { throw new IllegalStateException(String.format("Could not suspend because transition from %s to %s failed.", state, JobStatus.SUSPENDED)); } }
Suspends the current ExecutionGraph. <p>The JobStatus will be directly set to {@link JobStatus#SUSPENDED} iff the current state is not a terminal state. All ExecutionJobVertices will be canceled and the onTerminalState() is executed. <p>The {@link JobStatus#SUSPENDED} state is a local terminal state which stops the execution of the job but does not remove the job from the HA job store so that it can be recovered by another JobManager. @param suspensionCause Cause of the suspension
public void failGlobal(Throwable t) { assertRunningInJobMasterMainThread(); while (true) { JobStatus current = state; // stay in these states if (current == JobStatus.FAILING || current == JobStatus.SUSPENDED || current.isGloballyTerminalState()) { return; } else if (transitionState(current, JobStatus.FAILING, t)) { initFailureCause(t); // make sure no concurrent local or global actions interfere with the failover final long globalVersionForRestart = incrementGlobalModVersion(); final CompletableFuture<Void> ongoingSchedulingFuture = schedulingFuture; // cancel ongoing scheduling action if (ongoingSchedulingFuture != null) { ongoingSchedulingFuture.cancel(false); } // we build a future that is complete once all vertices have reached a terminal state final ArrayList<CompletableFuture<?>> futures = new ArrayList<>(verticesInCreationOrder.size()); // cancel all tasks (that still need cancelling) for (ExecutionJobVertex ejv : verticesInCreationOrder) { futures.add(ejv.cancelWithFuture()); } final ConjunctFuture<Void> allTerminal = FutureUtils.waitForAll(futures); allTerminal.whenComplete( (Void ignored, Throwable throwable) -> { if (throwable != null) { transitionState( JobStatus.FAILING, JobStatus.FAILED, new FlinkException("Could not cancel all execution job vertices properly.", throwable)); } else { allVerticesInTerminalState(globalVersionForRestart); } }); return; } // else: concurrent change to execution state, retry } }
Fails the execution graph globally. This failure will not be recovered by a specific failover strategy, but results in a full restart of all tasks. <p>This global failure is meant to be triggered in cases where the consistency of the execution graph's state cannot be guaranteed any more (for example when catching unexpected exceptions that indicate a bug or an unexpected call race), and where a full restart is the safe way to get consistency back. @param t The exception that caused the failure.
@Override public ArchivedExecutionConfig getArchivedExecutionConfig() { // create a summary of all relevant data accessed in the web interface's JobConfigHandler try { ExecutionConfig executionConfig = jobInformation.getSerializedExecutionConfig().deserializeValue(userClassLoader); if (executionConfig != null) { return executionConfig.archive(); } } catch (IOException | ClassNotFoundException e) { LOG.error("Couldn't create ArchivedExecutionConfig for job {} ", getJobID(), e); } return null; }
Returns the serializable {@link ArchivedExecutionConfig}. @return ArchivedExecutionConfig which may be null in case of errors
void vertexFinished() { assertRunningInJobMasterMainThread(); final int numFinished = verticesFinished.incrementAndGet(); if (numFinished == numVerticesTotal) { // done :-) // check whether we are still in "RUNNING" and trigger the final cleanup if (state == JobStatus.RUNNING) { // we do the final cleanup in the I/O executor, because it may involve // some heavier work try { for (ExecutionJobVertex ejv : verticesInCreationOrder) { ejv.getJobVertex().finalizeOnMaster(getUserClassLoader()); } } catch (Throwable t) { ExceptionUtils.rethrowIfFatalError(t); failGlobal(new Exception("Failed to finalize execution on master", t)); return; } // if we do not make this state transition, then a concurrent // cancellation or failure happened if (transitionState(JobStatus.RUNNING, JobStatus.FINISHED)) { onTerminalState(JobStatus.FINISHED); } } } }
Called whenever a vertex reaches state FINISHED (completed successfully). Once all vertices are in the FINISHED state, the program is successfully done.
private void allVerticesInTerminalState(long expectedGlobalVersionForRestart) { assertRunningInJobMasterMainThread(); // we are done, transition to the final state JobStatus current; while (true) { current = this.state; if (current == JobStatus.RUNNING) { failGlobal(new Exception("ExecutionGraph went into allVerticesInTerminalState() from RUNNING")); } else if (current == JobStatus.CANCELLING) { if (transitionState(current, JobStatus.CANCELED)) { onTerminalState(JobStatus.CANCELED); break; } } else if (current == JobStatus.FAILING) { if (tryRestartOrFail(expectedGlobalVersionForRestart)) { break; } // concurrent job status change, let's check again } else if (current.isGloballyTerminalState()) { LOG.warn("Job has entered globally terminal state without waiting for all " + "job vertices to reach final state."); break; } else { failGlobal(new Exception("ExecutionGraph went into final state from state " + current)); break; } } // done transitioning the state }
This method is a callback during cancellation/failover and called when all tasks have reached a terminal state (cancelled/failed/finished).
private boolean tryRestartOrFail(long globalModVersionForRestart) { JobStatus currentState = state; if (currentState == JobStatus.FAILING || currentState == JobStatus.RESTARTING) { final Throwable failureCause = this.failureCause; synchronized (progressLock) { if (LOG.isDebugEnabled()) { LOG.debug("Try to restart or fail the job {} ({}) if no longer possible.", getJobName(), getJobID(), failureCause); } else { LOG.info("Try to restart or fail the job {} ({}) if no longer possible.", getJobName(), getJobID()); } final boolean isFailureCauseAllowingRestart = !(failureCause instanceof SuppressRestartsException); final boolean isRestartStrategyAllowingRestart = restartStrategy.canRestart(); boolean isRestartable = isFailureCauseAllowingRestart && isRestartStrategyAllowingRestart; if (isRestartable && transitionState(currentState, JobStatus.RESTARTING)) { LOG.info("Restarting the job {} ({}).", getJobName(), getJobID()); RestartCallback restarter = new ExecutionGraphRestartCallback(this, globalModVersionForRestart); restartStrategy.restart(restarter, getJobMasterMainThreadExecutor()); return true; } else if (!isRestartable && transitionState(currentState, JobStatus.FAILED, failureCause)) { final String cause1 = isFailureCauseAllowingRestart ? null : "a type of SuppressRestartsException was thrown"; final String cause2 = isRestartStrategyAllowingRestart ? null : "the restart strategy prevented it"; LOG.info("Could not restart the job {} ({}) because {}.", getJobName(), getJobID(), StringUtils.concatenateWithAnd(cause1, cause2), failureCause); onTerminalState(JobStatus.FAILED); return true; } else { // we must have changed the state concurrently, thus we cannot complete this operation return false; } } } else { // this operation is only allowed in the state FAILING or RESTARTING return false; } }
Try to restart the job. If we cannot restart the job (e.g. no more restarts allowed), then try to fail the job. This operation is only permitted if the current state is FAILING or RESTARTING. @return true if the operation could be executed; false if a concurrent job status change occurred
public boolean updateState(TaskExecutionState state) { assertRunningInJobMasterMainThread(); final Execution attempt = currentExecutions.get(state.getID()); if (attempt != null) { try { Map<String, Accumulator<?, ?>> accumulators; switch (state.getExecutionState()) { case RUNNING: return attempt.switchToRunning(); case FINISHED: // this deserialization is exception-free accumulators = deserializeAccumulators(state); attempt.markFinished(accumulators, state.getIOMetrics()); return true; case CANCELED: // this deserialization is exception-free accumulators = deserializeAccumulators(state); attempt.completeCancelling(accumulators, state.getIOMetrics()); return true; case FAILED: // this deserialization is exception-free accumulators = deserializeAccumulators(state); attempt.markFailed(state.getError(userClassLoader), accumulators, state.getIOMetrics()); return true; default: // we mark as failed and return false, which triggers the TaskManager // to remove the task attempt.fail(new Exception("TaskManager sent illegal state update: " + state.getExecutionState())); return false; } } catch (Throwable t) { ExceptionUtils.rethrowIfFatalErrorOrOOM(t); // failures during updates leave the ExecutionGraph inconsistent failGlobal(t); return false; } } else { return false; } }
Updates the state of one of the ExecutionVertex's Execution attempts. If the new state is "FINISHED", this also updates the accumulators. @param state The state update. @return True if the state update was properly applied, false if the execution attempt was not found.
private Map<String, Accumulator<?, ?>> deserializeAccumulators(TaskExecutionState state) { AccumulatorSnapshot serializedAccumulators = state.getAccumulators(); if (serializedAccumulators != null) { try { return serializedAccumulators.deserializeUserAccumulators(userClassLoader); } catch (Throwable t) { // we catch Throwable here to include all form of linking errors that may // occur if user classes are missing in the classpath LOG.error("Failed to deserialize final accumulator results.", t); } } return null; }
Deserializes accumulators from a task state update. <p>This method never throws an exception! @param state The task execution state from which to deserialize the accumulators. @return The deserialized accumulators, or null, if there are no accumulators or an error occurred.
public void scheduleOrUpdateConsumers(ResultPartitionID partitionId) throws ExecutionGraphException { assertRunningInJobMasterMainThread(); final Execution execution = currentExecutions.get(partitionId.getProducerId()); if (execution == null) { throw new ExecutionGraphException("Cannot find execution for execution Id " + partitionId.getPartitionId() + '.'); } else if (execution.getVertex() == null){ throw new ExecutionGraphException("Execution with execution Id " + partitionId.getPartitionId() + " has no vertex assigned."); } else { execution.getVertex().scheduleOrUpdateConsumers(partitionId); } }
Schedules or updates consumers of the given result partition. @param partitionId specifying the result partition whose consumers shall be scheduled or updated @throws ExecutionGraphException if the schedule or update consumers operation could not be executed
public void updateAccumulators(AccumulatorSnapshot accumulatorSnapshot) { Map<String, Accumulator<?, ?>> userAccumulators; try { userAccumulators = accumulatorSnapshot.deserializeUserAccumulators(userClassLoader); ExecutionAttemptID execID = accumulatorSnapshot.getExecutionAttemptID(); Execution execution = currentExecutions.get(execID); if (execution != null) { execution.setAccumulators(userAccumulators); } else { LOG.debug("Received accumulator result for unknown execution {}.", execID); } } catch (Exception e) { LOG.error("Cannot update accumulators for job {}.", getJobID(), e); } }
Updates the accumulators during the runtime of a job. Final accumulator results are transferred through the UpdateTaskExecutionState message. @param accumulatorSnapshot The serialized Flink and user-defined accumulators
private Set<AllocationID> computeAllPriorAllocationIds() { HashSet<AllocationID> allPreviousAllocationIds = new HashSet<>(getNumberOfExecutionJobVertices()); for (ExecutionVertex executionVertex : getAllExecutionVertices()) { AllocationID latestPriorAllocation = executionVertex.getLatestPriorAllocation(); if (latestPriorAllocation != null) { allPreviousAllocationIds.add(latestPriorAllocation); } } return allPreviousAllocationIds; }
Computes and returns a set with the prior allocation ids from all execution vertices in the graph.
private Set<AllocationID> computeAllPriorAllocationIdsIfRequiredByScheduling() { // This is a temporary optimization to avoid computing all previous allocations if not required // This can go away when we progress with the implementation of the Scheduler. if (slotProvider instanceof Scheduler && ((Scheduler) slotProvider).requiresPreviousExecutionGraphAllocations()) { return computeAllPriorAllocationIds(); } else { return Collections.emptySet(); } }
Returns the result of {@link #computeAllPriorAllocationIds()}, but only if the scheduling really requires it. Otherwise this method simply returns an empty set.
public static void clean(Object func, boolean checkSerializable) { if (func == null) { return; } final Class<?> cls = func.getClass(); // First find the field name of the "this$0" field, this can // be "this$x" depending on the nesting boolean closureAccessed = false; for (Field f: cls.getDeclaredFields()) { if (f.getName().startsWith("this$")) { // found a closure referencing field - now try to clean closureAccessed |= cleanThis0(func, cls, f.getName()); } } if (checkSerializable) { try { InstantiationUtil.serializeObject(func); } catch (Exception e) { String functionType = getSuperClassOrInterfaceName(func.getClass()); String msg = functionType == null ? (func + " is not serializable.") : ("The implementation of the " + functionType + " is not serializable."); if (closureAccessed) { msg += " The implementation accesses fields of its enclosing class, which is " + "a common reason for non-serializability. " + "A common solution is to make the function a proper (non-inner) class, or " + "a static inner class."; } else { msg += " The object probably contains or references non serializable fields."; } throw new InvalidProgramException(msg, e); } } }
Tries to clean the closure of the given object, if the object is a non-static inner class. @param func The object whose closure should be cleaned. @param checkSerializable Flag to indicate whether serializability should be checked after the closure cleaning attempt. @throws InvalidProgramException Thrown, if 'checkSerializable' is true, and the object was not serializable after the closure cleaning. @throws RuntimeException A RuntimeException may be thrown if the code of the class could not be loaded for analysis during the closure cleaning.
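A small sketch of the situation the cleaner addresses; the class and method names are hypothetical, the ClosureCleaner class is assumed to live in org.apache.flink.api.java, and the call uses the clean(Object, boolean) signature shown above.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.ClosureCleaner;

public class OuterJob {   // deliberately not Serializable

    public MapFunction<String, String> buildMapper() {
        // the anonymous class carries a hidden this$0 reference to OuterJob,
        // even though it never uses any state of the enclosing instance
        MapFunction<String, String> mapper = new MapFunction<String, String>() {
            @Override
            public String map(String value) {
                return value.toUpperCase();
            }
        };
        // nulls the unused this$0 reference, then verifies that the function serializes
        ClosureCleaner.clean(mapper, true);
        return mapper;
    }
}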
protected void privateDuplicate(TupleComparatorBase<T> toClone) { // copy fields and serializer factories this.keyPositions = toClone.keyPositions; this.serializers = new TypeSerializer[toClone.serializers.length]; for (int i = 0; i < toClone.serializers.length; i++) { this.serializers[i] = toClone.serializers[i].duplicate(); } this.comparators = new TypeComparator[toClone.comparators.length]; for (int i = 0; i < toClone.comparators.length; i++) { this.comparators[i] = toClone.comparators[i].duplicate(); } this.normalizedKeyLengths = toClone.normalizedKeyLengths; this.numLeadingNormalizableKeys = toClone.numLeadingNormalizableKeys; this.normalizableKeyPrefixLen = toClone.normalizableKeyPrefixLen; this.invertNormKey = toClone.invertNormKey; }
ScalaTupleComparator
protected final void instantiateDeserializationUtils() { this.deserializedFields1 = new Object[this.serializers.length]; this.deserializedFields2 = new Object[this.serializers.length]; for (int i = 0; i < this.serializers.length; i++) { this.deserializedFields1[i] = this.serializers[i].createInstance(); this.deserializedFields2[i] = this.serializers[i].createInstance(); } }
--------------------------------------------------------------------------------------------
public static RestartStrategy createRestartStrategy(RestartStrategies.RestartStrategyConfiguration restartStrategyConfiguration) { if (restartStrategyConfiguration instanceof RestartStrategies.NoRestartStrategyConfiguration) { return new NoRestartStrategy(); } else if (restartStrategyConfiguration instanceof RestartStrategies.FixedDelayRestartStrategyConfiguration) { RestartStrategies.FixedDelayRestartStrategyConfiguration fixedDelayConfig = (RestartStrategies.FixedDelayRestartStrategyConfiguration) restartStrategyConfiguration; return new FixedDelayRestartStrategy( fixedDelayConfig.getRestartAttempts(), fixedDelayConfig.getDelayBetweenAttemptsInterval().toMilliseconds()); } else if (restartStrategyConfiguration instanceof RestartStrategies.FailureRateRestartStrategyConfiguration) { RestartStrategies.FailureRateRestartStrategyConfiguration config = (RestartStrategies.FailureRateRestartStrategyConfiguration) restartStrategyConfiguration; return new FailureRateRestartStrategy( config.getMaxFailureRate(), config.getFailureInterval(), config.getDelayBetweenAttemptsInterval() ); } else if (restartStrategyConfiguration instanceof RestartStrategies.FallbackRestartStrategyConfiguration) { return null; } else { throw new IllegalArgumentException("Unknown restart strategy configuration " + restartStrategyConfiguration + "."); } }
Creates a {@link RestartStrategy} instance from the given {@link org.apache.flink.api.common.restartstrategy.RestartStrategies.RestartStrategyConfiguration}. @param restartStrategyConfiguration Restart strategy configuration which specifies which restart strategy to instantiate @return RestartStrategy instance
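For reference, the configurations consumed by this factory method are usually built on the application side, for example on the execution environment; a minimal sketch:

import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RestartStrategyExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // fixed-delay strategy: at most 3 attempts, 10 seconds between attempts;
        // createRestartStrategy(...) above turns this configuration into a FixedDelayRestartStrategy
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.of(10, TimeUnit.SECONDS)));
    }
}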
public static RestartStrategyFactory createRestartStrategyFactory(Configuration configuration) throws Exception { String restartStrategyName = configuration.getString(ConfigConstants.RESTART_STRATEGY, null); if (restartStrategyName == null) { // support deprecated ConfigConstants values final int numberExecutionRetries = configuration.getInteger(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, ConfigConstants.DEFAULT_EXECUTION_RETRIES); String pauseString = configuration.getString(AkkaOptions.WATCH_HEARTBEAT_PAUSE); String delayString = configuration.getString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, pauseString); long delay; try { delay = Duration.apply(delayString).toMillis(); } catch (NumberFormatException nfe) { if (delayString.equals(pauseString)) { throw new Exception("Invalid config value for " + AkkaOptions.WATCH_HEARTBEAT_PAUSE.key() + ": " + pauseString + ". Value must be a valid duration (such as '10 s' or '1 min')"); } else { throw new Exception("Invalid config value for " + ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY + ": " + delayString + ". Value must be a valid duration (such as '100 milli' or '10 s')"); } } if (numberExecutionRetries > 0 && delay >= 0) { return new FixedDelayRestartStrategy.FixedDelayRestartStrategyFactory(numberExecutionRetries, delay); } else { return new NoOrFixedIfCheckpointingEnabledRestartStrategyFactory(); } } switch (restartStrategyName.toLowerCase()) { case "none": case "off": case "disable": return NoRestartStrategy.createFactory(configuration); case "fixeddelay": case "fixed-delay": return FixedDelayRestartStrategy.createFactory(configuration); case "failurerate": case "failure-rate": return FailureRateRestartStrategy.createFactory(configuration); default: try { Class<?> clazz = Class.forName(restartStrategyName); if (clazz != null) { Method method = clazz.getMethod(CREATE_METHOD, Configuration.class); if (method != null) { Object result = method.invoke(null, configuration); if (result != null) { return (RestartStrategyFactory) result; } } } } catch (ClassNotFoundException cnfe) { LOG.warn("Could not find restart strategy class {}.", restartStrategyName); } catch (NoSuchMethodException nsme) { LOG.warn("Class {} does not has static method {}.", restartStrategyName, CREATE_METHOD); } catch (InvocationTargetException ite) { LOG.warn("Cannot call static method {} from class {}.", CREATE_METHOD, restartStrategyName); } catch (IllegalAccessException iae) { LOG.warn("Illegal access while calling method {} from class {}.", CREATE_METHOD, restartStrategyName); } // fallback in case of an error return new NoOrFixedIfCheckpointingEnabledRestartStrategyFactory(); } }
Creates a {@link RestartStrategy} instance from the given {@link Configuration}. @return RestartStrategy instance @throws Exception which indicates that the RestartStrategy could not be instantiated.
@PublicEvolving static <T extends Writable> TypeInformation<T> getWritableTypeInfo(Class<T> typeClass) { if (Writable.class.isAssignableFrom(typeClass) && !typeClass.equals(Writable.class)) { return new WritableTypeInfo<T>(typeClass); } else { throw new InvalidTypesException("The given class is no subclass of " + Writable.class.getName()); } }
--------------------------------------------------------------------------------------------
public static boolean shouldAutoCastTo(PrimitiveType from, PrimitiveType to) { InternalType[] castTypes = POSSIBLE_CAST_MAP.get(from); if (castTypes != null) { for (InternalType type : castTypes) { if (type.equals(to)) { return true; } } } return false; }
Returns whether this type should be automatically cast to the target type in an arithmetic operation.
@Override public void setup(TaskContext<CrossFunction<T1, T2, OT>, OT> context) { this.taskContext = context; this.running = true; }
------------------------------------------------------------------------
public static FileSystemFactory decorateIfLimited(FileSystemFactory factory, String scheme, Configuration config) { checkNotNull(factory, "factory"); final ConnectionLimitingSettings settings = ConnectionLimitingSettings.fromConfig(config, scheme); // decorate only if any limit is configured if (settings == null) { // no limit configured return factory; } else { return new ConnectionLimitingFactory(factory, settings); } }
Decorates the given factory with a {@code ConnectionLimitingFactory}, if the given configuration specifies connection limiting for the given file system scheme. Otherwise, it returns the given factory as is. @param factory The factory to potentially decorate. @param scheme The file scheme for which to check the configuration. @param config The configuration. @return The decorated factory, if connection limiting is configured, the original factory otherwise.
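A hedged example of how the decoration is typically driven by configuration; the fs.<scheme>.limit.* key names and the rawS3Factory instance are assumptions for illustration.

Configuration config = new Configuration();
// assumed keys of the form fs.<scheme>.limit.*; "s3" is just an example scheme
config.setInteger("fs.s3.limit.total", 50);
config.setInteger("fs.s3.limit.input", 32);

// rawS3Factory is a hypothetical FileSystemFactory for the "s3" scheme
FileSystemFactory limited =
        ConnectionLimitingFactory.decorateIfLimited(rawS3Factory, "s3", config);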
public static <T> void applyToAllWhileSuppressingExceptions( Iterable<T> inputs, ThrowingConsumer<T, ? extends Exception> throwingConsumer) throws Exception { if (inputs != null && throwingConsumer != null) { Exception exception = null; for (T input : inputs) { if (input != null) { try { throwingConsumer.accept(input); } catch (Exception ex) { exception = ExceptionUtils.firstOrSuppressed(ex, exception); } } } if (exception != null) { throw exception; } } }
This method supplies all elements from the input to the consumer. Exceptions that happen on elements are suppressed until all elements are processed. If exceptions happened for one or more of the inputs, they are reported in a combining suppressed exception. @param inputs iterator for all inputs to the throwingConsumer. @param throwingConsumer this consumer will be called for all elements delivered by the input iterator. @param <T> the type of input. @throws Exception collected exceptions that happened during the invocation of the consumer on the input elements.
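A typical use is closing a group of resources where every close() should be attempted; a minimal sketch, assuming the method lives in Flink's LambdaUtil class and that the three streams are opened elsewhere.

List<AutoCloseable> resources = Arrays.asList(stream1, stream2, stream3);

// every close() is attempted; if several fail, the first exception is thrown
// and the remaining ones are attached to it as suppressed exceptions
LambdaUtil.applyToAllWhileSuppressingExceptions(resources, AutoCloseable::close);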
public static <E extends Throwable> void withContextClassLoader( final ClassLoader cl, final ThrowingRunnable<E> r) throws E { try (TemporaryClassLoaderContext tmpCl = new TemporaryClassLoaderContext(cl)) { r.run(); } }
Runs the given runnable with the given ClassLoader as the thread's {@link Thread#setContextClassLoader(ClassLoader) context class loader}. <p>The method makes sure to set the context class loader of the calling thread back to what it was before, once the runnable has completed.
public static <R, E extends Throwable> R withContextClassLoader( final ClassLoader cl, final SupplierWithException<R, E> s) throws E { try (TemporaryClassLoaderContext tmpCl = new TemporaryClassLoaderContext(cl)) { return s.get(); } }
Runs the given supplier with the given ClassLoader as the thread's {@link Thread#setContextClassLoader(ClassLoader) context class loader}. <p>The method makes sure to set the context class loader of the calling thread back to what it was before, once the supplier has completed.
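A short sketch of both overloads; LambdaUtil as the enclosing class and the user-code class name are assumptions.

static Class<?> loadWithUserClassLoader(ClassLoader userCodeClassLoader) throws Exception {
    // runnable variant: side effects only; the previous context class loader
    // is restored once the runnable has finished
    LambdaUtil.withContextClassLoader(userCodeClassLoader, () -> {
        Thread.currentThread().getContextClassLoader().loadClass("com.example.UserFunction");
    });

    // supplier variant: returns a value to the caller
    return LambdaUtil.withContextClassLoader(
            userCodeClassLoader,
            () -> Class.forName("com.example.UserFunction", false, userCodeClassLoader));
}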
private ExecutorService createQueryExecutor() { ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat("Flink " + getServerName() + " Thread %d") .build(); return Executors.newFixedThreadPool(numQueryThreads, threadFactory); }
Creates a thread pool for the query execution. @return Thread pool for query execution
public void start() throws Throwable { Preconditions.checkState(serverAddress == null && serverShutdownFuture.get() == null, serverName + " is already running @ " + serverAddress + ". "); Iterator<Integer> portIterator = bindPortRange.iterator(); while (portIterator.hasNext() && !attemptToBind(portIterator.next())) {} if (serverAddress != null) { log.info("Started {} @ {}.", serverName, serverAddress); } else { log.info("Unable to start {}. All ports in provided range ({}) are occupied.", serverName, bindPortRange); throw new FlinkRuntimeException("Unable to start " + serverName + ". All ports in provided range are occupied."); } }
Starts the server by binding to the configured bind address (blocking). @throws Exception If something goes wrong during the bind operation.
private boolean attemptToBind(final int port) throws Throwable { log.debug("Attempting to start {} on port {}.", serverName, port); this.queryExecutor = createQueryExecutor(); this.handler = initializeHandler(); final NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads); final ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat("Flink " + serverName + " EventLoop Thread %d") .build(); final NioEventLoopGroup nioGroup = new NioEventLoopGroup(numEventLoopThreads, threadFactory); this.bootstrap = new ServerBootstrap() .localAddress(bindAddress, port) .group(nioGroup) .channel(NioServerSocketChannel.class) .option(ChannelOption.ALLOCATOR, bufferPool) .childOption(ChannelOption.ALLOCATOR, bufferPool) .childHandler(new ServerChannelInitializer<>(handler)); final int defaultHighWaterMark = 64 * 1024; // from DefaultChannelConfig (not exposed) //noinspection ConstantConditions // (ignore warning here to make this flexible in case the configuration values change) if (LOW_WATER_MARK > defaultHighWaterMark) { bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK); bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK); } else { // including (newHighWaterMark < defaultLowWaterMark) bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK); bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK); } try { final ChannelFuture future = bootstrap.bind().sync(); if (future.isSuccess()) { final InetSocketAddress localAddress = (InetSocketAddress) future.channel().localAddress(); serverAddress = new InetSocketAddress(localAddress.getAddress(), localAddress.getPort()); return true; } // the following throw is to bypass Netty's "optimization magic" // and catch the bind exception. // the exception is thrown by the sync() call above. throw future.cause(); } catch (BindException e) { log.debug("Failed to start {} on port {}: {}.", serverName, port, e.getMessage()); try { // we shutdown the server but we reset the future every time because in // case of failure to bind, we will call attemptToBind() here, and not resetting // the flag will interfere with future shutdown attempts. shutdownServer() .whenComplete((ignoredV, ignoredT) -> serverShutdownFuture.getAndSet(null)) .get(); } catch (Exception r) { // Here we were seeing this problem: // https://github.com/netty/netty/issues/4357 if we do a get(). // this is why we now simply wait a bit so that everything is shut down. log.warn("Problem while shutting down {}: {}", serverName, r.getMessage()); } } // any other type of exception we let it bubble up. return false; }
Tries to start the server at the provided port. <p>This, in conjunction with {@link #start()}, tries to start the server on a free port among the port range provided at the constructor. @param port the port to try to bind the server to. @throws Throwable If something goes wrong during the bind operation.
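As an illustration of the same try-each-port pattern outside of Netty, the following minimal sketch binds a plain java.net.ServerSocket to the first free port of a range; the class and method names are invented for the example.

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.util.Iterator;

public class PortRangeBinder {

    /** Tries each port of the range until a bind succeeds; returns the bound socket, or null if all are taken. */
    static ServerSocket bindToFirstFreePort(InetAddress bindAddress, Iterator<Integer> ports) {
        while (ports.hasNext()) {
            int port = ports.next();
            try {
                return new ServerSocket(port, 0, bindAddress);
            } catch (IOException e) {
                // port occupied (BindException) or otherwise unusable: try the next one
            }
        }
        return null; // all ports in the provided range were occupied
    }
}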
public CompletableFuture<Void> shutdownServer() { CompletableFuture<Void> shutdownFuture = new CompletableFuture<>(); if (serverShutdownFuture.compareAndSet(null, shutdownFuture)) { log.info("Shutting down {} @ {}", serverName, serverAddress); final CompletableFuture<Void> groupShutdownFuture = new CompletableFuture<>(); if (bootstrap != null) { EventLoopGroup group = bootstrap.group(); if (group != null && !group.isShutdown()) { group.shutdownGracefully(0L, 0L, TimeUnit.MILLISECONDS) .addListener(finished -> { if (finished.isSuccess()) { groupShutdownFuture.complete(null); } else { groupShutdownFuture.completeExceptionally(finished.cause()); } }); } else { groupShutdownFuture.complete(null); } } else { groupShutdownFuture.complete(null); } final CompletableFuture<Void> handlerShutdownFuture = new CompletableFuture<>(); if (handler == null) { handlerShutdownFuture.complete(null); } else { handler.shutdown().whenComplete((result, throwable) -> { if (throwable != null) { handlerShutdownFuture.completeExceptionally(throwable); } else { handlerShutdownFuture.complete(null); } }); } final CompletableFuture<Void> queryExecShutdownFuture = CompletableFuture.runAsync(() -> { if (queryExecutor != null) { ExecutorUtils.gracefulShutdown(10L, TimeUnit.MINUTES, queryExecutor); } }); CompletableFuture.allOf( queryExecShutdownFuture, groupShutdownFuture, handlerShutdownFuture ).whenComplete((result, throwable) -> { if (throwable != null) { shutdownFuture.completeExceptionally(throwable); } else { shutdownFuture.complete(null); } }); } return serverShutdownFuture.get(); }
Shuts down the server and all related thread pools. @return A {@link CompletableFuture} that will be completed upon termination of the shutdown process.
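The shutdown combines several independently completing steps into one future via CompletableFuture.allOf; below is a standalone sketch of that composition pattern (the step names are illustrative, not the actual server fields).

import java.util.concurrent.CompletableFuture;

public class ShutdownComposition {
    public static void main(String[] args) {
        // three independent shutdown steps, each represented by its own future
        CompletableFuture<Void> eventLoopDown = CompletableFuture.completedFuture(null);
        CompletableFuture<Void> handlerDown = CompletableFuture.completedFuture(null);
        CompletableFuture<Void> executorDown = CompletableFuture.runAsync(() -> { /* e.g. graceful executor shutdown */ });

        // a single future the caller can wait on; it fails if any step fails
        CompletableFuture<Void> shutdownFuture = new CompletableFuture<>();
        CompletableFuture.allOf(eventLoopDown, handlerDown, executorDown)
                .whenComplete((ignored, error) -> {
                    if (error != null) {
                        shutdownFuture.completeExceptionally(error);
                    } else {
                        shutdownFuture.complete(null);
                    }
                });

        shutdownFuture.join();
        System.out.println("all shutdown steps finished");
    }
}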
private static String replaceInvalidChars(String str) { char[] chars = null; final int strLen = str.length(); int pos = 0; for (int i = 0; i < strLen; i++) { final char c = str.charAt(i); switch (c) { case ' ': case '.': case ':': case ',': if (chars == null) { chars = str.toCharArray(); } chars[pos++] = '_'; break; default: if (chars != null) { chars[pos] = c; } pos++; } } return chars == null ? str : new String(chars, 0, pos); }
Lightweight method to replace unsupported characters. If the string does not contain any unsupported characters, this method creates no new string (and in fact no new objects at all). <p>Replacements: <ul> <li>{@code space : . ,} are replaced by {@code _} (underscore)</li> </ul>
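For illustration, assuming the method above were reachable (it is private in its class), the replacement behaves as follows:

replaceInvalidChars("task manager.metric:count,total");  // -> "task_manager_metric_count_total"
replaceInvalidChars("alreadyClean");                     // -> returns the very same String instance, no copy is made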
public static MetricQueryService createMetricQueryService( RpcService rpcService, ResourceID resourceID, long maximumFrameSize) { String endpointId = resourceID == null ? METRIC_QUERY_SERVICE_NAME : METRIC_QUERY_SERVICE_NAME + "_" + resourceID.getResourceIdString(); return new MetricQueryService(rpcService, endpointId, maximumFrameSize); }
Creates the MetricQueryService endpoint on the given RpcService. @param rpcService The RpcService that runs the MetricQueryService @param resourceID resource ID used to disambiguate the endpoint name @param maximumFrameSize the maximum message frame size @return the created MetricQueryService
@Override protected V mergeState(V a, V b) throws Exception { return reduceTransformation.apply(a, b); }
------------------------------------------------------------------------
@Override public void channelRead0(ChannelHandlerContext ctx, RoutedRequest routedRequest) throws Exception { String requestPath = routedRequest.getPath(); respondWithFile(ctx, routedRequest.getRequest(), requestPath); }
------------------------------------------------------------------------
private void respondWithFile(ChannelHandlerContext ctx, HttpRequest request, String requestPath) throws IOException, ParseException { // make sure we request the "index.html" in case there is a directory request if (requestPath.endsWith("/")) { requestPath = requestPath + "index.html"; } if (!requestPath.contains(".")) { // we assume that the path ends in either .html or .js requestPath = requestPath + ".json"; } // convert to absolute path final File file = new File(rootPath, requestPath); if (!file.exists()) { // file does not exist. Try to load it with the classloader ClassLoader cl = HistoryServerStaticFileServerHandler.class.getClassLoader(); try (InputStream resourceStream = cl.getResourceAsStream("web" + requestPath)) { boolean success = false; try { if (resourceStream != null) { URL root = cl.getResource("web"); URL requested = cl.getResource("web" + requestPath); if (root != null && requested != null) { URI rootURI = new URI(root.getPath()).normalize(); URI requestedURI = new URI(requested.getPath()).normalize(); // Check that we don't load anything from outside of the // expected scope. if (!rootURI.relativize(requestedURI).equals(requestedURI)) { LOG.debug("Loading missing file from classloader: {}", requestPath); // ensure that directory to file exists. file.getParentFile().mkdirs(); Files.copy(resourceStream, file.toPath()); success = true; } } } } catch (Throwable t) { LOG.error("error while responding", t); } finally { if (!success) { LOG.debug("Unable to load requested file {} from classloader", requestPath); HandlerUtils.sendErrorResponse( ctx, request, new ErrorResponseBody("File not found."), NOT_FOUND, Collections.emptyMap()); return; } } } } if (!file.exists() || file.isHidden() || file.isDirectory() || !file.isFile()) { HandlerUtils.sendErrorResponse( ctx, request, new ErrorResponseBody("File not found."), NOT_FOUND, Collections.emptyMap()); return; } if (!file.getCanonicalFile().toPath().startsWith(rootPath.toPath())) { HandlerUtils.sendErrorResponse( ctx, request, new ErrorResponseBody("File not found."), NOT_FOUND, Collections.emptyMap()); return; } // cache validation final String ifModifiedSince = request.headers().get(IF_MODIFIED_SINCE); if (ifModifiedSince != null && !ifModifiedSince.isEmpty()) { SimpleDateFormat dateFormatter = new SimpleDateFormat(StaticFileServerHandler.HTTP_DATE_FORMAT, Locale.US); Date ifModifiedSinceDate = dateFormatter.parse(ifModifiedSince); // Only compare up to the second because the datetime format we send to the client // does not have milliseconds long ifModifiedSinceDateSeconds = ifModifiedSinceDate.getTime() / 1000; long fileLastModifiedSeconds = file.lastModified() / 1000; if (ifModifiedSinceDateSeconds == fileLastModifiedSeconds) { if (LOG.isDebugEnabled()) { LOG.debug("Responding 'NOT MODIFIED' for file '" + file.getAbsolutePath() + '\''); } StaticFileServerHandler.sendNotModified(ctx); return; } } if (LOG.isDebugEnabled()) { LOG.debug("Responding with file '" + file.getAbsolutePath() + '\''); } // Don't need to close this manually. Netty's DefaultFileRegion will take care of it. 
final RandomAccessFile raf; try { raf = new RandomAccessFile(file, "r"); } catch (FileNotFoundException e) { HandlerUtils.sendErrorResponse( ctx, request, new ErrorResponseBody("File not found."), NOT_FOUND, Collections.emptyMap()); return; } try { long fileLength = raf.length(); HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK); StaticFileServerHandler.setContentTypeHeader(response, file); // the job overview should be updated as soon as possible if (!requestPath.equals("/joboverview.json")) { StaticFileServerHandler.setDateAndCacheHeaders(response, file); } if (HttpHeaders.isKeepAlive(request)) { response.headers().set(CONNECTION, HttpHeaders.Values.KEEP_ALIVE); } HttpHeaders.setContentLength(response, fileLength); // write the initial line and the header. ctx.write(response); // write the content. ChannelFuture lastContentFuture; if (ctx.pipeline().get(SslHandler.class) == null) { ctx.write(new DefaultFileRegion(raf.getChannel(), 0, fileLength), ctx.newProgressivePromise()); lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT); } else { lastContentFuture = ctx.writeAndFlush(new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)), ctx.newProgressivePromise()); // HttpChunkedInput will write the end marker (LastHttpContent) for us. } // close the connection, if no keep-alive is needed if (!HttpHeaders.isKeepAlive(request)) { lastContentFuture.addListener(ChannelFutureListener.CLOSE); } } catch (Exception e) { raf.close(); LOG.error("Failed to serve file.", e); HandlerUtils.sendErrorResponse( ctx, request, new ErrorResponseBody("Internal server error."), INTERNAL_SERVER_ERROR, Collections.emptyMap()); } }
Response when running with leading JobManager.
private static <T> void aggregateRoutes( String method, Map<PathPattern, T> routes, List<String> accMethods, List<String> accPatterns, List<String> accTargets) { for (Map.Entry<PathPattern, T> entry : routes.entrySet()) { accMethods.add(method); accPatterns.add("/" + entry.getKey().pattern()); accTargets.add(targetToString(entry.getValue())); } }
Helper for toString.
private static int maxLength(List<String> coll) { int max = 0; for (String e : coll) { int length = e.length(); if (length > max) { max = length; } } return max; }
Helper for toString.
private static String targetToString(Object target) { if (target instanceof Class) { return ((Class<?>) target).getName(); } else { return target.toString(); } }
Helper for toString. <p>For example, returns "io.netty.example.http.router.HttpRouterServerHandler" instead of "class io.netty.example.http.router.HttpRouterServerHandler"
public int size() { int ret = anyMethodRouter.size(); for (MethodlessRouter<T> router : routers.values()) { ret += router.size(); } return ret; }
Returns the number of routes in this router.
public Router<T> addRoute(HttpMethod method, String pathPattern, T target) { getMethodlessRouter(method).addRoute(pathPattern, target); return this; }
Add route. <p>A path pattern can only point to one target. This method does nothing if the pattern has already been added.
public void removePathPattern(String pathPattern) { for (MethodlessRouter<T> router : routers.values()) { router.removePathPattern(pathPattern); } anyMethodRouter.removePathPattern(pathPattern); }
Removes the route specified by the path pattern.
public RouteResult<T> route(HttpMethod method, String path) { return route(method, path, Collections.emptyMap()); }
If there's no match, returns the result with {@link #notFound(Object) notFound} as the target if it is set, otherwise returns {@code null}.
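A usage sketch of the router API above; the RouteResult accessors target() and pathParams() are assumed from the surrounding router classes, and the handler names are placeholders.

Router<String> router = new Router<String>()
        .addRoute(HttpMethod.GET, "/jobs/:jobid", "jobDetailsHandler")
        .addRoute(HttpMethod.GET, "/jobs/:jobid/vertices", "jobVerticesHandler");

RouteResult<String> result = router.route(HttpMethod.GET, "/jobs/1234");
// with no match and no notFound target, route(...) would return null
String target = result.target();                  // "jobDetailsHandler"
String jobId = result.pathParams().get("jobid");  // "1234"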
private String[] decodePathTokens(String uri) { // Need to split the original URI (instead of QueryStringDecoder#path) then decode the tokens (components), // otherwise /test1/123%2F456 will not match /test1/:p1 int qPos = uri.indexOf("?"); String encodedPath = (qPos >= 0) ? uri.substring(0, qPos) : uri; String[] encodedTokens = PathPattern.removeSlashesAtBothEnds(encodedPath).split("/"); String[] decodedTokens = new String[encodedTokens.length]; for (int i = 0; i < encodedTokens.length; i++) { String encodedToken = encodedTokens[i]; decodedTokens[i] = QueryStringDecoder.decodeComponent(encodedToken); } return decodedTokens; }
--------------------------------------------------------------------------
public Set<HttpMethod> allowedMethods(String uri) { QueryStringDecoder decoder = new QueryStringDecoder(uri); String[] tokens = PathPattern.removeSlashesAtBothEnds(decoder.path()).split("/"); if (anyMethodRouter.anyMatched(tokens)) { return allAllowedMethods(); } Set<HttpMethod> ret = new HashSet<HttpMethod>(routers.size()); for (Map.Entry<HttpMethod, MethodlessRouter<T>> entry : routers.entrySet()) { MethodlessRouter<T> router = entry.getValue(); if (router.anyMatched(tokens)) { HttpMethod method = entry.getKey(); ret.add(method); } } return ret; }
Returns allowed methods for a specific URI. <p>For {@code OPTIONS *}, use {@link #allAllowedMethods()} instead of this method.
public Set<HttpMethod> allAllowedMethods() { if (anyMethodRouter.size() > 0) { Set<HttpMethod> ret = new HashSet<HttpMethod>(9); ret.add(HttpMethod.CONNECT); ret.add(HttpMethod.DELETE); ret.add(HttpMethod.GET); ret.add(HttpMethod.HEAD); ret.add(HttpMethod.OPTIONS); ret.add(HttpMethod.PATCH); ret.add(HttpMethod.POST); ret.add(HttpMethod.PUT); ret.add(HttpMethod.TRACE); return ret; } else { return new HashSet<HttpMethod>(routers.keySet()); } }
Returns all methods that this router handles. For {@code OPTIONS *}.
public Router<T> addConnect(String path, T target) { return addRoute(HttpMethod.CONNECT, path, target); }
--------------------------------------------------------------------------
public static boolean isRunningInExpectedThread(@Nullable Thread expected) { Thread actual = Thread.currentThread(); if (expected != actual) { String violationMsg = "Violation of main thread constraint detected: expected <" + expected + "> but running in <" + actual + ">."; LOG.warn(violationMsg, new Exception(violationMsg)); return false; } return true; }
Returns true iff the current thread equals the provided expected thread and logs violations. @param expected the expected main thread. @return true iff the current thread equals the provided expected thread.
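A small sketch of how such a check is typically used to guard single-threaded state; the field and method names are illustrative, and the static method above is assumed to be in scope.

private final Thread mainThread = Thread.currentThread();

void updateInternalState() {
    // logs a warning (with stack trace) and returns false if called from a different thread
    assert isRunningInExpectedThread(mainThread);
    // ... mutations that are only safe from the main thread ...
}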
private TypeInformation<KEY> validateKeyType(TypeInformation<KEY> keyType) { Stack<TypeInformation<?>> stack = new Stack<>(); stack.push(keyType); List<TypeInformation<?>> unsupportedTypes = new ArrayList<>(); while (!stack.isEmpty()) { TypeInformation<?> typeInfo = stack.pop(); if (!validateKeyTypeIsHashable(typeInfo)) { unsupportedTypes.add(typeInfo); } if (typeInfo instanceof TupleTypeInfoBase) { for (int i = 0; i < typeInfo.getArity(); i++) { stack.push(((TupleTypeInfoBase) typeInfo).getTypeAt(i)); } } } if (!unsupportedTypes.isEmpty()) { throw new InvalidProgramException("Type " + keyType + " cannot be used as key. Contained " + "UNSUPPORTED key types: " + StringUtils.join(unsupportedTypes, ", ") + ". Look " + "at the keyBy() documentation for the conditions a type has to satisfy in order to be " + "eligible for a key."); } return keyType; }
Validates that a given type of element (as encoded by the provided {@link TypeInformation}) can be used as a key in the {@code DataStream.keyBy()} operation. This is done by searching depth-first the key type and checking if each of the composite types satisfies the required conditions (see {@link #validateKeyTypeIsHashable(TypeInformation)}). @param keyType The {@link TypeInformation} of the key.
private boolean validateKeyTypeIsHashable(TypeInformation<?> type) { try { return (type instanceof PojoTypeInfo) ? !type.getTypeClass().getMethod("hashCode").getDeclaringClass().equals(Object.class) : !(type instanceof PrimitiveArrayTypeInfo || type instanceof BasicArrayTypeInfo || type instanceof ObjectArrayTypeInfo); } catch (NoSuchMethodException ignored) { // this should never happen as we are just searching for the hashCode() method. } return false; }
Validates that a given type of element (as encoded by the provided {@link TypeInformation}) can be used as a key in the {@code DataStream.keyBy()} operation. @param type The {@link TypeInformation} of the type to check. @return {@code false} if: <ol> <li>it is a POJO type but does not override the {@link #hashCode()} method and relies on the {@link Object#hashCode()} implementation.</li> <li>it is an array of any type (see {@link PrimitiveArrayTypeInfo}, {@link BasicArrayTypeInfo}, {@link ObjectArrayTypeInfo}).</li> </ol>, {@code true} otherwise.
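To make the POJO rule concrete, a key type like BadKey below would be rejected by this check because it inherits Object.hashCode(), whereas GoodKey would pass; both are illustrative classes with public fields so Flink treats them as POJOs.

// rejected as a key: relies on Object.hashCode(), so two value-equal instances produce different hashes
public class BadKey {
    public String name;
    public int id;
}

// accepted as a key: value-based hashCode()/equals()
public class GoodKey {
    public String name;
    public int id;

    @Override
    public int hashCode() {
        return 31 * name.hashCode() + id;
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof GoodKey && ((GoodKey) o).id == id && ((GoodKey) o).name.equals(name);
    }
}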
@Override @PublicEvolving public <R> SingleOutputStreamOperator<R> transform(String operatorName, TypeInformation<R> outTypeInfo, OneInputStreamOperator<T, R> operator) { SingleOutputStreamOperator<R> returnStream = super.transform(operatorName, outTypeInfo, operator); // inject the key selector and key type OneInputTransformation<T, R> transform = (OneInputTransformation<T, R>) returnStream.getTransformation(); transform.setStateKeySelector(keySelector); transform.setStateKeyType(keyType); return returnStream; }
------------------------------------------------------------------------
@Deprecated @Override @Internal public <R> SingleOutputStreamOperator<R> process( ProcessFunction<T, R> processFunction, TypeInformation<R> outputType) { LegacyKeyedProcessOperator<KEY, T, R> operator = new LegacyKeyedProcessOperator<>(clean(processFunction)); return transform("Process", outputType, operator); }
Applies the given {@link ProcessFunction} on the input stream, thereby creating a transformed output stream. <p>The function will be called for every element in the input streams and can produce zero or more output elements. Contrary to the {@link DataStream#flatMap(FlatMapFunction)} function, this function can also query the time and set timers. When reacting to the firing of set timers the function can directly emit elements and/or register yet more timers. @param processFunction The {@link ProcessFunction} that is called for each element in the stream. @param outputType {@link TypeInformation} for the result type of the function. @param <R> The type of elements emitted by the {@code ProcessFunction}. @return The transformed {@link DataStream}. @deprecated Use {@link KeyedStream#process(KeyedProcessFunction, TypeInformation)}
@PublicEvolving public <R> SingleOutputStreamOperator<R> process(KeyedProcessFunction<KEY, T, R> keyedProcessFunction) { TypeInformation<R> outType = TypeExtractor.getUnaryOperatorReturnType( keyedProcessFunction, KeyedProcessFunction.class, 1, 2, TypeExtractor.NO_INDEX, getType(), Utils.getCallLocationName(), true); return process(keyedProcessFunction, outType); }
Applies the given {@link KeyedProcessFunction} on the input stream, thereby creating a transformed output stream. <p>The function will be called for every element in the input streams and can produce zero or more output elements. Contrary to the {@link DataStream#flatMap(FlatMapFunction)} function, this function can also query the time and set timers. When reacting to the firing of set timers the function can directly emit elements and/or register yet more timers. @param keyedProcessFunction The {@link KeyedProcessFunction} that is called for each element in the stream. @param <R> The type of elements emitted by the {@code KeyedProcessFunction}. @return The transformed {@link DataStream}.
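A hedged end-to-end sketch of using process(...) with a KeyedProcessFunction; the element type, key extraction, and timer logic are illustrative.

import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

public class KeyedProcessExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.fromElements(Tuple2.of("a", 1L), Tuple2.of("b", 2L), Tuple2.of("a", 3L))
            .keyBy(new KeySelector<Tuple2<String, Long>, String>() {
                @Override
                public String getKey(Tuple2<String, Long> value) {
                    return value.f0;
                }
            })
            .process(new KeyedProcessFunction<String, Tuple2<String, Long>, String>() {
                @Override
                public void processElement(Tuple2<String, Long> value, Context ctx, Collector<String> out) throws Exception {
                    // emit one record per element and register a processing-time timer one second later
                    out.collect(value.f0 + " -> " + value.f1);
                    ctx.timerService().registerProcessingTimeTimer(
                        ctx.timerService().currentProcessingTime() + 1000L);
                }

                @Override
                public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) {
                    out.collect("timer fired for key " + ctx.getCurrentKey() + " at " + timestamp);
                }
            })
            .print();

        env.execute("keyed process example");
    }
}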
@Internal public <R> SingleOutputStreamOperator<R> process( KeyedProcessFunction<KEY, T, R> keyedProcessFunction, TypeInformation<R> outputType) { KeyedProcessOperator<KEY, T, R> operator = new KeyedProcessOperator<>(clean(keyedProcessFunction)); return transform("KeyedProcess", outputType, operator); }
Applies the given {@link KeyedProcessFunction} on the input stream, thereby creating a transformed output stream. <p>The function will be called for every element in the input streams and can produce zero or more output elements. Contrary to the {@link DataStream#flatMap(FlatMapFunction)} function, this function can also query the time and set timers. When reacting to the firing of set timers the function can directly emit elements and/or register yet more timers. @param keyedProcessFunction The {@link KeyedProcessFunction} that is called for each element in the stream. @param outputType {@link TypeInformation} for the result type of the function. @param <R> The type of elements emitted by the {@code KeyedProcessFunction}. @return The transformed {@link DataStream}.
@PublicEvolving public <T1> IntervalJoin<T, T1, KEY> intervalJoin(KeyedStream<T1, KEY> otherStream) { return new IntervalJoin<>(this, otherStream); }
Join elements of this {@link KeyedStream} with elements of another {@link KeyedStream} over a time interval that can be specified with {@link IntervalJoin#between(Time, Time)}. @param otherStream The other keyed stream to join this keyed stream with @param <T1> Type parameter of elements in the other stream @return An instance of {@link IntervalJoin} with this keyed stream and the other keyed stream
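A sketch of an interval join between two keyed streams; the stream contents, key extraction, and bounds are illustrative, and event time with assigned timestamps/watermarks is assumed.

// hypothetical inputs; both streams need event-time timestamps and watermarks assigned upstream
DataStream<Tuple2<String, Long>> orders = ...;
DataStream<Tuple2<String, Long>> shipments = ...;

orders.keyBy(o -> o.f0)
    .intervalJoin(shipments.keyBy(s -> s.f0))
    .between(Time.minutes(0), Time.minutes(30)) // a shipment may occur 0..30 minutes after its order
    .process(new ProcessJoinFunction<Tuple2<String, Long>, Tuple2<String, Long>, String>() {
        @Override
        public void processElement(Tuple2<String, Long> order, Tuple2<String, Long> shipment, Context ctx, Collector<String> out) {
            out.collect(order.f0 + ": ordered at " + order.f1 + ", shipped at " + shipment.f1);
        }
    });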
public WindowedStream<T, KEY, TimeWindow> timeWindow(Time size) { if (environment.getStreamTimeCharacteristic() == TimeCharacteristic.ProcessingTime) { return window(TumblingProcessingTimeWindows.of(size)); } else { return window(TumblingEventTimeWindows.of(size)); } }
Windows this {@code KeyedStream} into tumbling time windows. <p>This is a shortcut for either {@code .window(TumblingEventTimeWindows.of(size))} or {@code .window(TumblingProcessingTimeWindows.of(size))} depending on the time characteristic set using {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)} @param size The size of the window.
public WindowedStream<T, KEY, TimeWindow> timeWindow(Time size, Time slide) { if (environment.getStreamTimeCharacteristic() == TimeCharacteristic.ProcessingTime) { return window(SlidingProcessingTimeWindows.of(size, slide)); } else { return window(SlidingEventTimeWindows.of(size, slide)); } }
Windows this {@code KeyedStream} into sliding time windows. <p>This is a shortcut for either {@code .window(SlidingEventTimeWindows.of(size, slide))} or {@code .window(SlidingProcessingTimeWindows.of(size, slide))} depending on the time characteristic set using {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)} @param size The size of the window.
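For example, assuming a keyed stream of Tuple2<String, Long> named keyedStream (illustrative), the tumbling and sliding variants map directly onto these helpers:

// tumbling 1-hour windows, summing the second tuple field per key and window
keyedStream.timeWindow(Time.hours(1)).sum(1);

// sliding 1-hour windows evaluated every 10 minutes
keyedStream.timeWindow(Time.hours(1), Time.minutes(10)).sum(1);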
public WindowedStream<T, KEY, GlobalWindow> countWindow(long size) { return window(GlobalWindows.create()).trigger(PurgingTrigger.of(CountTrigger.of(size))); }
Windows this {@code KeyedStream} into tumbling count windows. @param size The size of the windows in number of elements.
public WindowedStream<T, KEY, GlobalWindow> countWindow(long size, long slide) { return window(GlobalWindows.create()) .evictor(CountEvictor.of(size)) .trigger(CountTrigger.of(slide)); }
Windows this {@code KeyedStream} into sliding count windows. @param size The size of the windows in number of elements. @param slide The slide interval in number of elements.
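Analogously for count windows (again on an illustrative keyedStream of tuples):

// tumbling count windows: one evaluation per 100 elements of a key
keyedStream.countWindow(100).sum(1);

// sliding count windows: windows over the last 100 elements, evaluated every 10 elements
keyedStream.countWindow(100, 10).sum(1);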
@PublicEvolving public <W extends Window> WindowedStream<T, KEY, W> window(WindowAssigner<? super T, W> assigner) { return new WindowedStream<>(this, assigner); }
Windows this data stream to a {@code WindowedStream}, which evaluates windows over a key grouped stream. Elements are put into windows by a {@link WindowAssigner}. The grouping of elements is done both by key and by window. <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger} that is used if a {@code Trigger} is not specified. @param assigner The {@code WindowAssigner} that assigns elements to windows. @return The trigger windows data stream.
public SingleOutputStreamOperator<T> reduce(ReduceFunction<T> reducer) { return transform("Keyed Reduce", getType(), new StreamGroupedReduce<T>( clean(reducer), getType().createSerializer(getExecutionConfig()))); }
Applies a reduce transformation on the data stream grouped by the given key. The {@link ReduceFunction} will receive input values based on the key value. Only input values with the same key will go to the same reducer. @param reducer The {@link ReduceFunction} that will be called for every element of the input with the same key. @return The transformed DataStream.
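A sketch of a per-key running sum with reduce; the tuple layout and stream names are illustrative.

DataStream<Tuple2<String, Long>> sums = keyedStream
    .reduce(new ReduceFunction<Tuple2<String, Long>>() {
        @Override
        public Tuple2<String, Long> reduce(Tuple2<String, Long> a, Tuple2<String, Long> b) {
            // a holds the previously reduced value for this key, b the new element
            return Tuple2.of(a.f0, a.f1 + b.f1);
        }
    });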
@Deprecated public <R> SingleOutputStreamOperator<R> fold(R initialValue, FoldFunction<T, R> folder) { TypeInformation<R> outType = TypeExtractor.getFoldReturnTypes( clean(folder), getType(), Utils.getCallLocationName(), true); return transform("Keyed Fold", outType, new StreamGroupedFold<>(clean(folder), initialValue)); }
Applies a fold transformation on the data stream grouped by the given key. The {@link FoldFunction} will receive input values based on the key value. Only input values with the same key will go to the same folder. @param folder The {@link FoldFunction} that will be called for every element of the input with the same key. @param initialValue The initialValue passed to the folders for each key. @return The transformed DataStream. @deprecated will be removed in a future version
public SingleOutputStreamOperator<T> maxBy(String field, boolean first) { return aggregate(new ComparableAggregator<>(field, getType(), AggregationFunction.AggregationType.MAXBY, first, getExecutionConfig())); }
Applies an aggregation that gives the current maximum element of the data stream by the given field expression, per key. An independent aggregate is kept per key. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into objects, as in {@code "field1.fieldxy" }. @param field In case of a POJO, Scala case class, or Tuple type, the name of the (public) field on which to perform the aggregation. Additionally, a dot can be used to drill down into nested objects, as in {@code "field1.fieldxy" }. Furthermore "*" can be specified in case of a basic type (which is considered as having only one field). @param first If true, then in case of field equality the first object will be returned @return The transformed DataStream.
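For illustration (the field names below are invented), the field expression can reference a top-level field or drill into nested objects:

// per key, keep the element with the largest "duration" field; on ties the first-seen element wins
keyedEvents.maxBy("duration", true);

// nested POJO fields are addressed with a dot
keyedEvents.maxBy("timing.durationMillis", true);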
@PublicEvolving public QueryableStateStream<KEY, T> asQueryableState(String queryableStateName) { ValueStateDescriptor<T> valueStateDescriptor = new ValueStateDescriptor<T>( UUID.randomUUID().toString(), getType()); return asQueryableState(queryableStateName, valueStateDescriptor); }
Publishes the keyed stream as a queryable ValueState instance. @param queryableStateName Name under which to publish the queryable state instance @return Queryable state instance
@PublicEvolving public QueryableStateStream<KEY, T> asQueryableState( String queryableStateName, ValueStateDescriptor<T> stateDescriptor) { transform("Queryable state: " + queryableStateName, getType(), new QueryableValueStateOperator<>(queryableStateName, stateDescriptor)); stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig()); return new QueryableStateStream<>( queryableStateName, stateDescriptor, getKeyType().createSerializer(getExecutionConfig())); }
Publishes the keyed stream as a queryable ValueState instance. @param queryableStateName Name under which to publish the queryable state instance @param stateDescriptor State descriptor to create state instance from @return Queryable state instance
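A usage sketch for publishing a keyed stream as queryable state; the state names are illustrative, and readingType stands for a TypeInformation of the element type.

// simplest form: the latest value per key, published under "latest-readings"
keyedReadings.asQueryableState("latest-readings");

// with an explicit descriptor controlling the state name and serializer
keyedReadings.asQueryableState(
    "latest-readings",
    new ValueStateDescriptor<>("readings", readingType));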
@PublicEvolving @Deprecated public <ACC> QueryableStateStream<KEY, ACC> asQueryableState( String queryableStateName, FoldingStateDescriptor<T, ACC> stateDescriptor) { transform("Queryable state: " + queryableStateName, getType(), new QueryableAppendingStateOperator<>(queryableStateName, stateDescriptor)); stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig()); return new QueryableStateStream<>( queryableStateName, stateDescriptor, getKeyType().createSerializer(getExecutionConfig())); }
Publishes the keyed stream as a queryable FoldingState instance. @param queryableStateName Name under which to publish the queryable state instance @param stateDescriptor State descriptor to create state instance from @return Queryable state instance @deprecated will be removed in a future version
@PublicEvolving public QueryableStateStream<KEY, T> asQueryableState( String queryableStateName, ReducingStateDescriptor<T> stateDescriptor) { transform("Queryable state: " + queryableStateName, getType(), new QueryableAppendingStateOperator<>(queryableStateName, stateDescriptor)); stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig()); return new QueryableStateStream<>( queryableStateName, stateDescriptor, getKeyType().createSerializer(getExecutionConfig())); }
Publishes the keyed stream as a queryable ReducingState instance. @param queryableStateName Name under which to publish the queryable state instance @param stateDescriptor State descriptor to create state instance from @return Queryable state instance
public static Throwable get(Throwable serThrowable, ClassLoader loader) { if (serThrowable instanceof SerializedThrowable) { return ((SerializedThrowable) serThrowable).deserializeError(loader); } else { return serThrowable; } }
------------------------------------------------------------------------
public static Environment parse(URL url) throws IOException { try { return new ConfigUtil.LowerCaseYamlMapper().readValue(url, Environment.class); } catch (JsonMappingException e) { throw new SqlClientException("Could not parse environment file. Cause: " + e.getMessage()); } }
Parses an environment file from a URL.
public static Environment parse(String content) throws IOException { try { return new ConfigUtil.LowerCaseYamlMapper().readValue(content, Environment.class); } catch (JsonMappingException e) { throw new SqlClientException("Could not parse environment file. Cause: " + e.getMessage()); } }
Parses an environment file from a String.
public static Environment merge(Environment env1, Environment env2) { final Environment mergedEnv = new Environment(); // merge tables final Map<String, TableEntry> tables = new LinkedHashMap<>(env1.getTables()); tables.putAll(env2.getTables()); mergedEnv.tables = tables; // merge functions final Map<String, FunctionEntry> functions = new HashMap<>(env1.getFunctions()); functions.putAll(env2.getFunctions()); mergedEnv.functions = functions; // merge execution properties mergedEnv.execution = ExecutionEntry.merge(env1.getExecution(), env2.getExecution()); // merge deployment properties mergedEnv.deployment = DeploymentEntry.merge(env1.getDeployment(), env2.getDeployment()); return mergedEnv; }
Merges two environments. The properties of the first environment might be overwritten by the second one.
public static Environment enrich( Environment env, Map<String, String> properties, Map<String, ViewEntry> views) { final Environment enrichedEnv = new Environment(); // merge tables enrichedEnv.tables = new LinkedHashMap<>(env.getTables()); enrichedEnv.tables.putAll(views); // merge functions enrichedEnv.functions = new HashMap<>(env.getFunctions()); // enrich execution properties enrichedEnv.execution = ExecutionEntry.enrich(env.execution, properties); // enrich deployment properties enrichedEnv.deployment = DeploymentEntry.enrich(env.deployment, properties); return enrichedEnv; }
Enriches an environment with new/modified properties or views and returns the new instance.
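A sketch of how parse, merge, and enrich typically compose in the SQL Client; the URL, String, and map variables are placeholders.

Environment defaults = Environment.parse(defaultsUrl);        // defaultsUrl: URL of the cluster-wide YAML defaults
Environment session = Environment.parse(userYamlContent);     // userYamlContent: the user's YAML as a String
Environment merged = Environment.merge(defaults, session);    // entries from the session file win on conflicts
Environment effective = Environment.enrich(merged, dynamicProperties, sessionViews);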
private void internalWriteOuterSnapshot(DataOutputView out) throws IOException { out.writeInt(MAGIC_NUMBER); out.writeInt(getCurrentOuterSnapshotVersion()); writeOuterSnapshot(out); }
------------------------------------------------------------------------------------------
void add(long value) { if (value >= 0) { if (count > 0) { min = Math.min(min, value); max = Math.max(max, value); } else { min = value; max = value; } count++; sum += value; } }
Adds the value to the stats if it is >= 0. @param value Value to add for min/max/avg stats.