@Override public boolean tryAssignPayload(Payload payload) { Preconditions.checkNotNull(payload); // check that we can actually run in this slot if (isCanceled()) { return false; } // atomically assign the vertex if (!PAYLOAD_UPDATER.compareAndSet(this, null, payload)) { return false; } // we need to do a double check that we were not cancelled in the meantime if (isCanceled()) { this.payload = null; return false; } return true; }
Atomically assigns the given payload to this slot, if no payload has been assigned so far. @param payload The payload to assign to this slot. @return True if the payload was assigned, false otherwise.
@Override public CompletableFuture<?> releaseSlot(@Nullable Throwable cause) { if (!isCanceled()) { final CompletableFuture<?> terminationFuture; if (payload != null) { // trigger the failure of the slot payload payload.fail(cause != null ? cause : new FlinkException("TaskManager was lost/killed: " + getTaskManagerLocation())); // wait for the termination of the payload before releasing the slot terminationFuture = payload.getTerminalStateFuture(); } else { terminationFuture = CompletableFuture.completedFuture(null); } terminationFuture.whenComplete( (Object ignored, Throwable throwable) -> { // release directly (if we are directly allocated), // otherwise release through the parent shared slot if (getParent() == null) { // we have to give back the slot to the owning instance if (markCancelled()) { try { getOwner().returnLogicalSlot(this); releaseFuture.complete(null); } catch (Exception e) { releaseFuture.completeExceptionally(e); } } } else { // we have to ask our parent to dispose us getParent().releaseChild(this); releaseFuture.complete(null); } }); } return releaseFuture; }
Releases this slot. If a payload is assigned, it is failed with the given cause (or a default FlinkException) and, once the payload has reached a terminal state, the slot is returned to its owner or released through its parent shared slot. @param cause The cause of the release, may be null. @return A future that is completed once the slot has been released.
public static ScopeFormats fromConfig(Configuration config) { String jmFormat = config.getString(MetricOptions.SCOPE_NAMING_JM); String jmJobFormat = config.getString(MetricOptions.SCOPE_NAMING_JM_JOB); String tmFormat = config.getString(MetricOptions.SCOPE_NAMING_TM); String tmJobFormat = config.getString(MetricOptions.SCOPE_NAMING_TM_JOB); String taskFormat = config.getString(MetricOptions.SCOPE_NAMING_TASK); String operatorFormat = config.getString(MetricOptions.SCOPE_NAMING_OPERATOR); return new ScopeFormats(jmFormat, jmJobFormat, tmFormat, tmJobFormat, taskFormat, operatorFormat); }
Creates the scope formats as defined in the given configuration. @param config The configuration that defines the formats @return The ScopeFormats parsed from the configuration
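For illustration, a minimal sketch of driving the factory method above from user code; the import locations and the concrete scope pattern are assumptions, not taken from this listing.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.MetricOptions;
import org.apache.flink.runtime.metrics.scope.ScopeFormats;

public class ScopeFormatsSketch {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // Override only the TaskManager scope; the remaining formats keep their defaults.
        config.setString(MetricOptions.SCOPE_NAMING_TM, "<host>.taskmanager.<tm_id>");

        ScopeFormats formats = ScopeFormats.fromConfig(config);
        System.out.println(formats);
    }
}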
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); // kryoRegistrations may be null if this value serializer is deserialized from an old version if (kryoRegistrations == null) { this.kryoRegistrations = asKryoRegistrations(type); } }
Custom readObject that restores the Kryo registrations from the value type when they are missing, e.g. when this serializer was deserialized from an old version.
private static void initDefaultsFromConfiguration(Configuration configuration) { final long to = configuration.getLong(ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY, ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT); if (to < 0) { LOG.error("Invalid timeout value for filesystem stream opening: " + to + ". Using default value of " + ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT); DEFAULT_OPENING_TIMEOUT = ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT; } else if (to == 0) { DEFAULT_OPENING_TIMEOUT = 300000; // 5 minutes } else { DEFAULT_OPENING_TIMEOUT = to; } }
Initialize defaults for input format. Needs to be a static method because it is configured for local cluster execution. @param configuration The configuration to load defaults from
public static void registerInflaterInputStreamFactory(String fileExtension, InflaterInputStreamFactory<?> factory) { synchronized (INFLATER_INPUT_STREAM_FACTORIES) { if (INFLATER_INPUT_STREAM_FACTORIES.put(fileExtension, factory) != null) { LOG.warn("Overwriting an existing decompression algorithm for \"{}\" files.", fileExtension); } } }
Registers a decompression algorithm through a {@link org.apache.flink.api.common.io.compression.InflaterInputStreamFactory} with a file extension for transparent decompression. @param fileExtension of the compressed files @param factory to create an {@link java.util.zip.InflaterInputStream} that handles the decompression format
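As a hedged sketch of the registration described above: a hypothetical factory for plain zlib/deflate files. The create(InputStream) method is grounded in its use by decorateInputStream further below; the getCommonFileExtensions() method and the assumption that the static registration method lives on FileInputFormat are not confirmed by this listing.

import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Collections;
import java.util.zip.InflaterInputStream;

import org.apache.flink.api.common.io.FileInputFormat;
import org.apache.flink.api.common.io.compression.InflaterInputStreamFactory;

public class ZlibInflaterInputStreamFactory implements InflaterInputStreamFactory<InflaterInputStream> {

    @Override
    public InflaterInputStream create(InputStream in) throws IOException {
        // Wrap the raw stream in a plain zlib/deflate decompressor.
        return new InflaterInputStream(in);
    }

    @Override
    public Collection<String> getCommonFileExtensions() {
        // Assumed interface method; "zlib" is an illustrative extension.
        return Collections.singleton("zlib");
    }

    public static void main(String[] args) {
        // Registration is assumed to be the static method shown above, hosted on FileInputFormat.
        FileInputFormat.registerInflaterInputStreamFactory("zlib", new ZlibInflaterInputStreamFactory());
    }
}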
protected static String extractFileExtension(String fileName) { checkNotNull(fileName); int lastPeriodIndex = fileName.lastIndexOf('.'); if (lastPeriodIndex < 0){ return null; } else { return fileName.substring(lastPeriodIndex + 1); } }
Returns the extension of a file name (!= a path). @return the extension of the file name or {@code null} if there is no extension.
public Path[] getFilePaths() { if (supportsMultiPaths()) { if (this.filePaths == null) { return new Path[0]; } return this.filePaths; } else { if (this.filePath == null) { return new Path[0]; } return new Path[] {filePath}; } }
Returns the paths of all files to be read by the FileInputFormat. @return The list of all paths to read.
public void setFilePaths(String... filePaths) { Path[] paths = new Path[filePaths.length]; for (int i = 0; i < paths.length; i++) { paths[i] = new Path(filePaths[i]); } setFilePaths(paths); }
Sets multiple paths of files to be read. @param filePaths The paths of the files to read.
public void setFilePaths(Path... filePaths) { if (!supportsMultiPaths() && filePaths.length > 1) { throw new UnsupportedOperationException( "Multiple paths are not supported by this FileInputFormat."); } if (filePaths.length < 1) { throw new IllegalArgumentException("At least one file path must be specified."); } if (filePaths.length == 1) { // set for backwards compatibility this.filePath = filePaths[0]; } else { // clear file path in case it had been set before this.filePath = null; } this.filePaths = filePaths; }
Sets multiple paths of files to be read. @param filePaths The paths of the files to read.
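A sketch of reading several files with one format, assuming TextInputFormat is a FileInputFormat subclass that supports multiple paths; the file locations are placeholders.

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;

public class MultiPathSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        TextInputFormat format = new TextInputFormat(new Path("file:///data/part-0"));
        // Replace the single constructor path with two input files.
        format.setFilePaths("file:///data/part-0", "file:///data/part-1");

        env.createInput(format).print();
    }
}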
@Override public void configure(Configuration parameters) { if (getFilePaths().length == 0) { // file path was not specified yet. Try to set it from the parameters. String filePath = parameters.getString(FILE_PARAMETER_KEY, null); if (filePath == null) { throw new IllegalArgumentException("File path was not specified in input format or configuration."); } else { setFilePath(filePath); } } if (!this.enumerateNestedFiles) { this.enumerateNestedFiles = parameters.getBoolean(ENUMERATE_NESTED_FILES_FLAG, false); } }
Configures the file input format by reading the file path from the configuration. @see org.apache.flink.api.common.io.InputFormat#configure(org.apache.flink.configuration.Configuration)
@Override public FileBaseStatistics getStatistics(BaseStatistics cachedStats) throws IOException { final FileBaseStatistics cachedFileStats = cachedStats instanceof FileBaseStatistics ? (FileBaseStatistics) cachedStats : null; try { return getFileStats(cachedFileStats, getFilePaths(), new ArrayList<>(getFilePaths().length)); } catch (IOException ioex) { if (LOG.isWarnEnabled()) { LOG.warn("Could not determine statistics for paths '" + Arrays.toString(getFilePaths()) + "' due to an io error: " + ioex.getMessage()); } } catch (Throwable t) { if (LOG.isErrorEnabled()) { LOG.error("Unexpected problem while getting the file statistics for paths '" + Arrays.toString(getFilePaths()) + "': " + t.getMessage(), t); } } // no statistics available return null; }
Obtains basic file statistics containing only file size. If the input is a directory, then the size is the sum of all contained files. @see org.apache.flink.api.common.io.InputFormat#getStatistics(org.apache.flink.api.common.io.statistics.BaseStatistics)
private long addFilesInDir(Path path, List<FileStatus> files, boolean logExcludedFiles) throws IOException { final FileSystem fs = path.getFileSystem(); long length = 0; for(FileStatus dir: fs.listStatus(path)) { if (dir.isDir()) { if (acceptFile(dir) && enumerateNestedFiles) { length += addFilesInDir(dir.getPath(), files, logExcludedFiles); } else { if (logExcludedFiles && LOG.isDebugEnabled()) { LOG.debug("Directory "+dir.getPath().toString()+" did not pass the file-filter and is excluded."); } } } else { if(acceptFile(dir)) { files.add(dir); length += dir.getLen(); testForUnsplittable(dir); } else { if (logExcludedFiles && LOG.isDebugEnabled()) { LOG.debug("Directory "+dir.getPath().toString()+" did not pass the file-filter and is excluded."); } } } } return length; }
Enumerates all files in the directory, recursing into subdirectories if enumerateNestedFiles is true. @return the total length of accepted files.
@Override public void open(FileInputSplit fileSplit) throws IOException { this.currentSplit = fileSplit; this.splitStart = fileSplit.getStart(); this.splitLength = fileSplit.getLength(); if (LOG.isDebugEnabled()) { LOG.debug("Opening input split " + fileSplit.getPath() + " [" + this.splitStart + "," + this.splitLength + "]"); } // open the split in an asynchronous thread final InputSplitOpenThread isot = new InputSplitOpenThread(fileSplit, this.openTimeout); isot.start(); try { this.stream = isot.waitForCompletion(); this.stream = decorateInputStream(this.stream, fileSplit); } catch (Throwable t) { throw new IOException("Error opening the Input Split " + fileSplit.getPath() + " [" + splitStart + "," + splitLength + "]: " + t.getMessage(), t); } // get FSDataInputStream if (this.splitStart != 0) { this.stream.seek(this.splitStart); } }
Opens an input stream to the file defined in the input format. The stream is positioned at the beginning of the given split. <p> The stream is actually opened in an asynchronous thread to make sure any interruptions to the thread working on the input format do not reach the file system.
protected FSDataInputStream decorateInputStream(FSDataInputStream inputStream, FileInputSplit fileSplit) throws Throwable { // Wrap stream in a extracting (decompressing) stream if file ends with a known compression file extension. InflaterInputStreamFactory<?> inflaterInputStreamFactory = getInflaterInputStreamFactory(fileSplit.getPath()); if (inflaterInputStreamFactory != null) { return new InputStreamFSInputWrapper(inflaterInputStreamFactory.create(stream)); } return inputStream; }
This method allows wrapping/decorating the raw {@link FSDataInputStream} for a certain file split, e.g., for decoding. When overriding this method, also consider adapting {@link FileInputFormat#testForUnsplittable} if your stream decoration renders the input file unsplittable. Also consider calling existing superclass implementations. @param inputStream is the input stream to be decorated @param fileSplit is the file split for which the input stream shall be decorated @return the decorated input stream @throws Throwable if the decoration fails @see org.apache.flink.api.common.io.InputStreamFSInputWrapper
public synchronized void addOpenChannels(List<FileIOChannel> toOpen) { checkArgument(!closed); for (FileIOChannel channel : toOpen) { openChannels.add(channel); channels.remove(channel.getChannelID()); } }
Registers the given file channels as open channels.
public void open(Context<K, W> ctx) throws Exception { this.ctx = ctx; this.windowAssigner.open(ctx); }
Initialization method for the function. It is called before the actual working methods.
@PublicEvolving public AllWindowedStream<T, W> trigger(Trigger<? super T, ? super W> trigger) { if (windowAssigner instanceof MergingWindowAssigner && !trigger.canMerge()) { throw new UnsupportedOperationException("A merging window assigner cannot be used with a trigger that does not support merging."); } this.trigger = trigger; return this; }
Sets the {@code Trigger} that should be used to trigger window emission.
@PublicEvolving public AllWindowedStream<T, W> allowedLateness(Time lateness) { final long millis = lateness.toMilliseconds(); checkArgument(millis >= 0, "The allowed lateness cannot be negative."); this.allowedLateness = millis; return this; }
Sets the time by which elements are allowed to be late. Elements that arrive behind the watermark by more than the specified time will be dropped. By default, the allowed lateness is {@code 0L}. <p>Setting an allowed lateness is only valid for event-time windows.
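A sketch of wiring allowedLateness into a non-keyed window pipeline; TumblingEventTimeWindows and the ten-second/five-second values are illustrative, and the timestamp/watermark assignment required for event time is omitted for brevity.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class AllowedLatenessSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.fromElements(1, 2, 3, 4, 5)
                // timestamps and watermarks would be assigned here in a real job
                .windowAll(TumblingEventTimeWindows.of(Time.seconds(10)))
                .allowedLateness(Time.seconds(5)) // keep window state 5 extra seconds for late elements
                .reduce((a, b) -> a + b)
                .print();

        env.execute("allowed-lateness-sketch");
    }
}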
@PublicEvolving public AllWindowedStream<T, W> sideOutputLateData(OutputTag<T> outputTag) { Preconditions.checkNotNull(outputTag, "Side output tag must not be null."); this.lateDataOutputTag = input.getExecutionEnvironment().clean(outputTag); return this; }
Send late arriving data to the side output identified by the given {@link OutputTag}. Data is considered late after the watermark has passed the end of the window plus the allowed lateness set using {@link #allowedLateness(Time)}. <p>You can get the stream of late data using {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the windowed operation with the same {@link OutputTag}.
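A sketch of retrieving the late-data side output; the tag name and window size are illustrative, and timestamp/watermark assignment is again omitted.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;

public class LateDataSideOutputSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Anonymous subclass so the element type is captured for the tag.
        final OutputTag<Integer> lateTag = new OutputTag<Integer>("late-elements") {};

        SingleOutputStreamOperator<Integer> sums = env.fromElements(1, 2, 3)
                .windowAll(TumblingEventTimeWindows.of(Time.seconds(10)))
                .sideOutputLateData(lateTag)
                .reduce((a, b) -> a + b);

        DataStream<Integer> lateElements = sums.getSideOutput(lateTag);
        lateElements.print();
        sums.print();

        env.execute("late-data-side-output-sketch");
    }
}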
@PublicEvolving public AllWindowedStream<T, W> evictor(Evictor<? super T, ? super W> evictor) { if (windowAssigner instanceof BaseAlignedWindowAssigner) { throw new UnsupportedOperationException("Cannot use a " + windowAssigner.getClass().getSimpleName() + " with an Evictor."); } this.evictor = evictor; return this; }
Sets the {@code Evictor} that should be used to evict elements from a window before emission. <p>Note: When using an evictor window performance will degrade significantly, since incremental aggregation of window results cannot be used.
@SuppressWarnings("unchecked") public SingleOutputStreamOperator<T> reduce(ReduceFunction<T> function) { if (function instanceof RichFunction) { throw new UnsupportedOperationException("ReduceFunction of reduce can not be a RichFunction. " + "Please use reduce(ReduceFunction, WindowFunction) instead."); } //clean the closure function = input.getExecutionEnvironment().clean(function); String callLocation = Utils.getCallLocationName(); String udfName = "AllWindowedStream." + callLocation; return reduce(function, new PassThroughAllWindowFunction<W, T>()); }
Applies a reduce function to the window. The window function is called for each evaluation of the window for each key individually. The output of the reduce function is interpreted as a regular non-windowed stream. <p>This window will try and incrementally aggregate data as much as the window policies permit. For example, tumbling time windows can aggregate the data, meaning that only one element per key is stored. Sliding time windows will aggregate on the granularity of the slide interval, so a few elements are stored per key (one per slide interval). Custom windows may not be able to incrementally aggregate, or may need to store extra values in an aggregation tree. @param function The reduce function. @return The data stream that is the result of applying the reduce function to the window.
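For concreteness, a sketch of a ReduceFunction that could be handed to the method above; the Tuple2<String, Integer> element type and the summing logic are illustrative.

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;

public class CountSummingReducer implements ReduceFunction<Tuple2<String, Integer>> {

    @Override
    public Tuple2<String, Integer> reduce(Tuple2<String, Integer> a, Tuple2<String, Integer> b) {
        // Keep one key and add the counts; only one aggregated element per window is retained.
        return Tuple2.of(a.f0, a.f1 + b.f1);
    }
}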
@PublicEvolving public <R> SingleOutputStreamOperator<R> reduce( ReduceFunction<T> reduceFunction, AllWindowFunction<T, R, W> function) { TypeInformation<T> inType = input.getType(); TypeInformation<R> resultType = getAllWindowFunctionReturnType(function, inType); return reduce(reduceFunction, function, resultType); }
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given reducer. @param reduceFunction The reduce function that is used for incremental aggregation. @param function The window function. @return The data stream that is the result of applying the window function to the window.
@PublicEvolving public <R> SingleOutputStreamOperator<R> reduce( ReduceFunction<T> reduceFunction, ProcessAllWindowFunction<T, R, W> function) { TypeInformation<R> resultType = getProcessAllWindowFunctionReturnType(function, input.getType()); return reduce(reduceFunction, function, resultType); }
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given reducer. @param reduceFunction The reduce function that is used for incremental aggregation. @param function The process window function. @return The data stream that is the result of applying the window function to the window.
@PublicEvolving public <ACC, R> SingleOutputStreamOperator<R> aggregate(AggregateFunction<T, ACC, R> function) { checkNotNull(function, "function"); if (function instanceof RichFunction) { throw new UnsupportedOperationException("This aggregation function cannot be a RichFunction."); } TypeInformation<ACC> accumulatorType = TypeExtractor.getAggregateFunctionAccumulatorType( function, input.getType(), null, false); TypeInformation<R> resultType = TypeExtractor.getAggregateFunctionReturnType( function, input.getType(), null, false); return aggregate(function, accumulatorType, resultType); }
Applies the given {@code AggregateFunction} to each window. The AggregateFunction aggregates all elements of a window into a single result element. The stream of these result elements (one per window) is interpreted as a regular non-windowed stream. @param function The aggregation function. @return The data stream that is the result of applying the aggregation function to the window. @param <ACC> The type of the AggregateFunction's accumulator @param <R> The type of the elements in the resulting stream, equal to the AggregateFunction's result type
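A sketch of an AggregateFunction that could be passed to aggregate(...) above: it averages Integer elements using a (sum, count) accumulator; the element and result types are illustrative.

import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;

public class AverageAggregate implements AggregateFunction<Integer, Tuple2<Long, Long>, Double> {

    @Override
    public Tuple2<Long, Long> createAccumulator() {
        // (running sum, element count)
        return Tuple2.of(0L, 0L);
    }

    @Override
    public Tuple2<Long, Long> add(Integer value, Tuple2<Long, Long> acc) {
        return Tuple2.of(acc.f0 + value, acc.f1 + 1);
    }

    @Override
    public Double getResult(Tuple2<Long, Long> acc) {
        return acc.f1 == 0 ? 0.0 : ((double) acc.f0) / acc.f1;
    }

    @Override
    public Tuple2<Long, Long> merge(Tuple2<Long, Long> a, Tuple2<Long, Long> b) {
        return Tuple2.of(a.f0 + b.f0, a.f1 + b.f1);
    }
}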
@PublicEvolving public <ACC, V, R> SingleOutputStreamOperator<R> aggregate( AggregateFunction<T, ACC, V> aggFunction, ProcessAllWindowFunction<V, R, W> windowFunction) { checkNotNull(aggFunction, "aggFunction"); checkNotNull(windowFunction, "windowFunction"); TypeInformation<ACC> accumulatorType = TypeExtractor.getAggregateFunctionAccumulatorType( aggFunction, input.getType(), null, false); TypeInformation<V> aggResultType = TypeExtractor.getAggregateFunctionReturnType( aggFunction, input.getType(), null, false); TypeInformation<R> resultType = getProcessAllWindowFunctionReturnType(windowFunction, aggResultType); return aggregate(aggFunction, windowFunction, accumulatorType, aggResultType, resultType); }
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given aggregate function. This means that the window function typically has only a single value to process when called. @param aggFunction The aggregate function that is used for incremental aggregation. @param windowFunction The process window function. @return The data stream that is the result of applying the window function to the window. @param <ACC> The type of the AggregateFunction's accumulator @param <V> The type of AggregateFunction's result, and the WindowFunction's input @param <R> The type of the elements in the resulting stream, equal to the WindowFunction's result type
@PublicEvolving public <ACC, V, R> SingleOutputStreamOperator<R> aggregate( AggregateFunction<T, ACC, V> aggregateFunction, ProcessAllWindowFunction<V, R, W> windowFunction, TypeInformation<ACC> accumulatorType, TypeInformation<V> aggregateResultType, TypeInformation<R> resultType) { checkNotNull(aggregateFunction, "aggregateFunction"); checkNotNull(windowFunction, "windowFunction"); checkNotNull(accumulatorType, "accumulatorType"); checkNotNull(aggregateResultType, "aggregateResultType"); checkNotNull(resultType, "resultType"); if (aggregateFunction instanceof RichFunction) { throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction."); } //clean the closures windowFunction = input.getExecutionEnvironment().clean(windowFunction); aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction); final String callLocation = Utils.getCallLocationName(); final String udfName = "AllWindowedStream." + callLocation; final String opName; final KeySelector<T, Byte> keySel = input.getKeySelector(); OneInputStreamOperator<T, R> operator; if (evictor != null) { @SuppressWarnings({"unchecked", "rawtypes"}) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer( input.getType().createSerializer(getExecutionEnvironment().getConfig())); ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")"; operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalAggregateProcessAllWindowFunction<>(aggregateFunction, windowFunction), trigger, evictor, allowedLateness, lateDataOutputTag); } else { AggregatingStateDescriptor<T, ACC, V> stateDesc = new AggregatingStateDescriptor<>( "window-contents", aggregateFunction, accumulatorType.createSerializer(getExecutionEnvironment().getConfig())); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")"; operator = new WindowOperator<>( windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueProcessAllWindowFunction<>(windowFunction), trigger, allowedLateness, lateDataOutputTag); } return input.transform(opName, resultType, operator).forceNonParallel(); }
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given aggregate function. This means that the window function typically has only a single value to process when called. @param aggregateFunction The aggregation function that is used for incremental aggregation. @param windowFunction The process window function. @param accumulatorType Type information for the internal accumulator type of the aggregation function @param aggregateResultType Type information for the intermediate result type of the aggregation function (the window function's input) @param resultType Type information for the result type of the window function @return The data stream that is the result of applying the window function to the window. @param <ACC> The type of the AggregateFunction's accumulator @param <V> The type of AggregateFunction's result, and the WindowFunction's input @param <R> The type of the elements in the resulting stream, equal to the WindowFunction's result type
@Deprecated public <R> SingleOutputStreamOperator<R> fold(R initialValue, FoldFunction<T, R> function, TypeInformation<R> resultType) { if (function instanceof RichFunction) { throw new UnsupportedOperationException("FoldFunction of fold can not be a RichFunction. " + "Please use fold(FoldFunction, WindowFunction) instead."); } return fold(initialValue, function, new PassThroughAllWindowFunction<W, R>(), resultType, resultType); }
Applies the given fold function to each window. The window function is called for each evaluation of the window for each key individually. The output of the fold function is interpreted as a regular non-windowed stream. @param function The fold function. @return The data stream that is the result of applying the fold function to the window. @deprecated use {@link #aggregate(AggregateFunction, TypeInformation, TypeInformation)} instead
@PublicEvolving @Deprecated public <ACC, R> SingleOutputStreamOperator<R> fold(ACC initialValue, FoldFunction<T, ACC> foldFunction, AllWindowFunction<ACC, R, W> function, TypeInformation<ACC> foldAccumulatorType, TypeInformation<R> resultType) { if (foldFunction instanceof RichFunction) { throw new UnsupportedOperationException("FoldFunction of fold can not be a RichFunction."); } if (windowAssigner instanceof MergingWindowAssigner) { throw new UnsupportedOperationException("Fold cannot be used with a merging WindowAssigner."); } //clean the closures function = input.getExecutionEnvironment().clean(function); foldFunction = input.getExecutionEnvironment().clean(foldFunction); String callLocation = Utils.getCallLocationName(); String udfName = "AllWindowedStream." + callLocation; String opName; KeySelector<T, Byte> keySel = input.getKeySelector(); OneInputStreamOperator<T, R> operator; if (evictor != null) { @SuppressWarnings({"unchecked", "rawtypes"}) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig())); ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")"; operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableAllWindowFunction<>(new FoldApplyAllWindowFunction<>(initialValue, foldFunction, function, foldAccumulatorType)), trigger, evictor, allowedLateness, lateDataOutputTag); } else { FoldingStateDescriptor<T, ACC> stateDesc = new FoldingStateDescriptor<>("window-contents", initialValue, foldFunction, foldAccumulatorType.createSerializer(getExecutionEnvironment().getConfig())); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")"; operator = new WindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueAllWindowFunction<>(function), trigger, allowedLateness, lateDataOutputTag); } return input.transform(opName, resultType, operator).forceNonParallel(); }
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given fold function. @param initialValue The initial value of the fold. @param foldFunction The fold function that is used for incremental aggregation. @param function The window function. @param foldAccumulatorType Type information for the result type of the fold function @param resultType Type information for the result type of the window function @return The data stream that is the result of applying the window function to the window. @deprecated use {@link #aggregate(AggregateFunction, AllWindowFunction, TypeInformation, TypeInformation)} instead
public <R> SingleOutputStreamOperator<R> apply(AllWindowFunction<T, R, W> function) { String callLocation = Utils.getCallLocationName(); function = input.getExecutionEnvironment().clean(function); TypeInformation<R> resultType = getAllWindowFunctionReturnType(function, getInputType()); return apply(new InternalIterableAllWindowFunction<>(function), resultType, callLocation); }
Applies the given window function to each window. The window function is called for each evaluation of the window. The output of the window function is interpreted as a regular non-windowed stream. <p>Note that this function requires that all data in the windows is buffered until the window is evaluated, as the function provides no means of incremental aggregation. @param function The window function. @return The data stream that is the result of applying the window function to the window.
@PublicEvolving public <R> SingleOutputStreamOperator<R> process(ProcessAllWindowFunction<T, R, W> function) { String callLocation = Utils.getCallLocationName(); function = input.getExecutionEnvironment().clean(function); TypeInformation<R> resultType = getProcessAllWindowFunctionReturnType(function, getInputType()); return apply(new InternalIterableProcessAllWindowFunction<>(function), resultType, callLocation); }
Applies the given window function to each window. The window function is called for each evaluation of the window. The output of the window function is interpreted as a regular non-windowed stream. <p>Note that this function requires that all data in the windows is buffered until the window is evaluated, as the function provides no means of incremental aggregation. @param function The process window function. @return The data stream that is the result of applying the window function to the window.
@Deprecated public <R> SingleOutputStreamOperator<R> apply(ReduceFunction<T> reduceFunction, AllWindowFunction<T, R, W> function) { TypeInformation<T> inType = input.getType(); TypeInformation<R> resultType = getAllWindowFunctionReturnType(function, inType); return apply(reduceFunction, function, resultType); }
Applies the given window function to each window. The window function is called for each evaluation of the window for each key individually. The output of the window function is interpreted as a regular non-windowed stream. <p>Arriving data is incrementally aggregated using the given reducer. @param reduceFunction The reduce function that is used for incremental aggregation. @param function The window function. @return The data stream that is the result of applying the window function to the window. @deprecated Use {@link #reduce(ReduceFunction, AllWindowFunction)} instead.
public SingleOutputStreamOperator<T> sum(int positionToSum) { return aggregate(new SumAggregator<>(positionToSum, input.getType(), input.getExecutionConfig())); }
Applies an aggregation that sums every window of the data stream at the given position. @param positionToSum The position in the tuple/array to sum @return The transformed DataStream.
public SingleOutputStreamOperator<T> sum(String field) { return aggregate(new SumAggregator<>(field, input.getType(), input.getExecutionConfig())); }
Applies an aggregation that sums the pojo data stream at the given field for every window. <p>A field expression is either the name of a public field or a getter method with parentheses of the stream's underlying type. A dot can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. @param field The field to sum @return The transformed DataStream.
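A sketch of the field-expression variant, assuming a processing-time window assigner; the Event POJO and the field name "count" are illustrative.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class FieldSumSketch {

    // Illustrative POJO; public fields and a no-arg constructor make it a Flink POJO type.
    public static class Event {
        public String id;
        public long count;

        public Event() {}

        public Event(String id, long count) {
            this.id = id;
            this.count = count;
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.fromElements(new Event("a", 1), new Event("b", 2), new Event("c", 3))
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .sum("count") // sum the "count" field per window
                .print();

        env.execute("field-sum-sketch");
    }
}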
public SingleOutputStreamOperator<T> min(int positionToMin) { return aggregate(new ComparableAggregator<>(positionToMin, input.getType(), AggregationFunction.AggregationType.MIN, input.getExecutionConfig())); }
Applies an aggregation that gives the minimum value of every window of the data stream at the given position. @param positionToMin The position to minimize @return The transformed DataStream.
public SingleOutputStreamOperator<T> min(String field) { return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationFunction.AggregationType.MIN, false, input.getExecutionConfig())); }
Applies an aggregation that gives the minimum value of the pojo data stream at the given field expression for every window. <p>A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream}S underlying type. A dot can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. @param field The field expression based on which the aggregation will be applied. @return The transformed DataStream.
public SingleOutputStreamOperator<T> minBy(String field, boolean first) { return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationFunction.AggregationType.MINBY, first, input.getExecutionConfig())); }
Applies an aggregation that gives the minimum element of the pojo data stream by the given field expression for every window. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. @param field The field expression based on which the aggregation will be applied. @param first If True then in case of field equality the first object will be returned @return The transformed DataStream.
public SingleOutputStreamOperator<T> max(int positionToMax) { return aggregate(new ComparableAggregator<>(positionToMax, input.getType(), AggregationFunction.AggregationType.MAX, input.getExecutionConfig())); }
Applies an aggregation that gives the maximum value of every window of the data stream at the given position. @param positionToMax The position to maximize @return The transformed DataStream.
public SingleOutputStreamOperator<T> max(String field) { return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationFunction.AggregationType.MAX, false, input.getExecutionConfig())); }
Applies an aggregation that gives the maximum value of the pojo data stream at the given field expression for every window. A field expression is either the name of a public field or a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. @param field The field expression based on which the aggregation will be applied. @return The transformed DataStream.
public SortedGrouping<T> withPartitioner(Partitioner<?> partitioner) { Preconditions.checkNotNull(partitioner); getKeys().validateCustomPartitioner(partitioner, null); this.customPartitioner = partitioner; return this; }
Uses a custom partitioner for the grouping. @param partitioner The custom partitioner. @return The grouping object itself, to allow for method chaining.
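A sketch of a custom Partitioner that could be handed to withPartitioner(...); the Integer key type and the routing logic are illustrative.

import org.apache.flink.api.common.functions.Partitioner;

public class SignPartitioner implements Partitioner<Integer> {

    @Override
    public int partition(Integer key, int numPartitions) {
        // Route non-negative keys to partition 0 and negative keys to the last partition.
        return key >= 0 ? 0 : numPartitions - 1;
    }
}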
public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) { if (combiner == null) { throw new NullPointerException("GroupCombine function must not be null."); } TypeInformation<R> resultType = TypeExtractor.getGroupCombineReturnTypes(combiner, this.getInputDataSet().getType(), Utils.getCallLocationName(), true); return new GroupCombineOperator<>(this, resultType, inputDataSet.clean(combiner), Utils.getCallLocationName()); }
Applies a GroupCombineFunction on a grouped {@link DataSet}. A CombineFunction is similar to a GroupReduceFunction but does not perform a full data exchange. Instead, the CombineFunction calls the combine method once per partition for combining a group of results. This operator is suitable for combining values into an intermediate format before doing a proper groupReduce where the data is shuffled across nodes for further reduction. The GroupReduce operator can also be supplied with a combiner by implementing the RichGroupReduce function. The combine method of the RichGroupReduce function demands input and output type to be the same. The CombineFunction, on the other hand, can have an arbitrary output type. @param combiner The GroupCombineFunction that is applied on the DataSet. @return A GroupCombineOperator which represents the combined DataSet.
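A sketch of a GroupCombineFunction that could be passed to combineGroup(...) above; it pre-sums (word, count) pairs within each partition before the full group reduce. The element types are illustrative.

import org.apache.flink.api.common.functions.GroupCombineFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

public class CountCombiner
        implements GroupCombineFunction<Tuple2<String, Integer>, Tuple2<String, Integer>> {

    @Override
    public void combine(Iterable<Tuple2<String, Integer>> values, Collector<Tuple2<String, Integer>> out) {
        String key = null;
        int sum = 0;
        for (Tuple2<String, Integer> value : values) {
            key = value.f0;
            sum += value.f1;
        }
        if (key != null) {
            // Emit one pre-aggregated record per group and partition.
            out.collect(Tuple2.of(key, sum));
        }
    }
}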
public GroupReduceOperator<T, T> first(int n) { if (n < 1) { throw new InvalidProgramException("Parameter n of first(n) must be at least 1."); } return reduceGroup(new FirstReducer<T>(n)); }
Returns a new set containing the first n elements in this grouped and sorted {@link DataSet}. @param n The desired number of elements for each group. @return A GroupReduceOperator that represents the DataSet containing the elements.
public SortedGrouping<T> sortGroup(int field, Order order) { if (groupSortSelectorFunctionKey != null) { throw new InvalidProgramException("Chaining sortGroup with KeySelector sorting is not supported"); } if (!Keys.ExpressionKeys.isSortKey(field, inputDataSet.getType())) { throw new InvalidProgramException("Selected sort key is not a sortable type"); } ExpressionKeys<T> ek = new ExpressionKeys<>(field, inputDataSet.getType()); addSortGroupInternal(ek, order); return this; }
Sorts {@link org.apache.flink.api.java.tuple.Tuple} elements within a group on the specified field in the specified {@link Order}. <p><b>Note: Only groups of Tuple or Pojo elements can be sorted.</b> <p>Groups can be sorted by multiple fields by chaining {@link #sortGroup(int, Order)} calls. @param field The Tuple field on which the group is sorted. @param order The Order in which the specified Tuple field is sorted. @return A SortedGrouping with specified order of group element. @see org.apache.flink.api.java.tuple.Tuple @see Order
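A small end-to-end sketch of the grouping and sorting methods above: group by the first tuple field, sort each group by the second field, and keep the smallest element per group. The data values are illustrative.

import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class SortGroupSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Tuple2<String, Integer>> data = env.fromElements(
                Tuple2.of("a", 3), Tuple2.of("a", 1), Tuple2.of("b", 2));

        // Per key "a"/"b": sort by the count field and keep the smallest element.
        data.groupBy(0)
                .sortGroup(1, Order.ASCENDING)
                .first(1)
                .print();
    }
}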
public static byte[] encodeUTF8(String str) { byte[] bytes = allocateReuseBytes(str.length() * MAX_BYTES_PER_CHAR); int len = encodeUTF8(str, bytes); return Arrays.copyOf(bytes, len); }
This method must produce the same result as JDK's {@code String.getBytes()} with a UTF-8 charset.
public MethodlessRouter<T> addRoute(String pathPattern, T target) { PathPattern p = new PathPattern(pathPattern); if (routes.containsKey(p)) { return this; } routes.put(p, target); return this; }
Adds a route for the given path pattern. This method does nothing if the path pattern has already been added; a path pattern can only point to one target.
public void removePathPattern(String pathPattern) { PathPattern p = new PathPattern(pathPattern); T target = routes.remove(p); if (target == null) { return; } }
Removes the route specified by the path pattern.
public boolean anyMatched(String[] requestPathTokens) { Map<String, String> pathParams = new HashMap<>(); for (PathPattern pattern : routes.keySet()) { if (pattern.match(requestPathTokens, pathParams)) { return true; } // Reset for the next loop pathParams.clear(); } return false; }
Checks if there's any matching route.
protected List<OUT> executeOnCollections(RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception { @SuppressWarnings("unchecked") InputFormat<OUT, InputSplit> inputFormat = (InputFormat<OUT, InputSplit>) this.formatWrapper.getUserCodeObject(); //configure the input format inputFormat.configure(this.parameters); //open the input format if (inputFormat instanceof RichInputFormat) { ((RichInputFormat) inputFormat).setRuntimeContext(ctx); ((RichInputFormat) inputFormat).openInputFormat(); } List<OUT> result = new ArrayList<OUT>(); // splits InputSplit[] splits = inputFormat.createInputSplits(1); TypeSerializer<OUT> serializer = getOperatorInfo().getOutputType().createSerializer(executionConfig); for (InputSplit split : splits) { inputFormat.open(split); while (!inputFormat.reachedEnd()) { OUT next = inputFormat.nextRecord(serializer.createInstance()); if (next != null) { result.add(serializer.copy(next)); } } inputFormat.close(); } //close the input format if (inputFormat instanceof RichInputFormat) { ((RichInputFormat) inputFormat).closeInputFormat(); } return result; }
Executes the data source on collections: configures and opens the wrapped input format, reads all records from every input split, and returns deep copies of the records as a list.
public void addColumn(String family, String qualifier, Class<?> clazz) { this.hBaseSchema.addColumn(family, qualifier, clazz); }
Adds a column defined by family, qualifier, and type to the table schema. @param family the family name @param qualifier the qualifier name @param clazz the data type of the qualifier
private String checkAndTrimPathArg(String path) { // disallow construction of a Path from an empty string if (path == null) { throw new IllegalArgumentException("Can not create a Path from a null string"); } path = path.trim(); if (path.length() == 0) { throw new IllegalArgumentException("Can not create a Path from an empty string"); } return path; }
Checks if the provided path string is either null or has zero length and throws an {@link IllegalArgumentException} if either of the two conditions applies. In addition, leading and trailing whitespace is removed. @param path the path string to be checked @return The checked and trimmed path.
private void initialize(String scheme, String authority, String path) { try { this.uri = new URI(scheme, authority, normalizePath(path), null, null).normalize(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } }
Initializes a path object given the scheme, authority and path string. @param scheme the scheme string. @param authority the authority string. @param path the path string.
private String normalizePath(String path) { // remove leading and trailing whitespaces path = path.trim(); // remove consecutive slashes & backslashes path = path.replace("\\", "/"); path = path.replaceAll("/+", "/"); // remove trailing separator if (path.endsWith(SEPARATOR) && !path.equals(SEPARATOR) && // UNIX root path !WINDOWS_ROOT_DIR_REGEX.matcher(path).matches()) { // Windows root path) // remove trailing slash path = path.substring(0, path.length() - SEPARATOR.length()); } return path; }
Normalizes a path string. @param path the path string to normalize @return the normalized path string
public boolean isAbsolute() { final int start = hasWindowsDrive(uri.getPath(), true) ? 3 : 0; return uri.getPath().startsWith(SEPARATOR, start); }
Checks if this path is absolute. @return <code>true</code> if this path is absolute, <code>false</code> otherwise
public int depth() { String path = uri.getPath(); int depth = 0; int slash = path.length() == 1 && path.charAt(0) == '/' ? -1 : 0; while (slash != -1) { depth++; slash = path.indexOf(SEPARATOR, slash + 1); } return depth; }
Returns the number of elements in this path. @return the number of elements in this path
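A small sketch exercising the Path behavior described above (normalization, absoluteness, depth); the concrete path string is illustrative.

import org.apache.flink.core.fs.Path;

public class PathSketch {
    public static void main(String[] args) {
        Path p = new Path("hdfs://namenode:9000//user//flink/data/");

        // Consecutive slashes are collapsed and the trailing slash is dropped by normalization.
        System.out.println(p);
        System.out.println(p.isAbsolute()); // true: the path starts with the separator
        System.out.println(p.depth());      // 3: user, flink, data
    }
}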
@Override public void read(DataInputView in) throws IOException { final boolean isNotNull = in.readBoolean(); if (isNotNull) { final String scheme = StringUtils.readNullableString(in); final String userInfo = StringUtils.readNullableString(in); final String host = StringUtils.readNullableString(in); final int port = in.readInt(); final String path = StringUtils.readNullableString(in); final String query = StringUtils.readNullableString(in); final String fragment = StringUtils.readNullableString(in); try { uri = new URI(scheme, userInfo, host, port, path, query, fragment); } catch (URISyntaxException e) { throw new IOException("Error reconstructing URI", e); } } }
Deserializes this path from the given input view by reading the individual URI components and reconstructing the URI.
public Avro recordClass(Class<? extends SpecificRecord> recordClass) { Preconditions.checkNotNull(recordClass); this.recordClass = recordClass; return this; }
Sets the class of the Avro specific record. @param recordClass class of the Avro record.
@Override public FieldList addField(Integer fieldID) { if (fieldID == null) { throw new IllegalArgumentException("Field ID must not be null."); } if (size() == 0) { return new FieldList(fieldID); } else { ArrayList<Integer> list = new ArrayList<Integer>(size() + 1); list.addAll(this.collection); list.add(fieldID); return new FieldList(Collections.unmodifiableList(list)); } }
Returns a new FieldList with the given field ID appended to the fields of this list. The field ID must not be null.
@Override public boolean isValidSubset(FieldSet set) { if (set instanceof FieldList) { return (isValidSubset((FieldList) set)); } else { return false; } }
Checks if the given FieldSet is a valid subset of this FieldList. Only FieldList instances can be valid subsets of a FieldList.
public static void copyBytes(final InputStream in, final OutputStream out, final int buffSize, final boolean close) throws IOException { @SuppressWarnings("resource") final PrintStream ps = out instanceof PrintStream ? (PrintStream) out : null; final byte[] buf = new byte[buffSize]; try { int bytesRead = in.read(buf); while (bytesRead >= 0) { out.write(buf, 0, bytesRead); if ((ps != null) && ps.checkError()) { throw new IOException("Unable to write to output stream."); } bytesRead = in.read(buf); } } finally { if (close) { out.close(); in.close(); } } }
Copies from one stream to another. @param in InputStream to read from @param out OutputStream to write to @param buffSize the size of the buffer @param close whether or not to close the InputStream and OutputStream at the end. The streams are closed in the finally clause. @throws IOException thrown if an error occurred while writing to the output stream
public static void copyBytes(final InputStream in, final OutputStream out) throws IOException { copyBytes(in, out, BLOCKSIZE, true); }
Copies from one stream to another. <strong>Closes the input and output streams at the end</strong>. @param in InputStream to read from @param out OutputStream to write to @throws IOException thrown if an I/O error occurs while copying
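A sketch of using the two-argument copy helper above; the utility class name (IOUtils) is an assumption about where these static methods live, and the file names are placeholders.

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.flink.util.IOUtils; // assumed host class for the static helpers shown here

public class CopySketch {
    public static void main(String[] args) throws IOException {
        InputStream in = new FileInputStream("input.bin");
        OutputStream out = new FileOutputStream("copy.bin");

        // Copies everything and closes both streams when done.
        IOUtils.copyBytes(in, out);
    }
}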
public static void copyBytes(final InputStream in, final OutputStream out, final boolean close) throws IOException { copyBytes(in, out, BLOCKSIZE, close); }
Copies from one stream to another. @param in InputStream to read from @param out OutputStream to write to @param close whether or not to close the InputStream and OutputStream at the end. The streams are closed in the finally clause. @throws IOException thrown if an I/O error occurs while copying
public static void readFully(final InputStream in, final byte[] buf, int off, final int len) throws IOException { int toRead = len; while (toRead > 0) { final int ret = in.read(buf, off, toRead); if (ret < 0) { throw new IOException("Premature EOF from inputStream"); } toRead -= ret; off += ret; } }
Reads len bytes in a loop. @param in The InputStream to read from @param buf The buffer to fill @param off offset into the buffer @param len the number of bytes to read @throws IOException if it could not read the requested number of bytes for any reason (including EOF)
public static void skipFully(final InputStream in, long len) throws IOException { while (len > 0) { final long ret = in.skip(len); if (ret < 0) { throw new IOException("Premature EOF from inputStream"); } len -= ret; } }
Similar to readFully(). Skips bytes in a loop. @param in The InputStream to skip bytes from @param len number of bytes to skip @throws IOException if it could not skip requested number of bytes for any reason (including EOF)
public static void cleanup(final Logger log, final AutoCloseable... closeables) { for (AutoCloseable c : closeables) { if (c != null) { try { c.close(); } catch (Exception e) { if (log != null && log.isDebugEnabled()) { log.debug("Exception in closing " + c, e); } } } } }
Closes the AutoCloseable objects and <b>ignores</b> any {@link Exception} or null pointers. Must only be used for cleanup in exception handlers. @param log the log to record problems to at debug level. Can be <code>null</code>. @param closeables the objects to close
public static void closeAll(Iterable<? extends AutoCloseable> closeables) throws Exception { if (null != closeables) { Exception collectedExceptions = null; for (AutoCloseable closeable : closeables) { try { if (null != closeable) { closeable.close(); } } catch (Exception e) { collectedExceptions = ExceptionUtils.firstOrSuppressed(collectedExceptions, e); } } if (null != collectedExceptions) { throw collectedExceptions; } } }
Closes all {@link AutoCloseable} objects in the parameter. Exceptions are collected while close() is called on every object, and the first exception is thrown afterwards with any later ones attached as suppressed exceptions. @param closeables iterable with closeables to close. @throws Exception collected exceptions that occurred during closing
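A sketch of calling closeAll(...) above (the IOUtils host class name is an assumption): both resources are closed even if the first close() throws, and the first exception would be rethrown with later ones attached as suppressed exceptions.

import java.util.Arrays;

import org.apache.flink.util.IOUtils; // assumed host class for closeAll

public class CloseAllSketch {
    public static void main(String[] args) throws Exception {
        // AutoCloseable is a functional interface, so lambdas work for the sketch.
        AutoCloseable first = () -> System.out.println("closing first");
        AutoCloseable second = () -> System.out.println("closing second");

        IOUtils.closeAll(Arrays.asList(first, second));
    }
}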
public static void closeAllQuietly(Iterable<? extends AutoCloseable> closeables) { if (null != closeables) { for (AutoCloseable closeable : closeables) { closeQuietly(closeable); } } }
Closes all elements in the iterable with closeQuietly().
private static String getAlgorithmsListing() { StrBuilder strBuilder = new StrBuilder(); strBuilder .appendNewLine() .appendln("Select an algorithm to view usage: flink run examples/flink-gelly-examples_<version>.jar --algorithm <algorithm>") .appendNewLine() .appendln("Available algorithms:"); for (Driver algorithm : driverFactory) { strBuilder.append(" ") .appendFixedWidthPadRight(algorithm.getName(), 30, ' ') .append(algorithm.getShortDescription()).appendNewLine(); } return strBuilder.toString(); }
List available algorithms. This is displayed to the user when no valid algorithm is given in the program parameterization. @return usage string listing available algorithms
private static String getAlgorithmUsage(String algorithmName) { StrBuilder strBuilder = new StrBuilder(); Driver algorithm = driverFactory.get(algorithmName); strBuilder .appendNewLine() .appendNewLine() .appendln(algorithm.getLongDescription()) .appendNewLine() .append("usage: flink run examples/flink-gelly-examples_<version>.jar --algorithm ") .append(algorithmName) .append(" [algorithm options] --input <input> [input options] --output <output> [output options]") .appendNewLine() .appendNewLine() .appendln("Available inputs:"); for (Input input : inputFactory) { strBuilder .append(" --input ") .append(input.getName()) .append(" ") .appendln(input.getUsage()); } String algorithmParameterization = algorithm.getUsage(); if (algorithmParameterization.length() > 0) { strBuilder .appendNewLine() .appendln("Algorithm configuration:") .append(" ") .appendln(algorithm.getUsage()); } strBuilder .appendNewLine() .appendln("Available outputs:"); for (Output output : outputFactory) { strBuilder .append(" --output ") .append(output.getName()) .append(" ") .appendln(output.getUsage()); } return strBuilder .appendNewLine() .toString(); }
Display the usage for the given algorithm. This includes options for all compatible inputs, the selected algorithm, and outputs implemented by the selected algorithm. @param algorithmName unique identifier of the selected algorithm @return usage string for the given algorithm
private void parameterize(Parameterized parameterized) { try { parameterized.configure(parameters); } catch (RuntimeException ex) { throw new ProgramParametrizationException(ex.getMessage()); } }
Configure a runtime component. Catch {@link RuntimeException} and re-throw with a Flink internal exception which is processed by CliFrontend for display to the user. @param parameterized the component to be configured
public Runner run() throws Exception { // Set up the execution environment env = ExecutionEnvironment.getExecutionEnvironment(); ExecutionConfig config = env.getConfig(); // should not have any non-Flink data types config.disableForceAvro(); config.disableForceKryo(); config.setGlobalJobParameters(parameters); parameterize(this); // configure local parameters and throw proper exception on error try { this.configure(parameters); } catch (RuntimeException ex) { throw new ProgramParametrizationException(ex.getMessage()); } // integration tests run with with object reuse both disabled and enabled if (disableObjectReuse.getValue()) { config.disableObjectReuse(); } else { config.enableObjectReuse(); } // ---------------------------------------------------------------------------------------- // Usage and configuration // ---------------------------------------------------------------------------------------- // algorithm and usage if (!parameters.has(ALGORITHM)) { throw new ProgramParametrizationException(getAlgorithmsListing()); } String algorithmName = parameters.get(ALGORITHM); algorithm = driverFactory.get(algorithmName); if (algorithm == null) { throw new ProgramParametrizationException("Unknown algorithm name: " + algorithmName); } // input and usage if (!parameters.has(INPUT)) { if (!parameters.has(OUTPUT)) { // if neither input nor output is given then print algorithm usage throw new ProgramParametrizationException(getAlgorithmUsage(algorithmName)); } throw new ProgramParametrizationException("No input given"); } parameterize(algorithm); String inputName = parameters.get(INPUT); Input input = inputFactory.get(inputName); if (input == null) { throw new ProgramParametrizationException("Unknown input type: " + inputName); } parameterize(input); // output and usage if (!parameters.has(OUTPUT)) { throw new ProgramParametrizationException("No output given"); } String outputName = parameters.get(OUTPUT); output = outputFactory.get(outputName); if (output == null) { throw new ProgramParametrizationException("Unknown output type: " + outputName); } parameterize(output); // ---------------------------------------------------------------------------------------- // Create list of input and algorithm transforms // ---------------------------------------------------------------------------------------- List<Transform> transforms = new ArrayList<>(); if (input instanceof Transformable) { transforms.addAll(((Transformable) input).getTransformers()); } if (algorithm instanceof Transformable) { transforms.addAll(((Transformable) algorithm).getTransformers()); } for (Transform transform : transforms) { parameterize(transform); } // unused parameters if (parameters.getUnrequestedParameters().size() > 0) { throw new ProgramParametrizationException("Unrequested parameters: " + parameters.getUnrequestedParameters()); } // ---------------------------------------------------------------------------------------- // Execute // ---------------------------------------------------------------------------------------- // Create input Graph graph = input.create(env); // Transform input for (Transform transform : transforms) { graph = (Graph) transform.transformInput(graph); } // Run algorithm result = algorithm.plan(graph); // Output executionName = jobName.getValue() != null ? 
jobName.getValue() + ": " : ""; executionName += input.getIdentity() + " ⇨ " + algorithmName + " ⇨ " + output.getName(); if (transforms.size() > 0) { // append identifiers to job name StringBuffer buffer = new StringBuffer(executionName).append(" ["); for (Transform transform : transforms) { buffer.append(transform.getIdentity()); } executionName = buffer.append("]").toString(); } if (output == null) { throw new ProgramParametrizationException("Unknown output type: " + outputName); } try { output.configure(parameters); } catch (RuntimeException ex) { throw new ProgramParametrizationException(ex.getMessage()); } if (result != null) { // Transform output if algorithm returned result DataSet if (transforms.size() > 0) { Collections.reverse(transforms); for (Transform transform : transforms) { result = (DataSet) transform.transformResult(result); } } } return this; }
Sets up the Flink job with the graph input, algorithm, and output. <p>To then execute the job call {@link #execute}. @return this @throws Exception on error
private void execute() throws Exception { if (result == null) { env.execute(executionName); } else { output.write(executionName.toString(), System.out, result); } System.out.println(); algorithm.printAnalytics(System.out); if (jobDetailsPath.getValue() != null) { writeJobDetails(env, jobDetailsPath.getValue()); } }
Execute the Flink job. @throws Exception on error
private static void writeJobDetails(ExecutionEnvironment env, String jobDetailsPath) throws IOException { JobExecutionResult result = env.getLastJobExecutionResult(); File jsonFile = new File(jobDetailsPath); try (JsonGenerator json = new JsonFactory().createGenerator(jsonFile, JsonEncoding.UTF8)) { json.writeStartObject(); json.writeObjectFieldStart("Apache Flink"); json.writeStringField("version", EnvironmentInformation.getVersion()); json.writeStringField("commit ID", EnvironmentInformation.getRevisionInformation().commitId); json.writeStringField("commit date", EnvironmentInformation.getRevisionInformation().commitDate); json.writeEndObject(); json.writeStringField("job_id", result.getJobID().toString()); json.writeNumberField("runtime_ms", result.getNetRuntime()); json.writeObjectFieldStart("parameters"); for (Map.Entry<String, String> entry : env.getConfig().getGlobalJobParameters().toMap().entrySet()) { json.writeStringField(entry.getKey(), entry.getValue()); } json.writeEndObject(); json.writeObjectFieldStart("accumulators"); for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) { json.writeStringField(entry.getKey(), entry.getValue().toString()); } json.writeEndObject(); json.writeEndObject(); } }
Writes the following job details as a JSON-encoded file: runtime environment, job ID, runtime, parameters, and accumulators. @param env the execution environment @param jobDetailsPath filesystem path to write job details @throws IOException on error writing to jobDetailsPath
private void initOutputFormat() { ClassLoader userCodeClassLoader = getUserCodeClassLoader(); // obtain task configuration (including stub parameters) Configuration taskConf = getTaskConfiguration(); this.config = new TaskConfig(taskConf); try { this.format = config.<OutputFormat<IT>>getStubWrapper(userCodeClassLoader).getUserCodeObject(OutputFormat.class, userCodeClassLoader); // check if the class is a subclass, if the check is required if (!OutputFormat.class.isAssignableFrom(this.format.getClass())) { throw new RuntimeException("The class '" + this.format.getClass().getName() + "' is not a subclass of '" + OutputFormat.class.getName() + "' as is required."); } } catch (ClassCastException ccex) { throw new RuntimeException("The stub class is not a proper subclass of " + OutputFormat.class.getName(), ccex); } Thread thread = Thread.currentThread(); ClassLoader original = thread.getContextClassLoader(); // configure the stub. catch exceptions here extra, to report them as originating from the user code try { thread.setContextClassLoader(userCodeClassLoader); this.format.configure(this.config.getStubParameters()); } catch (Throwable t) { throw new RuntimeException("The user defined 'configure()' method in the Output Format caused an error: " + t.getMessage(), t); } finally { thread.setContextClassLoader(original); } }
Initializes the OutputFormat implementation and configuration. @throws RuntimeException Thrown if an instance of the OutputFormat implementation cannot be obtained.
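A minimal sketch of a user-defined OutputFormat that such a task could instantiate and configure; the class name and its trivial behavior are illustrative, not part of the task code.

import org.apache.flink.api.common.io.OutputFormat;
import org.apache.flink.configuration.Configuration;

public class StdOutOutputFormat<T> implements OutputFormat<T> {

    @Override
    public void configure(Configuration parameters) {
        // stub parameters passed by the task would be read here
    }

    @Override
    public void open(int taskNumber, int numTasks) {
        // no resources to open for this illustrative format
    }

    @Override
    public void writeRecord(T record) {
        System.out.println(record);
    }

    @Override
    public void close() {
        // nothing to release
    }
}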
@SuppressWarnings("unchecked") private void initInputReaders() throws Exception { int numGates = 0; // ---------------- create the input readers --------------------- // in case where a logical input unions multiple physical inputs, create a union reader final int groupSize = this.config.getGroupSize(0); numGates += groupSize; if (groupSize == 1) { // non-union case inputReader = new MutableRecordReader<DeserializationDelegate<IT>>( getEnvironment().getInputGate(0), getEnvironment().getTaskManagerInfo().getTmpDirectories()); } else if (groupSize > 1){ // union case inputReader = new MutableRecordReader<IOReadableWritable>( new UnionInputGate(getEnvironment().getAllInputGates()), getEnvironment().getTaskManagerInfo().getTmpDirectories()); } else { throw new Exception("Illegal input group size in task configuration: " + groupSize); } this.inputTypeSerializerFactory = this.config.getInputSerializer(0, getUserCodeClassLoader()); @SuppressWarnings({ "rawtypes" }) final MutableObjectIterator<?> iter = new ReaderIterator(inputReader, this.inputTypeSerializerFactory.getSerializer()); this.reader = (MutableObjectIterator<IT>)iter; // final sanity check if (numGates != this.config.getNumInputs()) { throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent."); } }
Initializes the input readers of the DataSinkTask. @throws RuntimeException Thrown in case of invalid task input configuration.
private String getLogString(String message) { return BatchTask.constructLogString(message, this.getEnvironment().getTaskInfo().getTaskName(), this); }
Utility function that composes a string for logging purposes. The string includes the given message and the index of the task in its task group together with the number of tasks in the task group. @param message The main message for the log. @return The string ready for logging.
protected void startThreads() { if (this.readThread != null) { this.readThread.start(); } if (this.sortThread != null) { this.sortThread.start(); } if (this.spillThread != null) { this.spillThread.start(); } }
Starts all the threads that are used by this sort-merger.
@Override public MutableObjectIterator<E> getIterator() throws InterruptedException { synchronized (this.iteratorLock) { // wait while both the iterator and the exception are not set while (this.iterator == null && this.iteratorException == null) { this.iteratorLock.wait(); } if (this.iteratorException != null) { throw new RuntimeException("Error obtaining the sorted input: " + this.iteratorException.getMessage(), this.iteratorException); } else { return this.iterator; } } }
------------------------------------------------------------------------
protected final void setResultIterator(MutableObjectIterator<E> iterator) { synchronized (this.iteratorLock) { // set the result iterator only, if no exception has occurred if (this.iteratorException == null) { this.iterator = iterator; this.iteratorLock.notifyAll(); } } }
Sets the result iterator. By setting the result iterator, all threads that are waiting for the result iterator are notified and will obtain it. @param iterator The result iterator to set.
protected final void setResultIteratorException(IOException ioex) { synchronized (this.iteratorLock) { if (this.iteratorException == null) { this.iteratorException = ioex; this.iteratorLock.notifyAll(); } } }
Reports an exception to all threads that are waiting for the result iterator. @param ioex The exception to be reported to the threads that wait for the result iterator.
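The three methods above (getIterator, setResultIterator, setResultIteratorException) form a result handoff between the sorting threads and the consuming thread. A minimal, self-contained sketch of the same wait/notify pattern follows; class and member names are illustrative, not taken from the sort-merger.

public class ResultHandoff<T> {

    private final Object lock = new Object();
    private T result;
    private Exception error;

    // Blocks until either a result or an exception has been published.
    public T get() throws InterruptedException {
        synchronized (lock) {
            while (result == null && error == null) {
                lock.wait();
            }
            if (error != null) {
                throw new RuntimeException("Error obtaining the result: " + error.getMessage(), error);
            }
            return result;
        }
    }

    // Publishes the result and wakes up all waiting consumers, unless an error was reported first.
    public void setResult(T value) {
        synchronized (lock) {
            if (error == null && result == null) {
                result = value;
                lock.notifyAll();
            }
        }
    }

    // Reports an error and wakes up all waiting consumers, unless one was already reported.
    public void setError(Exception e) {
        synchronized (lock) {
            if (error == null) {
                error = e;
                lock.notifyAll();
            }
        }
    }
}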
protected static <T> CircularElement<T> endMarker() { @SuppressWarnings("unchecked") CircularElement<T> c = (CircularElement<T>) EOF_MARKER; return c; }
Gets the element that is passed as marker for the end of data. @return The element that is passed as marker for the end of data.
protected static <T> CircularElement<T> spillingMarker() { @SuppressWarnings("unchecked") CircularElement<T> c = (CircularElement<T>) SPILLING_MARKER; return c; }
Gets the element that is passed as marker for signal beginning of spilling. @return The element that is passed as marker for signal beginning of spilling.
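Both markers are single shared instances that readers identify by reference comparison. A small, self-contained sketch of that sentinel pattern, independent of the sort-merger's circular queues:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class SentinelDemo {

    // A dedicated instance used only as an end-of-data marker; consumers compare it by reference.
    private static final String END_MARKER = new String("EOF");

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(4);
        queue.put("record-1");
        queue.put("record-2");
        queue.put(END_MARKER);

        String element;
        while ((element = queue.take()) != END_MARKER) {
            System.out.println("processing " + element);
        }
        System.out.println("end of data reached");
    }
}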
public static void main(String[] args) throws Exception { // Checking input parameters final ParameterTool params = ParameterTool.fromArgs(args); // set up execution environment final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); // make parameters available in the web interface env.getConfig().setGlobalJobParameters(params); // read input data DataSet<Edge> edges; if (params.has("edges")) { edges = env.readCsvFile(params.get("edges")) .fieldDelimiter(" ") .includeFields(true, true) .types(Integer.class, Integer.class) .map(new TupleEdgeConverter()); } else { System.out.println("Executing EnumTriangles example with default edges data set."); System.out.println("Use --edges to specify file input."); edges = EnumTrianglesData.getDefaultEdgeDataSet(env); } // project edges by vertex id DataSet<Edge> edgesById = edges .map(new EdgeByIdProjector()); DataSet<Triad> triangles = edgesById // build triads .groupBy(Edge.V1).sortGroup(Edge.V2, Order.ASCENDING).reduceGroup(new TriadBuilder()) // filter triads .join(edgesById).where(Triad.V2, Triad.V3).equalTo(Edge.V1, Edge.V2).with(new TriadFilter()); // emit result if (params.has("output")) { triangles.writeAsCsv(params.get("output"), "\n", ","); // execute program env.execute("Basic Triangle Enumeration Example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); triangles.print(); } }
*************************************************************************
@Override protected List<OUT> executeOnCollections(List<IN> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception { MapPartitionFunction<IN, OUT> function = this.userFunction.getUserCodeObject(); FunctionUtils.setFunctionRuntimeContext(function, ctx); FunctionUtils.openFunction(function, this.parameters); ArrayList<OUT> result = new ArrayList<OUT>(inputData.size() / 4); TypeSerializer<IN> inSerializer = getOperatorInfo().getInputType().createSerializer(executionConfig); TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig); CopyingIterator<IN> source = new CopyingIterator<IN>(inputData.iterator(), inSerializer); CopyingListCollector<OUT> resultCollector = new CopyingListCollector<OUT>(result, outSerializer); function.mapPartition(source, resultCollector); result.trimToSize(); FunctionUtils.closeFunction(function); return result; }
--------------------------------------------------------------------------------------------
@Override public void createResource() throws Exception { cluster = builder.getCluster(); session = cluster.connect(); session.execute(String.format("CREATE KEYSPACE IF NOT EXISTS %s with replication={'class':'SimpleStrategy', 'replication_factor':1};", keySpace)); session.execute(String.format("CREATE TABLE IF NOT EXISTS %s.%s (sink_id text, sub_id int, checkpoint_id bigint, PRIMARY KEY (sink_id, sub_id));", keySpace, table)); try { session.close(); } catch (Exception e) { LOG.error("Error while closing session.", e); } try { cluster.close(); } catch (Exception e) { LOG.error("Error while closing cluster.", e); } }
Generates the necessary tables to store information. @throws Exception
@Override public void onTaskFailure(Execution taskExecution, Throwable cause) { executionGraph.getJobMasterMainThreadExecutor().assertRunningInMainThread(); // to better handle the lack of resources (potentially by a scale-in), we // make failures due to missing resources global failures if (cause instanceof NoResourceAvailableException) { LOG.info("Not enough resources to schedule {} - triggering full recovery.", taskExecution); executionGraph.failGlobal(cause); return; } LOG.info("Recovering task failure for {} (#{}) via individual restart.", taskExecution.getVertex().getTaskNameWithSubtaskIndex(), taskExecution.getAttemptNumber()); numTaskFailures.inc(); // trigger the restart once the task has reached its terminal state // Note: currently all tasks passed here are already in their terminal state, // so we could actually avoid the future. We use it anyways because it is cheap and // it helps to support better testing final CompletableFuture<ExecutionState> terminationFuture = taskExecution.getTerminalStateFuture(); terminationFuture.thenRun( () -> performExecutionVertexRestart(taskExecution.getVertex(), taskExecution.getGlobalModVersion())); }
------------------------------------------------------------------------
public static void main(String[] args) throws Exception { final ParameterTool params = ParameterTool.fromArgs(args); final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); env.getConfig().setGlobalJobParameters(params); // get input data DataSet<Tuple2<String, String>> documents = getDocumentsDataSet(env, params); DataSet<Tuple3<Integer, String, Integer>> ranks = getRanksDataSet(env, params); DataSet<Tuple2<String, String>> visits = getVisitsDataSet(env, params); // Retain documents with keywords DataSet<Tuple1<String>> filterDocs = documents .filter(new FilterDocByKeyWords()) .project(0); // Filter ranks by minimum rank DataSet<Tuple3<Integer, String, Integer>> filterRanks = ranks .filter(new FilterByRank()); // Filter visits by visit date DataSet<Tuple1<String>> filterVisits = visits .filter(new FilterVisitsByDate()) .project(0); // Join the filtered documents and ranks, i.e., get all URLs with min rank and keywords DataSet<Tuple3<Integer, String, Integer>> joinDocsRanks = filterDocs.join(filterRanks) .where(0).equalTo(1) .projectSecond(0, 1, 2); // Anti-join urls with visits, i.e., retain all URLs which have NOT been visited in a certain time DataSet<Tuple3<Integer, String, Integer>> result = joinDocsRanks.coGroup(filterVisits) .where(1).equalTo(0) .with(new AntiJoinVisits()); // emit result if (params.has("output")) { result.writeAsCsv(params.get("output"), "\n", "|"); // execute program env.execute("WebLogAnalysis Example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); result.print(); } }
*************************************************************************
private static DataSet<Tuple2<String, String>> getDocumentsDataSet(ExecutionEnvironment env, ParameterTool params) { // Create DataSet for documents relation (URL, Doc-Text) if (params.has("documents")) { return env.readCsvFile(params.get("documents")) .fieldDelimiter("|") .types(String.class, String.class); } else { System.out.println("Executing WebLogAnalysis example with default documents data set."); System.out.println("Use --documents to specify file input."); return WebLogData.getDocumentDataSet(env); } }
*************************************************************************
public void jarDir(File dirOrFile2Jar, File destJar) throws IOException { if (dirOrFile2Jar == null || destJar == null) { throw new IllegalArgumentException(); } mDestJarName = destJar.getCanonicalPath(); FileOutputStream fout = new FileOutputStream(destJar); JarOutputStream jout = new JarOutputStream(fout); //jout.setLevel(0); try { jarDir(dirOrFile2Jar, jout, null); } catch (IOException ioe) { throw ioe; } finally { jout.close(); fout.close(); } }
Jars a given directory or single file into a JarOutputStream.
public void unjarDir(File jarFile, File destDir) throws IOException {
    FileInputStream fis = new FileInputStream(jarFile);
    unjar(fis, destDir);
}
Unjars a given jar file into a given directory.
public void unjar(InputStream in, File destDir) throws IOException { BufferedOutputStream dest = null; JarInputStream jis = new JarInputStream(in); JarEntry entry; while ((entry = jis.getNextJarEntry()) != null) { if (entry.isDirectory()) { File dir = new File(destDir, entry.getName()); dir.mkdir(); if (entry.getTime() != -1) { dir.setLastModified(entry.getTime()); } continue; } int count; byte[] data = new byte[BUFFER_SIZE]; File destFile = new File(destDir, entry.getName()); if (mVerbose) { System.out.println("unjarring " + destFile + " from " + entry.getName()); } FileOutputStream fos = new FileOutputStream(destFile); dest = new BufferedOutputStream(fos, BUFFER_SIZE); try { while ((count = jis.read(data, 0, BUFFER_SIZE)) != -1) { dest.write(data, 0, count); } dest.flush(); } finally { dest.close(); } if (entry.getTime() != -1) { destFile.setLastModified(entry.getTime()); } } jis.close(); }
Given an InputStream on a jar file, unjars the contents into the given directory.
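A hedged usage sketch for the method above: extract an archive into a local directory. The file names are hypothetical.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

public class UnjarExample {
    public static void main(String[] args) throws IOException {
        JarHelper jarHelper = new JarHelper();
        File destDir = new File("extracted");
        destDir.mkdirs();
        // The helper streams every entry of app.jar into ./extracted.
        try (FileInputStream fis = new FileInputStream(new File("app.jar"))) {
            jarHelper.unjar(fis, destDir);
        }
    }
}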
private void jarDir(File dirOrFile2jar, JarOutputStream jos, String path) throws IOException { if (mVerbose) { System.out.println("checking " + dirOrFile2jar); } if (dirOrFile2jar.isDirectory()) { String[] dirList = dirOrFile2jar.list(); String subPath = (path == null) ? "" : (path + dirOrFile2jar.getName() + SEP); if (path != null) { JarEntry je = new JarEntry(subPath); je.setTime(dirOrFile2jar.lastModified()); jos.putNextEntry(je); jos.flush(); jos.closeEntry(); } for (int i = 0; i < dirList.length; i++) { File f = new File(dirOrFile2jar, dirList[i]); jarDir(f, jos, subPath); } } else if (dirOrFile2jar.exists()) { if (dirOrFile2jar.getCanonicalPath().equals(mDestJarName)) { if (mVerbose) { System.out.println("skipping " + dirOrFile2jar.getPath()); } return; } if (mVerbose) { System.out.println("adding " + dirOrFile2jar.getPath()); } FileInputStream fis = new FileInputStream(dirOrFile2jar); try { JarEntry entry = new JarEntry(path + dirOrFile2jar.getName()); entry.setTime(dirOrFile2jar.lastModified()); jos.putNextEntry(entry); while ((mByteCount = fis.read(mBuffer)) != -1) { jos.write(mBuffer, 0, mByteCount); if (mVerbose) { System.out.println("wrote " + mByteCount + " bytes"); } } jos.flush(); jos.closeEntry(); } catch (IOException ioe) { throw ioe; } finally { fis.close(); } } }
Recursively jars up the given path under the given directory.
public static void main(String[] args) throws IOException { if (args.length < 2) { System.err.println("Usage: JarHelper jarname.jar directory"); return; } JarHelper jarHelper = new JarHelper(); jarHelper.mVerbose = true; File destJar = new File(args[0]); File dirOrFile2Jar = new File(args[1]); jarHelper.jarDir(dirOrFile2Jar, destJar); }
for debugging
private boolean[] computeProjectionMask() { // mask with all fields of the schema boolean[] projectionMask = new boolean[schema.getMaximumId() + 1]; // for each selected field for (int inIdx : selectedFields) { // set all nested fields of a selected field to true TypeDescription fieldSchema = schema.getChildren().get(inIdx); for (int i = fieldSchema.getId(); i <= fieldSchema.getMaximumId(); i++) { projectionMask[i] = true; } } return projectionMask; }
Computes the ORC projection mask of the fields to include from the selected fields. @return The ORC projection mask.
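A worked example of the same mask computation on a small schema; it assumes the org.apache.orc.TypeDescription API and is illustrative only, not part of the input format.

import java.util.Arrays;

import org.apache.orc.TypeDescription;

public class ProjectionMaskExample {

    public static void main(String[] args) {
        // ORC assigns type IDs in pre-order: 0 = root, 1 = a, 2 = b, 3 = b.x, 4 = b.y, 5 = c
        TypeDescription schema = TypeDescription.fromString("struct<a:int,b:struct<x:int,y:string>,c:double>");
        int[] selectedFields = {1};  // select the top-level field 'b'

        boolean[] projectionMask = new boolean[schema.getMaximumId() + 1];
        for (int inIdx : selectedFields) {
            TypeDescription fieldSchema = schema.getChildren().get(inIdx);
            for (int i = fieldSchema.getId(); i <= fieldSchema.getMaximumId(); i++) {
                projectionMask[i] = true;
            }
        }

        // prints [false, false, true, true, true, false]: 'b' and its nested fields are included
        System.out.println(Arrays.toString(projectionMask));
    }
}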
private boolean ensureBatch() throws IOException {
    if (nextRow >= rowsInBatch) {
        // No more rows available in the Rows array.
        nextRow = 0;
        // Try to read the next batch of rows from the ORC file.
        boolean moreRows = orcRowsReader.nextBatch(rowBatch);

        if (moreRows) {
            // Load the data into the Rows array.
            rowsInBatch = fillRows(rows, schema, rowBatch, selectedFields);
        }
        return moreRows;
    }
    // there is at least one Row left in the Rows array.
    return true;
}
Checks if there is at least one row left in the batch to return. If no more rows are available, it reads another batch of rows. @return Returns true if there is at least one more row to return, false otherwise. @throws IOException Thrown if an exception occurs while reading a batch.
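A hedged sketch of how a record-returning method in the same class would typically drive ensureBatch(); the rows and nextRow fields are taken from the method above, but this is illustrative rather than the actual nextRecord implementation.

public Row nextRecord(Row reuse) throws IOException {
    // Make sure a batch of rows is materialized; a false return signals the end of the input.
    if (!ensureBatch()) {
        return null;
    }
    // Hand out the next row from the current batch.
    return rows[nextRow++];
}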
private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(batchSize); this.conf.write(out); out.writeUTF(schema.toString()); out.writeInt(selectedFields.length); for (int f : selectedFields) { out.writeInt(f); } out.writeInt(conjunctPredicates.size()); for (Predicate p : conjunctPredicates) { out.writeObject(p); } }
--------------------------------------------------------------------------------------------
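A hedged sketch of the matching readObject, mirroring the field order written above. It assumes conf is a Hadoop Configuration (consistent with conf.write(out)) and that schema, selectedFields, and conjunctPredicates have the types suggested by the writer; it is not the original implementation.

private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    batchSize = in.readInt();

    // conf implements Writable in this sketch, matching the conf.write(out) call above
    org.apache.hadoop.conf.Configuration configuration = new org.apache.hadoop.conf.Configuration();
    configuration.readFields(in);
    if (this.conf == null) {
        this.conf = configuration;
    }

    this.schema = TypeDescription.fromString(in.readUTF());

    this.selectedFields = new int[in.readInt()];
    for (int i = 0; i < selectedFields.length; i++) {
        selectedFields[i] = in.readInt();
    }

    this.conjunctPredicates = new ArrayList<>();
    int numPredicates = in.readInt();
    for (int i = 0; i < numPredicates; i++) {
        conjunctPredicates.add((Predicate) in.readObject());
    }
}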
public static String getOptimizedPlanAsJson(Optimizer compiler, PackagedProgram prog, int parallelism) throws CompilerException, ProgramInvocationException { PlanJSONDumpGenerator jsonGen = new PlanJSONDumpGenerator(); return jsonGen.getOptimizerPlanAsJSON((OptimizedPlan) getOptimizedPlan(compiler, prog, parallelism)); }
------------------------------------------------------------------------
public JobSubmissionResult run(PackagedProgram prog, int parallelism) throws ProgramInvocationException, ProgramMissingJobException { Thread.currentThread().setContextClassLoader(prog.getUserCodeClassLoader()); if (prog.isUsingProgramEntryPoint()) { final JobWithJars jobWithJars = prog.getPlanWithJars(); return run(jobWithJars, parallelism, prog.getSavepointSettings()); } else if (prog.isUsingInteractiveMode()) { log.info("Starting program in interactive mode (detached: {})", isDetached()); final List<URL> libraries = prog.getAllLibraries(); ContextEnvironmentFactory factory = new ContextEnvironmentFactory(this, libraries, prog.getClasspaths(), prog.getUserCodeClassLoader(), parallelism, isDetached(), prog.getSavepointSettings()); ContextEnvironment.setAsContext(factory); try { // invoke main method prog.invokeInteractiveModeForExecution(); if (lastJobExecutionResult == null && factory.getLastEnvCreated() == null) { throw new ProgramMissingJobException("The program didn't contain a Flink job."); } if (isDetached()) { // in detached mode, we execute the whole user code to extract the Flink job, afterwards we run it here return ((DetachedEnvironment) factory.getLastEnvCreated()).finalizeExecute(); } else { // in blocking mode, we execute all Flink jobs contained in the user code and then return here return this.lastJobExecutionResult; } } finally { ContextEnvironment.unsetContext(); } } else { throw new ProgramInvocationException("PackagedProgram does not have a valid invocation mode."); } }
General purpose method to run a user jar from the CliFrontend in either blocking or detached mode, depending on whether {@code setDetached(true)} or {@code setDetached(false)} was called. @param prog the packaged program @param parallelism the parallelism to execute the contained Flink job @return The result of the execution @throws ProgramMissingJobException @throws ProgramInvocationException
public JobSubmissionResult run(JobWithJars jobWithJars, int parallelism, SavepointRestoreSettings savepointSettings) throws CompilerException, ProgramInvocationException { ClassLoader classLoader = jobWithJars.getUserCodeClassLoader(); if (classLoader == null) { throw new IllegalArgumentException("The given JobWithJars does not provide a usercode class loader."); } OptimizedPlan optPlan = getOptimizedPlan(compiler, jobWithJars, parallelism); return run(optPlan, jobWithJars.getJarFiles(), jobWithJars.getClasspaths(), classLoader, savepointSettings); }
Runs a program on the Flink cluster to which this client is connected. The call blocks until the execution is complete, and returns afterwards. @param jobWithJars The program to be executed. @param parallelism The default parallelism to use when running the program. The default parallelism is used when the program does not set a parallelism by itself. @throws CompilerException Thrown, if the compiler encounters an illegal situation. @throws ProgramInvocationException Thrown, if the program could not be instantiated from its jar file, or if the submission failed. That might be either due to an I/O problem, i.e. the job-manager is unreachable, or due to the fact that the parallel execution failed.
public Map<String, OptionalFailure<Object>> getAccumulators(JobID jobID) throws Exception { return getAccumulators(jobID, ClassLoader.getSystemClassLoader()); }
Requests and returns the accumulators for the given job identifier. Accumulators can be requested while a job is running or after it has finished. The default class loader is used to deserialize the incoming accumulator results. @param jobID The job identifier of a job. @return A Map containing the accumulator's name and its value.
private static OptimizedPlan getOptimizedPlan(Optimizer compiler, JobWithJars prog, int parallelism) throws CompilerException, ProgramInvocationException { return getOptimizedPlan(compiler, prog.getPlan(), parallelism); }
Creates the optimized plan for a given program, using this client's compiler. @param prog The program to be compiled. @return The compiled and optimized plan, as returned by the compiler. @throws CompilerException Thrown, if the compiler encounters an illegal situation.