Columns (each record below repeats them in order: name, code_snippet, score):
  name          string, length 12 to 178
  code_snippet  string, length 8 to 36.5k
  score         float64, range 3.26 to 3.68
flink_Tuple16_setFields_rdh
/** * Sets new values to all fields of the tuple. * * @param f0 * The value for field 0 * @param f1 * The value for field 1 * @param f2 * The value for field 2 * @param f3 * The value for field 3 * @param f4 * The value for field 4 * @param f5 * The value for field 5 * @param f6 * The value for field 6 * @param f7 * The value for field 7 * @param f8 * The value for field 8 * @param f9 * The value for field 9 * @param f10 * The value for field 10 * @param f11 * The value for field 11 * @param f12 * The value for field 12 * @param f13 * The value for field 13 * @param f14 * The value for field 14 * @param f15 * The value for field 15 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; this.f8 = f8; this.f9 = f9; this.f10 = f10; this.f11 = f11; this.f12 = f12; this.f13 = f13; this.f14 = f14; this.f15 = f15; }
3.26
flink_FirstValueAggFunction_createAccumulator_rdh
// -------------------------------------------------------------------------------------------- // Runtime // -------------------------------------------------------------------------------------------- @Override public RowData createAccumulator() { GenericRowData acc = new GenericRowData(2); acc.setField(0, null);acc.setField(1, Long.MAX_VALUE); return acc; }
3.26
flink_FirstValueAggFunction_m0_rdh
// -------------------------------------------------------------------------------------------- // Planning // -------------------------------------------------------------------------------------------- @Override public List<DataType> m0() { return Collections.singletonList(valueDataType); }
3.26
flink_ObjectArrayTypeInfo_getInfoFor_rdh
/** * Creates a new {@link org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo} from a {@link TypeInformation} for the component type. * * <p>This must be used in cases where the complete type of the array is not available as a * {@link java.lang.reflect.Type} or {@link java.lang.Class}. */ @SuppressWarnings("unchecked") @PublicEvolving public static <T, C> ObjectArrayTypeInfo<T, C> getInfoFor(TypeInformation<C> componentInfo) { checkNotNull(componentInfo); return new ObjectArrayTypeInfo<T, C>(((Class<T>) (Array.newInstance(componentInfo.getTypeClass(), 0).getClass())), componentInfo); }
3.26
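A minimal usage sketch of the factory above, relying on the standard BasicTypeInfo constants; the target array type String[] is chosen purely for illustration.

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo;

// Derives the array class (String[]) from the component type info via Array.newInstance,
// exactly as the factory above does internally.
ObjectArrayTypeInfo<String[], String> arrayInfo =
        ObjectArrayTypeInfo.<String[], String>getInfoFor(BasicTypeInfo.STRING_TYPE_INFO);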
flink_ObjectArrayTypeInfo_m0_rdh
// -------------------------------------------------------------------------------------------- @PublicEvolving public static <T, C> ObjectArrayTypeInfo<T, C> m0(Class<T> arrayClass, TypeInformation<C> componentInfo) { checkNotNull(arrayClass); checkNotNull(componentInfo); checkArgument(arrayClass.isArray(), ("Class " + arrayClass) + " must be an array."); return new ObjectArrayTypeInfo<T, C>(arrayClass, componentInfo); }
3.26
flink_ObjectArrayTypeInfo_isBasicType_rdh
// -------------------------------------------------------------------------------------------- @Override @PublicEvolving public boolean isBasicType() { return false; }
3.26
flink_StateDescriptor_getName_rdh
// ------------------------------------------------------------------------ /** * Returns the name of this {@code StateDescriptor}. */ public String getName() { return name; }
3.26
flink_StateDescriptor_getSerializer_rdh
/** * Returns the {@link TypeSerializer} that can be used to serialize the value in the state. Note * that the serializer may initialized lazily and is only guaranteed to exist after calling * {@link #initializeSerializerUnlessSet(ExecutionConfig)}. */ public TypeSerializer<T> getSerializer() { TypeSerializer<T> serializer = serializerAtomicReference.get(); if (serializer != null) { return serializer.duplicate(); } else { throw new IllegalStateException("Serializer not yet initialized."); } }
3.26
flink_StateDescriptor_hashCode_rdh
// ------------------------------------------------------------------------ // Standard Utils // ------------------------------------------------------------------------ @Override public final int hashCode() { return name.hashCode() + (31 * getClass().hashCode()); }
3.26
flink_StateDescriptor_writeObject_rdh
// ------------------------------------------------------------------------ // Serialization // ------------------------------------------------------------------------ private void writeObject(final ObjectOutputStream out) throws IOException { // write all the non-transient fields out.defaultWriteObject(); // write the non-serializable default value field if (defaultValue == null) { // we don't have a default value out.writeBoolean(false); } else { TypeSerializer<T> serializer = serializerAtomicReference.get(); checkNotNull(serializer, "Serializer not initialized."); // we have a default value out.writeBoolean(true); byte[] serializedDefaultValue; try (ByteArrayOutputStream baos = new ByteArrayOutputStream();DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(baos)) { TypeSerializer<T> duplicateSerializer = serializer.duplicate();duplicateSerializer.serialize(defaultValue, outView); outView.flush(); serializedDefaultValue = baos.toByteArray(); } catch (Exception e) { throw new IOException(("Unable to serialize default value of type " + defaultValue.getClass().getSimpleName()) + ".", e); } out.writeInt(serializedDefaultValue.length); out.write(serializedDefaultValue); } }
3.26
flink_StateDescriptor_enableTimeToLive_rdh
/** * Configures optional activation of state time-to-live (TTL). * * <p>State user value will expire, become unavailable and be cleaned up in storage depending on * configured {@link StateTtlConfig}. * * <p>If enabling the TTL configuration, the field {@link StateDescriptor#defaultValue} will be * invalid. * * @param ttlConfig * configuration of state TTL */ public void enableTimeToLive(StateTtlConfig ttlConfig) { Preconditions.checkNotNull(ttlConfig); if (ttlConfig.isEnabled()) { Preconditions.checkArgument(queryableStateName == null, "Queryable state is currently not supported with TTL"); } this.ttlConfig = ttlConfig; }
3.26
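A minimal sketch of how the method above is typically used, based on the public StateTtlConfig builder; the descriptor name, value type, and TTL duration are placeholders.

import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.time.Time;

StateTtlConfig ttlConfig = StateTtlConfig
        .newBuilder(Time.hours(1))                                         // expire one hour after the last write
        .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
        .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
        .build();

ValueStateDescriptor<Long> descriptor = new ValueStateDescriptor<>("lastSeen", Long.class);
descriptor.enableTimeToLive(ttlConfig);   // must be configured before the state is registered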
flink_StateDescriptor_initializeSerializerUnlessSet_rdh
/** * Initializes the serializer, unless it has been initialized before. * * @param executionConfig * The execution config to use when creating the serializer. */ public void initializeSerializerUnlessSet(ExecutionConfig executionConfig) { if (serializerAtomicReference.get() == null) { checkState(typeInfo != null, "no serializer and no type info"); // try to instantiate and set the serializer TypeSerializer<T> serializer = typeInfo.createSerializer(executionConfig); // use cas to assure the singleton if (!serializerAtomicReference.compareAndSet(null, serializer)) { LOG.debug("Someone else beat us at initializing the serializer."); } } }
3.26
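A small sketch of the lazy-initialization contract spelled out by getSerializer() and initializeSerializerUnlessSet() above, assuming the descriptor was created from a class or TypeInformation rather than an explicit serializer.

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeutils.TypeSerializer;

ValueStateDescriptor<String> descriptor = new ValueStateDescriptor<>("buffer", String.class);

// Calling descriptor.getSerializer() at this point would throw
// IllegalStateException("Serializer not yet initialized.").
descriptor.initializeSerializerUnlessSet(new ExecutionConfig());   // idempotent; a CAS guards concurrent callers
TypeSerializer<String> serializer = descriptor.getSerializer();    // returns a duplicate() on every call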
flink_StateDescriptor_isSerializerInitialized_rdh
// ------------------------------------------------------------------------ /** * Checks whether the serializer has been initialized. Serializer initialization is lazy, to * allow parametrization of serializers with an {@link ExecutionConfig} via {@link #initializeSerializerUnlessSet(ExecutionConfig)}. * * @return True if the serializers have been initialized, false otherwise. */ public boolean isSerializerInitialized() { return serializerAtomicReference.get() != null; }
3.26
flink_StateDescriptor_getDefaultValue_rdh
/** * Returns the default value. */ public T getDefaultValue() { if (defaultValue != null) { TypeSerializer<T> serializer = serializerAtomicReference.get(); if (serializer != null) { return serializer.copy(defaultValue); } else { throw new IllegalStateException("Serializer not yet initialized."); } } else { return null; } }
3.26
flink_YarnJobClusterEntrypoint_main_rdh
// ------------------------------------------------------------------------ // The executable entry point for the Yarn Application Master Process // for a single Flink job. // ------------------------------------------------------------------------ public static void main(String[] args) { LOG.warn("Job Clusters are deprecated since Flink 1.15. Please use an Application Cluster/Application Mode instead."); // startup checks and logging EnvironmentInformation.logEnvironmentInfo(LOG, YarnJobClusterEntrypoint.class.getSimpleName(), args); SignalHandler.register(LOG); JvmShutdownSafeguard.installAsShutdownHook(LOG); Map<String, String> env = System.getenv(); final String workingDirectory = env.get(Environment.PWD.key()); Preconditions.checkArgument(workingDirectory != null, "Working directory variable (%s) not set", Environment.PWD.key()); try { YarnEntrypointUtils.logYarnEnvironmentInformation(env, LOG); } catch (IOException e) { LOG.warn("Could not log YARN environment information.", e); } final Configuration dynamicParameters = ClusterEntrypointUtils.parseParametersOrExit(args, new DynamicParametersConfigurationParserFactory(), YarnJobClusterEntrypoint.class); final Configuration configuration = YarnEntrypointUtils.loadConfiguration(workingDirectory, dynamicParameters, env); YarnJobClusterEntrypoint yarnJobClusterEntrypoint = new YarnJobClusterEntrypoint(configuration); ClusterEntrypoint.runClusterEntrypoint(yarnJobClusterEntrypoint); }
3.26
flink_SqlTimeSerializer_snapshotConfiguration_rdh
// -------------------------------------------------------------------------------------------- // Serializer configuration snapshotting // -------------------------------------------------------------------------------------------- @Override public TypeSerializerSnapshot<Time> snapshotConfiguration() { return new SqlTimeSerializerSnapshot(); }
3.26
flink_ExpressionConverter_extractValue_rdh
/** * Extracts a value from a literal. Including planner-specific instances such as {@link DecimalData}. */ @SuppressWarnings("unchecked") public static <T> T extractValue(ValueLiteralExpression literal, Class<T> clazz) { final Optional<Object> v17 = literal.getValueAs(Object.class); if (!v17.isPresent()) {throw new TableException("Invalid literal."); } final Object object = v17.get(); if (clazz.equals(BigDecimal.class)) { final Optional<BigDecimal> possibleDecimal = literal.getValueAs(BigDecimal.class); if (possibleDecimal.isPresent()) { return ((T) (possibleDecimal.get())); } if (object instanceof DecimalData) { return ((T) (((DecimalData) (object)).toBigDecimal())); } } return literal.getValueAs(clazz).orElseThrow(() -> new TableException("Unsupported literal class: " + clazz)); }
3.26
flink_FileChannelBoundedData_create_rdh
// ------------------------------------------------------------------------ public static FileChannelBoundedData create(Path filePath, int memorySegmentSize) throws IOException { final FileChannel fileChannel = FileChannel.open(filePath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE); return new FileChannelBoundedData(filePath, fileChannel, memorySegmentSize); }
3.26
flink_LegacySourceTransformation_setBoundedness_rdh
/** * Mutable for legacy sources in the Table API. */ public void setBoundedness(Boundedness boundedness) { this.boundedness = boundedness; }
3.26
flink_LegacySourceTransformation_getOperatorFactory_rdh
/** * Returns the {@code StreamOperatorFactory} of this {@code LegacySourceTransformation}. */ public StreamOperatorFactory<T> getOperatorFactory() { return operatorFactory; }
3.26
flink_WindowedStream_evictor_rdh
/** * Sets the {@code Evictor} that should be used to evict elements from a window before emission. * * <p>Note: When using an evictor window performance will degrade significantly, since * incremental aggregation of window results cannot be used. */ @PublicEvolving public WindowedStream<T, K, W> evictor(Evictor<? super T, ? super W> evictor) { builder.evictor(evictor); return this; }
3.26
flink_WindowedStream_reduce_rdh
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given reducer. * * @param reduceFunction * The reduce function that is used for incremental aggregation. * @param function * The window function. * @param resultType * Type information for the result type of the window function * @return The data stream that is the result of applying the window function to the window. */ @Internal public <R> SingleOutputStreamOperator<R> reduce(ReduceFunction<T> reduceFunction, ProcessWindowFunction<T, R, K, W> function, TypeInformation<R> resultType) { // clean the closures function = input.getExecutionEnvironment().clean(function); reduceFunction = input.getExecutionEnvironment().clean(reduceFunction); final String opName = builder.generateOperatorName(); final String opDescription = builder.generateOperatorDescription(reduceFunction, function); OneInputStreamOperator<T, R> operator = builder.reduce(reduceFunction, function); return input.transform(opName, resultType, operator).setDescription(opDescription); }
3.26
flink_WindowedStream_allowedLateness_rdh
/** * Sets the time by which elements are allowed to be late. Elements that arrive behind the * watermark by more than the specified time will be dropped. By default, the allowed lateness * is {@code 0L}. * * <p>Setting an allowed lateness is only valid for event-time windows. */ @PublicEvolving public WindowedStream<T, K, W> allowedLateness(Time lateness) { builder.allowedLateness(lateness); return this; } /** * Send late arriving data to the side output identified by the given {@link OutputTag}. Data is * considered late after the watermark has passed the end of the window plus the allowed * lateness set using {@link #allowedLateness(Time)}. * * <p>You can get the stream of late data using {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the windowed operation with the same {@link OutputTag}
3.26
flink_WindowedStream_minBy_rdh
/** * Applies an aggregation that gives the minimum element of every window of the data stream by * the given position. If more elements have the same minimum value the operator returns either * the first or last one depending on the parameter setting. * * @param positionToMinBy * The position to minimize * @param first * If true, then the operator return the first element with the minimum value, * otherwise returns the last * @return The transformed DataStream. */ public SingleOutputStreamOperator<T> minBy(int positionToMinBy, boolean first) { return aggregate(new ComparableAggregator<>(positionToMinBy, input.getType(), AggregationType.MINBY, first, input.getExecutionConfig())); } /** * Applies an aggregation that gives the minimum element of the pojo data stream by the given * field expression for every window. A field expression is either the name of a public field or * a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot * can be used to drill down into objects, as in {@code "field1.getInnerField2()"}
3.26
flink_WindowedStream_process_rdh
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Note that this function requires that all data in the windows is buffered until the window * is evaluated, as the function provides no means of incremental aggregation. * * @param function * The window function. * @param resultType * Type information for the result type of the window function * @return The data stream that is the result of applying the window function to the window. */ @Internal public <R> SingleOutputStreamOperator<R> process(ProcessWindowFunction<T, R, K, W> function, TypeInformation<R> resultType) { function = input.getExecutionEnvironment().clean(function); final String opName = builder.generateOperatorName(); final String opDesc = builder.generateOperatorDescription(function, null); OneInputStreamOperator<T, R> operator = builder.process(function); return input.transform(opName, resultType, operator).setDescription(opDesc); }
3.26
flink_WindowedStream_aggregate_rdh
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given aggregate function. This means * that the window function typically has only a single value to process when called. * * @param aggregateFunction * The aggregation function that is used for incremental aggregation. * @param windowFunction * The window function. * @param accumulatorType * Type information for the internal accumulator type of the aggregation * function * @param resultType * Type information for the result type of the window function * @return The data stream that is the result of applying the window function to the window. * @param <ACC> * The type of the AggregateFunction's accumulator * @param <V> * The type of AggregateFunction's result, and the WindowFunction's input * @param <R> * The type of the elements in the resulting stream, equal to the WindowFunction's * result type */@PublicEvolving public <ACC, V, R> SingleOutputStreamOperator<R> aggregate(AggregateFunction<T, ACC, V> aggregateFunction, ProcessWindowFunction<V, R, K, W> windowFunction, TypeInformation<ACC> accumulatorType, TypeInformation<V> aggregateResultType, TypeInformation<R> resultType) { checkNotNull(aggregateFunction, "aggregateFunction"); checkNotNull(windowFunction, "windowFunction"); checkNotNull(accumulatorType, "accumulatorType"); checkNotNull(aggregateResultType, "aggregateResultType"); checkNotNull(resultType, "resultType"); if (aggregateFunction instanceof RichFunction) { throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction."); } // clean the closures windowFunction = input.getExecutionEnvironment().clean(windowFunction); aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction); final String opName = builder.generateOperatorName(); final String opDescription = builder.generateOperatorDescription(aggregateFunction, windowFunction); OneInputStreamOperator<T, R> operator = builder.aggregate(aggregateFunction, windowFunction, accumulatorType); return input.transform(opName, resultType, operator).setDescription(opDescription); }
3.26
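A hedged usage sketch of incremental window aggregation as described in the Javadoc above. It uses the simpler public aggregate(AggregateFunction, ProcessWindowFunction) overload that infers the type information itself; the input stream `events` (a Tuple2 of key and value), the window size, and the averaging logic are all assumptions for illustration.

import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

// Assumed input: DataStream<Tuple2<String, Long>> events, keyed by the String field.
DataStream<Tuple2<String, Double>> averages = events
        .keyBy(e -> e.f0)
        .window(TumblingProcessingTimeWindows.of(Time.seconds(10)))
        .aggregate(
                // Incremental part: keeps only (sum, count) per window instead of buffering elements.
                new AggregateFunction<Tuple2<String, Long>, Tuple2<Long, Long>, Double>() {
                    @Override public Tuple2<Long, Long> createAccumulator() { return Tuple2.of(0L, 0L); }
                    @Override public Tuple2<Long, Long> add(Tuple2<String, Long> v, Tuple2<Long, Long> acc) {
                        return Tuple2.of(acc.f0 + v.f1, acc.f1 + 1);
                    }
                    @Override public Double getResult(Tuple2<Long, Long> acc) { return acc.f0 / (double) acc.f1; }
                    @Override public Tuple2<Long, Long> merge(Tuple2<Long, Long> a, Tuple2<Long, Long> b) {
                        return Tuple2.of(a.f0 + b.f0, a.f1 + b.f1);
                    }
                },
                // Window part: sees a single pre-aggregated value per window and attaches the key.
                new ProcessWindowFunction<Double, Tuple2<String, Double>, String, TimeWindow>() {
                    @Override public void process(String key, Context ctx, Iterable<Double> avg,
                                                  Collector<Tuple2<String, Double>> out) {
                        out.collect(Tuple2.of(key, avg.iterator().next()));
                    }
                });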
flink_WindowedStream_apply_rdh
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given reducer. * * @param reduceFunction * The reduce function that is used for incremental aggregation. * @param function * The window function. * @param resultType * Type information for the result type of the window function * @return The data stream that is the result of applying the window function to the window. * @deprecated Use {@link #reduce(ReduceFunction, WindowFunction, TypeInformation)} instead. */ @Deprecated public <R> SingleOutputStreamOperator<R> apply(ReduceFunction<T> reduceFunction, WindowFunction<T, R, K, W> function, TypeInformation<R> resultType) { // clean the closures function = input.getExecutionEnvironment().clean(function); reduceFunction = input.getExecutionEnvironment().clean(reduceFunction); final String opName = builder.generateOperatorName(); final String opDesc = builder.generateOperatorDescription(reduceFunction, function); OneInputStreamOperator<T, R> operator = builder.reduce(reduceFunction, function); return input.transform(opName, resultType, operator).setDescription(opDesc); }
3.26
flink_WindowedStream_maxBy_rdh
/** * Applies an aggregation that gives the maximum element of every window of the data stream by * the given position. If more elements have the same maximum value the operator returns either * the first or last one depending on the parameter setting. * * @param positionToMaxBy * The position to maximize by * @param first * If true, then the operator return the first element with the maximum value, * otherwise returns the last * @return The transformed DataStream. */ public SingleOutputStreamOperator<T> maxBy(int positionToMaxBy, boolean first) { return aggregate(new ComparableAggregator<>(positionToMaxBy, input.getType(), AggregationType.MAXBY, first, input.getExecutionConfig())); } /** * Applies an aggregation that gives the maximum element of the pojo data stream by the given * field expression for every window. A field expression is either the name of a public field or * a getter method with parentheses of the {@link DataStream}S underlying type. A dot can be * used to drill down into objects, as in {@code "field1.getInnerField2()"}
3.26
flink_WindowedStream_trigger_rdh
/** * Sets the {@code Trigger} that should be used to trigger window emission. */ @PublicEvolving public WindowedStream<T, K, W> trigger(Trigger<? super T, ? super W> trigger) { builder.trigger(trigger); return this; }
3.26
flink_WindowedStream_min_rdh
/** * Applies an aggregation that gives the minimum value of the pojo data stream at the given * field expression for every window. * * <p>A field * expression is either the name of a public field or a getter method with * parentheses of the {@link DataStream}S underlying type. A dot can be used to drill down into * objects, as in {@code "field1.getInnerField2()"}. * * @param field * The field expression based on which the aggregation will be applied. * @return The transformed DataStream. */ public SingleOutputStreamOperator<T> min(String field) { return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationType.MIN, false, input.getExecutionConfig())); }
3.26
flink_WindowedStream_max_rdh
/** * Applies an aggregation that gives the maximum value of the pojo data stream at the given * field expression for every window. A field expression is either the name of a public field or * a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot * can be used to drill down into objects, as in {@code "field1.getInnerField2()"}. * * @param field * The field expression based on which the aggregation will be applied. * @return The transformed DataStream. */public SingleOutputStreamOperator<T> max(String field) { return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationType.MAX, false, input.getExecutionConfig())); }
3.26
flink_WindowedStream_sum_rdh
/** * Applies an aggregation that sums every window of the pojo data stream at the given field for * every window. * * <p>A field expression is either the name of a public field or a getter method with * parentheses of the stream's underlying type. A dot can be used to drill down into objects, as * in {@code "field1.getInnerField2()"}. * * @param field * The field to sum * @return The transformed DataStream. */ public SingleOutputStreamOperator<T> sum(String field) { return aggregate(new SumAggregator<>(field, input.getType(), input.getExecutionConfig())); }
3.26
flink_WindowedStream_getAllowedLateness_rdh
// -------------------- Testing Methods -------------------- @VisibleForTesting long getAllowedLateness() { return builder.getAllowedLateness(); }
3.26
flink_TopSpeedWindowing_main_rdh
/** * An example of grouped stream windowing where different eviction and trigger policies can be used. * A source fetches events from cars containing their id, their current speed (kmh), overall elapsed * distance (m) and a timestamp. The streaming example triggers the top speed of each car every x * meters elapsed for the last y seconds. */ public class TopSpeedWindowing { // ************************************************************************* // PROGRAM // ************************************************************************* public static void main(String[] args) throws Exception { final CLI params = CLI.fromArgs(args); // Create the execution environment. This is the main entrypoint // to building a Flink application. final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // Apache Flink’s unified approach to stream and batch processing means that a DataStream // application executed over bounded input will produce the same final results regardless // of the configured execution mode. It is important to note what final means here: a job // executing in STREAMING mode might produce incremental updates (think upserts in // a database) while a BATCH job would only produce one final result at the end. The final // result will be the same if interpreted correctly, but getting there can be different. // // The “classic” execution behavior of the DataStream API is called STREAMING execution // mode. Applications should use streaming execution for unbounded jobs that require // continuous incremental processing and are expected to stay online indefinitely. // // By enabling BATCH execution, we allow Flink to apply additional optimizations that we // can only do when we know that our input is bounded. For example, different // join/aggregation strategies can be used, in addition to a different shuffle // implementation that allows more efficient task scheduling and failure recovery behavior. // // By setting the runtime mode to AUTOMATIC, Flink will choose BATCH if all sources // are bounded and otherwise STREAMING. env.setRuntimeMode(params.getExecutionMode()); // This optional step makes the input parameters // available in the Flink UI. env.getConfig().setGlobalJobParameters(params); SingleOutputStreamOperator<Tuple4<Integer, Integer, Double, Long>> carData; if (params.getInputs().isPresent()) { // Create a new file source that will read files from a given set of directories. // Each file will be processed as plain text and split based on newlines. FileSource.FileSourceBuilder<String> builder = FileSource.forRecordStreamFormat(new TextLineInputFormat(), params.getInputs().get()); // If a discovery interval is provided, the source will // continuously watch the given directories for new files. 
params.getDiscoveryInterval().ifPresent(builder::monitorContinuously); carData = env.fromSource(builder.build(), WatermarkStrategy.noWatermarks(), "file-input").map(new ParseCarData()).name("parse-input"); } else { CarGeneratorFunction carGenerator = new CarGeneratorFunction(2); DataGeneratorSource<Tuple4<Integer, Integer, Double, Long>> carGeneratorSource = new DataGeneratorSource<>(carGenerator, Long.MAX_VALUE, parallelismIgnored -> new GuavaRateLimiter(10), TypeInformation.of(new TypeHint<Tuple4<Integer, Integer, Double, Long>>() {})); carData = env.fromSource(carGeneratorSource, WatermarkStrategy.noWatermarks(), "Car data generator source"); carData.setParallelism(1); } int evictionSec = 10; double triggerMeters = 50; DataStream<Tuple4<Integer, Integer, Double, Long>> topSpeeds = carData.assignTimestampsAndWatermarks(WatermarkStrategy.<Tuple4<Integer, Integer, Double, Long>>forMonotonousTimestamps().withTimestampAssigner((car, ts) -> car.f3)).keyBy(value -> value.f0).window(GlobalWindows.create()).evictor(TimeEvictor.of(Time.of(evictionSec, TimeUnit.SECONDS))).trigger(DeltaTrigger.of(triggerMeters, new DeltaFunction<Tuple4<Integer, Integer, Double, Long>>() { private static final long serialVersionUID = 1L; @Override public double getDelta(Tuple4<Integer, Integer, Double, Long> oldDataPoint, Tuple4<Integer, Integer, Double, Long> newDataPoint) { return newDataPoint.f2 - oldDataPoint.f2; } }, carData.getType().createSerializer(env.getConfig()))).maxBy(1); if (params.getOutput().isPresent()) {// Given an output directory, Flink will write the results to a file // using a simple string encoding. In a production environment, this might // be something more structured like CSV, Avro, JSON, or Parquet. topSpeeds.sinkTo(FileSink.<Tuple4<Integer, Integer, Double, Long>>forRowFormat(params.getOutput().get(), new SimpleStringEncoder<>()).withRollingPolicy(DefaultRollingPolicy.builder().withMaxPartSize(MemorySize.ofMebiBytes(1)).withRolloverInterval(Duration.ofSeconds(10)).build()).build()).name("file-sink"); } else { topSpeeds.print(); } env.execute("CarTopSpeedWindowingExample"); }
3.26
flink_RetryPredicates_createExceptionTypePredicate_rdh
/** * Creates a predicate on given exception type. * * @param exceptionClass * @return predicate on exception type. */ public static ExceptionTypePredicate createExceptionTypePredicate(@Nonnull Class<? extends Throwable> exceptionClass) { return new ExceptionTypePredicate(exceptionClass); }
3.26
flink_RemoteStreamEnvironment_getHost_rdh
/** * Gets the hostname of the master (JobManager), where the program will be executed. * * @return The hostname of the master */ public String getHost() {return configuration.getString(JobManagerOptions.ADDRESS); }
3.26
flink_RemoteStreamEnvironment_getClientConfiguration_rdh
/** * * @deprecated This method is going to be removed in the next releases. */ @Deprecatedpublic Configuration getClientConfiguration() { return configuration; }
3.26
flink_RemoteStreamEnvironment_getPort_rdh
/** * Gets the port of the master (JobManager), where the program will be executed. * * @return The port of the master */ public int getPort() {return configuration.getInteger(JobManagerOptions.PORT); }
3.26
flink_TupleTypeInfo_getBasicTupleTypeInfo_rdh
// -------------------------------------------------------------------------------------------- @PublicEvolving public static <X extends Tuple> TupleTypeInfo<X> getBasicTupleTypeInfo(Class<?>... basicTypes) { if ((basicTypes == null) || (basicTypes.length == 0)) { throw new IllegalArgumentException(); } TypeInformation<?>[] infos = new TypeInformation<?>[basicTypes.length]; for (int i = 0; i < infos.length; i++) { Class<?> type = basicTypes[i];if (type == null) { throw new IllegalArgumentException(("Type at position " + i) + " is null."); } TypeInformation<?> info = BasicTypeInfo.getInfoFor(type); if (info == null) { throw new IllegalArgumentException(("Type at position " + i) + " is not a basic type."); } infos[i] = info; } @SuppressWarnings("unchecked") TupleTypeInfo<X> tupleInfo = ((TupleTypeInfo<X>) (new TupleTypeInfo<Tuple>(infos))); return tupleInfo; }
3.26
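A minimal sketch of the factory above; the field classes are arbitrary basic types.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.TupleTypeInfo;

// Builds TupleTypeInfo<Tuple2<String, Integer>> from basic field classes; a non-basic class
// (e.g. a POJO) at any position would trigger the IllegalArgumentException shown above.
TupleTypeInfo<Tuple2<String, Integer>> info =
        TupleTypeInfo.getBasicTupleTypeInfo(String.class, Integer.class);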
flink_TupleTypeInfo_equals_rdh
// -------------------------------------------------------------------------------------------- @Override public boolean equals(Object obj) { if (obj instanceof TupleTypeInfo) {@SuppressWarnings("unchecked") TupleTypeInfo<T> other = ((TupleTypeInfo<T>) (obj)); return (other.canEqual(this) && super.equals(other)) && Arrays.equals(fieldNames, other.fieldNames); } else { return false; } }
3.26
flink_CompressedSerializedValue_toString_rdh
// -------------------------------------------------------------------------------------------- @Override public String toString() { return String.format("Compressed Serialized Value [byte array length: %d]", getSize()); }
3.26
flink_CompressedSerializedValue_fromObject_rdh
/** * Constructs a compressed serialized value for the given object. * * @param object * the object to serialize and compress * @throws NullPointerException * if object is null * @throws IOException * exception during serialization and compression */ public static <T> CompressedSerializedValue<T> fromObject(T object) throws IOException { Preconditions.checkNotNull(object, "Value must not be null"); return new CompressedSerializedValue<>(object);}
3.26
flink_CompressedSerializedValue_deserializeValue_rdh
/** * Decompress and deserialize the data to get the original object. * * @param loader * the classloader to deserialize * @return the deserialized object * @throws IOException * exception during decompression and deserialization * @throws ClassNotFoundException * if class is not found in the classloader */ @Override public T deserializeValue(ClassLoader loader) throws IOException, ClassNotFoundException { Preconditions.checkNotNull(loader, "No classloader has been passed"); return InstantiationUtil.decompressAndDeserializeObject(getByteArray(), loader); }
3.26
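A small round-trip sketch combining fromObject, getSize, and deserializeValue from the snippets above; the payload and the choice of classloader are illustrative, and the checked IOException/ClassNotFoundException still need to be handled by the caller.

import org.apache.flink.util.CompressedSerializedValue;

import java.util.Collections;
import java.util.Map;

Map<String, Integer> payload = Collections.singletonMap("parallelism", 4);

// Serialize and compress the payload, e.g. before shipping it through an RPC message.
CompressedSerializedValue<Map<String, Integer>> value = CompressedSerializedValue.fromObject(payload);
int compressedSize = value.getSize();   // length of the compressed byte[]

// Decompress and deserialize on the receiving side with an appropriate classloader.
Map<String, Integer> restored = value.deserializeValue(Thread.currentThread().getContextClassLoader());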
flink_CompressedSerializedValue_getSize_rdh
/** * Returns the size of the compressed serialized data. */ public int getSize() { return getByteArray().length; }
3.26
flink_CompressedSerializedValue_fromBytes_rdh
/** * Construct a compressed serialized value with a serialized byte array. * * <p>The byte array must be the result of serialization and compression with {@link InstantiationUtil#serializeObjectAndCompress}. * * @param compressedSerializedData * the compressed serialized byte array * @param <T> * type of the object * @return {@link CompressedSerializedValue} that can be deserialized as the object */ public static <T> CompressedSerializedValue<T> fromBytes(byte[] compressedSerializedData) { return new CompressedSerializedValue<>(compressedSerializedData); }
3.26
flink_InternalSerializers_create_rdh
/** * Creates a {@link TypeSerializer} for internal data structures of the given {@link RowType}. */public static <T> RowDataSerializer create(RowType type) { return ((RowDataSerializer) (createInternal(type))); }
3.26
flink_StandardSinkTopologies_addGlobalCommitter_rdh
/** * Adds a global committer to the pipeline that runs as final operator with a parallelism of * one. */ public static <CommT> void addGlobalCommitter(DataStream<CommittableMessage<CommT>> committables, SerializableSupplier<Committer<CommT>> committerFactory, SerializableSupplier<SimpleVersionedSerializer<CommT>> committableSerializer) { final PhysicalTransformation<Void> transformation = ((PhysicalTransformation<Void>) (committables.global().transform(GLOBAL_COMMITTER_TRANSFORMATION_NAME, Types.VOID, new GlobalCommitterOperator<>(committerFactory, committableSerializer)).getTransformation())); transformation.setChainingStrategy(ChainingStrategy.ALWAYS); transformation.setName(GLOBAL_COMMITTER_TRANSFORMATION_NAME); transformation.setParallelism(1); transformation.setMaxParallelism(1); }
3.26
flink_InternalOperatorMetricGroup_putVariables_rdh
// ------------------------------------------------------------------------ // Component Metric Group Specifics // ------------------------------------------------------------------------ @Override protected void putVariables(Map<String, String> variables) { variables.put(ScopeFormat.SCOPE_OPERATOR_ID, String.valueOf(operatorID)); variables.put(ScopeFormat.SCOPE_OPERATOR_NAME, operatorName); // we don't enter the subtask_index as the task group does that already }
3.26
flink_InternalOperatorMetricGroup_getTaskIOMetricGroup_rdh
// ------------------------------------------------------------------------ public final TaskIOMetricGroup getTaskIOMetricGroup() { return parent.getIOMetricGroup(); }
3.26
flink_JobManagerRunnerResult_getInitializationFailure_rdh
/** * This method returns the initialization failure. * * @return the initialization failure * @throws IllegalStateException * if the result is not an initialization failure */ public Throwable getInitializationFailure() { Preconditions.checkState(isInitializationFailure()); return failure; }
3.26
flink_BlockerSync_releaseBlocker_rdh
/** * Lets the blocked thread continue. */ public void releaseBlocker() { synchronized(lock) { blockerReleased = true;lock.notifyAll(); } }
3.26
flink_BlockerSync_blockNonInterruptible_rdh
/** * Blocks until {@link #releaseBlocker()} is called. Notifies the awaiting thread that waits in * the method {@link #awaitBlocker()}. */ public void blockNonInterruptible() { synchronized(lock) { blockerReady = true; lock.notifyAll(); while (!blockerReleased) { try { lock.wait(); } catch (InterruptedException ignored) { } } } }
3.26
flink_BlockerSync_block_rdh
/** * Blocks until {@link #releaseBlocker()} is called or this thread is interrupted. Notifies the * awaiting thread that waits in the method {@link #awaitBlocker()}. */ public void block() throws InterruptedException { synchronized(lock) { blockerReady = true; lock.notifyAll(); while (!blockerReleased) { lock.wait(); } } }
3.26
flink_BlockerSync_awaitBlocker_rdh
/** * Waits until the blocking thread has entered the method {@link #block()} or {@link #blockNonInterruptible()}. */ public void awaitBlocker() throws InterruptedException { synchronized(lock) { while (!blockerReady) { lock.wait(); } } }
3.26
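A sketch of the handshake the methods above implement, in the way Flink tests typically use it: a worker thread parks in block(), the test thread waits for it via awaitBlocker(), inspects intermediate state, then calls releaseBlocker(). The import path and the surrounding test method (which would declare throws Exception) are assumptions.

import org.apache.flink.core.testutils.BlockerSync;

final BlockerSync sync = new BlockerSync();

Thread worker = new Thread(() -> {
    try {
        // ... do some work, then park until released
        sync.block();
        // ... continue after releaseBlocker()
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
});
worker.start();

sync.awaitBlocker();    // returns once the worker has reached block()
// ... assert on the intermediate state here
sync.releaseBlocker();  // lets the worker continue
worker.join();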
flink_SkipListKeySerializer_serialize_rdh
/** * Serialize the key and namespace to bytes. The format is - int: length of serialized namespace * - byte[]: serialized namespace - int: length of serialized key - byte[]: serialized key */ byte[] serialize(K key, N namespace) { // we know that the segment contains a byte[], because it is created // in the method below by wrapping a byte[] return serializeToSegment(key, namespace).getArray(); }
3.26
flink_SkipListKeySerializer_serializeToSegment_rdh
/** * Serialize the key and namespace to bytes. The format is - int: length of serialized namespace * - byte[]: serialized namespace - int: length of serialized key - byte[]: serialized key */ MemorySegment serializeToSegment(K key, N namespace) { outputStream.reset(); try { // serialize namespace outputStream.setPosition(Integer.BYTES); namespaceSerializer.serialize(namespace, outputView); } catch (IOException e) { throw new RuntimeException("Failed to serialize namespace", e); } int keyStartPos = outputStream.getPosition(); try { // serialize key outputStream.setPosition(keyStartPos + Integer.BYTES); keySerializer.serialize(key, outputView); } catch (IOException e) { throw new RuntimeException("Failed to serialize key", e); } final byte[] result = outputStream.toByteArray(); final MemorySegment segment = MemorySegmentFactory.wrap(result); // set length of namespace and key segment.putInt(0, keyStartPos - Integer.BYTES); segment.putInt(keyStartPos, (result.length - keyStartPos) - Integer.BYTES); return segment; }
3.26
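For illustration only, a sketch that walks the layout documented above ([int namespace length][namespace bytes][int key length][key bytes]) over the byte[] returned by serialize(). Plain NIO stands in for MemorySegment here; using the native byte order is an assumption based on MemorySegment writing ints in the platform's native order.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

static void describeSkipListKeyLayout(byte[] skipListKey) {
    ByteBuffer buf = ByteBuffer.wrap(skipListKey).order(ByteOrder.nativeOrder());
    int namespaceLen = buf.getInt();                 // length prefix at offset 0
    buf.position(Integer.BYTES + namespaceLen);      // skip over the serialized namespace
    int keyLen = buf.getInt();                       // second length prefix
    System.out.printf("namespace: %d bytes, key: %d bytes, total: %d bytes%n",
            namespaceLen, keyLen, skipListKey.length);
}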
flink_SkipListKeySerializer_serializeNamespace_rdh
/** * Serialize the namespace to bytes. */ byte[] serializeNamespace(N namespace) { outputStream.reset(); try { namespaceSerializer.serialize(namespace, outputView); } catch (IOException e) {throw new RuntimeException("serialize namespace failed", e); } return outputStream.toByteArray(); }
3.26
flink_SkipListKeySerializer_deserializeNamespace_rdh
/** * Deserialize the namespace from the byte buffer which stores skip list key. * * @param memorySegment * the memory segment which stores the skip list key. * @param offset * the start position of the skip list key in the byte buffer. * @param len * length of the skip list key. */ N deserializeNamespace(MemorySegment memorySegment, int offset, int len) { MemorySegmentInputStreamWithPos inputStream = new MemorySegmentInputStreamWithPos(memorySegment, offset, len); DataInputViewStreamWrapper inputView = new DataInputViewStreamWrapper(inputStream); inputStream.setPosition(offset + Integer.BYTES); try { return namespaceSerializer.deserialize(inputView); } catch (IOException e) { throw new RuntimeException("deserialize namespace failed", e); } }
3.26
flink_SkipListKeySerializer_deserializeKey_rdh
/** * Deserialize the partition key from the byte buffer which stores skip list key. * * @param memorySegment * the memory segment which stores the skip list key. * @param offset * the start position of the skip list key in the byte buffer. * @param len * length of the skip list key. */ K deserializeKey(MemorySegment memorySegment, int offset, int len) { MemorySegmentInputStreamWithPos v5 = new MemorySegmentInputStreamWithPos(memorySegment, offset, len); DataInputViewStreamWrapper inputView = new DataInputViewStreamWrapper(v5); int namespaceLen = memorySegment.getInt(offset); v5.setPosition(((offset + Integer.BYTES) + namespaceLen) + Integer.BYTES); try { return keySerializer.deserialize(inputView); } catch (IOException e) { throw new RuntimeException("deserialize key failed", e); } }
3.26
flink_SkipListKeySerializer_getSerializedKeyAndNamespace_rdh
/** * Gets serialized key and namespace from the byte buffer. * * @param memorySegment * the memory segment which stores the skip list key. * @param offset * the start position of the skip list key in the byte buffer. * @return tuple of serialized key and namespace. */ Tuple2<byte[], byte[]> getSerializedKeyAndNamespace(MemorySegment memorySegment, int offset) { // read namespace int namespaceLen = memorySegment.getInt(offset); MemorySegment namespaceSegment = MemorySegmentFactory.allocateUnpooledSegment(namespaceLen); memorySegment.copyTo(offset + Integer.BYTES, namespaceSegment, 0, namespaceLen); // read key int keyOffset = (offset + Integer.BYTES) + namespaceLen; int keyLen = memorySegment.getInt(keyOffset); MemorySegment keySegment = MemorySegmentFactory.allocateUnpooledSegment(keyLen); memorySegment.copyTo(keyOffset + Integer.BYTES, keySegment, 0, keyLen); return Tuple2.of(keySegment.getArray(), namespaceSegment.getArray()); }
3.26
flink_WritableSavepoint_write_rdh
/** * Write out a new or updated savepoint. * * @param path * The path to where the savepoint should be written. */ public final void write(String path) { final Path savepointPath = new Path(path); List<BootstrapTransformationWithID<?>> newOperatorTransformations = metadata.getNewOperators(); DataSet<OperatorState> newOperatorStates = writeOperatorStates(newOperatorTransformations, configuration, savepointPath); List<OperatorState> existingOperators = metadata.getExistingOperators(); DataSet<OperatorState> finalOperatorStates; if (existingOperators.isEmpty()) { finalOperatorStates = newOperatorStates; } else { DataSet<OperatorState> existingOperatorStates = newOperatorStates.getExecutionEnvironment().fromCollection(existingOperators).name("existingOperatorStates"); existingOperatorStates.flatMap(new StatePathExtractor()).setParallelism(1).output(new FileCopyFunction(path)); finalOperatorStates = newOperatorStates.union(existingOperatorStates); } finalOperatorStates.reduceGroup(new MergeOperatorStates(metadata.getMasterStates())).name("reduce(OperatorState)").output(new SavepointOutputFormat(savepointPath)).name(path); }
3.26
flink_WritableSavepoint_removeOperator_rdh
/** * Drop an existing operator from the savepoint. * * @param uid * The uid of the operator. * @return A modified savepoint. */ @SuppressWarnings("unchecked") public F removeOperator(String uid) { metadata.removeOperator(uid); return ((F) (this)); }
3.26
flink_WritableSavepoint_withConfiguration_rdh
/** * Sets a configuration that will be applied to the stream operators used to bootstrap a new * savepoint. * * @param option * metadata information * @param value * value to be stored * @param <T> * type of the value to be stored * @return The modified savepoint. */ @SuppressWarnings("unchecked")public <T> F withConfiguration(ConfigOption<T> option, T value) { configuration.set(option, value); return ((F) (this));}
3.26
flink_WritableSavepoint_withOperator_rdh
/** * Adds a new operator to the savepoint. * * @param uid * The uid of the operator. * @param transformation * The operator to be included. * @return The modified savepoint. */ @SuppressWarnings("unchecked") public <T> F withOperator(String uid, BootstrapTransformation<T> transformation) { metadata.addOperator(uid, transformation); return ((F) (this)); }
3.26
flink_TaskExecutorResourceUtils_generateDefaultSlotResourceProfile_rdh
/** * This must be consist with {@link org.apache.flink.runtime.resourcemanager.slotmanager.SlotManagerUtils#generateDefaultSlotResourceProfile}. */ @VisibleForTesting public static ResourceProfile generateDefaultSlotResourceProfile(TaskExecutorResourceSpec taskExecutorResourceSpec, int numberOfSlots) { final ResourceProfile.Builder resourceProfileBuilder = ResourceProfile.newBuilder().setCpuCores(taskExecutorResourceSpec.getCpuCores().divide(numberOfSlots)).setTaskHeapMemory(taskExecutorResourceSpec.getTaskHeapSize().divide(numberOfSlots)).setTaskOffHeapMemory(taskExecutorResourceSpec.getTaskOffHeapSize().divide(numberOfSlots)).setManagedMemory(taskExecutorResourceSpec.getManagedMemorySize().divide(numberOfSlots)).setNetworkMemory(taskExecutorResourceSpec.getNetworkMemSize().divide(numberOfSlots)); taskExecutorResourceSpec.getExtendedResources().forEach((name, resource) -> resourceProfileBuilder.setExtendedResource(resource.divide(numberOfSlots))); return resourceProfileBuilder.build(); }
3.26
flink_LookupFunction_eval_rdh
/** * Invoke {@link #lookup} and handle exceptions. */ public final void eval(Object... keys) { GenericRowData keyRow = GenericRowData.of(keys); try { Collection<RowData> lookup = lookup(keyRow); if (lookup == null) { return; } lookup.forEach(this::collect); } catch (IOException e) { throw new RuntimeException(String.format("Failed to lookup values with given key row '%s'", keyRow), e); } }
3.26
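A minimal sketch of a LookupFunction subclass whose lookup() the eval() above drives. The in-memory map stands in for a real connector, and the schema (one INT key column mapping to a single row of id and name) is assumed.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.functions.LookupFunction;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;

public class MapBackedLookupFunction extends LookupFunction {

    private final Map<Integer, String> dimTable;

    public MapBackedLookupFunction(Map<Integer, String> dimTable) {
        this.dimTable = dimTable;
    }

    @Override
    public Collection<RowData> lookup(RowData keyRow) throws IOException {
        int id = keyRow.getInt(0);
        String name = dimTable.get(id);
        if (name == null) {
            return Collections.emptyList();   // no match: eval() simply emits nothing
        }
        return Collections.singletonList(GenericRowData.of(id, StringData.fromString(name)));
    }
}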
flink_CliFrontendParser_getRunOptionsWithoutDeprecatedOptions_rdh
// -------------------------------------------------------------------------------------------- // Help // -------------------------------------------------------------------------------------------- private static Options getRunOptionsWithoutDeprecatedOptions(Options options) { return getProgramSpecificOptionsWithoutDeprecatedOptions(options).addOption(SAVEPOINT_PATH_OPTION).addOption(SAVEPOINT_ALLOW_NON_RESTORED_OPTION).addOption(f1); }
3.26
flink_CliFrontendParser_printCustomCliOptions_rdh
/** * Prints custom cli options. * * @param formatter * The formatter to use for printing * @param runOptions * True if the run options should be printed, False to print only general * options */ private static void printCustomCliOptions(Collection<CustomCommandLine> customCommandLines, HelpFormatter formatter, boolean runOptions) { // prints options from all available command-line classes for (CustomCommandLine cli : customCommandLines) { formatter.setSyntaxPrefix((" Options for " + cli.getId()) + " mode:"); Options customOpts = new Options();cli.addGeneralOptions(customOpts); if (runOptions) { cli.addRunOptions(customOpts);} formatter.printHelp(" ", customOpts); System.out.println();} }
3.26
flink_CliFrontendParser_mergeOptions_rdh
/** * Merges the given {@link Options} into a new Options object. * * @param optionsA * options to merge, can be null if none * @param optionsB * options to merge, can be null if none * @return */ public static Options mergeOptions(@Nullable Options optionsA, @Nullable Options optionsB) { final Options resultOptions = new Options(); if (optionsA != null) { for (Option option : optionsA.getOptions()) { resultOptions.addOption(option); } } if (optionsB != null) { for (Option option : optionsB.getOptions()) { resultOptions.addOption(option); }} return resultOptions; }
3.26
flink_CliFrontendParser_parse_rdh
// -------------------------------------------------------------------------------------------- public static CommandLine parse(Options options, String[] args, boolean stopAtNonOptions) throws CliArgsException {final DefaultParser parser = new DefaultParser(); try { return parser.parse(options, args, stopAtNonOptions); } catch (ParseException e) { throw new CliArgsException(e.getMessage()); } }
3.26
flink_CliFrontendParser_printHelp_rdh
/** * Prints the help for the client. */ public static void printHelp(Collection<CustomCommandLine> customCommandLines) { System.out.println("./flink <ACTION> [OPTIONS] [ARGUMENTS]"); System.out.println(); System.out.println("The following actions are available:"); printHelpForRun(customCommandLines); printHelpForRunApplication(customCommandLines);printHelpForInfo(); printHelpForList(customCommandLines); printHelpForStop(customCommandLines); printHelpForCancel(customCommandLines); printHelpForSavepoint(customCommandLines); System.out.println(); }
3.26
flink_OneInputTransformation_getStateKeySelector_rdh
/** * Returns the {@code KeySelector} that must be used for partitioning keyed state in this * Operation. * * @see #setStateKeySelector */ public KeySelector<IN, ?> getStateKeySelector() { return stateKeySelector; }
3.26
flink_OneInputTransformation_setStateKeySelector_rdh
/** * Sets the {@link KeySelector} that must be used for partitioning keyed state of this * operation. * * @param stateKeySelector * The {@code KeySelector} to set */ public void setStateKeySelector(KeySelector<IN, ?> stateKeySelector) { this.stateKeySelector = stateKeySelector; updateManagedMemoryStateBackendUseCase(stateKeySelector != null); }
3.26
flink_OneInputTransformation_getOperatorFactory_rdh
/** * Returns the {@code StreamOperatorFactory} of this Transformation. */ public StreamOperatorFactory<OUT> getOperatorFactory() { return operatorFactory; }
3.26
flink_OneInputTransformation_getInputType_rdh
/** * Returns the {@code TypeInformation} for the elements of the input. */ public TypeInformation<IN> getInputType() { return input.getOutputType(); }
3.26
flink_EventSerializer_toBuffer_rdh
// ------------------------------------------------------------------------ // Buffer helpers // ------------------------------------------------------------------------ public static Buffer toBuffer(AbstractEvent event, boolean hasPriority) throws IOException { final ByteBuffer v34 = EventSerializer.toSerializedEvent(event); MemorySegment data = MemorySegmentFactory.wrap(v34.array()); final Buffer buffer = new NetworkBuffer(data, FreeingBufferRecycler.INSTANCE, getDataType(event, hasPriority)); buffer.setSize(v34.remaining()); return buffer; }
3.26
flink_EventSerializer_toSerializedEvent_rdh
// ------------------------------------------------------------------------ // Serialization Logic // ------------------------------------------------------------------------ public static ByteBuffer toSerializedEvent(AbstractEvent event) throws IOException { final Class<?> eventClass = event.getClass(); if (eventClass == EndOfPartitionEvent.class) { return ByteBuffer.wrap(new byte[]{ 0, 0, 0, END_OF_PARTITION_EVENT }); } else if (eventClass == CheckpointBarrier.class) { return serializeCheckpointBarrier(((CheckpointBarrier) (event))); } else if (eventClass == EndOfSuperstepEvent.class) { return ByteBuffer.wrap(new byte[]{ 0, 0, 0, END_OF_SUPERSTEP_EVENT }); } else if (eventClass == EndOfChannelStateEvent.class) { return ByteBuffer.wrap(new byte[]{ 0, 0, 0, END_OF_CHANNEL_STATE_EVENT }); } else if (eventClass == EndOfData.class) { return ByteBuffer.wrap(new byte[]{ 0, 0, 0, END_OF_USER_RECORDS_EVENT, ((byte) (((EndOfData) (event)).getStopMode().ordinal())) }); } else if (eventClass == CancelCheckpointMarker.class) { CancelCheckpointMarker marker = ((CancelCheckpointMarker) (event)); ByteBuffer buf = ByteBuffer.allocate(12); buf.putInt(0, CANCEL_CHECKPOINT_MARKER_EVENT); buf.putLong(4, marker.getCheckpointId()); return buf; } else if (eventClass == EventAnnouncement.class) { EventAnnouncement announcement = ((EventAnnouncement) (event)); ByteBuffer serializedAnnouncedEvent = toSerializedEvent(announcement.getAnnouncedEvent()); ByteBuffer serializedAnnouncement = ByteBuffer.allocate((2 * Integer.BYTES) + serializedAnnouncedEvent.capacity()); serializedAnnouncement.putInt(0, ANNOUNCEMENT_EVENT); serializedAnnouncement.putInt(4, announcement.getSequenceNumber()); serializedAnnouncement.position(8); serializedAnnouncement.put(serializedAnnouncedEvent); serializedAnnouncement.flip(); return serializedAnnouncement; } else if (eventClass == SubtaskConnectionDescriptor.class) { SubtaskConnectionDescriptor v6 = ((SubtaskConnectionDescriptor) (event)); ByteBuffer buf = ByteBuffer.allocate(12); buf.putInt(VIRTUAL_CHANNEL_SELECTOR_EVENT); buf.putInt(v6.getInputSubtaskIndex()); buf.putInt(v6.getOutputSubtaskIndex()); buf.flip(); return buf; } else if (eventClass == EndOfSegmentEvent.class) { return ByteBuffer.wrap(new byte[]{ 0, 0, 0, END_OF_SEGMENT }); } else { try { final DataOutputSerializer serializer = new DataOutputSerializer(128); serializer.writeInt(OTHER_EVENT); serializer.writeUTF(event.getClass().getName()); event.write(serializer); return serializer.wrapAsByteBuffer(); } catch (IOException e) { throw new IOException("Error while serializing event.", e); } } }
3.26
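A short sketch using the two helpers above on the real EndOfPartitionEvent singleton; both calls declare IOException, and the buffer handling around them is omitted.

import org.apache.flink.runtime.io.network.api.EndOfPartitionEvent;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;
import org.apache.flink.runtime.io.network.buffer.Buffer;

import java.nio.ByteBuffer;

// Known event classes are encoded with a compact fixed header (see the branches above);
// unknown subclasses fall through to the OTHER_EVENT path, which also writes the class name.
ByteBuffer serialized = EventSerializer.toSerializedEvent(EndOfPartitionEvent.INSTANCE);

// Wrap the same event into a network Buffer, marking it as non-priority.
Buffer buffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false);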
flink_KeyGroupRange_of_rdh
/** * Factory method that also handles creation of empty key-groups. * * @param startKeyGroup * start of the range (inclusive) * @param endKeyGroup * end of the range (inclusive) * @return the key-group from start to end or an empty key-group range. */ public static KeyGroupRange of(int startKeyGroup, int endKeyGroup) { return startKeyGroup <= endKeyGroup ? new KeyGroupRange(startKeyGroup, endKeyGroup) : EMPTY_KEY_GROUP_RANGE; }
3.26
flink_KeyGroupRange_getStartKeyGroup_rdh
/** * * @return The first key-group in the range. */ public int getStartKeyGroup() { return startKeyGroup; }
3.26
flink_KeyGroupRange_contains_rdh
/** * Checks whether or not a single key-group is contained in the range. * * @param keyGroup * Key-group to check for inclusion. * @return True, only if the key-group is in the range. */ @Override public boolean contains(int keyGroup) { return (keyGroup >= startKeyGroup) && (keyGroup <= endKeyGroup); }
3.26
flink_KeyGroupRange_getIntersection_rdh
/** * Create a range that represent the intersection between this range and the given range. * * @param other * A KeyGroupRange to intersect. * @return Key-group range that is the intersection between this and the given key-group range. */public KeyGroupRange getIntersection(KeyGroupRange other) { int start = Math.max(startKeyGroup, other.startKeyGroup); int end = Math.min(endKeyGroup, other.endKeyGroup); return start <= end ? new KeyGroupRange(start, end) : EMPTY_KEY_GROUP_RANGE; }
3.26
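A small worked sketch combining of(), getIntersection(), contains(), and getNumberOfKeyGroups() from the snippets above; the concrete ranges are arbitrary.

import org.apache.flink.runtime.state.KeyGroupRange;

// Two ranges covering key-groups 0..63 and 32..127 overlap in 32..63.
KeyGroupRange a = KeyGroupRange.of(0, 63);
KeyGroupRange b = KeyGroupRange.of(32, 127);

KeyGroupRange overlap = a.getIntersection(b);   // key-groups 32..63
int size = overlap.getNumberOfKeyGroups();      // 32
boolean hit = overlap.contains(40);             // true

// A reversed range collapses to the empty range instead of failing.
KeyGroupRange empty = KeyGroupRange.of(10, 5);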
flink_KeyGroupRange_getNumberOfKeyGroups_rdh
/** * * @return The number of key-groups in the range */ @Override public int getNumberOfKeyGroups() { return (1 + endKeyGroup) - startKeyGroup; }
3.26
flink_KeyGroupRange_getEndKeyGroup_rdh
/** * * @return The last key-group in the range. */ public int getEndKeyGroup() { return endKeyGroup; }
3.26
flink_OutputFileConfig_getPartPrefix_rdh
/** * The prefix for the part name. */ public String getPartPrefix() {return partPrefix; }
3.26
flink_OutputFileConfig_getPartSuffix_rdh
/** * The suffix for the part name. */ public String getPartSuffix() { return partSuffix; }
3.26
flink_DefaultJobLeaderIdService_m0_rdh
/** * Checks whether the service has been started. * * @return True if the service has been started; otherwise false */ public boolean m0() { return jobLeaderIdActions != null; }
3.26
flink_HsResultPartition_setupInternal_rdh
// Called by task thread. @Override protected void setupInternal() throws IOException { if (isReleased()) { throw new IOException("Result partition has been released."); } this.fileDataManager.setup(); this.memoryDataManager = new HsMemoryDataManager(isBroadcastOnly ? 1 : numSubpartitions, networkBufferSize, bufferPool, getSpillingStrategy(hybridShuffleConfiguration), dataIndex, dataFilePath, bufferCompressor, hybridShuffleConfiguration.getBufferPoolSizeCheckIntervalMs()); }
3.26
flink_FlinkAggregateExpandDistinctAggregatesRule_convertSingletonDistinct_rdh
/**
 * Converts an aggregate with one distinct aggregate and one or more non-distinct aggregates to
 * multi-phase aggregates (see reference example below).
 *
 * @param relBuilder Contains the input relational expression
 * @param aggregate Original aggregate
 * @param argLists Arguments and filters to the distinct aggregate function
 */
private RelBuilder convertSingletonDistinct(
        RelBuilder relBuilder, Aggregate aggregate, Set<Pair<List<Integer>, Integer>> argLists) {
    // In this case, we are assuming that there is a single distinct function.
    // So make sure that argLists is of size one.
    Preconditions.checkArgument(argLists.size() == 1);

    // For example,
    //   SELECT deptno, COUNT(*), SUM(bonus), MIN(DISTINCT sal)
    //   FROM emp
    //   GROUP BY deptno
    //
    // becomes
    //
    //   SELECT deptno, SUM(cnt), SUM(bonus), MIN(sal)
    //   FROM (
    //     SELECT deptno, COUNT(*) as cnt, SUM(bonus), sal
    //     FROM EMP
    //     GROUP BY deptno, sal)  -- Aggregate B
    //   GROUP BY deptno          -- Aggregate A
    relBuilder.push(aggregate.getInput());

    final List<AggregateCall> originalAggCalls = aggregate.getAggCallList();
    final ImmutableBitSet originalGroupSet = aggregate.getGroupSet();

    // Add the distinct aggregate column(s) to the group-by columns,
    // if not already a part of the group-by
    final SortedSet<Integer> bottomGroupSet = new TreeSet<>();
    bottomGroupSet.addAll(aggregate.getGroupSet().asList());
    for (AggregateCall aggCall : originalAggCalls) {
        if (aggCall.isDistinct()) {
            bottomGroupSet.addAll(aggCall.getArgList());
            break; // since we only have single distinct call
        }
    }

    // Generate the intermediate aggregate B, the one on the bottom that converts
    // a distinct call to group by call.
    // Bottom aggregate is the same as the original aggregate, except that
    // the bottom aggregate has converted the DISTINCT aggregate to a group by clause.
    final List<AggregateCall> bottomAggregateCalls = new ArrayList<>();
    for (AggregateCall aggCall : originalAggCalls) {
        // Project the column corresponding to the distinct aggregate. Project
        // as-is all the non-distinct aggregates
        if (!aggCall.isDistinct()) {
            final AggregateCall newCall = AggregateCall.create(aggCall.getAggregation(), false,
                    aggCall.isApproximate(), false, aggCall.getArgList(), -1, aggCall.distinctKeys,
                    RelCollations.EMPTY, ImmutableBitSet.of(bottomGroupSet).cardinality(),
                    relBuilder.peek(), null, aggCall.name);
            bottomAggregateCalls.add(newCall);
        }
    }

    // Generate the aggregate B (see the reference example above)
    relBuilder.push(aggregate.copy(aggregate.getTraitSet(), relBuilder.build(),
            ImmutableBitSet.of(bottomGroupSet), null, bottomAggregateCalls));

    // Add aggregate A (see the reference example above), the top aggregate
    // to handle the rest of the aggregation that the bottom aggregate hasn't handled
    final List<AggregateCall> topAggregateCalls = Lists.newArrayList();
    // Use the remapped arguments for the (non)distinct aggregate calls
    int nonDistinctAggCallProcessedSoFar = 0;
    for (AggregateCall aggCall : originalAggCalls) {
        final AggregateCall newCall;
        if (aggCall.isDistinct()) {
            List<Integer> newArgList = new ArrayList<>();
            for (int arg : aggCall.getArgList()) {
                newArgList.add(bottomGroupSet.headSet(arg).size());
            }
            newCall = AggregateCall.create(aggCall.getAggregation(), false,
                    aggCall.isApproximate(), false, newArgList, -1, aggCall.distinctKeys,
                    RelCollations.EMPTY, originalGroupSet.cardinality(), relBuilder.peek(),
                    aggCall.getType(), aggCall.name);
        } else {
            // If aggregate B had a COUNT aggregate call the corresponding aggregate at
            // aggregate A must be SUM. For other aggregates, it remains the same.
            final List<Integer> newArgs =
                    Lists.newArrayList(bottomGroupSet.size() + nonDistinctAggCallProcessedSoFar);
            if (aggCall.getAggregation().getKind() == SqlKind.COUNT) {
                newCall = AggregateCall.create(new SqlSumEmptyIsZeroAggFunction(), false,
                        aggCall.isApproximate(), false, newArgs, -1, aggCall.distinctKeys,
                        RelCollations.EMPTY, originalGroupSet.cardinality(), relBuilder.peek(),
                        aggCall.getType(), aggCall.getName());
            } else {
                newCall = AggregateCall.create(aggCall.getAggregation(), false,
                        aggCall.isApproximate(), false, newArgs, -1, aggCall.distinctKeys,
                        RelCollations.EMPTY, originalGroupSet.cardinality(), relBuilder.peek(),
                        aggCall.getType(), aggCall.name);
            }
            nonDistinctAggCallProcessedSoFar++;
        }
        topAggregateCalls.add(newCall);
    }

    // Populate the group-by keys with the remapped arguments for aggregate A.
    // The top groupset is basically an identity (first X fields of aggregate B's
    // output), minus the distinct aggCall's input.
    final Set<Integer> topGroupSet = new HashSet<>();
    int groupSetToAdd = 0;
    for (int bottomGroup : bottomGroupSet) {
        if (originalGroupSet.get(bottomGroup)) {
            topGroupSet.add(groupSetToAdd);
        }
        groupSetToAdd++;
    }
    relBuilder.push(aggregate.copy(aggregate.getTraitSet(), relBuilder.build(),
            ImmutableBitSet.of(topGroupSet), null, topAggregateCalls));
    return relBuilder;
}
3.26
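The argument remapping above relies on aggregate B emitting its group keys first, in sorted order, so the new ordinal of the distinct argument is just the number of smaller group keys. A minimal, self-contained sketch of that one step; the column indices 0 and 5 are invented for the example and do not come from any real schema:

import java.util.Arrays;
import java.util.TreeSet;

// Illustrative only: mirrors bottomGroupSet.headSet(arg).size() from the method above.
public class DistinctArgRemapExample {
    public static void main(String[] args) {
        // Suppose the original GROUP BY uses column 0 (deptno) and the DISTINCT
        // aggregate argument is column 5 (sal); the bottom aggregate groups by {0, 5}.
        TreeSet<Integer> bottomGroupSet = new TreeSet<>(Arrays.asList(0, 5));

        int distinctArg = 5;
        // headSet(5) = {0}, so column 5 becomes output field 1 of aggregate B.
        int remapped = bottomGroupSet.headSet(distinctArg).size();
        System.out.println("column " + distinctArg + " -> field " + remapped); // field 1
    }
}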
flink_FlinkAggregateExpandDistinctAggregatesRule_createSelectDistinct_rdh
/**
 * Given an {@link org.apache.calcite.rel.core.Aggregate} and the ordinals of the arguments to a
 * particular call to an aggregate function, creates a 'select distinct' relational expression
 * which projects the group columns and those arguments but nothing else.
 *
 * <p>For example, given
 *
 * <blockquote>
 *
 * <pre>select f0, count(distinct f1), count(distinct f2)
 * from t group by f0</pre>
 *
 * </blockquote>
 *
 * <p>and the argument list
 *
 * <blockquote>
 *
 * {2}
 *
 * </blockquote>
 *
 * <p>returns
 *
 * <blockquote>
 *
 * <pre>select distinct f0, f2 from t</pre>
 *
 * </blockquote>
 *
 * <p>The <code>sourceOf</code> map is populated with the projection position of each input
 * column; in this case sourceOf.get(0) = 0 and sourceOf.get(2) = 1.
 *
 * @param relBuilder Relational expression builder
 * @param aggregate Aggregate relational expression
 * @param argList Ordinals of columns to make distinct
 * @param filterArg Ordinal of column to filter on, or -1
 * @param sourceOf Out parameter, is populated with a map of where each output field came from
 * @return Aggregate relational expression which projects the required columns
 */
private RelBuilder createSelectDistinct(RelBuilder relBuilder, Aggregate aggregate,
        List<Integer> argList, int filterArg, Map<Integer, Integer> sourceOf) {
    relBuilder.push(aggregate.getInput());
    final List<Pair<RexNode, String>> projects = new ArrayList<>();
    final List<RelDataTypeField> childFields = relBuilder.peek().getRowType().getFieldList();
    for (int i : aggregate.getGroupSet()) {
        sourceOf.put(i, projects.size());
        projects.add(RexInputRef.of2(i, childFields));
    }
    if (filterArg >= 0) {
        sourceOf.put(filterArg, projects.size());
        projects.add(RexInputRef.of2(filterArg, childFields));
    }
    for (Integer arg : argList) {
        if (filterArg >= 0) {
            // Implement
            //   agg(DISTINCT arg) FILTER $f
            // by generating
            //   SELECT DISTINCT ... CASE WHEN $f THEN arg ELSE NULL END AS arg
            // and then applying
            //   agg(arg)
            // as usual.
            //
            // It works except for (rare) agg functions that need to see null
            // values.
            final RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
            final RexInputRef filterRef = RexInputRef.of(filterArg, childFields);
            final Pair<RexNode, String> argRef = RexInputRef.of2(arg, childFields);
            RexNode condition = rexBuilder.makeCall(SqlStdOperatorTable.CASE, filterRef,
                    argRef.left, rexBuilder.makeNullLiteral(argRef.left.getType()));
            sourceOf.put(arg, projects.size());
            projects.add(Pair.of(condition, "i$" + argRef.right));
            continue;
        }
        if (sourceOf.get(arg) != null) {
            continue;
        }
        sourceOf.put(arg, projects.size());
        projects.add(RexInputRef.of2(arg, childFields));
    }
    relBuilder.project(Pair.left(projects), Pair.right(projects));

    // Get the distinct values of the GROUP BY fields and the arguments
    // to the agg functions.
    relBuilder.push(aggregate.copy(aggregate.getTraitSet(), relBuilder.build(),
            ImmutableBitSet.range(projects.size()), null, ImmutableList.<AggregateCall>of()));
    return relBuilder;
}
3.26
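A small standalone sketch of the sourceOf bookkeeping for the Javadoc example above (GROUP BY f0, distinct argument f2). The field ordinals are taken from that example only and are otherwise arbitrary:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: reproduces how createSelectDistinct fills the sourceOf map.
public class SourceOfExample {
    public static void main(String[] args) {
        Map<Integer, Integer> sourceOf = new LinkedHashMap<>();
        int[] groupFields = {0};
        int[] distinctArgs = {2};

        int nextProjectPos = 0;
        for (int field : groupFields) {
            sourceOf.put(field, nextProjectPos++); // group columns come first
        }
        for (int arg : distinctArgs) {
            if (!sourceOf.containsKey(arg)) {
                sourceOf.put(arg, nextProjectPos++); // then the distinct argument columns
            }
        }
        System.out.println(sourceOf); // {0=0, 2=1}: input field -> position in SELECT DISTINCT
    }
}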
flink_FlinkAggregateExpandDistinctAggregatesRule_convertMonopole_rdh
/**
 * Converts an aggregate relational expression that contains just one distinct aggregate
 * function (or perhaps several over the same arguments) and no non-distinct aggregate
 * functions.
 */
private RelBuilder convertMonopole(RelBuilder relBuilder, Aggregate aggregate,
        List<Integer> argList, int filterArg) {
    // For example,
    //   SELECT deptno, COUNT(DISTINCT sal), SUM(DISTINCT sal)
    //   FROM emp
    //   GROUP BY deptno
    //
    // becomes
    //
    //   SELECT deptno, COUNT(distinct_sal), SUM(distinct_sal)
    //   FROM (
    //     SELECT DISTINCT deptno, sal AS distinct_sal
    //     FROM EMP GROUP BY deptno)
    //   GROUP BY deptno

    // Project the columns of the GROUP BY plus the arguments
    // to the agg function.
    final Map<Integer, Integer> sourceOf = new HashMap<>();
    createSelectDistinct(relBuilder, aggregate, argList, filterArg, sourceOf);

    // Create an aggregate on top, with the new aggregate list.
    final List<AggregateCall> newAggCalls = Lists.newArrayList(aggregate.getAggCallList());
    rewriteAggCalls(newAggCalls, argList, sourceOf);
    final int cardinality = aggregate.getGroupSet().cardinality();
    relBuilder.push(aggregate.copy(aggregate.getTraitSet(), relBuilder.build(),
            ImmutableBitSet.range(cardinality), null, newAggCalls));
    return relBuilder;
}
3.26
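The essence of convertMonopole is "deduplicate first, then apply the plain aggregate". The following self-contained sketch restates that idea on ordinary Java collections for a single group; the salary values are invented for illustration:

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Illustrative only: COUNT(DISTINCT sal) and SUM(DISTINCT sal) over one deptno group
// become COUNT(sal) and SUM(sal) over the de-duplicated values.
public class DistinctThenAggregateExample {
    public static void main(String[] args) {
        List<Integer> salariesInOneDeptno = Arrays.asList(1000, 2000, 1000, 3000, 2000);

        // "SELECT DISTINCT deptno, sal" restricted to a single deptno group
        Set<Integer> distinctSal = new LinkedHashSet<>(salariesInOneDeptno);

        // plain COUNT / SUM on top of the distinct set
        long countDistinct = distinctSal.size();
        long sumDistinct = distinctSal.stream().mapToLong(Integer::longValue).sum();

        System.out.println("COUNT(DISTINCT sal) = " + countDistinct); // 3
        System.out.println("SUM(DISTINCT sal)   = " + sumDistinct);   // 6000
    }
}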
flink_FlinkAggregateExpandDistinctAggregatesRule_doRewrite_rdh
/**
 * Converts all distinct aggregate calls to a given set of arguments.
 *
 * <p>This method is called several times, once for each set of arguments. Each time it is
 * called, it generates a JOIN to a new SELECT DISTINCT relational expression, and modifies the
 * set of top-level calls.
 *
 * @param aggregate Original aggregate
 * @param n Ordinal of this in a join. {@code relBuilder} contains the input relational
 *     expression (either the original aggregate or the output from the previous call to this
 *     method); {@code n} is 0 if we're converting the first distinct aggregate in a query with
 *     no non-distinct aggregates
 * @param argList Arguments to the distinct aggregate function
 * @param filterArg Argument that filters input to aggregate function, or -1
 * @param refs Array of expressions which will be projected by the result of this rule. Those
 *     relating to this arg list will be modified
 */
private void doRewrite(RelBuilder relBuilder, Aggregate aggregate, int n, List<Integer> argList,
        int filterArg, List<RexInputRef> refs) {
    final RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
    final List<RelDataTypeField> leftFields;
    if (n == 0) {
        leftFields = null;
    } else {
        leftFields = relBuilder.peek().getRowType().getFieldList();
    }

    // Aggregate(
    //     child,
    //     {COUNT(DISTINCT 1), SUM(DISTINCT 1), SUM(2)})
    //
    // becomes
    //
    // Aggregate(
    //     Join(
    //         child,
    //         Aggregate(child, < all columns > {}),
    //         INNER,
    //         <f2 = f5>))
    //
    // E.g.
    //   SELECT deptno, SUM(DISTINCT sal), COUNT(DISTINCT gender), MAX(age)
    //   FROM Emps
    //   GROUP BY deptno
    //
    // becomes
    //
    //   SELECT e.deptno, adsal.sum_sal, adgender.count_gender, e.max_age
    //   FROM (
    //     SELECT deptno, MAX(age) as max_age
    //     FROM Emps GROUP BY deptno) AS e
    //   JOIN (
    //     SELECT deptno, COUNT(gender) AS count_gender FROM (
    //       SELECT DISTINCT deptno, gender FROM Emps) AS dgender
    //     GROUP BY deptno) AS adgender
    //     ON e.deptno = adgender.deptno
    //   JOIN (
    //     SELECT deptno, SUM(sal) AS sum_sal FROM (
    //       SELECT DISTINCT deptno, sal FROM Emps) AS dsal
    //     GROUP BY deptno) AS adsal
    //     ON e.deptno = adsal.deptno
    //   GROUP BY e.deptno
    //
    // Note that if a query contains no non-distinct aggregates, then the
    // very first join/group by is omitted. In the example above, if
    // MAX(age) is removed, then the sub-select of "e" is not needed, and
    // instead the two other group by's are joined to one another.

    // Project the columns of the GROUP BY plus the arguments
    // to the agg function.
    final Map<Integer, Integer> sourceOf = new HashMap<>();
    createSelectDistinct(relBuilder, aggregate, argList, filterArg, sourceOf);

    // Now compute the aggregate functions on top of the distinct dataset.
    // Each distinct agg becomes a non-distinct call to the corresponding
    // field from the right; for example,
    //   "COUNT(DISTINCT e.sal)"
    // becomes
    //   "COUNT(distinct_e.sal)".
    final List<AggregateCall> aggCallList = new ArrayList<>();
    final List<AggregateCall> aggCalls = aggregate.getAggCallList();
    final int groupCount = aggregate.getGroupCount();
    int i = groupCount - 1;
    for (AggregateCall aggCall : aggCalls) {
        ++i;
        // Ignore agg calls which are not distinct or have the wrong set
        // arguments. If we're rewriting aggs whose args are {sal}, we will
        // rewrite COUNT(DISTINCT sal) and SUM(DISTINCT sal) but ignore
        // COUNT(DISTINCT gender) or SUM(sal).
        if (!aggCall.isDistinct()) {
            continue;
        }
        if (!aggCall.getArgList().equals(argList)) {
            continue;
        }

        // Re-map arguments.
        final int argCount = aggCall.getArgList().size();
        final List<Integer> newArgs = new ArrayList<>(argCount);
        for (int j = 0; j < argCount; j++) {
            final Integer arg = aggCall.getArgList().get(j);
            newArgs.add(sourceOf.get(arg));
        }
        final int newFilterArg = (aggCall.filterArg >= 0) ? sourceOf.get(aggCall.filterArg) : -1;
        final AggregateCall newAggCall = AggregateCall.create(aggCall.getAggregation(), false,
                aggCall.isApproximate(), false, newArgs, newFilterArg, null, RelCollations.EMPTY,
                aggCall.getType(), aggCall.getName());
        assert refs.get(i) == null;
        if (n == 0) {
            refs.set(i, new RexInputRef(groupCount + aggCallList.size(), newAggCall.getType()));
        } else {
            refs.set(i, new RexInputRef((leftFields.size() + groupCount) + aggCallList.size(),
                    newAggCall.getType()));
        }
        aggCallList.add(newAggCall);
    }

    final Map<Integer, Integer> map = new HashMap<>();
    for (Integer key : aggregate.getGroupSet()) {
        map.put(key, map.size());
    }
    final ImmutableBitSet newGroupSet = aggregate.getGroupSet().permute(map);
    assert newGroupSet.equals(ImmutableBitSet.range(aggregate.getGroupSet().cardinality()));
    relBuilder.push(aggregate.copy(aggregate.getTraitSet(), relBuilder.build(), newGroupSet,
            null, aggCallList));

    // If there's no left child yet, no need to create the join
    if (n == 0) {
        return;
    }

    // Create the join condition. It is of the form
    //   'left.f0 = right.f0 and left.f1 = right.f1 and ...'
    // where {f0, f1, ...} are the GROUP BY fields.
    final List<RelDataTypeField> distinctFields = relBuilder.peek().getRowType().getFieldList();
    final List<RexNode> conditions = Lists.newArrayList();
    for (i = 0; i < groupCount; ++i) {
        // null values form their own group;
        // use "is not distinct from" so that the join condition
        // allows null values to match.
        conditions.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM,
                RexInputRef.of(i, leftFields),
                new RexInputRef(leftFields.size() + i, distinctFields.get(i).getType())));
    }

    // Join in the new 'select distinct' relation.
    relBuilder.join(JoinRelType.INNER, conditions);
}
3.26
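The join produced at the end of doRewrite compares group keys with IS NOT DISTINCT FROM so that NULL group keys still match their own distinct-aggregate row. A minimal sketch of that null-safe comparison in plain Java; the key values are made up:

import java.util.Objects;

// Illustrative only: Objects.equals gives the same null-safe semantics on plain Java
// values that IS NOT DISTINCT FROM gives in SQL (null matches null, unlike '=').
public class NullSafeJoinKeyExample {
    static boolean isNotDistinctFrom(Object left, Object right) {
        return Objects.equals(left, right);
    }

    public static void main(String[] args) {
        System.out.println(isNotDistinctFrom("dept-10", "dept-10")); // true
        System.out.println(isNotDistinctFrom(null, null));           // true  (SQL '=' would be UNKNOWN)
        System.out.println(isNotDistinctFrom("dept-10", null));      // false
    }
}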
flink_FlinkAggregateExpandDistinctAggregatesRule_onMatch_rdh
// ~ Methods ----------------------------------------------------------------

public void onMatch(RelOptRuleCall call) {
    final Aggregate aggregate = call.rel(0);
    if (!AggregateUtil.containsAccurateDistinctCall(aggregate.getAggCallList())) {
        return;
    }

    // Check unsupported aggregate which contains both approximate distinct call and
    // accurate distinct call.
    if (AggregateUtil.containsApproximateDistinctCall(aggregate.getAggCallList())) {
        throw new TableException(
                "There are both Distinct AggCall and Approximate Distinct AggCall in one sql statement, "
                        + "it is not supported yet.\nPlease choose one of them.");
    }

    // If this aggregate is a non-simple aggregate (e.g. CUBE, ROLLUP)
    // and contains distinct calls, it should be transformed to a simple aggregate first
    // by DecomposeGroupingSetsRule. Then this rule expands its distinct aggregates.
    if (aggregate.getGroupSets().size() > 1) {
        return;
    }

    // Find all of the agg expressions. We use a LinkedHashSet to ensure determinism.
    // Find all aggregate calls without distinct
    int nonDistinctAggCallCount = 0;
    // Find all aggregate calls without distinct but ignore MAX, MIN, BIT_AND, BIT_OR
    int nonDistinctAggCallExcludingIgnoredCount = 0;
    int filterCount = 0;
    int unsupportedNonDistinctAggCallCount = 0;
    final Set<Pair<List<Integer>, Integer>> argLists = new LinkedHashSet<>();
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        if (aggCall.filterArg >= 0) {
            ++filterCount;
        }
        if (!aggCall.isDistinct()) {
            ++nonDistinctAggCallCount;
            final SqlKind aggCallKind = aggCall.getAggregation().getKind();
            // We only support COUNT/SUM/MIN/MAX for the "single" count distinct optimization
            switch (aggCallKind) {
                case COUNT:
                case SUM:
                case SUM0:
                case MIN:
                case MAX:
                    break;
                default:
                    ++unsupportedNonDistinctAggCallCount;
            }
            if (aggCall.getAggregation().getDistinctOptionality() == Optionality.IGNORED) {
                argLists.add(Pair.of(aggCall.getArgList(), aggCall.filterArg));
            } else {
                ++nonDistinctAggCallExcludingIgnoredCount;
            }
        } else {
            argLists.add(Pair.of(aggCall.getArgList(), aggCall.filterArg));
        }
    }

    final int distinctAggCallCount = aggregate.getAggCallList().size() - nonDistinctAggCallCount;
    Preconditions.checkState(argLists.size() > 0, "containsDistinctCall lied");

    // If all of the agg expressions are distinct and have the same
    // arguments then we can use a more efficient form.
    // MAX, MIN, BIT_AND, BIT_OR always ignore the distinct attribute;
    // when they are mixed in with other distinct agg calls,
    // we can still use this promotion.
    if ((nonDistinctAggCallExcludingIgnoredCount == 0)
            && (argLists.size() == 1)
            && (aggregate.getGroupType() == Group.SIMPLE)) {
        final Pair<List<Integer>, Integer> pair = Iterables.getOnlyElement(argLists);
        final RelBuilder relBuilder = call.builder();
        convertMonopole(relBuilder, aggregate, pair.left, pair.right);
        call.transformTo(relBuilder.build());
        return;
    }

    if (useGroupingSets) {
        rewriteUsingGroupingSets(call, aggregate);
        return;
    }

    // If only one distinct aggregate and one or more non-distinct aggregates,
    // we can generate multi-phase aggregates
    if ((distinctAggCallCount == 1) // one distinct aggregate
            && (filterCount == 0) // no filter
            && (unsupportedNonDistinctAggCallCount == 0) // sum/min/max/count in non-distinct aggregate
            && (nonDistinctAggCallCount > 0)) { // one or more non-distinct aggregates
        final RelBuilder relBuilder = call.builder();
        convertSingletonDistinct(relBuilder, aggregate, argLists);
        call.transformTo(relBuilder.build());
        return;
    }

    // Create a list of the expressions which will yield the final result.
    // Initially, the expressions point to the input field.
    final List<RelDataTypeField> aggFields = aggregate.getRowType().getFieldList();
    final List<RexInputRef> refs = new ArrayList<>();
    final List<String> fieldNames = aggregate.getRowType().getFieldNames();
    final ImmutableBitSet groupSet = aggregate.getGroupSet();
    final int groupCount = aggregate.getGroupCount();
    for (int i : Util.range(groupCount)) {
        refs.add(RexInputRef.of(i, aggFields));
    }

    // Aggregate the original relation, including any non-distinct aggregates.
    final List<AggregateCall> newAggCallList = new ArrayList<>();
    int i = -1;
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        ++i;
        if (aggCall.isDistinct()) {
            refs.add(null);
            continue;
        }
        refs.add(new RexInputRef(groupCount + newAggCallList.size(),
                aggFields.get(groupCount + i).getType()));
        newAggCallList.add(aggCall);
    }

    // In the case where there are no non-distinct aggregates (regardless of
    // whether there are group bys), there's no need to generate the
    // extra aggregate and join.
    final RelBuilder relBuilder = call.builder();
    relBuilder.push(aggregate.getInput());
    int n = 0;
    if (!newAggCallList.isEmpty()) {
        final RelBuilder.GroupKey groupKey = relBuilder.groupKey(groupSet, aggregate.getGroupSets());
        relBuilder.aggregate(groupKey, newAggCallList);
        ++n;
    }

    // For each set of operands, find and rewrite all calls which have that
    // set of operands.
    for (Pair<List<Integer>, Integer> argList : argLists) {
        doRewrite(relBuilder, aggregate, n++, argList.left, argList.right, refs);
    }

    relBuilder.project(refs, fieldNames);
    call.transformTo(relBuilder.build());
}
3.26
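A condensed, self-contained restatement of how onMatch picks a rewrite path from the counters it computes. The class, method, and enum names here are invented for the sketch; only the threshold logic is taken from the code above:

// Illustrative only.
public class RewritePathExample {
    enum Path { MONOPOLE, GROUPING_SETS, MULTI_PHASE, JOIN_REWRITE }

    static Path choosePath(
            int distinctCallCount,
            int nonDistinctCallCount,
            int nonDistinctExcludingIgnoredCount,
            int unsupportedNonDistinctCallCount,
            int filterCount,
            int distinctArgListCount, // size of the collected (argList, filterArg) set
            boolean simpleGroup,
            boolean useGroupingSets) {
        if (nonDistinctExcludingIgnoredCount == 0 && distinctArgListCount == 1 && simpleGroup) {
            return Path.MONOPOLE; // all aggs distinct (or distinct-ignoring) over the same args
        }
        if (useGroupingSets) {
            return Path.GROUPING_SETS;
        }
        if (distinctCallCount == 1
                && filterCount == 0
                && unsupportedNonDistinctCallCount == 0
                && nonDistinctCallCount > 0) {
            return Path.MULTI_PHASE; // convertSingletonDistinct
        }
        return Path.JOIN_REWRITE; // one join per distinct argument set (doRewrite)
    }

    public static void main(String[] args) {
        // e.g. COUNT(DISTINCT sal) together with SUM(bonus), no filters:
        System.out.println(choosePath(1, 1, 1, 0, 0, 1, true, false)); // MULTI_PHASE
    }
}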
flink_TypedResult_empty_rdh
// --------------------------------------------------------------------------------------------

public static <T> TypedResult<T> empty() {
    return new TypedResult<>(ResultType.EMPTY, null);
}
3.26
flink_ParquetVectorizedInputFormat_clipParquetSchema_rdh
/** Clips `parquetSchema` according to `fieldNames`. */
private MessageType clipParquetSchema(GroupType parquetSchema, Collection<Integer> unknownFieldsIndices) {
    Type[] types = new Type[projectedFields.length];
    if (isCaseSensitive) {
        for (int i = 0; i < projectedFields.length; ++i) {
            String fieldName = projectedFields[i];
            if (!parquetSchema.containsField(fieldName)) {
                LOG.warn("{} does not exist in {}, will fill the field with null.", fieldName, parquetSchema);
                types[i] = ParquetSchemaConverter.convertToParquetType(fieldName, projectedTypes[i]);
                unknownFieldsIndices.add(i);
            } else {
                types[i] = parquetSchema.getType(fieldName);
            }
        }
    } else {
        Map<String, Type> caseInsensitiveFieldMap = new HashMap<>();
        for (Type type : parquetSchema.getFields()) {
            caseInsensitiveFieldMap.compute(type.getName().toLowerCase(Locale.ROOT),
                    (key, previousType) -> {
                        if (previousType != null) {
                            throw new FlinkRuntimeException(
                                    "Parquet with case insensitive mode should have no duplicate key: " + key);
                        }
                        return type;
                    });
        }
        for (int i = 0; i < projectedFields.length; ++i) {
            Type type = caseInsensitiveFieldMap.get(projectedFields[i].toLowerCase(Locale.ROOT));
            if (type == null) {
                LOG.warn("{} does not exist in {}, will fill the field with null.", projectedFields[i], parquetSchema);
                type = ParquetSchemaConverter.convertToParquetType(
                        projectedFields[i].toLowerCase(Locale.ROOT), projectedTypes[i]);
                unknownFieldsIndices.add(i);
            }
            // TODO clip for array,map,row types.
            types[i] = type;
        }
    }
    return Types.buildMessage().addFields(types).named("flink-parquet");
}
3.26
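The case-insensitive branch of clipParquetSchema hinges on a lowercased lookup map that fails fast on ambiguous field names. A standalone sketch of that lookup on plain strings; the field names are invented:

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

// Illustrative only: the same compute(...) idiom as the method above, minus Parquet types.
public class CaseInsensitiveFieldLookupExample {
    public static void main(String[] args) {
        String[] parquetFields = {"UserId", "eventTime", "Payload"};
        String[] projectedFields = {"userid", "EVENTTIME"};

        Map<String, String> byLowerCaseName = new HashMap<>();
        for (String field : parquetFields) {
            byLowerCaseName.compute(field.toLowerCase(Locale.ROOT), (key, previous) -> {
                if (previous != null) {
                    throw new IllegalStateException("duplicate key after lowercasing: " + key);
                }
                return field;
            });
        }

        for (String projected : projectedFields) {
            String resolved = byLowerCaseName.get(projected.toLowerCase(Locale.ROOT));
            // null would mean "fill this column with nulls", as in the method above
            System.out.println(projected + " -> " + resolved);
        }
    }
}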
flink_ParquetVectorizedInputFormat_createReadableVectors_rdh
/**
 * Create readable vectors from writable vectors. Especially for decimal, see {@link ParquetDecimalVector}.
 */
private ColumnVector[] createReadableVectors(WritableColumnVector[] writableVectors) {
    ColumnVector[] vectors = new ColumnVector[writableVectors.length];
    for (int i = 0; i < writableVectors.length; i++) {
        vectors[i] = (projectedTypes[i].getTypeRoot() == LogicalTypeRoot.DECIMAL)
                ? new ParquetDecimalVector(writableVectors[i])
                : writableVectors[i];
    }
    return vectors;
}
3.26
flink_ParquetVectorizedInputFormat_nextBatch_rdh
/** Advances to the next batch of rows. Returns false if there are no more. */
private boolean nextBatch(ParquetReaderBatch<T> batch) throws IOException {
    for (WritableColumnVector v : batch.writableVectors) {
        v.reset();
    }
    batch.columnarBatch.setNumRows(0);
    if (rowsReturned >= totalRowCount) {
        return false;
    }
    if (rowsReturned == totalCountLoadedSoFar) {
        m0();
    }
    int num = (int) Math.min(batchSize, totalCountLoadedSoFar - rowsReturned);
    for (int i = 0; i < columnReaders.length; ++i) {
        if (columnReaders[i] == null) {
            batch.writableVectors[i].fillWithNulls();
        } else {
            // noinspection unchecked
            columnReaders[i].readToVector(num, batch.writableVectors[i]);
        }
    }
    rowsReturned += num;
    batch.columnarBatch.setNumRows(num);
    return true;
}
3.26
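The batch sizing in nextBatch is simply "read at most batchSize of whatever is still available". A simplified sketch of that loop, assuming for brevity that all rows are already loaded (so totalCountLoadedSoFar equals totalRowCount); the counts are invented:

// Illustrative only.
public class BatchAdvanceExample {
    public static void main(String[] args) {
        long totalRowCount = 10_500;
        long rowsReturned = 0;
        int batchSize = 2_048;

        while (rowsReturned < totalRowCount) {
            // never read past the rows that remain
            int num = (int) Math.min(batchSize, totalRowCount - rowsReturned);
            rowsReturned += num;
            System.out.println("emit batch of " + num + " rows, total " + rowsReturned);
        }
        // a further call would return false, signalling "no more batches"
    }
}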
flink_SupportsRowLevelUpdate_getRowLevelUpdateMode_rdh
/**
 * The planner will rewrite the update statement into a query based on the {@link RowLevelUpdateMode}, keeping the query of the update unchanged by default (in `UPDATED_ROWS`
 * mode), or changing the query to union the updated rows and the other rows (in `ALL_ROWS`
 * mode).
 *
 * <p>Take the following SQL as an example:
 *
 * <pre>{@code UPDATE t SET x = 1 WHERE y = 2;}</pre>
 *
 * <p>If this method returns {@link RowLevelUpdateMode#UPDATED_ROWS}, the sink will only receive
 * the update-after rows that match the filter [y = 2].
 *
 * <p>If this method returns {@link RowLevelUpdateMode#ALL_ROWS}, the sink will receive both the
 * update-after rows that match the filter [y = 2] and the other rows that don't match the filter
 * [y = 2].
 *
 * <p>Note: All rows will have RowKind#UPDATE_AFTER when RowLevelUpdateMode is UPDATED_ROWS,
 * and RowKind#INSERT when RowLevelUpdateMode is ALL_ROWS.
 */
default RowLevelUpdateMode getRowLevelUpdateMode() {
    return RowLevelUpdateMode.UPDATED_ROWS;
}
3.26
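A plain-Java simulation of what a sink would receive under the two modes for the Javadoc example above. The Row record and helper are invented for the sketch and stand in for Flink's RowData; only the mode semantics are taken from the documentation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;

// Illustrative only: UPDATED_ROWS ships just the rewritten matching rows,
// ALL_ROWS additionally ships the untouched non-matching rows.
public class RowLevelUpdateModeExample {
    record Row(int x, int y) {}

    static List<Row> rowsDeliveredToSink(List<Row> table, Predicate<Row> filter,
                                         UnaryOperator<Row> update, boolean allRows) {
        List<Row> delivered = new ArrayList<>();
        for (Row row : table) {
            if (filter.test(row)) {
                delivered.add(update.apply(row)); // UPDATE_AFTER (or INSERT in ALL_ROWS mode)
            } else if (allRows) {
                delivered.add(row); // only ALL_ROWS mode also ships untouched rows
            }
        }
        return delivered;
    }

    public static void main(String[] args) {
        List<Row> table = Arrays.asList(new Row(7, 2), new Row(8, 3));
        Predicate<Row> whereYEquals2 = r -> r.y() == 2;
        UnaryOperator<Row> setXTo1 = r -> new Row(1, r.y());

        System.out.println(rowsDeliveredToSink(table, whereYEquals2, setXTo1, false)); // [Row[x=1, y=2]]
        System.out.println(rowsDeliveredToSink(table, whereYEquals2, setXTo1, true));  // [Row[x=1, y=2], Row[x=8, y=3]]
    }
}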
flink_TableSinkBase_getFieldTypes_rdh
/** Returns the field types of the table to emit. */
@Override
public TypeInformation<?>[] getFieldTypes() {
    if (fieldTypes.isPresent()) {
        return fieldTypes.get();
    } else {
        throw new IllegalStateException("Table sink must be configured to retrieve field types.");
    }
}

/**
 * Returns a copy of this {@link TableSink} configured with the field names and types of the
 * table to emit.
 *
 * @param fieldNames The field names of the table to emit.
 * @param fieldTypes The field types of the table to emit.
 * @return A copy of this {@link TableSink}
3.26
flink_TableSinkBase_getFieldNames_rdh
/** Returns the field names of the table to emit. */
@Override
public String[] getFieldNames() {
    if (fieldNames.isPresent()) {
        return fieldNames.get();
    } else {
        throw new IllegalStateException("Table sink must be configured to retrieve field names.");
    }
}
3.26
flink_WindowReader_process_rdh
/**
 * Reads window state generated without any preaggregation such as {@code WindowedStream#apply}
 * and {@code WindowedStream#process}.
 *
 * @param uid The uid of the operator.
 * @param readerFunction The window reader function.
 * @param keyType The key type of the window.
 * @param stateType The type of records stored in state.
 * @param outputType The output type of the reader function.
 * @param <K> The type of the key.
 * @param <T> The type of the records stored in state.
 * @param <OUT> The output type of the reader function.
 * @return A {@code DataSet} of objects read from keyed state.
 * @throws IOException If the savepoint does not contain the specified uid.
 */
public <K, T, OUT> DataSource<OUT> process(String uid, WindowReaderFunction<T, OUT, K, W> readerFunction,
        TypeInformation<K> keyType, TypeInformation<T> stateType, TypeInformation<OUT> outputType)
        throws IOException {
    WindowReaderOperator<?, K, T, W, OUT> operator =
            WindowReaderOperator.process(readerFunction, keyType, windowSerializer, stateType);
    return readWindowOperator(uid, outputType, operator);
}
3.26