protected void emitRecord(T record, KafkaTopicPartitionState<KPH> partitionState, long offset) throws Exception { if (record != null) { if (timestampWatermarkMode == NO_TIMESTAMPS_WATERMARKS) { // fast path logic, in case there are no watermarks // emit the record, using the checkpoint lock to guarantee // atomicity of record emission and offset state update synchronized (checkpointLock) { sourceContext.collect(record); partitionState.setOffset(offset); } } else if (timestampWatermarkMode == PERIODIC_WATERMARKS) { emitRecordWithTimestampAndPeriodicWatermark(record, partitionState, offset, Long.MIN_VALUE); } else { emitRecordWithTimestampAndPunctuatedWatermark(record, partitionState, offset, Long.MIN_VALUE); } } else { // if the record is null, simply just update the offset state for partition synchronized (checkpointLock) { partitionState.setOffset(offset); } } }
Emits a record without attaching an existing timestamp to it. <p>Implementation Note: This method is kept brief to be JIT-inlining friendly. That keeps the fast path efficient, while the extended paths are called as separate methods. @param record The record to emit @param partitionState The state of the Kafka partition from which the record was fetched @param offset The offset of the record
protected void emitRecordWithTimestamp( T record, KafkaTopicPartitionState<KPH> partitionState, long offset, long timestamp) throws Exception { if (record != null) { if (timestampWatermarkMode == NO_TIMESTAMPS_WATERMARKS) { // fast path logic, in case there are no watermarks generated in the fetcher // emit the record, using the checkpoint lock to guarantee // atomicity of record emission and offset state update synchronized (checkpointLock) { sourceContext.collectWithTimestamp(record, timestamp); partitionState.setOffset(offset); } } else if (timestampWatermarkMode == PERIODIC_WATERMARKS) { emitRecordWithTimestampAndPeriodicWatermark(record, partitionState, offset, timestamp); } else { emitRecordWithTimestampAndPunctuatedWatermark(record, partitionState, offset, timestamp); } } else { // if the record is null, simply just update the offset state for partition synchronized (checkpointLock) { partitionState.setOffset(offset); } } }
Emits a record attaching a timestamp to it. <p>Implementation Note: This method is kept brief to be JIT-inlining friendly. That keeps the fast path efficient, while the extended paths are called as separate methods. @param record The record to emit @param partitionState The state of the Kafka partition from which the record was fetched @param offset The offset of the record @param timestamp The timestamp to attach to the record
private void emitRecordWithTimestampAndPeriodicWatermark( T record, KafkaTopicPartitionState<KPH> partitionState, long offset, long kafkaEventTimestamp) { @SuppressWarnings("unchecked") final KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH> withWatermarksState = (KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH>) partitionState; // extract timestamp - this accesses/modifies the per-partition state inside the // watermark generator instance, so we need to lock the access on the // partition state. concurrent access can happen from the periodic emitter final long timestamp; //noinspection SynchronizationOnLocalVariableOrMethodParameter synchronized (withWatermarksState) { timestamp = withWatermarksState.getTimestampForRecord(record, kafkaEventTimestamp); } // emit the record with timestamp, using the usual checkpoint lock to guarantee // atomicity of record emission and offset state update synchronized (checkpointLock) { sourceContext.collectWithTimestamp(record, timestamp); partitionState.setOffset(offset); } }
Record emission, if a timestamp will be attached from an assigner that is also a periodic watermark generator.
private void emitRecordWithTimestampAndPunctuatedWatermark( T record, KafkaTopicPartitionState<KPH> partitionState, long offset, long kafkaEventTimestamp) { @SuppressWarnings("unchecked") final KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> withWatermarksState = (KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH>) partitionState; // only one thread ever works on accessing timestamps and watermarks // from the punctuated extractor final long timestamp = withWatermarksState.getTimestampForRecord(record, kafkaEventTimestamp); final Watermark newWatermark = withWatermarksState.checkAndGetNewWatermark(record, timestamp); // emit the record with timestamp, using the usual checkpoint lock to guarantee // atomicity of record emission and offset state update synchronized (checkpointLock) { sourceContext.collectWithTimestamp(record, timestamp); partitionState.setOffset(offset); } // if we also have a new per-partition watermark, check if that is also a // new cross-partition watermark if (newWatermark != null) { updateMinPunctuatedWatermark(newWatermark); } }
Record emission, if a timestamp will be attached from an assigner that is also a punctuated watermark generator.
private void updateMinPunctuatedWatermark(Watermark nextWatermark) { if (nextWatermark.getTimestamp() > maxWatermarkSoFar) { long newMin = Long.MAX_VALUE; for (KafkaTopicPartitionState<?> state : subscribedPartitionStates) { @SuppressWarnings("unchecked") final KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> withWatermarksState = (KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH>) state; newMin = Math.min(newMin, withWatermarksState.getCurrentPartitionWatermark()); } // double-check locking pattern if (newMin > maxWatermarkSoFar) { synchronized (checkpointLock) { if (newMin > maxWatermarkSoFar) { maxWatermarkSoFar = newMin; sourceContext.emitWatermark(new Watermark(newMin)); } } } } }
Checks whether a new per-partition watermark is also a new cross-partition watermark.
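The pattern above (per-partition watermarks, cross-partition minimum, double-checked locking on a shared lock) can be illustrated with a minimal standalone sketch. All names below are illustrative, not the Flink classes themselves; a single fetcher thread is assumed to drive the per-partition updates.

// Minimal standalone sketch of the cross-partition watermark pattern above.
// All names are illustrative; this is not the Flink class itself.
public class MinWatermarkTracker {
    private final long[] partitionWatermarks;   // current watermark per partition
    private final Object emitLock = new Object();
    private volatile long maxEmittedSoFar = Long.MIN_VALUE;

    public MinWatermarkTracker(int numPartitions) {
        partitionWatermarks = new long[numPartitions];
        java.util.Arrays.fill(partitionWatermarks, Long.MIN_VALUE);
    }

    /** Called when partition {@code p} produced a new watermark; returns the value to emit, or null. */
    public Long onPartitionWatermark(int p, long watermark) {
        partitionWatermarks[p] = Math.max(partitionWatermarks[p], watermark);
        long newMin = Long.MAX_VALUE;
        for (long w : partitionWatermarks) {
            newMin = Math.min(newMin, w);
        }
        // double-checked locking: only take the lock if the minimum actually advanced
        if (newMin > maxEmittedSoFar) {
            synchronized (emitLock) {
                if (newMin > maxEmittedSoFar) {
                    maxEmittedSoFar = newMin;
                    return newMin;   // caller emits new Watermark(newMin)
                }
            }
        }
        return null;
    }
}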
private List<KafkaTopicPartitionState<KPH>> createPartitionStateHolders( Map<KafkaTopicPartition, Long> partitionsToInitialOffsets, int timestampWatermarkMode, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException { // CopyOnWrite as adding discovered partitions could happen in parallel // while different threads iterate the partitions list List<KafkaTopicPartitionState<KPH>> partitionStates = new CopyOnWriteArrayList<>(); switch (timestampWatermarkMode) { case NO_TIMESTAMPS_WATERMARKS: { for (Map.Entry<KafkaTopicPartition, Long> partitionEntry : partitionsToInitialOffsets.entrySet()) { // create the kafka version specific partition handle KPH kafkaHandle = createKafkaPartitionHandle(partitionEntry.getKey()); KafkaTopicPartitionState<KPH> partitionState = new KafkaTopicPartitionState<>(partitionEntry.getKey(), kafkaHandle); partitionState.setOffset(partitionEntry.getValue()); partitionStates.add(partitionState); } return partitionStates; } case PERIODIC_WATERMARKS: { for (Map.Entry<KafkaTopicPartition, Long> partitionEntry : partitionsToInitialOffsets.entrySet()) { KPH kafkaHandle = createKafkaPartitionHandle(partitionEntry.getKey()); AssignerWithPeriodicWatermarks<T> assignerInstance = watermarksPeriodic.deserializeValue(userCodeClassLoader); KafkaTopicPartitionStateWithPeriodicWatermarks<T, KPH> partitionState = new KafkaTopicPartitionStateWithPeriodicWatermarks<>( partitionEntry.getKey(), kafkaHandle, assignerInstance); partitionState.setOffset(partitionEntry.getValue()); partitionStates.add(partitionState); } return partitionStates; } case PUNCTUATED_WATERMARKS: { for (Map.Entry<KafkaTopicPartition, Long> partitionEntry : partitionsToInitialOffsets.entrySet()) { KPH kafkaHandle = createKafkaPartitionHandle(partitionEntry.getKey()); AssignerWithPunctuatedWatermarks<T> assignerInstance = watermarksPunctuated.deserializeValue(userCodeClassLoader); KafkaTopicPartitionStateWithPunctuatedWatermarks<T, KPH> partitionState = new KafkaTopicPartitionStateWithPunctuatedWatermarks<>( partitionEntry.getKey(), kafkaHandle, assignerInstance); partitionState.setOffset(partitionEntry.getValue()); partitionStates.add(partitionState); } return partitionStates; } default: // cannot happen, add this as a guard for the future throw new RuntimeException(); } }
Utility method that takes the topic partitions and creates the topic partition state holders, depending on the timestamp / watermark mode.
private List<KafkaTopicPartitionState<KPH>> createPartitionStateHolders( List<KafkaTopicPartition> partitions, long initialOffset, int timestampWatermarkMode, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException { Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size()); for (KafkaTopicPartition partition : partitions) { partitionsToInitialOffset.put(partition, initialOffset); } return createPartitionStateHolders( partitionsToInitialOffset, timestampWatermarkMode, watermarksPeriodic, watermarksPunctuated, userCodeClassLoader); }
Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, SerializedValue, ClassLoader)} that uses the same offset for all partitions when creating their state holders.
private void registerOffsetMetrics( MetricGroup consumerMetricGroup, List<KafkaTopicPartitionState<KPH>> partitionOffsetStates) { for (KafkaTopicPartitionState<KPH> ktp : partitionOffsetStates) { MetricGroup topicPartitionGroup = consumerMetricGroup .addGroup(OFFSETS_BY_TOPIC_METRICS_GROUP, ktp.getTopic()) .addGroup(OFFSETS_BY_PARTITION_METRICS_GROUP, Integer.toString(ktp.getPartition())); topicPartitionGroup.gauge(CURRENT_OFFSETS_METRICS_GAUGE, new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET)); topicPartitionGroup.gauge(COMMITTED_OFFSETS_METRICS_GAUGE, new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET)); legacyCurrentOffsetsMetricGroup.gauge(getLegacyOffsetsMetricsGaugeName(ktp), new OffsetGauge(ktp, OffsetGaugeType.CURRENT_OFFSET)); legacyCommittedOffsetsMetricGroup.gauge(getLegacyOffsetsMetricsGaugeName(ktp), new OffsetGauge(ktp, OffsetGaugeType.COMMITTED_OFFSET)); } }
For each partition, register a new metric group to expose current offsets and committed offsets. Per-partition metric groups can be scoped by user variables {@link KafkaConsumerMetricConstants#OFFSETS_BY_TOPIC_METRICS_GROUP} and {@link KafkaConsumerMetricConstants#OFFSETS_BY_PARTITION_METRICS_GROUP}. <p>Note: this method also registers gauges for deprecated offset metrics, to maintain backwards compatibility. @param consumerMetricGroup The consumer metric group @param partitionOffsetStates The partition offset state holders, whose values will be used to update metrics
@SuppressWarnings("unchecked") public <OUT extends Tuple> SingleOutputStreamOperator<OUT> projectTupleX() { SingleOutputStreamOperator<OUT> projOperator = null; switch (fieldIndexes.length) { case 1: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple1(); break; case 2: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple2(); break; case 3: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple3(); break; case 4: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple4(); break; case 5: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple5(); break; case 6: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple6(); break; case 7: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple7(); break; case 8: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple8(); break; case 9: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple9(); break; case 10: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple10(); break; case 11: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple11(); break; case 12: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple12(); break; case 13: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple13(); break; case 14: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple14(); break; case 15: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple15(); break; case 16: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple16(); break; case 17: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple17(); break; case 18: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple18(); break; case 19: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple19(); break; case 20: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple20(); break; case 21: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple21(); break; case 22: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple22(); break; case 23: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple23(); break; case 24: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple24(); break; case 25: projOperator = (SingleOutputStreamOperator<OUT>) projectTuple25(); break; default: throw new IllegalStateException("Excessive arity in tuple."); } return projOperator; }
Chooses a projectTupleX according to the length of {@link org.apache.flink.streaming.api.datastream.StreamProjection#fieldIndexes}. @return The projected DataStream. @see org.apache.flink.api.java.operators.ProjectOperator.Projection
public <T0> SingleOutputStreamOperator<Tuple1<T0>> projectTuple1() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple1<T0>> tType = new TupleTypeInfo<Tuple1<T0>>(fTypes); return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple1<T0>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
Projects a {@link Tuple} {@link DataStream} to the previously selected fields. @return The projected DataStream. @see Tuple @see DataStream
public <T0, T1> SingleOutputStreamOperator<Tuple2<T0, T1>> projectTuple2() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple2<T0, T1>> tType = new TupleTypeInfo<Tuple2<T0, T1>>(fTypes); return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple2<T0, T1>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
Projects a {@link Tuple} {@link DataStream} to the previously selected fields. @return The projected DataStream. @see Tuple @see DataStream
public <T0, T1, T2> SingleOutputStreamOperator<Tuple3<T0, T1, T2>> projectTuple3() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes); return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple3<T0, T1, T2>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
Projects a {@link Tuple} {@link DataStream} to the previously selected fields. @return The projected DataStream. @see Tuple @see DataStream
public <T0, T1, T2, T3> SingleOutputStreamOperator<Tuple4<T0, T1, T2, T3>> projectTuple4() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType = new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes); return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple4<T0, T1, T2, T3>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
Projects a {@link Tuple} {@link DataStream} to the previously selected fields. @return The projected DataStream. @see Tuple @see DataStream
public <T0, T1, T2, T3, T4> SingleOutputStreamOperator<Tuple5<T0, T1, T2, T3, T4>> projectTuple5() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> tType = new TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>>(fTypes); return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple5<T0, T1, T2, T3, T4>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
Projects a {@link Tuple} {@link DataStream} to the previously selected fields. @return The projected DataStream. @see Tuple @see DataStream
public <T0, T1, T2, T3, T4, T5> SingleOutputStreamOperator<Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType = new TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes); return dataStream.transform("Projection", tType, new StreamProject<IN, Tuple6<T0, T1, T2, T3, T4, T5>>(fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
Projects a {@link Tuple} {@link DataStream} to the previously selected fields. @return The projected DataStream. @see Tuple @see DataStream
public static void main(String[] args) throws Exception { final ParameterTool params = ParameterTool.fromArgs(args); final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); if (!params.has("customer") && !params.has("orders") && !params.has("lineitem") && !params.has("nation")) { System.err.println(" This program expects data from the TPC-H benchmark as input data."); System.err.println(" Due to legal restrictions, we can not ship generated data."); System.err.println(" You can find the TPC-H data generator at http://www.tpc.org/tpch/."); System.err.println(" Usage: TPCHQuery10 --customer <path> --orders <path> --lineitem <path> --nation <path> [--output <path>]"); return; } // get customer data set: (custkey, name, address, nationkey, acctbal) DataSet<Tuple5<Integer, String, String, Integer, Double>> customers = getCustomerDataSet(env, params.get("customer")); // get orders data set: (orderkey, custkey, orderdate) DataSet<Tuple3<Integer, Integer, String>> orders = getOrdersDataSet(env, params.get("orders")); // get lineitem data set: (orderkey, extendedprice, discount, returnflag) DataSet<Tuple4<Integer, Double, Double, String>> lineitems = getLineitemDataSet(env, params.get("lineitem")); // get nation data set: (nationkey, name) DataSet<Tuple2<Integer, String>> nations = getNationsDataSet(env, params.get("nation")); // orders filtered by year: (orderkey, custkey) DataSet<Tuple2<Integer, Integer>> ordersFilteredByYear = // filter by year orders.filter(order -> Integer.parseInt(order.f2.substring(0, 4)) > 1990) // project fields out that are no longer required .project(0, 1); // lineitems filtered by flag: (orderkey, revenue) DataSet<Tuple2<Integer, Double>> lineitemsFilteredByFlag = // filter by flag lineitems.filter(lineitem -> lineitem.f3.equals("R")) // compute revenue and project out return flag // revenue per item = l_extendedprice * (1 - l_discount) .map(lineitem -> new Tuple2<>(lineitem.f0, lineitem.f1 * (1 - lineitem.f2))) .returns(Types.TUPLE(Types.INT, Types.DOUBLE)); // for lambda with generics // join orders with lineitems: (custkey, revenue) DataSet<Tuple2<Integer, Double>> revenueByCustomer = ordersFilteredByYear.joinWithHuge(lineitemsFilteredByFlag) .where(0).equalTo(0) .projectFirst(1).projectSecond(1); revenueByCustomer = revenueByCustomer.groupBy(0).aggregate(Aggregations.SUM, 1); // join customer with nation (custkey, name, address, nationname, acctbal) DataSet<Tuple5<Integer, String, String, String, Double>> customerWithNation = customers .joinWithTiny(nations) .where(3).equalTo(0) .projectFirst(0, 1, 2).projectSecond(1).projectFirst(4); // join customer (with nation) with revenue (custkey, name, address, nationname, acctbal, revenue) DataSet<Tuple6<Integer, String, String, String, Double, Double>> result = customerWithNation.join(revenueByCustomer) .where(0).equalTo(0) .projectFirst(0, 1, 2, 3, 4).projectSecond(1); // emit result if (params.has("output")) { result.writeAsCsv(params.get("output"), "\n", "|"); // execute program env.execute("TPCH Query 10 Example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); result.print(); } }
Program entry point: parses the TPC-H input paths, builds the Query 10 plan (filters, joins, and aggregation), and either writes the result as CSV to the configured output path or prints it to stdout.
public Elasticsearch host(String hostname, int port, String protocol) { final Host host = new Host( Preconditions.checkNotNull(hostname), port, Preconditions.checkNotNull(protocol)); hosts.add(host); return this; }
Adds an Elasticsearch host to connect to. Required. <p>Multiple hosts can be declared by calling this method multiple times. @param hostname connection hostname @param port connection port @param protocol connection protocol; e.g. "http"
public Elasticsearch failureHandlerCustom(Class<? extends ActionRequestFailureHandler> failureHandlerClass) { internalProperties.putString(CONNECTOR_FAILURE_HANDLER, ElasticsearchValidator.CONNECTOR_FAILURE_HANDLER_VALUE_CUSTOM); internalProperties.putClass(CONNECTOR_FAILURE_HANDLER_CLASS, failureHandlerClass); return this; }
Configures a failure handling strategy in case a request to Elasticsearch fails. <p>This strategy allows for custom failure handling using an {@link ActionRequestFailureHandler}.
public Elasticsearch bulkFlushMaxSize(String maxSize) { internalProperties.putMemorySize(CONNECTOR_BULK_FLUSH_MAX_SIZE, MemorySize.parse(maxSize, MemorySize.MemoryUnit.BYTES)); return this; }
Configures how to buffer elements before sending them in bulk to the cluster for efficiency. <p>Sets the maximum size of buffered actions per bulk request (using the syntax of {@link MemorySize}).
public Set<String> generateIdsToUse(long nextFreeTransactionalId) { Set<String> transactionalIds = new HashSet<>(); for (int i = 0; i < poolSize; i++) { long transactionalId = nextFreeTransactionalId + subtaskIndex * poolSize + i; transactionalIds.add(generateTransactionalId(transactionalId)); } return transactionalIds; }
The range of available transactional ids is [nextFreeTransactionalId, nextFreeTransactionalId + parallelism * kafkaProducersPoolSize). The loop below deterministically picks a subrange of those ids based on the index of this subtask.
public Set<String> generateIdsToAbort() { Set<String> idsToAbort = new HashSet<>(); for (int i = 0; i < safeScaleDownFactor; i++) { idsToAbort.addAll(generateIdsToUse(i * poolSize * totalNumberOfSubtasks)); } return idsToAbort; }
If we have to abort previous transactional ids after a restart from a failure that occurred BEFORE the first checkpoint completed, we do not know the parallelism used in the previous attempt. In that case we must guess the range of ids to abort based on the currently configured pool size, the current parallelism, and the safeScaleDownFactor.
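The id arithmetic described in the two notes above can be sketched in isolation. The class and method names below are illustrative; the real generator formats the numeric ids into strings, while this sketch only collects the numeric values.

import java.util.HashSet;
import java.util.Set;

// Illustrative sketch of the transactional-id arithmetic described above.
public class TransactionalIdRanges {
    public static Set<Long> idsToUse(long nextFreeTransactionalId, int subtaskIndex, int poolSize) {
        Set<Long> ids = new HashSet<>();
        for (int i = 0; i < poolSize; i++) {
            // each subtask owns a disjoint block of poolSize consecutive ids
            ids.add(nextFreeTransactionalId + (long) subtaskIndex * poolSize + i);
        }
        return ids;
    }

    public static Set<Long> idsToAbort(int subtaskIndex, int poolSize, int totalSubtasks, int safeScaleDownFactor) {
        Set<Long> ids = new HashSet<>();
        // the previous parallelism is unknown, so walk several plausible previous ranges
        for (int i = 0; i < safeScaleDownFactor; i++) {
            ids.addAll(idsToUse((long) i * poolSize * totalSubtasks, subtaskIndex, poolSize));
        }
        return ids;
    }

    public static void main(String[] args) {
        // subtask 2 with pool size 5: ids 10..14 of the current range
        System.out.println(idsToUse(0, 2, 5));
    }
}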
@Override public void registerTableSource(String name) { Preconditions.checkNotNull(name); TableSource<?> tableSource = TableFactoryUtil.findAndCreateTableSource(this); tableEnv.registerTableSource(name, tableSource); }
Searches for the specified table source, configures it accordingly, and registers it as a table under the given name. @param name table name to be registered in the table environment
@Override public void registerTableSink(String name) { Preconditions.checkNotNull(name); TableSink<?> tableSink = TableFactoryUtil.findAndCreateTableSink(this); tableEnv.registerTableSink(name, tableSink); }
Searches for the specified table sink, configures it accordingly, and registers it as a table under the given name. @param name table name to be registered in the table environment
@Override public D withFormat(FormatDescriptor format) { formatDescriptor = Optional.of(Preconditions.checkNotNull(format)); return (D) this; }
Specifies the format that defines how to read data from a connector.
@Override public D withSchema(Schema schema) { schemaDescriptor = Optional.of(Preconditions.checkNotNull(schema)); return (D) this; }
Specifies the resulting table schema.
@Override public Map<String, String> toProperties() { DescriptorProperties properties = new DescriptorProperties(); // this performs only basic validation // more validation can only happen within a factory if (connectorDescriptor.isFormatNeeded() && !formatDescriptor.isPresent()) { throw new ValidationException(String.format("The connector %s requires a format description.", connectorDescriptor.toString())); } else if (!connectorDescriptor.isFormatNeeded() && formatDescriptor.isPresent()) { throw new ValidationException( String.format("The connector %s does not require a format description " + "but %s found.", connectorDescriptor.toString(), formatDescriptor.get().toString())); } properties.putProperties(connectorDescriptor.toProperties()); formatDescriptor.ifPresent(s -> properties.putProperties(s.toProperties())); schemaDescriptor.ifPresent(s -> properties.putProperties(s.toProperties())); return properties.asMap(); }
Converts this descriptor into a set of properties.
@Override public int next(int bits) { long nextSeed = seed ^ (seed << 21); nextSeed ^= (nextSeed >>> 35); nextSeed ^= (nextSeed << 4); seed = nextSeed; return (int) (nextSeed & ((1L << bits) - 1)); }
All other methods, such as nextInt() and nextDouble(), depend on this method, so overriding it is sufficient. @param bits Random bits @return The next pseudorandom value from this random number generator's sequence
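Because every higher-level method of java.util.Random is built on next(bits), overriding that single method is enough to swap in the xorshift step shown above. The following is a self-contained sketch mirroring the shift constants from the code; the class name and seed handling are illustrative, not the Flink implementation itself.

import java.util.Random;

// Self-contained sketch: nextInt(), nextDouble(), ... are all derived from next(bits),
// so overriding next(bits) with the xorshift step is sufficient.
public class XorShiftRandom extends Random {
    private long seed;

    public XorShiftRandom(long initialSeed) {
        this.seed = initialSeed == 0 ? 1 : initialSeed;  // xorshift must not start at 0
    }

    @Override
    protected int next(int bits) {
        long nextSeed = seed ^ (seed << 21);
        nextSeed ^= (nextSeed >>> 35);
        nextSeed ^= (nextSeed << 4);
        seed = nextSeed;
        return (int) (nextSeed & ((1L << bits) - 1));
    }

    public static void main(String[] args) {
        XorShiftRandom rnd = new XorShiftRandom(42);
        System.out.println(rnd.nextInt(100));  // uses the overridden next(bits)
        System.out.println(rnd.nextDouble());
    }
}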
private List<Map<StreamStateHandle, OperatorStateHandle>> initMergeMapList(List<List<OperatorStateHandle>> parallelSubtaskStates) { int parallelism = parallelSubtaskStates.size(); final List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList = new ArrayList<>(parallelism); for (List<OperatorStateHandle> previousParallelSubtaskState : parallelSubtaskStates) { mergeMapList.add(previousParallelSubtaskState.stream() .collect(Collectors.toMap(OperatorStateHandle::getDelegateStateHandle, Function.identity()))); } return mergeMapList; }
Initializes the list of StreamStateHandle -> OperatorStateHandle maps from the given parallelSubtaskStates when the parallelism has not changed.
private Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> collectUnionStates( List<List<OperatorStateHandle>> parallelSubtaskStates) { Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> unionStates = new HashMap<>(parallelSubtaskStates.size()); for (List<OperatorStateHandle> subTaskState : parallelSubtaskStates) { for (OperatorStateHandle operatorStateHandle : subTaskState) { if (operatorStateHandle == null) { continue; } final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>> partitionOffsetEntries = operatorStateHandle.getStateNameToPartitionOffsets().entrySet(); partitionOffsetEntries.stream() .filter(entry -> entry.getValue().getDistributionMode().equals(OperatorStateHandle.Mode.UNION)) .forEach(entry -> { List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>> stateLocations = unionStates.computeIfAbsent(entry.getKey(), k -> new ArrayList<>(parallelSubtaskStates.size() * partitionOffsetEntries.size())); stateLocations.add(Tuple2.of(operatorStateHandle.getDelegateStateHandle(), entry.getValue())); }); } } return unionStates; }
Collects union states from the given parallelSubtaskStates.
@SuppressWarnings("unchecked, rawtype") private GroupByStateNameResults groupByStateMode(List<List<OperatorStateHandle>> previousParallelSubtaskStates) { //Reorganize: group by (State Name -> StreamStateHandle + StateMetaInfo) EnumMap<OperatorStateHandle.Mode, Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>> nameToStateByMode = new EnumMap<>(OperatorStateHandle.Mode.class); for (OperatorStateHandle.Mode mode : OperatorStateHandle.Mode.values()) { nameToStateByMode.put( mode, new HashMap<>()); } for (List<OperatorStateHandle> previousParallelSubtaskState : previousParallelSubtaskStates) { for (OperatorStateHandle operatorStateHandle : previousParallelSubtaskState) { if (operatorStateHandle == null) { continue; } final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>> partitionOffsetEntries = operatorStateHandle.getStateNameToPartitionOffsets().entrySet(); for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> e : partitionOffsetEntries) { OperatorStateHandle.StateMetaInfo metaInfo = e.getValue(); Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToState = nameToStateByMode.get(metaInfo.getDistributionMode()); List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>> stateLocations = nameToState.computeIfAbsent( e.getKey(), k -> new ArrayList<>(previousParallelSubtaskStates.size() * partitionOffsetEntries.size())); stateLocations.add(Tuple2.of(operatorStateHandle.getDelegateStateHandle(), e.getValue())); } } } return new GroupByStateNameResults(nameToStateByMode); }
Groups the named states by their distribution mode.
private List<Map<StreamStateHandle, OperatorStateHandle>> repartition( GroupByStateNameResults nameToStateByMode, int newParallelism) { // We will use this to merge w.r.t. StreamStateHandles for each parallel subtask inside the maps List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList = new ArrayList<>(newParallelism); // Initialize for (int i = 0; i < newParallelism; ++i) { mergeMapList.add(new HashMap<>()); } // Start with the state handles we distribute round robin by splitting by offsets Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToDistributeState = nameToStateByMode.getByMode(OperatorStateHandle.Mode.SPLIT_DISTRIBUTE); repartitionSplitState(nameToDistributeState, newParallelism, mergeMapList); // Now we also add the state handles marked for union to all parallel instances Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToUnionState = nameToStateByMode.getByMode(OperatorStateHandle.Mode.UNION); repartitionUnionState(nameToUnionState, mergeMapList); // Now we also add the state handles marked for uniform broadcast to all parallel instances Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToBroadcastState = nameToStateByMode.getByMode(OperatorStateHandle.Mode.BROADCAST); repartitionBroadcastState(nameToBroadcastState, mergeMapList); return mergeMapList; }
Repartition all named states.
private void repartitionSplitState( Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToDistributeState, int newParallelism, List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) { int startParallelOp = 0; // Iterate all named states and repartition one named state at a time per iteration for (Map.Entry<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> e : nameToDistributeState.entrySet()) { List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>> current = e.getValue(); // Determine actual number of partitions for this named state int totalPartitions = 0; for (Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> offsets : current) { totalPartitions += offsets.f1.getOffsets().length; } // Repartition the state across the parallel operator instances int lstIdx = 0; int offsetIdx = 0; int baseFraction = totalPartitions / newParallelism; int remainder = totalPartitions % newParallelism; int newStartParallelOp = startParallelOp; for (int i = 0; i < newParallelism; ++i) { // Preparation: calculate the actual index considering wrap around int parallelOpIdx = (i + startParallelOp) % newParallelism; // Now calculate the number of partitions we will assign to the parallel instance in this round ... int numberOfPartitionsToAssign = baseFraction; // ... and distribute odd partitions while we still have some, one at a time if (remainder > 0) { ++numberOfPartitionsToAssign; --remainder; } else if (remainder == 0) { // We are out of odd partitions now and begin our next redistribution round with the current // parallel operator to ensure fair load balance newStartParallelOp = parallelOpIdx; --remainder; } // Now start collection the partitions for the parallel instance into this list while (numberOfPartitionsToAssign > 0) { Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithOffsets = current.get(lstIdx); long[] offsets = handleWithOffsets.f1.getOffsets(); int remaining = offsets.length - offsetIdx; // Repartition offsets long[] offs; if (remaining > numberOfPartitionsToAssign) { offs = Arrays.copyOfRange(offsets, offsetIdx, offsetIdx + numberOfPartitionsToAssign); offsetIdx += numberOfPartitionsToAssign; } else { if (OPTIMIZE_MEMORY_USE) { handleWithOffsets.f1 = null; // GC } offs = Arrays.copyOfRange(offsets, offsetIdx, offsets.length); offsetIdx = 0; ++lstIdx; } numberOfPartitionsToAssign -= remaining; // As a last step we merge partitions that use the same StreamStateHandle in a single // OperatorStateHandle Map<StreamStateHandle, OperatorStateHandle> mergeMap = mergeMapList.get(parallelOpIdx); OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithOffsets.f0); if (operatorStateHandle == null) { operatorStateHandle = new OperatorStreamStateHandle( new HashMap<>(nameToDistributeState.size()), handleWithOffsets.f0); mergeMap.put(handleWithOffsets.f0, operatorStateHandle); } operatorStateHandle.getStateNameToPartitionOffsets().put( e.getKey(), new OperatorStateHandle.StateMetaInfo(offs, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE)); } } startParallelOp = newStartParallelOp; e.setValue(null); } }
Repartition SPLIT_DISTRIBUTE state.
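The core of the SPLIT_DISTRIBUTE repartitioning above is the share calculation: each of the newParallelism instances gets totalPartitions / newParallelism partitions, and the first totalPartitions % newParallelism instances get one extra. Below is a minimal sketch of just that arithmetic; the rotation of the starting operator across named states and the handle merging are omitted, and the names are illustrative.

// Illustrative sketch of the partition-count arithmetic used above.
public class SplitDistributeCounts {
    public static int[] partitionCounts(int totalPartitions, int newParallelism) {
        int base = totalPartitions / newParallelism;
        int remainder = totalPartitions % newParallelism;
        int[] counts = new int[newParallelism];
        for (int i = 0; i < newParallelism; i++) {
            // the first `remainder` instances each take one extra partition
            counts[i] = base + (i < remainder ? 1 : 0);
        }
        return counts;
    }

    public static void main(String[] args) {
        // 10 partitions over 4 subtasks -> [3, 3, 2, 2]
        System.out.println(java.util.Arrays.toString(partitionCounts(10, 4)));
    }
}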
private void repartitionUnionState( Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> unionState, List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) { for (Map<StreamStateHandle, OperatorStateHandle> mergeMap : mergeMapList) { for (Map.Entry<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> e : unionState.entrySet()) { for (Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithMetaInfo : e.getValue()) { OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithMetaInfo.f0); if (operatorStateHandle == null) { operatorStateHandle = new OperatorStreamStateHandle( new HashMap<>(unionState.size()), handleWithMetaInfo.f0); mergeMap.put(handleWithMetaInfo.f0, operatorStateHandle); } operatorStateHandle.getStateNameToPartitionOffsets().put(e.getKey(), handleWithMetaInfo.f1); } } } }
Repartition UNION state.
private void repartitionBroadcastState( Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> broadcastState, List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) { int newParallelism = mergeMapList.size(); for (int i = 0; i < newParallelism; ++i) { final Map<StreamStateHandle, OperatorStateHandle> mergeMap = mergeMapList.get(i); // for each name, pick the i-th entry for (Map.Entry<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> e : broadcastState.entrySet()) { int previousParallelism = e.getValue().size(); Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithMetaInfo = e.getValue().get(i % previousParallelism); OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithMetaInfo.f0); if (operatorStateHandle == null) { operatorStateHandle = new OperatorStreamStateHandle( new HashMap<>(broadcastState.size()), handleWithMetaInfo.f0); mergeMap.put(handleWithMetaInfo.f0, operatorStateHandle); } operatorStateHandle.getStateNameToPartitionOffsets().put(e.getKey(), handleWithMetaInfo.f1); } } }
Repartition BROADCAST state.
public final TableSink<T> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) { final TableSinkBase<T> configuredSink = this.copy(); configuredSink.fieldNames = Optional.of(fieldNames); configuredSink.fieldTypes = Optional.of(fieldTypes); return configuredSink; }
Returns a copy of this {@link TableSink} configured with the field names and types of the table to emit. @param fieldNames The field names of the table to emit. @param fieldTypes The field types of the table to emit. @return A copy of this {@link TableSink} configured with the field names and types of the table to emit.
private int binarySearch(T record) { int low = 0; int high = this.boundaries.length - 1; typeComparator.extractKeys(record, keys, 0); while (low <= high) { final int mid = (low + high) >>> 1; final int result = compareKeys(flatComparators, keys, this.boundaries[mid]); if (result > 0) { low = mid + 1; } else if (result < 0) { high = mid - 1; } else { return mid; } } // key not found, but the low index is the target // bucket, since the boundaries are the upper bound return low; }
Searches for the range index (bucket) of the given input record.
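The same lookup can be sketched standalone for plain long keys: a binary search over the sorted upper-bound boundaries that returns the matching bucket on an exact hit and the insertion point (low) otherwise. Names below are illustrative.

// Standalone sketch of the range-index lookup above, for plain long keys:
// `boundaries` holds the (sorted) upper bound of each bucket.
public class RangeIndex {
    public static int bucketOf(long key, long[] boundaries) {
        int low = 0;
        int high = boundaries.length - 1;
        while (low <= high) {
            int mid = (low + high) >>> 1;
            if (key > boundaries[mid]) {
                low = mid + 1;
            } else if (key < boundaries[mid]) {
                high = mid - 1;
            } else {
                return mid;
            }
        }
        // key not found: the insertion point is the target bucket
        return low;
    }

    public static void main(String[] args) {
        long[] boundaries = {10, 20, 30};
        System.out.println(bucketOf(15, boundaries)); // 1
        System.out.println(bucketOf(35, boundaries)); // 3 (last bucket)
    }
}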
@Override public int serializeToPages( BinaryRow record, AbstractPagedOutputView headerLessView) throws IOException { checkArgument(headerLessView.getHeaderLength() == 0); int sizeInBytes = record.getSizeInBytes(); int skip = checkSkipWriteForFixLengthPart(headerLessView); if (record.getSegments().length == 1) { headerLessView.writeInt(sizeInBytes); headerLessView.write(record.getSegments()[0], record.getOffset(), sizeInBytes); } else { headerLessView.writeInt(record.getSizeInBytes()); serializeToPagesWithoutLength(record, headerLessView); } return skip; }
Page-related operations. Serializes the row to the paged output view; if the fixed-length part does not fit into the current page, advances to the next page first and returns the number of bytes skipped.
public void serializeToPagesWithoutLength( BinaryRow record, AbstractPagedOutputView out) throws IOException { int remainSize = record.getSizeInBytes(); int posInSegOfRecord = record.getOffset(); int segmentSize = record.getSegments()[0].size(); for (MemorySegment segOfRecord : record.getSegments()) { int nWrite = Math.min(segmentSize - posInSegOfRecord, remainSize); assert nWrite > 0; out.write(segOfRecord, posInSegOfRecord, nWrite); // next new segment. posInSegOfRecord = 0; remainSize -= nWrite; if (remainSize == 0) { break; } } checkArgument(remainSize == 0); }
Serializes the row to pages without writing the row length. The caller must make sure that the fixed-length part fits into the output's current segment; no skip check is done here.
public void copyFromPagesToView( AbstractPagedInputView source, DataOutputView target) throws IOException { checkSkipReadForFixLengthPart(source); int length = source.readInt(); target.writeInt(length); target.write(source, length); }
Copies a BinaryRow stored in a paged input view to an output view. @param source the paged input view where the binary row is stored @param target the target output view
public void pointTo(int length, BinaryRow reuse, AbstractPagedInputView headerLessView) throws IOException { checkArgument(headerLessView.getHeaderLength() == 0); if (length < 0) { throw new IOException(String.format( "Read unexpected bytes in source of positionInSegment[%d] and limitInSegment[%d]", headerLessView.getCurrentPositionInSegment(), headerLessView.getCurrentSegmentLimit() )); } int remainInSegment = headerLessView.getCurrentSegmentLimit() - headerLessView.getCurrentPositionInSegment(); MemorySegment currSeg = headerLessView.getCurrentSegment(); int currPosInSeg = headerLessView.getCurrentPositionInSegment(); if (remainInSegment >= length) { // all in one segment, that's good. reuse.pointTo(currSeg, currPosInSeg, length); headerLessView.skipBytesToRead(length); } else { pointToMultiSegments( reuse, headerLessView, length, length - remainInSegment, currSeg, currPosInSeg ); } }
Points the row to the memory segments at the current offset (in the AbstractPagedInputView) with the given length. @param length the row length @param reuse the BinaryRow object to reuse @param headerLessView the source memory segment container
private int checkSkipWriteForFixLengthPart(AbstractPagedOutputView out) throws IOException { // skip if there is no enough size. int available = out.getSegmentSize() - out.getCurrentPositionInSegment(); if (available < getSerializedRowFixedPartLength()) { out.advance(); return available; } return 0; }
We need to skip bytes when the remaining bytes of the current segment are not enough to write the binary row's fixed-length part. See {@link BinaryRow}.
public void checkSkipReadForFixLengthPart(AbstractPagedInputView source) throws IOException { // skip if there is no enough size. // Note: Use currentSegmentLimit instead of segmentSize. int available = source.getCurrentSegmentLimit() - source.getCurrentPositionInSegment(); if (available < getSerializedRowFixedPartLength()) { source.advance(); } }
We need to skip bytes when the remaining bytes of the current segment are not enough to read the binary row's fixed-length part. See {@link BinaryRow}.
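Both skip checks above boil down to the same arithmetic: if the space left in the current page is smaller than the fixed-length part of a row, advance to the next page. A tiny worked sketch with illustrative numbers and names:

// Worked sketch of the skip decision above: the fixed-length part of a row
// must never straddle a page boundary.
public class FixedPartSkip {
    static boolean mustAdvance(int segmentSize, int positionInSegment, int fixedPartLength) {
        int available = segmentSize - positionInSegment;
        return available < fixedPartLength;  // not enough room -> skip to next segment
    }

    public static void main(String[] args) {
        // 32 KiB page, 32_760 bytes already used, fixed part needs 16 bytes:
        System.out.println(mustAdvance(32 * 1024, 32_760, 16)); // true -> advance, 8 bytes skipped
        System.out.println(mustAdvance(32 * 1024, 1024, 16));   // false -> stay in place
    }
}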
public void setDriverKeyInfo(FieldList keys, int id) { this.setDriverKeyInfo(keys, getTrueArray(keys.size()), id); }
Sets the key field indexes for the specified driver comparator. @param keys The key field indexes for the specified driver comparator. @param id The ID of the driver comparator.
public void setDriverKeyInfo(FieldList keys, boolean[] sortOrder, int id) { if(id < 0 || id >= driverKeys.length) { throw new CompilerException("Invalid id for driver key information. DriverStrategy requires only " +super.getDriverStrategy().getNumRequiredComparators()+" comparators."); } this.driverKeys[id] = keys; this.driverSortOrders[id] = sortOrder; }
Sets the key field information for the specified driver comparator. @param keys The key field indexes for the specified driver comparator. @param sortOrder The key sort order for the specified driver comparator. @param id The ID of the driver comparator.
@Override public void accept(Visitor<PlanNode> visitor) { if (visitor.preVisit(this)) { this.input.getSource().accept(visitor); for (Channel broadcastInput : getBroadcastInputs()) { broadcastInput.getSource().accept(visitor); } visitor.postVisit(this); } }
Applies the given visitor: pre-visits this node, then recursively visits the regular input and all broadcast inputs, and finally post-visits this node.
protected static boolean[] getTrueArray(int length) { final boolean[] a = new boolean[length]; for (int i = 0; i < length; i++) { a[i] = true; } return a; }
Creates a boolean array of the given length with all entries set to {@code true}.
public static <OUT> SourceFunction.SourceContext<OUT> getSourceContext( TimeCharacteristic timeCharacteristic, ProcessingTimeService processingTimeService, Object checkpointLock, StreamStatusMaintainer streamStatusMaintainer, Output<StreamRecord<OUT>> output, long watermarkInterval, long idleTimeout) { final SourceFunction.SourceContext<OUT> ctx; switch (timeCharacteristic) { case EventTime: ctx = new ManualWatermarkContext<>( output, processingTimeService, checkpointLock, streamStatusMaintainer, idleTimeout); break; case IngestionTime: ctx = new AutomaticWatermarkContext<>( output, watermarkInterval, processingTimeService, checkpointLock, streamStatusMaintainer, idleTimeout); break; case ProcessingTime: ctx = new NonTimestampContext<>(checkpointLock, output); break; default: throw new IllegalArgumentException(String.valueOf(timeCharacteristic)); } return ctx; }
Depending on the {@link TimeCharacteristic}, this method will return the appropriate {@link org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext}. That is: <ul> <li>{@link TimeCharacteristic#IngestionTime} = {@code AutomaticWatermarkContext}</li> <li>{@link TimeCharacteristic#ProcessingTime} = {@code NonTimestampContext}</li> <li>{@link TimeCharacteristic#EventTime} = {@code ManualWatermarkContext}</li> </ul>
private void addContender(EmbeddedLeaderElectionService service, LeaderContender contender) { synchronized (lock) { checkState(!shutdown, "leader election service is shut down"); checkState(!service.running, "leader election service is already started"); try { if (!allLeaderContenders.add(service)) { throw new IllegalStateException("leader election service was added to this service multiple times"); } service.contender = contender; service.running = true; updateLeader().whenComplete((aVoid, throwable) -> { if (throwable != null) { fatalError(throwable); } }); } catch (Throwable t) { fatalError(t); } } }
Callback from leader contenders when they start their service.
private void removeContender(EmbeddedLeaderElectionService service) { synchronized (lock) { // if the service was not even started, simply do nothing if (!service.running || shutdown) { return; } try { if (!allLeaderContenders.remove(service)) { throw new IllegalStateException("leader election service does not belong to this service"); } // stop the service service.contender = null; service.running = false; service.isLeader = false; // if that was the current leader, unset its status if (currentLeaderConfirmed == service) { currentLeaderConfirmed = null; currentLeaderSessionId = null; currentLeaderAddress = null; } if (currentLeaderProposed == service) { currentLeaderProposed = null; currentLeaderSessionId = null; } updateLeader().whenComplete((aVoid, throwable) -> { if (throwable != null) { fatalError(throwable); } }); } catch (Throwable t) { fatalError(t); } } }
Callback from leader contenders when they stop their service.
private void confirmLeader(final EmbeddedLeaderElectionService service, final UUID leaderSessionId) { synchronized (lock) { // if the service was shut down in the meantime, ignore this confirmation if (!service.running || shutdown) { return; } try { // check if the confirmation is for the same grant, or whether it is a stale grant if (service == currentLeaderProposed && currentLeaderSessionId.equals(leaderSessionId)) { final String address = service.contender.getAddress(); LOG.info("Received confirmation of leadership for leader {} , session={}", address, leaderSessionId); // mark leadership currentLeaderConfirmed = service; currentLeaderAddress = address; currentLeaderProposed = null; // notify all listeners notifyAllListeners(address, leaderSessionId); } else { LOG.debug("Received confirmation of leadership for a stale leadership grant. Ignoring."); } } catch (Throwable t) { fatalError(t); } } }
Callback from leader contenders when they confirm a leader grant.
private static Collection<String> getAvailableMetrics(Collection<? extends MetricStore.ComponentMetricStore> stores) { Set<String> uniqueMetrics = new HashSet<>(32); for (MetricStore.ComponentMetricStore store : stores) { uniqueMetrics.addAll(store.metrics.keySet()); } return uniqueMetrics; }
Returns all metric names available in the given stores, i.e. the union of all per-store key sets. @param stores metrics @return all available metric names
private AggregatedMetricsResponseBody getAggregatedMetricValues( Collection<? extends MetricStore.ComponentMetricStore> stores, List<String> requestedMetrics, MetricAccumulatorFactory requestedAggregationsFactories) { Collection<AggregatedMetric> aggregatedMetrics = new ArrayList<>(requestedMetrics.size()); for (String requestedMetric : requestedMetrics) { final Collection<Double> values = new ArrayList<>(stores.size()); try { for (MetricStore.ComponentMetricStore store : stores) { String stringValue = store.metrics.get(requestedMetric); if (stringValue != null) { values.add(Double.valueOf(stringValue)); } } } catch (NumberFormatException nfe) { log.warn("The metric {} is not numeric and can't be aggregated.", requestedMetric, nfe); // metric is not numeric so we can't perform aggregations => ignore it continue; } if (!values.isEmpty()) { Iterator<Double> valuesIterator = values.iterator(); MetricAccumulator acc = requestedAggregationsFactories.get(requestedMetric, valuesIterator.next()); valuesIterator.forEachRemaining(acc::add); aggregatedMetrics.add(acc.get()); } else { return new AggregatedMetricsResponseBody(Collections.emptyList()); } } return new AggregatedMetricsResponseBody(aggregatedMetrics); }
Extracts and aggregates all requested metrics from the given metric stores and returns them as an {@link AggregatedMetricsResponseBody}. @param stores available metrics @param requestedMetrics ids of requested metrics @param requestedAggregationsFactories requested aggregations @return the aggregated metrics
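The aggregation step can be reduced to a sketch that uses plain maps instead of the metric store and a fixed min/max/avg/sum result instead of the pluggable accumulators; all names below are illustrative.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

// Reduced sketch of the aggregation logic above: look the metric up in every
// component store, parse it as a double, and fold the values; non-numeric
// metrics cannot be aggregated.
public class MetricAggregation {
    public static double[] aggregate(String metric, Collection<Map<String, String>> stores) {
        List<Double> values = new ArrayList<>();
        for (Map<String, String> store : stores) {
            String raw = store.get(metric);
            if (raw == null) {
                continue;                       // metric not reported by this component
            }
            try {
                values.add(Double.valueOf(raw));
            } catch (NumberFormatException nfe) {
                return null;                    // non-numeric metric: skip aggregation
            }
        }
        if (values.isEmpty()) {
            return null;
        }
        double min = Double.MAX_VALUE, max = -Double.MAX_VALUE, sum = 0;
        for (double v : values) {
            min = Math.min(min, v);
            max = Math.max(max, v);
            sum += v;
        }
        return new double[] {min, max, sum / values.size(), sum};  // min, max, avg, sum
    }
}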
public CompletableFuture<Void> deregisterApplicationAndClose( final ApplicationStatus applicationStatus, final @Nullable String diagnostics) { if (isRunning.compareAndSet(true, false)) { final CompletableFuture<Void> closeWebMonitorAndDeregisterAppFuture = FutureUtils.composeAfterwards(webMonitorEndpoint.closeAsync(), () -> deregisterApplication(applicationStatus, diagnostics)); return FutureUtils.composeAfterwards(closeWebMonitorAndDeregisterAppFuture, this::closeAsyncInternal); } else { return terminationFuture; } }
Deregisters the Flink application from the resource management system by signalling the {@link ResourceManager}. @param applicationStatus to terminate the application with @param diagnostics additional information about the shut down, can be {@code null} @return Future which is completed once the shut down has completed
@Override public T copy(T from) { if (CONCURRENT_ACCESS_CHECK) { enterExclusiveThread(); } try { checkAvroInitialized(); return avroData.deepCopy(runtimeSchema, from); } finally { if (CONCURRENT_ACCESS_CHECK) { exitExclusiveThread(); } } }
Creates a deep copy of the given record via Avro, optionally guarding against concurrent access when the check is enabled.
@Override public TypeSerializerSnapshot<T> snapshotConfiguration() { if (configSnapshot == null) { checkAvroInitialized(); configSnapshot = new AvroSerializerSnapshot<>(runtimeSchema, type); } return configSnapshot; }
Creates and caches a snapshot of this serializer's configuration.
private void enterExclusiveThread() { // we use simple get, check, set here, rather than CAS // we don't need lock-style correctness, this is only a sanity-check and we thus // favor speed at the cost of some false negatives in this check Thread previous = currentThread; Thread thisThread = Thread.currentThread(); if (previous == null) { currentThread = thisThread; } else if (previous != thisThread) { throw new IllegalStateException( "Concurrent access to KryoSerializer. Thread 1: " + thisThread.getName() + " , Thread 2: " + previous.getName()); } }
Sanity check that this serializer is accessed by only one thread at a time; uses a plain get-check-set rather than CAS, trading occasional false negatives for speed.
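A self-contained sketch of that get-check-set guard, detached from the serializer; the class and method names are illustrative.

// Self-contained sketch of the guard above: cheap, best-effort, and
// intentionally not a real lock (false negatives are acceptable for a debug check).
public class SingleThreadGuard {
    private Thread currentThread;   // deliberately non-volatile: speed over strictness

    public void enter() {
        Thread previous = currentThread;
        Thread thisThread = Thread.currentThread();
        if (previous == null) {
            currentThread = thisThread;
        } else if (previous != thisThread) {
            throw new IllegalStateException(
                "Concurrent access detected. Thread 1: " + thisThread.getName()
                    + ", Thread 2: " + previous.getName());
        }
    }

    public void exit() {
        currentThread = null;
    }
}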
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { /* Please see FLINK-11436 for details on why manual deserialization is required. During the release of Flink 1.7, the value of serialVersionUID was uptick to 2L (was 1L before) And although the AvroSerializer (along with it's snapshot class) were migrated to the new serialization abstraction (hence free from Java serialization), there were composite serializers that were not migrated and were serialized with Java serialization. In case that one of the nested serializers were Avro we would bump into deserialization exception due to a wrong serialVersionUID. Unfortunately it is not possible to revert the serialVersionUID back to 1L, because users might have snapshots with 2L present already. To overcome this we first need to make sure that the AvroSerializer is being Java deserialized with FailureTolerantObjectInputStream, and then we determine the serialized layout by looking at the fields. From: https://docs.oracle.com/javase/8/docs/platform/serialization/spec/class.html#a5421 ------------------------------------------------------------------------------------------------------------- The descriptors for primitive typed fields are written first sorted by field name followed by descriptors for the object typed fields sorted by field name. The names are sorted using String.compareTo. ------------------------------------------------------------------------------------------------------------- pre 1.6 field order: [type] pre 1.7 field order: [schemaString, type] post 1.7 field order: [previousSchema, schema, type] We would use the first field to distinguish between the three different layouts. To complicate things even further in pre 1.7, the field @schemaString could be null or a string, but, in post 1.7, the field @previousSchema was never set to null, therefore we can use the first field to determine the version. this logic should stay here as long as we support Flink 1.6 (along with Java serialized TypeSerializers) */ final Object firstField = in.readObject(); if (firstField == null) { // first field can only be NULL in 1.6 (schemaString) read16Layout(null, in); } else if (firstField instanceof String) { // first field is a String only in 1.6 (schemaString) read16Layout((String) firstField, in); } else if (firstField instanceof Class<?>) { // first field is a Class<?> only in 1.5 (type) @SuppressWarnings("unchecked") Class<T> type = (Class<T>) firstField; read15Layout(type); } else if (firstField instanceof SerializableAvroSchema) { readCurrentLayout((SerializableAvroSchema) firstField, in); } else { throw new IllegalStateException("Failed to Java-Deserialize an AvroSerializer instance. " + "Was expecting a first field to be either a String or SerializableAvroSchema, but got: " + "" + firstField.getClass()); } }
Backwards compatibility with Flink 1.5 / 1.6: custom Java deserialization that inspects the first serialized field to determine which legacy layout was written (see FLINK-11436).
private static void setDeserializer(Properties props) { final String deSerName = ByteArrayDeserializer.class.getName(); Object keyDeSer = props.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); Object valDeSer = props.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); if (keyDeSer != null && !keyDeSer.equals(deSerName)) { LOG.warn("Ignoring configured key DeSerializer ({})", ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); } if (valDeSer != null && !valDeSer.equals(deSerName)) { LOG.warn("Ignoring configured value DeSerializer ({})", ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); } props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deSerName); props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deSerName); }
Makes sure that the ByteArrayDeserializer is registered in the Kafka properties. @param props The Kafka properties to register the deserializer in.
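A simplified usage sketch of the effect on a plain Properties object (the warning for overridden user settings is omitted). The ConsumerConfig and ByteArrayDeserializer names are from the Kafka client API; com.example.MyKeyDeserializer is a made-up placeholder.

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Whatever deserializers the user configured are overridden with
// ByteArrayDeserializer, because the connector itself turns raw bytes into records.
public class DeserializerDefaults {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "com.example.MyKeyDeserializer"); // will be replaced

        setDeserializer(props);  // simplified copy of the method shown above

        System.out.println(props.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
        // -> org.apache.kafka.common.serialization.ByteArrayDeserializer
    }

    private static void setDeserializer(Properties props) {
        String deSerName = ByteArrayDeserializer.class.getName();
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deSerName);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deSerName);
    }
}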
public void persist() throws Exception { if (!mapping.equals(initialMapping)) { state.clear(); for (Map.Entry<W, W> window : mapping.entrySet()) { state.add(new Tuple2<>(window.getKey(), window.getValue())); } } }
Persists the updated mapping to the given state if the mapping changed since initialization.
public void retireWindow(W window) { W removed = this.mapping.remove(window); if (removed == null) { throw new IllegalStateException("Window " + window + " is not in in-flight window set."); } }
Removes the given window from the set of in-flight windows. @param window The {@code Window} to remove.
public W addWindow(W newWindow, MergeFunction<W> mergeFunction) throws Exception { List<W> windows = new ArrayList<>(); windows.addAll(this.mapping.keySet()); windows.add(newWindow); final Map<W, Collection<W>> mergeResults = new HashMap<>(); windowAssigner.mergeWindows(windows, new MergingWindowAssigner.MergeCallback<W>() { @Override public void merge(Collection<W> toBeMerged, W mergeResult) { if (LOG.isDebugEnabled()) { LOG.debug("Merging {} into {}", toBeMerged, mergeResult); } mergeResults.put(mergeResult, toBeMerged); } }); W resultWindow = newWindow; boolean mergedNewWindow = false; // perform the merge for (Map.Entry<W, Collection<W>> c: mergeResults.entrySet()) { W mergeResult = c.getKey(); Collection<W> mergedWindows = c.getValue(); // if our new window is in the merged windows make the merge result the // result window if (mergedWindows.remove(newWindow)) { mergedNewWindow = true; resultWindow = mergeResult; } // pick any of the merged windows and choose that window's state window // as the state window for the merge result W mergedStateWindow = this.mapping.get(mergedWindows.iterator().next()); // figure out the state windows that we are merging List<W> mergedStateWindows = new ArrayList<>(); for (W mergedWindow: mergedWindows) { W res = this.mapping.remove(mergedWindow); if (res != null) { mergedStateWindows.add(res); } } this.mapping.put(mergeResult, mergedStateWindow); // don't put the target state window into the merged windows mergedStateWindows.remove(mergedStateWindow); // don't merge the new window itself, it never had any state associated with it // i.e. if we are only merging one pre-existing window into itself // without extending the pre-existing window if (!(mergedWindows.contains(mergeResult) && mergedWindows.size() == 1)) { mergeFunction.merge(mergeResult, mergedWindows, this.mapping.get(mergeResult), mergedStateWindows); } } // the new window created a new, self-contained window without merging if (mergeResults.isEmpty() || (resultWindow.equals(newWindow) && !mergedNewWindow)) { this.mapping.put(resultWindow, resultWindow); } return resultWindow; }
Adds a new {@code Window} to the set of in-flight windows. It might happen that this triggers merging of previously in-flight windows. In that case, the provided {@link MergeFunction} is called. <p>This returns the window that is the representative of the added window after adding. This can either be the new window itself, if no merge occurred, or the newly merged window. Adding an element to a window or calling trigger functions should only happen on the returned representative. This way, we never have to deal with a new window that is immediately swallowed up by another window. <p>If the new window is merged, the {@code MergeFunction} callback arguments also don't contain the new window as part of the list of merged windows. @param newWindow The new {@code Window} to add. @param mergeFunction The callback to be invoked in case a merge occurs. @return The {@code Window} that the new {@code Window} ended up in. This can also be the new {@code Window} itself in case no merge occurred. @throws Exception
@Override public DataSet<Result<K>> runInternal(Graph<K, VV, EV> input) throws Exception { // u, v where u < v DataSet<Tuple2<K, K>> filteredByID = input .getEdges() .flatMap(new FilterByID<>()) .setParallelism(parallelism) .name("Filter by ID"); // u, v, (edge value, deg(u), deg(v)) DataSet<Edge<K, Tuple3<EV, LongValue, LongValue>>> pairDegree = input .run(new EdgeDegreePair<K, VV, EV>() .setParallelism(parallelism)); // u, v where deg(u) < deg(v) or (deg(u) == deg(v) and u < v) DataSet<Tuple2<K, K>> filteredByDegree = pairDegree .flatMap(new FilterByDegree<>()) .setParallelism(parallelism) .name("Filter by degree"); // u, v, w where (u, v) and (u, w) are edges in graph, v < w DataSet<Tuple3<K, K, K>> triplets = filteredByDegree .groupBy(0) .sortGroup(1, Order.ASCENDING) .reduceGroup(new GenerateTriplets<>()) .name("Generate triplets"); // u, v, w where (u, v), (u, w), and (v, w) are edges in graph, v < w DataSet<Result<K>> triangles = triplets .join(filteredByID, JoinOperatorBase.JoinHint.REPARTITION_HASH_SECOND) .where(1, 2) .equalTo(0, 1) .with(new ProjectTriangles<>()) .name("Triangle listing"); if (permuteResults) { triangles = triangles .flatMap(new PermuteResult<>()) .name("Permute triangle vertices"); } else if (sortTriangleVertices.get()) { triangles = triangles .map(new SortTriangleVertices<>()) .name("Sort triangle vertices"); } return triangles; }
Implementation notes: The requirement that "K extends CopyableValue<K>" can be removed when Flink has a self-join and GenerateTriplets is implemented as such. ProjectTriangles should eventually be replaced by ".projectFirst("*")" when projections use code generation.
public final Object accessField(Field field, Object object) { try { object = field.get(object); } catch (NullPointerException npex) { throw new NullKeyFieldException("Unable to access field " + field + " on object " + object); } catch (IllegalAccessException iaex) { throw new RuntimeException("This should not happen since we call setAccessible(true) in PojoTypeInfo." + " fields: " + field + " obj: " + object); } return object; }
Handles the NullPointerException and IllegalAccessException that Field.get() can throw while extracting the key field from the given object.
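A self-contained illustration of the reflective access pattern above (the Pojo class and field here are hypothetical, used only for the example):

import java.lang.reflect.Field;

public class FieldAccessExample {
    static class Pojo {
        private int id = 7;
    }

    public static void main(String[] args) throws Exception {
        Field idField = Pojo.class.getDeclaredField("id");
        // without setAccessible(true), Field.get on a private field throws IllegalAccessException
        idField.setAccessible(true);
        Object value = idField.get(new Pojo());
        System.out.println(value); // prints 7

        // Field.get(null) on an instance field throws NullPointerException,
        // which the method above maps to a NullKeyFieldException
    }
}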
public org.apache.hadoop.conf.Configuration getOrLoadHadoopConfig() { org.apache.hadoop.conf.Configuration hadoopConfig = this.hadoopConfig; if (hadoopConfig == null) { if (flinkConfig != null) { hadoopConfig = mirrorCertainHadoopConfig(loadHadoopConfigFromFlink()); } else { LOG.warn("Flink configuration is not set prior to loading this configuration." + " Cannot forward configuration keys from Flink configuration."); hadoopConfig = new org.apache.hadoop.conf.Configuration(); } } this.hadoopConfig = hadoopConfig; return hadoopConfig; }
Gets the loaded Hadoop configuration, lazily building it from the Flink configuration if necessary (or falling back to a plain Hadoop configuration loaded from the classpath when no Flink configuration is set) and caching the result.
private org.apache.hadoop.conf.Configuration loadHadoopConfigFromFlink() { org.apache.hadoop.conf.Configuration hadoopConfig = new org.apache.hadoop.conf.Configuration(); for (String key : flinkConfig.keySet()) { for (String prefix : flinkConfigPrefixes) { if (key.startsWith(prefix)) { String newKey = hadoopConfigPrefix + key.substring(prefix.length()); String newValue = fixHadoopConfig(key, flinkConfig.getString(key, null)); hadoopConfig.set(newKey, newValue); LOG.debug("Adding Flink config entry for {} as {} to Hadoop config", key, newKey); } } } return hadoopConfig; }
Adds additional config entries from the Flink configuration to the Hadoop configuration by translating recognized Flink key prefixes into the corresponding Hadoop key prefix.
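To make the translation concrete, a small sketch of how one key would be forwarded; the prefixes and key names below are hypothetical, not the connector's real configuration keys:

String flinkConfigPrefix = "example.fs.";   // hypothetical Flink-side prefix
String hadoopConfigPrefix = "fs.example.";  // hypothetical Hadoop-side prefix

String flinkKey = "example.fs.access.key";
if (flinkKey.startsWith(flinkConfigPrefix)) {
    String hadoopKey = hadoopConfigPrefix + flinkKey.substring(flinkConfigPrefix.length());
    // hadoopKey is now "fs.example.access.key" and would be set on the Hadoop configuration
}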
private org.apache.hadoop.conf.Configuration mirrorCertainHadoopConfig( org.apache.hadoop.conf.Configuration hadoopConfig) { for (String[] mirrored : mirroredConfigKeys) { String value = hadoopConfig.get(mirrored[0], null); if (value != null) { hadoopConfig.set(mirrored[1], value); } } return hadoopConfig; }
Mirrors certain Hadoop configuration values to additional keys, since some libraries expect the same setting under different key names.
private static void safelyTruncateFile( final FileSystem fileSystem, final Path path, final HadoopFsRecoverable recoverable) throws IOException { ensureTruncateInitialized(); waitUntilLeaseIsRevoked(fileSystem, path); // truncate back and append boolean truncated; try { truncated = truncate(fileSystem, path, recoverable.offset()); } catch (Exception e) { throw new IOException("Problem while truncating file: " + path, e); } if (!truncated) { // Truncate did not complete immediately, we must wait for // the operation to complete and release the lease. waitUntilLeaseIsRevoked(fileSystem, path); } }
Safely truncates the file at the given path back to the offset recorded in the recoverable: ensures truncation support is initialized, waits until the file's lease is revoked, truncates, and, if the truncate does not complete immediately, waits again until the lease is released.
private static boolean waitUntilLeaseIsRevoked(final FileSystem fs, final Path path) throws IOException { Preconditions.checkState(fs instanceof DistributedFileSystem); final DistributedFileSystem dfs = (DistributedFileSystem) fs; dfs.recoverLease(path); final Deadline deadline = Deadline.now().plus(Duration.ofMillis(LEASE_TIMEOUT)); final StopWatch sw = new StopWatch(); sw.start(); boolean isClosed = dfs.isFileClosed(path); while (!isClosed && deadline.hasTimeLeft()) { try { Thread.sleep(500L); } catch (InterruptedException e1) { throw new IOException("Recovering the lease failed: ", e1); } isClosed = dfs.isFileClosed(path); } return isClosed; }
Called when resuming execution after a failure and waits until the lease of the file we are resuming is free. <p>The lease of the file we are resuming writing/committing to may still belong to the process that failed previously and whose state we are recovering. @param fs The file system the file belongs to (must be a DistributedFileSystem). @param path The path to the file we want to resume writing to. @return {@code true} if the lease was revoked and the file is closed within the timeout, {@code false} otherwise.
public static KvStateClientProxy createKvStateClientProxy( final InetAddress address, final Iterator<Integer> ports, final int eventLoopThreads, final int queryThreads, final KvStateRequestStats stats) { Preconditions.checkNotNull(address, "address"); Preconditions.checkNotNull(stats, "stats"); Preconditions.checkArgument(eventLoopThreads >= 1); Preconditions.checkArgument(queryThreads >= 1); try { String classname = "org.apache.flink.queryablestate.client.proxy.KvStateClientProxyImpl"; Class<? extends KvStateClientProxy> clazz = Class.forName(classname).asSubclass(KvStateClientProxy.class); Constructor<? extends KvStateClientProxy> constructor = clazz.getConstructor( InetAddress.class, Iterator.class, Integer.class, Integer.class, KvStateRequestStats.class); return constructor.newInstance(address, ports, eventLoopThreads, queryThreads, stats); } catch (ClassNotFoundException e) { final String msg = "Could not load Queryable State Client Proxy. " + ERROR_MESSAGE_ON_LOAD_FAILURE; if (LOG.isDebugEnabled()) { LOG.debug(msg + " Cause: " + e.getMessage()); } else { LOG.info(msg); } return null; } catch (InvocationTargetException e) { LOG.error("Queryable State Client Proxy could not be created: ", e.getTargetException()); return null; } catch (Throwable t) { LOG.error("Failed to instantiate the Queryable State Client Proxy.", t); return null; } }
Initializes the {@link KvStateClientProxy client proxy} responsible for receiving requests from the external (to the cluster) client and forwarding them internally. @param address the address to bind to. @param ports the range of ports the proxy will attempt to listen to (see {@link org.apache.flink.configuration.QueryableStateOptions#PROXY_PORT_RANGE QueryableStateOptions.PROXY_PORT_RANGE}). @param eventLoopThreads the number of threads to be used to process incoming requests. @param queryThreads the number of threads to be used to send the actual state. @param stats statistics to be gathered about the incoming requests. @return the {@link KvStateClientProxy client proxy}.
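A hedged construction sketch: it assumes this factory is exposed through a utility class (called QueryableStateUtils here) and that a no-op stats implementation named DisabledKvStateRequestStats is available — both names are assumptions, not confirmed by the snippet above:

InetAddress address = InetAddress.getLoopbackAddress();
Iterator<Integer> ports = Collections.singletonList(9069).iterator(); // single port to try

KvStateClientProxy proxy = QueryableStateUtils.createKvStateClientProxy(
    address,
    ports,
    1,                                  // event loop threads
    1,                                  // query threads
    new DisabledKvStateRequestStats()); // assumed no-op stats implementation

if (proxy == null) {
    // the queryable state classes were not on the classpath; queryable state stays disabled
}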
@Override public JobExecutionResult execute(String jobName) throws Exception { if (executor == null) { startNewSession(); } Plan p = createProgramPlan(jobName); // Session management is disabled, revert this commit to enable //p.setJobId(jobID); //p.setSessionTimeout(sessionTimeout); JobExecutionResult result = executor.executePlan(p); this.lastJobExecutionResult = result; return result; }
Creates the program plan for the given job name and executes it on the session's executor, starting a new session if none exists, and remembers the result as the last job execution result.
@Override public void onStart() throws Exception { try { startTaskExecutorServices(); } catch (Exception e) { final TaskManagerException exception = new TaskManagerException(String.format("Could not start the TaskExecutor %s", getAddress()), e); onFatalError(exception); throw exception; } startRegistrationTimeout(); }
Starts the TaskExecutor services and the registration timeout. A failure while starting the services is escalated as a fatal error.
@Override public CompletableFuture<Void> onStop() { log.info("Stopping TaskExecutor {}.", getAddress()); Throwable throwable = null; if (resourceManagerConnection != null) { resourceManagerConnection.close(); } for (JobManagerConnection jobManagerConnection : jobManagerConnections.values()) { try { disassociateFromJobManager(jobManagerConnection, new FlinkException("The TaskExecutor is shutting down.")); } catch (Throwable t) { throwable = ExceptionUtils.firstOrSuppressed(t, throwable); } } jobManagerHeartbeatManager.stop(); resourceManagerHeartbeatManager.stop(); try { stopTaskExecutorServices(); } catch (Exception e) { throwable = ExceptionUtils.firstOrSuppressed(e, throwable); } if (throwable != null) { return FutureUtils.completedExceptionally(new FlinkException("Error while shutting the TaskExecutor down.", throwable)); } else { log.info("Stopped TaskExecutor {}.", getAddress()); return CompletableFuture.completedFuture(null); } }
Called to shut down the TaskManager. The method closes all TaskManager services.
@Override public CompletableFuture<StackTraceSampleResponse> requestStackTraceSample( final ExecutionAttemptID executionAttemptId, final int sampleId, final int numSamples, final Time delayBetweenSamples, final int maxStackTraceDepth, final Time timeout) { final Task task = taskSlotTable.getTask(executionAttemptId); if (task == null) { return FutureUtils.completedExceptionally( new IllegalStateException(String.format("Cannot sample task %s. " + "Task is not known to the task manager.", executionAttemptId))); } final CompletableFuture<List<StackTraceElement[]>> stackTracesFuture = stackTraceSampleService.requestStackTraceSample( TaskStackTraceSampleableTaskAdapter.fromTask(task), numSamples, delayBetweenSamples, maxStackTraceDepth); return stackTracesFuture.thenApply(stackTraces -> new StackTraceSampleResponse(sampleId, executionAttemptId, stackTraces)); }
Requests a stack trace sample from the task with the given execution attempt ID. The returned future completes with the sampled stack traces, or exceptionally if the task is not known to the task manager.
@Override public CompletableFuture<Acknowledge> submitTask( TaskDeploymentDescriptor tdd, JobMasterId jobMasterId, Time timeout) { try { final JobID jobId = tdd.getJobId(); final JobManagerConnection jobManagerConnection = jobManagerTable.get(jobId); if (jobManagerConnection == null) { final String message = "Could not submit task because there is no JobManager " + "associated for the job " + jobId + '.'; log.debug(message); throw new TaskSubmissionException(message); } if (!Objects.equals(jobManagerConnection.getJobMasterId(), jobMasterId)) { final String message = "Rejecting the task submission because the job manager leader id " + jobMasterId + " does not match the expected job manager leader id " + jobManagerConnection.getJobMasterId() + '.'; log.debug(message); throw new TaskSubmissionException(message); } if (!taskSlotTable.tryMarkSlotActive(jobId, tdd.getAllocationId())) { final String message = "No task slot allocated for job ID " + jobId + " and allocation ID " + tdd.getAllocationId() + '.'; log.debug(message); throw new TaskSubmissionException(message); } // re-integrate offloaded data: try { tdd.loadBigData(blobCacheService.getPermanentBlobService()); } catch (IOException | ClassNotFoundException e) { throw new TaskSubmissionException("Could not re-integrate offloaded TaskDeploymentDescriptor data.", e); } // deserialize the pre-serialized information final JobInformation jobInformation; final TaskInformation taskInformation; try { jobInformation = tdd.getSerializedJobInformation().deserializeValue(getClass().getClassLoader()); taskInformation = tdd.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader()); } catch (IOException | ClassNotFoundException e) { throw new TaskSubmissionException("Could not deserialize the job or task information.", e); } if (!jobId.equals(jobInformation.getJobId())) { throw new TaskSubmissionException( "Inconsistent job ID information inside TaskDeploymentDescriptor (" + tdd.getJobId() + " vs. 
" + jobInformation.getJobId() + ")"); } TaskMetricGroup taskMetricGroup = taskManagerMetricGroup.addTaskForJob( jobInformation.getJobId(), jobInformation.getJobName(), taskInformation.getJobVertexId(), tdd.getExecutionAttemptId(), taskInformation.getTaskName(), tdd.getSubtaskIndex(), tdd.getAttemptNumber()); InputSplitProvider inputSplitProvider = new RpcInputSplitProvider( jobManagerConnection.getJobManagerGateway(), taskInformation.getJobVertexId(), tdd.getExecutionAttemptId(), taskManagerConfiguration.getTimeout()); TaskManagerActions taskManagerActions = jobManagerConnection.getTaskManagerActions(); CheckpointResponder checkpointResponder = jobManagerConnection.getCheckpointResponder(); GlobalAggregateManager aggregateManager = jobManagerConnection.getGlobalAggregateManager(); LibraryCacheManager libraryCache = jobManagerConnection.getLibraryCacheManager(); ResultPartitionConsumableNotifier resultPartitionConsumableNotifier = jobManagerConnection.getResultPartitionConsumableNotifier(); PartitionProducerStateChecker partitionStateChecker = jobManagerConnection.getPartitionStateChecker(); final TaskLocalStateStore localStateStore = localStateStoresManager.localStateStoreForSubtask( jobId, tdd.getAllocationId(), taskInformation.getJobVertexId(), tdd.getSubtaskIndex()); final JobManagerTaskRestore taskRestore = tdd.getTaskRestore(); final TaskStateManager taskStateManager = new TaskStateManagerImpl( jobId, tdd.getExecutionAttemptId(), localStateStore, taskRestore, checkpointResponder); Task task = new Task( jobInformation, taskInformation, tdd.getExecutionAttemptId(), tdd.getAllocationId(), tdd.getSubtaskIndex(), tdd.getAttemptNumber(), tdd.getProducedPartitions(), tdd.getInputGates(), tdd.getTargetSlotNumber(), taskExecutorServices.getMemoryManager(), taskExecutorServices.getIOManager(), taskExecutorServices.getNetworkEnvironment(), taskExecutorServices.getKvStateService(), taskExecutorServices.getBroadcastVariableManager(), taskExecutorServices.getTaskEventDispatcher(), taskStateManager, taskManagerActions, inputSplitProvider, checkpointResponder, aggregateManager, blobCacheService, libraryCache, fileCache, taskManagerConfiguration, taskMetricGroup, resultPartitionConsumableNotifier, partitionStateChecker, getRpcService().getExecutor()); log.info("Received task {}.", task.getTaskInfo().getTaskNameWithSubtasks()); boolean taskAdded; try { taskAdded = taskSlotTable.addTask(task); } catch (SlotNotFoundException | SlotNotActiveException e) { throw new TaskSubmissionException("Could not submit task.", e); } if (taskAdded) { task.startTaskThread(); return CompletableFuture.completedFuture(Acknowledge.get()); } else { final String message = "TaskManager already contains a task for id " + task.getExecutionId() + '.'; log.debug(message); throw new TaskSubmissionException(message); } } catch (TaskSubmissionException e) { return FutureUtils.completedExceptionally(e); } }
Submits a task for execution: validates the JobManager leader ID and the allocated slot, re-integrates offloaded data, deserializes the job and task information, creates the Task, adds it to the slot table, and starts the task thread.
@Override public CompletableFuture<Acknowledge> updatePartitions( final ExecutionAttemptID executionAttemptID, Iterable<PartitionInfo> partitionInfos, Time timeout) { final Task task = taskSlotTable.getTask(executionAttemptID); if (task != null) { for (final PartitionInfo partitionInfo: partitionInfos) { IntermediateDataSetID intermediateResultPartitionID = partitionInfo.getIntermediateDataSetID(); final SingleInputGate singleInputGate = task.getInputGateById(intermediateResultPartitionID); if (singleInputGate != null) { // Run asynchronously because it might be blocking getRpcService().execute( () -> { try { singleInputGate.updateInputChannel(partitionInfo.getInputChannelDeploymentDescriptor()); } catch (IOException | InterruptedException e) { log.error("Could not update input data location for task {}. Trying to fail task.", task.getTaskInfo().getTaskName(), e); try { task.failExternally(e); } catch (RuntimeException re) { // TODO: Check whether we need this or make exception in failExtenally checked log.error("Failed canceling task with execution ID {} after task update failure.", executionAttemptID, re); } } }); } else { return FutureUtils.completedExceptionally( new PartitionException("No reader with ID " + intermediateResultPartitionID + " for task " + executionAttemptID + " was found.")); } } return CompletableFuture.completedFuture(Acknowledge.get()); } else { log.debug("Discard update for input partitions of task {}. Task is no longer running.", executionAttemptID); return CompletableFuture.completedFuture(Acknowledge.get()); } }
Updates the input channels of the given task with the provided partition infos. Each update is executed asynchronously because it might block; failures lead to the task being failed externally.
@Override public CompletableFuture<Acknowledge> triggerCheckpoint( ExecutionAttemptID executionAttemptID, long checkpointId, long checkpointTimestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) { log.debug("Trigger checkpoint {}@{} for {}.", checkpointId, checkpointTimestamp, executionAttemptID); final CheckpointType checkpointType = checkpointOptions.getCheckpointType(); if (advanceToEndOfEventTime && !(checkpointType.isSynchronous() && checkpointType.isSavepoint())) { throw new IllegalArgumentException("Only synchronous savepoints are allowed to advance the watermark to MAX."); } final Task task = taskSlotTable.getTask(executionAttemptID); if (task != null) { task.triggerCheckpointBarrier(checkpointId, checkpointTimestamp, checkpointOptions, advanceToEndOfEventTime); return CompletableFuture.completedFuture(Acknowledge.get()); } else { final String message = "TaskManager received a checkpoint request for unknown task " + executionAttemptID + '.'; log.debug(message); return FutureUtils.completedExceptionally(new CheckpointException(message)); } }
Triggers a checkpoint barrier for the task with the given execution attempt ID. Advancing the watermark to MAX is only allowed for synchronous savepoints.
@Override public CompletableFuture<Acknowledge> requestSlot( final SlotID slotId, final JobID jobId, final AllocationID allocationId, final String targetAddress, final ResourceManagerId resourceManagerId, final Time timeout) { // TODO: Filter invalid requests from the resource manager by using the instance/registration Id log.info("Receive slot request {} for job {} from resource manager with leader id {}.", allocationId, jobId, resourceManagerId); try { if (!isConnectedToResourceManager(resourceManagerId)) { final String message = String.format("TaskManager is not connected to the resource manager %s.", resourceManagerId); log.debug(message); throw new TaskManagerException(message); } if (taskSlotTable.isSlotFree(slotId.getSlotNumber())) { if (taskSlotTable.allocateSlot(slotId.getSlotNumber(), jobId, allocationId, taskManagerConfiguration.getTimeout())) { log.info("Allocated slot for {}.", allocationId); } else { log.info("Could not allocate slot for {}.", allocationId); throw new SlotAllocationException("Could not allocate slot."); } } else if (!taskSlotTable.isAllocated(slotId.getSlotNumber(), jobId, allocationId)) { final String message = "The slot " + slotId + " has already been allocated for a different job."; log.info(message); final AllocationID allocationID = taskSlotTable.getCurrentAllocation(slotId.getSlotNumber()); throw new SlotOccupiedException(message, allocationID, taskSlotTable.getOwningJob(allocationID)); } if (jobManagerTable.contains(jobId)) { offerSlotsToJobManager(jobId); } else { try { jobLeaderService.addJob(jobId, targetAddress); } catch (Exception e) { // free the allocated slot try { taskSlotTable.freeSlot(allocationId); } catch (SlotNotFoundException slotNotFoundException) { // slot no longer existent, this should actually never happen, because we've // just allocated the slot. So let's fail hard in this case! onFatalError(slotNotFoundException); } // release local state under the allocation id. localStateStoresManager.releaseLocalStateForAllocationId(allocationId); // sanity check if (!taskSlotTable.isSlotFree(slotId.getSlotNumber())) { onFatalError(new Exception("Could not free slot " + slotId)); } throw new SlotAllocationException("Could not add job to job leader service.", e); } } } catch (TaskManagerException taskManagerException) { return FutureUtils.completedExceptionally(taskManagerException); } return CompletableFuture.completedFuture(Acknowledge.get()); }
Handles a slot request from the ResourceManager: allocates the requested slot for the given job and allocation ID and offers it to the job's JobManager, or registers the job with the job leader service if no JobManager connection exists yet.
@Override public void disconnectJobManager(JobID jobId, Exception cause) { closeJobManagerConnection(jobId, cause); jobLeaderService.reconnect(jobId); }
Disconnects the TaskExecutor from the JobManager of the given job and triggers a reconnect via the job leader service.
private void notifyOfNewResourceManagerLeader(String newLeaderAddress, ResourceManagerId newResourceManagerId) { resourceManagerAddress = createResourceManagerAddress(newLeaderAddress, newResourceManagerId); reconnectToResourceManager(new FlinkException(String.format("ResourceManager leader changed to new address %s", resourceManagerAddress))); }
Notifies the TaskExecutor about a new ResourceManager leader and triggers a reconnect to the new leader address.
private void offerSlotsToJobManager(final JobID jobId) { final JobManagerConnection jobManagerConnection = jobManagerTable.get(jobId); if (jobManagerConnection == null) { log.debug("There is no job manager connection to the leader of job {}.", jobId); } else { if (taskSlotTable.hasAllocatedSlots(jobId)) { log.info("Offer reserved slots to the leader of job {}.", jobId); final JobMasterGateway jobMasterGateway = jobManagerConnection.getJobManagerGateway(); final Iterator<TaskSlot> reservedSlotsIterator = taskSlotTable.getAllocatedSlots(jobId); final JobMasterId jobMasterId = jobManagerConnection.getJobMasterId(); final Collection<SlotOffer> reservedSlots = new HashSet<>(2); while (reservedSlotsIterator.hasNext()) { SlotOffer offer = reservedSlotsIterator.next().generateSlotOffer(); reservedSlots.add(offer); } CompletableFuture<Collection<SlotOffer>> acceptedSlotsFuture = jobMasterGateway.offerSlots( getResourceID(), reservedSlots, taskManagerConfiguration.getTimeout()); acceptedSlotsFuture.whenCompleteAsync( (Iterable<SlotOffer> acceptedSlots, Throwable throwable) -> { if (throwable != null) { if (throwable instanceof TimeoutException) { log.info("Slot offering to JobManager did not finish in time. Retrying the slot offering."); // We ran into a timeout. Try again. offerSlotsToJobManager(jobId); } else { log.warn("Slot offering to JobManager failed. Freeing the slots " + "and returning them to the ResourceManager.", throwable); // We encountered an exception. Free the slots and return them to the RM. for (SlotOffer reservedSlot: reservedSlots) { freeSlotInternal(reservedSlot.getAllocationId(), throwable); } } } else { // check if the response is still valid if (isJobManagerConnectionValid(jobId, jobMasterId)) { // mark accepted slots active for (SlotOffer acceptedSlot : acceptedSlots) { try { if (!taskSlotTable.markSlotActive(acceptedSlot.getAllocationId())) { // the slot is either free or releasing at the moment final String message = "Could not mark slot " + jobId + " active."; log.debug(message); jobMasterGateway.failSlot( getResourceID(), acceptedSlot.getAllocationId(), new FlinkException(message)); } } catch (SlotNotFoundException e) { final String message = "Could not mark slot " + jobId + " active."; jobMasterGateway.failSlot( getResourceID(), acceptedSlot.getAllocationId(), new FlinkException(message)); } reservedSlots.remove(acceptedSlot); } final Exception e = new Exception("The slot was rejected by the JobManager."); for (SlotOffer rejectedSlot : reservedSlots) { freeSlotInternal(rejectedSlot.getAllocationId(), e); } } else { // discard the response since there is a new leader for the job log.debug("Discard offer slot response since there is a new leader " + "for the job {}.", jobId); } } }, getMainThreadExecutor()); } else { log.debug("There are no unassigned slots for the job {}.", jobId); } } }
Offers all slots reserved for the given job to its JobManager. Accepted slots are marked active; rejected slots and slots whose offer failed are freed and returned to the ResourceManager.
private void failTask(final ExecutionAttemptID executionAttemptID, final Throwable cause) { final Task task = taskSlotTable.getTask(executionAttemptID); if (task != null) { try { task.failExternally(cause); } catch (Throwable t) { log.error("Could not fail task {}.", executionAttemptID, t); } } else { log.debug("Cannot find task to fail for execution {}.", executionAttemptID); } }
Fails the task with the given execution attempt ID using the provided cause, if the task is known to this TaskExecutor.
public Optional<OperatorBackPressureStats> getOperatorBackPressureStats(ExecutionJobVertex vertex) { synchronized (lock) { final OperatorBackPressureStats stats = operatorStatsCache.getIfPresent(vertex); if (stats == null || backPressureStatsRefreshInterval <= System.currentTimeMillis() - stats.getEndTimestamp()) { triggerStackTraceSampleInternal(vertex); } return Optional.ofNullable(stats); } }
Returns back pressure statistics for an operator. Automatically triggers stack trace sampling if statistics are not available or outdated. @param vertex Operator to get the stats for. @return Back pressure statistics for an operator
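A usage sketch, assuming a tracker instance named backPressureStatsTracker and an ExecutionJobVertex named vertex are in scope (both names are illustrative):

Optional<OperatorBackPressureStats> maybeStats =
        backPressureStatsTracker.getOperatorBackPressureStats(vertex);

if (!maybeStats.isPresent()) {
    // no (fresh) statistics yet; a stack trace sample has just been triggered in the
    // background, so polling again later should eventually return a value
} else {
    OperatorBackPressureStats stats = maybeStats.get();
    // inspect per-subtask back pressure ratios here
}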
private boolean triggerStackTraceSampleInternal(final ExecutionJobVertex vertex) { assert(Thread.holdsLock(lock)); if (shutDown) { return false; } if (!pendingStats.contains(vertex) && !vertex.getGraph().getState().isGloballyTerminalState()) { Executor executor = vertex.getGraph().getFutureExecutor(); // Only trigger if still active job if (executor != null) { pendingStats.add(vertex); if (LOG.isDebugEnabled()) { LOG.debug("Triggering stack trace sample for tasks: " + Arrays.toString(vertex.getTaskVertices())); } CompletableFuture<StackTraceSample> sample = coordinator.triggerStackTraceSample( vertex.getTaskVertices(), numSamples, delayBetweenSamples, MAX_STACK_TRACE_DEPTH); sample.handleAsync(new StackTraceSampleCompletionCallback(vertex), executor); return true; } } return false; }
Triggers a stack trace sample for an operator to gather the back pressure statistics. If there is a sample in progress for the operator, the call is ignored. @param vertex Operator to get the stats for. @return Flag indicating whether a sample was triggered.
private static String[] getCLDBLocations(String authority) throws IOException { // Determine the MapR home String maprHome = System.getenv(MAPR_HOME_ENV); if (maprHome == null) { maprHome = DEFAULT_MAPR_HOME; } final File maprClusterConf = new File(maprHome, MAPR_CLUSTER_CONF_FILE); if (LOG.isDebugEnabled()) { LOG.debug(String.format( "Trying to retrieve MapR cluster configuration from %s", maprClusterConf)); } if (!maprClusterConf.exists()) { throw new IOException("Could not find CLDB configuration '" + maprClusterConf.getAbsolutePath() + "', assuming MapR home is '" + maprHome + "'."); } // Read the cluster configuration file, format is specified at // http://doc.mapr.com/display/MapR/mapr-clusters.conf try (BufferedReader br = new BufferedReader(new FileReader(maprClusterConf))) { String line; while ((line = br.readLine()) != null) { // Normalize the string line = line.trim(); line = line.replace('\t', ' '); final String[] fields = line.split(" "); if (fields.length < 1) { continue; } final String clusterName = fields[0]; if (!clusterName.equals(authority)) { continue; } final List<String> cldbLocations = new ArrayList<>(); for (int i = 1; i < fields.length; ++i) { // Make sure this is not a key-value pair MapR recently // introduced in the file format along with their security // features. if (!fields[i].isEmpty() && !fields[i].contains("=")) { cldbLocations.add(fields[i]); } } if (cldbLocations.isEmpty()) { throw new IOException( String.format( "%s contains entry for cluster %s but no CLDB locations.", maprClusterConf, authority)); } return cldbLocations.toArray(new String[cldbLocations.size()]); } } throw new IOException(String.format( "Unable to find CLDB locations for cluster %s", authority)); }
Retrieves the CLDB locations for the given MapR cluster name. @param authority the name of the MapR cluster @return a list of CLDB locations @throws IOException thrown if the CLDB locations for the given MapR cluster name cannot be determined
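A sketch of how the parser above treats a single configuration line; the line and host names are hypothetical, following the format the code expects:

String line = "my.cluster.com secure=false cldb1.example.com:7222 cldb2.example.com:7222";
String[] fields = line.trim().replace('\t', ' ').split(" ");
// fields[0] is the cluster name ("my.cluster.com"); "secure=false" is skipped because
// it contains '='; the remaining host:port entries are the CLDB locations
List<String> cldbLocations = new ArrayList<>();
for (int i = 1; i < fields.length; i++) {
    if (!fields[i].isEmpty() && !fields[i].contains("=")) {
        cldbLocations.add(fields[i]);
    }
}
// cldbLocations == ["cldb1.example.com:7222", "cldb2.example.com:7222"]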
@SuppressWarnings("unchecked") @PublicEvolving public static <X> PrimitiveArrayTypeInfo<X> getInfoFor(Class<X> type) { if (!type.isArray()) { throw new InvalidTypesException("The given class is no array."); } // basic type arrays return (PrimitiveArrayTypeInfo<X>) TYPES.get(type); }
Tries to get the PrimitiveArrayTypeInfo for an array. Returns null, if the type is an array, but the component type is not a primitive type. @param type The class of the array. @return The corresponding PrimitiveArrayTypeInfo, or null, if the array is not an array of primitives. @throws InvalidTypesException Thrown, if the given class does not represent an array.
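A small, self-contained example of the lookup behavior described above:

import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo;

public class PrimitiveArrayInfoExample {
    public static void main(String[] args) {
        // int[] is an array of a primitive component type, so a type info is returned
        PrimitiveArrayTypeInfo<int[]> intArrayInfo =
                PrimitiveArrayTypeInfo.getInfoFor(int[].class);
        System.out.println(intArrayInfo);

        // String[] is an array, but not of a primitive component type -> null
        PrimitiveArrayTypeInfo<String[]> stringArrayInfo =
                PrimitiveArrayTypeInfo.getInfoFor(String[].class);
        System.out.println(stringArrayInfo); // prints "null"
    }
}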
static void adjustAutoCommitConfig(Properties properties, OffsetCommitMode offsetCommitMode) { if (offsetCommitMode == OffsetCommitMode.ON_CHECKPOINTS || offsetCommitMode == OffsetCommitMode.DISABLED) { properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); } }
Makes sure that auto commit is disabled when the offset commit mode is ON_CHECKPOINTS or DISABLED. This overwrites whatever setting the user configured in the properties. @param properties Kafka configuration properties to be adjusted @param offsetCommitMode offset commit mode
public FlinkKafkaConsumerBase<T> assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks<T> assigner) { checkNotNull(assigner); if (this.periodicWatermarkAssigner != null) { throw new IllegalStateException("A periodic watermark emitter has already been set."); } try { ClosureCleaner.clean(assigner, true); this.punctuatedWatermarkAssigner = new SerializedValue<>(assigner); return this; } catch (Exception e) { throw new IllegalArgumentException("The given assigner is not serializable", e); } }
Specifies an {@link AssignerWithPunctuatedWatermarks} to emit watermarks in a punctuated manner. The watermark extractor will run per Kafka partition; watermarks will be merged across partitions in the same way as in the Flink runtime when streams are merged. <p>When a subtask of a FlinkKafkaConsumer source reads multiple Kafka partitions, the streams from the partitions are unioned in a "first come first serve" fashion. Per-partition characteristics are usually lost that way. For example, if the timestamps are strictly ascending per Kafka partition, they will not be strictly ascending in the resulting Flink DataStream, if the parallel source subtask reads more than one partition. <p>Running timestamp extractors / watermark generators directly inside the Kafka source, per Kafka partition, allows users to let them exploit the per-partition characteristics. <p>Note: One can use either an {@link AssignerWithPunctuatedWatermarks} or an {@link AssignerWithPeriodicWatermarks}, not both at the same time. @param assigner The timestamp assigner / watermark generator to use. @return The consumer object, to allow function chaining.
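A minimal sketch of attaching a punctuated assigner; the consumer variable and the "timestamp,payload" record layout are assumptions made for illustration only:

consumer.assignTimestampsAndWatermarks(new AssignerWithPunctuatedWatermarks<String>() {

    @Override
    public long extractTimestamp(String element, long previousElementTimestamp) {
        // assumed record layout: "<epochMillis>,<payload>"
        return Long.parseLong(element.split(",")[0]);
    }

    @Override
    public Watermark checkAndGetNextWatermark(String lastElement, long extractedTimestamp) {
        // punctuated: decide per element; here every element carries a watermark
        return new Watermark(extractedTimestamp);
    }
});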
public FlinkKafkaConsumerBase<T> setStartFromEarliest() { this.startupMode = StartupMode.EARLIEST; this.startupOffsetsTimestamp = null; this.specificStartupOffsets = null; return this; }
Specifies the consumer to start reading from the earliest offset for all partitions. This lets the consumer ignore any committed group offsets in Zookeeper / Kafka brokers. <p>This method does not affect where partitions are read from when the consumer is restored from a checkpoint or savepoint. When the consumer is restored from a checkpoint or savepoint, only the offsets in the restored state will be used. @return The consumer object, to allow function chaining.
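A usage sketch, assuming the universal FlinkKafkaConsumer, hypothetical topic/broker names, and an existing StreamExecutionEnvironment named env:

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");
props.setProperty("group.id", "example-group");

FlinkKafkaConsumer<String> consumer =
        new FlinkKafkaConsumer<>("example-topic", new SimpleStringSchema(), props);
consumer.setStartFromEarliest(); // committed group offsets are ignored on a fresh start

DataStream<String> stream = env.addSource(consumer);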
protected FlinkKafkaConsumerBase<T> setStartFromTimestamp(long startupOffsetsTimestamp) { checkArgument(startupOffsetsTimestamp >= 0, "The provided value for the startup offsets timestamp is invalid."); long currentTimestamp = System.currentTimeMillis(); checkArgument(startupOffsetsTimestamp <= currentTimestamp, "Startup time[%s] must be before current time[%s].", startupOffsetsTimestamp, currentTimestamp); this.startupMode = StartupMode.TIMESTAMP; this.startupOffsetsTimestamp = startupOffsetsTimestamp; this.specificStartupOffsets = null; return this; }
Specifies the consumer to start reading partitions from the offsets for the given timestamp, given in milliseconds since the epoch and not later than the current time. Version-specific subclasses that can expose this functionality should override this method and allow public access.
public FlinkKafkaConsumerBase<T> setStartFromGroupOffsets() { this.startupMode = StartupMode.GROUP_OFFSETS; this.startupOffsetsTimestamp = null; this.specificStartupOffsets = null; return this; }
Specifies the consumer to start reading from any committed group offsets found in Zookeeper / Kafka brokers. The "group.id" property must be set in the configuration properties. If no offset can be found for a partition, the behaviour in "auto.offset.reset" set in the configuration properties will be used for the partition. <p>This method does not affect where partitions are read from when the consumer is restored from a checkpoint or savepoint. When the consumer is restored from a checkpoint or savepoint, only the offsets in the restored state will be used. @return The consumer object, to allow function chaining.
public FlinkKafkaConsumerBase<T> setStartFromSpecificOffsets(Map<KafkaTopicPartition, Long> specificStartupOffsets) { this.startupMode = StartupMode.SPECIFIC_OFFSETS; this.startupOffsetsTimestamp = null; this.specificStartupOffsets = checkNotNull(specificStartupOffsets); return this; }
Specifies the consumer to start reading partitions from specific offsets, set independently for each partition. The specified offset should be the offset of the next record that will be read from partitions. This lets the consumer ignore any committed group offsets in Zookeeper / Kafka brokers. <p>If the provided map of offsets contains entries whose {@link KafkaTopicPartition} is not subscribed by the consumer, the entry will be ignored. If the consumer subscribes to a partition that does not exist in the provided map of offsets, the consumer will fallback to the default group offset behaviour (see {@link FlinkKafkaConsumerBase#setStartFromGroupOffsets()}) for that particular partition. <p>If the specified offset for a partition is invalid, or the behaviour for that partition is defaulted to group offsets but still no group offset could be found for it, then the "auto.offset.reset" behaviour set in the configuration properties will be used for the partition <p>This method does not affect where partitions are read from when the consumer is restored from a checkpoint or savepoint. When the consumer is restored from a checkpoint or savepoint, only the offsets in the restored state will be used. @return The consumer object, to allow function chaining.
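A sketch of providing per-partition start offsets, assuming a consumer instance as in the earlier sketch; the topic name and offsets are hypothetical, and each value is the offset of the next record to read:

Map<KafkaTopicPartition, Long> specificStartOffsets = new HashMap<>();
specificStartOffsets.put(new KafkaTopicPartition("example-topic", 0), 23L);
specificStartOffsets.put(new KafkaTopicPartition("example-topic", 1), 31L);
specificStartOffsets.put(new KafkaTopicPartition("example-topic", 2), 43L);

consumer.setStartFromSpecificOffsets(specificStartOffsets);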
@Override public void open(Configuration configuration) throws Exception { // determine the offset commit mode this.offsetCommitMode = OffsetCommitModes.fromConfiguration( getIsAutoCommitEnabled(), enableCommitOnCheckpoints, ((StreamingRuntimeContext) getRuntimeContext()).isCheckpointingEnabled()); // create the partition discoverer this.partitionDiscoverer = createPartitionDiscoverer( topicsDescriptor, getRuntimeContext().getIndexOfThisSubtask(), getRuntimeContext().getNumberOfParallelSubtasks()); this.partitionDiscoverer.open(); subscribedPartitionsToStartOffsets = new HashMap<>(); final List<KafkaTopicPartition> allPartitions = partitionDiscoverer.discoverPartitions(); if (restoredState != null) { for (KafkaTopicPartition partition : allPartitions) { if (!restoredState.containsKey(partition)) { restoredState.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET); } } for (Map.Entry<KafkaTopicPartition, Long> restoredStateEntry : restoredState.entrySet()) { if (!restoredFromOldState) { // seed the partition discoverer with the union state while filtering out // restored partitions that should not be subscribed by this subtask if (KafkaTopicPartitionAssigner.assign( restoredStateEntry.getKey(), getRuntimeContext().getNumberOfParallelSubtasks()) == getRuntimeContext().getIndexOfThisSubtask()){ subscribedPartitionsToStartOffsets.put(restoredStateEntry.getKey(), restoredStateEntry.getValue()); } } else { // when restoring from older 1.1 / 1.2 state, the restored state would not be the union state; // in this case, just use the restored state as the subscribed partitions subscribedPartitionsToStartOffsets.put(restoredStateEntry.getKey(), restoredStateEntry.getValue()); } } if (filterRestoredPartitionsWithCurrentTopicsDescriptor) { subscribedPartitionsToStartOffsets.entrySet().removeIf(entry -> { if (!topicsDescriptor.isMatchingTopic(entry.getKey().getTopic())) { LOG.warn( "{} is removed from subscribed partitions since it is no longer associated with topics descriptor of current execution.", entry.getKey()); return true; } return false; }); } LOG.info("Consumer subtask {} will start reading {} partitions with offsets in restored state: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets); } else { // use the partition discoverer to fetch the initial seed partitions, // and set their initial offsets depending on the startup mode. // for SPECIFIC_OFFSETS and TIMESTAMP modes, we set the specific offsets now; // for other modes (EARLIEST, LATEST, and GROUP_OFFSETS), the offset is lazily determined // when the partition is actually read. 
switch (startupMode) { case SPECIFIC_OFFSETS: if (specificStartupOffsets == null) { throw new IllegalStateException( "Startup mode for the consumer set to " + StartupMode.SPECIFIC_OFFSETS + ", but no specific offsets were specified."); } for (KafkaTopicPartition seedPartition : allPartitions) { Long specificOffset = specificStartupOffsets.get(seedPartition); if (specificOffset != null) { // since the specified offsets represent the next record to read, we subtract // it by one so that the initial state of the consumer will be correct subscribedPartitionsToStartOffsets.put(seedPartition, specificOffset - 1); } else { // default to group offset behaviour if the user-provided specific offsets // do not contain a value for this partition subscribedPartitionsToStartOffsets.put(seedPartition, KafkaTopicPartitionStateSentinel.GROUP_OFFSET); } } break; case TIMESTAMP: if (startupOffsetsTimestamp == null) { throw new IllegalStateException( "Startup mode for the consumer set to " + StartupMode.TIMESTAMP + ", but no startup timestamp was specified."); } for (Map.Entry<KafkaTopicPartition, Long> partitionToOffset : fetchOffsetsWithTimestamp(allPartitions, startupOffsetsTimestamp).entrySet()) { subscribedPartitionsToStartOffsets.put( partitionToOffset.getKey(), (partitionToOffset.getValue() == null) // if an offset cannot be retrieved for a partition with the given timestamp, // we default to using the latest offset for the partition ? KafkaTopicPartitionStateSentinel.LATEST_OFFSET // since the specified offsets represent the next record to read, we subtract // it by one so that the initial state of the consumer will be correct : partitionToOffset.getValue() - 1); } break; default: for (KafkaTopicPartition seedPartition : allPartitions) { subscribedPartitionsToStartOffsets.put(seedPartition, startupMode.getStateSentinel()); } } if (!subscribedPartitionsToStartOffsets.isEmpty()) { switch (startupMode) { case EARLIEST: LOG.info("Consumer subtask {} will start reading the following {} partitions from the earliest offsets: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets.keySet()); break; case LATEST: LOG.info("Consumer subtask {} will start reading the following {} partitions from the latest offsets: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets.keySet()); break; case TIMESTAMP: LOG.info("Consumer subtask {} will start reading the following {} partitions from timestamp {}: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), startupOffsetsTimestamp, subscribedPartitionsToStartOffsets.keySet()); break; case SPECIFIC_OFFSETS: LOG.info("Consumer subtask {} will start reading the following {} partitions from the specified startup offsets {}: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), specificStartupOffsets, subscribedPartitionsToStartOffsets.keySet()); List<KafkaTopicPartition> partitionsDefaultedToGroupOffsets = new ArrayList<>(subscribedPartitionsToStartOffsets.size()); for (Map.Entry<KafkaTopicPartition, Long> subscribedPartition : subscribedPartitionsToStartOffsets.entrySet()) { if (subscribedPartition.getValue() == KafkaTopicPartitionStateSentinel.GROUP_OFFSET) { partitionsDefaultedToGroupOffsets.add(subscribedPartition.getKey()); } } if (partitionsDefaultedToGroupOffsets.size() > 0) { LOG.warn("Consumer subtask {} cannot find offsets for the following {} 
partitions in the specified startup offsets: {}" + "; their startup offsets will be defaulted to their committed group offsets in Kafka.", getRuntimeContext().getIndexOfThisSubtask(), partitionsDefaultedToGroupOffsets.size(), partitionsDefaultedToGroupOffsets); } break; case GROUP_OFFSETS: LOG.info("Consumer subtask {} will start reading the following {} partitions from the committed group offsets in Kafka: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets.keySet()); } } else { LOG.info("Consumer subtask {} initially has no partitions to read from.", getRuntimeContext().getIndexOfThisSubtask()); } } }
Opens the consumer: determines the offset commit mode, creates and opens the partition discoverer, and computes the set of partitions this subtask subscribes to together with their start offsets, taken either from restored state or from the configured startup mode.
@Override public final void initializeState(FunctionInitializationContext context) throws Exception { OperatorStateStore stateStore = context.getOperatorStateStore(); ListState<Tuple2<KafkaTopicPartition, Long>> oldRoundRobinListState = stateStore.getSerializableListState(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME); this.unionOffsetStates = stateStore.getUnionListState(new ListStateDescriptor<>( OFFSETS_STATE_NAME, TypeInformation.of(new TypeHint<Tuple2<KafkaTopicPartition, Long>>() {}))); if (context.isRestored() && !restoredFromOldState) { restoredState = new TreeMap<>(new KafkaTopicPartition.Comparator()); // migrate from 1.2 state, if there is any for (Tuple2<KafkaTopicPartition, Long> kafkaOffset : oldRoundRobinListState.get()) { restoredFromOldState = true; unionOffsetStates.add(kafkaOffset); } oldRoundRobinListState.clear(); if (restoredFromOldState && discoveryIntervalMillis != PARTITION_DISCOVERY_DISABLED) { throw new IllegalArgumentException( "Topic / partition discovery cannot be enabled if the job is restored from a savepoint from Flink 1.2.x."); } // populate actual holder for restored state for (Tuple2<KafkaTopicPartition, Long> kafkaOffset : unionOffsetStates.get()) { restoredState.put(kafkaOffset.f0, kafkaOffset.f1); } LOG.info("Setting restore state in the FlinkKafkaConsumer: {}", restoredState); } else { LOG.info("No restore state for FlinkKafkaConsumer."); } }
Initializes the consumer's offset state: registers the union list state for offsets, migrates any legacy round-robin state from Flink 1.2 if present, and populates the restored offsets map when the job is restored from a checkpoint or savepoint.
private CompletableFuture<KvStateLocation> getKvStateLookupInfo( final JobID jobId, final String queryableStateName, final boolean forceUpdate) { final Tuple2<JobID, String> cacheKey = new Tuple2<>(jobId, queryableStateName); final CompletableFuture<KvStateLocation> cachedFuture = lookupCache.get(cacheKey); if (!forceUpdate && cachedFuture != null && !cachedFuture.isCompletedExceptionally()) { LOG.debug("Retrieving location for state={} of job={} from the cache.", queryableStateName, jobId); return cachedFuture; } final KvStateLocationOracle kvStateLocationOracle = proxy.getKvStateLocationOracle(jobId); if (kvStateLocationOracle != null) { LOG.debug("Retrieving location for state={} of job={} from the key-value state location oracle.", queryableStateName, jobId); final CompletableFuture<KvStateLocation> location = new CompletableFuture<>(); lookupCache.put(cacheKey, location); kvStateLocationOracle .requestKvStateLocation(jobId, queryableStateName) .whenComplete( (KvStateLocation kvStateLocation, Throwable throwable) -> { if (throwable != null) { if (ExceptionUtils.stripCompletionException(throwable) instanceof FlinkJobNotFoundException) { // if the jobId was wrong, remove the entry from the cache. lookupCache.remove(cacheKey); } location.completeExceptionally(throwable); } else { location.complete(kvStateLocation); } }); return location; } else { return FutureUtils.completedExceptionally( new UnknownLocationException( "Could not retrieve location of state=" + queryableStateName + " of job=" + jobId + ". Potential reasons are: i) the state is not ready, or ii) the job does not exist.")); } }
Lookup the {@link KvStateLocation} for the given job and queryable state name. <p>The job manager will be queried for the location only if forced or no cached location can be found. There are no guarantees that a cached location is still up to date. @param jobId JobID the state instance belongs to. @param queryableStateName Name under which the state instance has been published. @param forceUpdate Flag to indicate whether to force an update via the lookup service. @return Future holding the KvStateLocation
@VisibleForTesting public SharedStateRegistryKey createSharedStateRegistryKeyFromFileName(StateHandleID shId) { return new SharedStateRegistryKey(String.valueOf(backendIdentifier) + '-' + keyGroupRange, shId); }
Create a unique key to register one of our shared state handles.
public static RocksDBKeyedStateBackend.RocksDbKvStateInfo createStateInfo( RegisteredStateMetaInfoBase metaInfoBase, RocksDB db, Function<String, ColumnFamilyOptions> columnFamilyOptionsFactory, @Nullable RocksDbTtlCompactFiltersManager ttlCompactFiltersManager) { ColumnFamilyDescriptor columnFamilyDescriptor = createColumnFamilyDescriptor( metaInfoBase, columnFamilyOptionsFactory, ttlCompactFiltersManager); return new RocksDBKeyedStateBackend.RocksDbKvStateInfo(createColumnFamily(columnFamilyDescriptor, db), metaInfoBase); }
Creates a state info from a new meta info to use with a k/v state. <p>Creates the column family for the state. Sets TTL compaction filter if {@code ttlCompactFiltersManager} is not {@code null}.
public static ColumnFamilyDescriptor createColumnFamilyDescriptor( RegisteredStateMetaInfoBase metaInfoBase, Function<String, ColumnFamilyOptions> columnFamilyOptionsFactory, @Nullable RocksDbTtlCompactFiltersManager ttlCompactFiltersManager) { ColumnFamilyOptions options = createColumnFamilyOptions(columnFamilyOptionsFactory, metaInfoBase.getName()); if (ttlCompactFiltersManager != null) { ttlCompactFiltersManager.setAndRegisterCompactFilterIfStateTtl(metaInfoBase, options); } byte[] nameBytes = metaInfoBase.getName().getBytes(ConfigConstants.DEFAULT_CHARSET); Preconditions.checkState(!Arrays.equals(RocksDB.DEFAULT_COLUMN_FAMILY, nameBytes), "The chosen state name 'default' collides with the name of the default column family!"); return new ColumnFamilyDescriptor(nameBytes, options); }
Creates a column family descriptor for a state column family. <p>Sets TTL compaction filter if {@code ttlCompactFiltersManager} is not {@code null}.
public static <L, R> Either<L, R> Left(L value) { return new Left<L, R>(value); }
Create a Left value of Either
public static <L, R> Either<L, R> Right(R value) { return new Right<L, R>(value); }
Create a Right value of Either
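A self-contained sketch of using the two factory methods; the parseInt helper is hypothetical and only illustrates the Left-as-error, Right-as-value convention:

import org.apache.flink.types.Either;

public class EitherExample {
    // return either a parsed value (Right) or an error message (Left)
    static Either<String, Integer> parseInt(String s) {
        try {
            return Either.Right(Integer.parseInt(s));
        } catch (NumberFormatException e) {
            return Either.Left("not a number: " + s);
        }
    }

    public static void main(String[] args) {
        Either<String, Integer> ok = parseInt("42");
        if (ok.isRight()) {
            System.out.println(ok.right()); // 42
        }

        Either<String, Integer> err = parseInt("abc");
        if (err.isLeft()) {
            System.out.println(err.left()); // not a number: abc
        }
    }
}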
@Internal public static <L, R> Left<L, R> obtainLeft(Either<L, R> input, TypeSerializer<L> leftSerializer) { if (input.isLeft()) { return (Left<L, R>) input; } else { Right<L, R> right = (Right<L, R>) input; if (right.left == null) { right.left = Left.of(leftSerializer.createInstance()); right.left.right = right; } return right.left; } }
Utility function for {@link EitherSerializer} to support object reuse. To support object reuse both subclasses of Either contain a reference to an instance of the other type. This method provides access to and initializes the cross-reference. @param input container for Left or Right value @param leftSerializer for creating an instance of the left type @param <L> the type of Left @param <R> the type of Right @return input if Left type else input's Left reference