public JaccardIndex<K, VV, EV> setMaximumScore(int numerator, int denominator) { Preconditions.checkArgument(numerator >= 0, "Maximum score numerator must be non-negative"); Preconditions.checkArgument(denominator > 0, "Maximum score denominator must be greater than zero"); Preconditions.checkArgument(numerator <= denominator, "Maximum score fraction must be less than or equal to one"); this.unboundedScores = false; this.maximumScoreNumerator = numerator; this.maximumScoreDenominator = denominator; return this; }
Filter out Jaccard Index scores greater than the given maximum fraction. @param numerator numerator of the maximum score @param denominator denominator of the maximum score @return this @see #setMinimumScore(int, int)
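A minimal usage sketch of these score bounds, assuming an existing Gelly Graph<LongValue, NullValue, NullValue> named "graph" (the graph construction itself is not shown and the bounds are illustrative):

// Keep only vertex pairs whose Jaccard Index lies in [1/4, 3/4]; "graph" is assumed to exist.
DataSet<JaccardIndex.Result<LongValue>> scores = graph
    .run(new JaccardIndex<LongValue, NullValue, NullValue>()
        .setMinimumScore(1, 4)
        .setMaximumScore(3, 4));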
@Override public DataSet<Result<K>> runInternal(Graph<K, VV, EV> input) throws Exception { // s, t, d(t) DataSet<Edge<K, Tuple2<EV, LongValue>>> neighborDegree = input .run(new EdgeTargetDegree<K, VV, EV>() .setParallelism(parallelism)); // group span, s, t, d(t) DataSet<Tuple4<IntValue, K, K, IntValue>> groupSpans = neighborDegree .groupBy(0) .sortGroup(1, Order.ASCENDING) .reduceGroup(new GenerateGroupSpans<>(groupSize)) .setParallelism(parallelism) .name("Generate group spans"); // group, s, t, d(t) DataSet<Tuple4<IntValue, K, K, IntValue>> groups = groupSpans .rebalance() .setParallelism(parallelism) .name("Rebalance") .flatMap(new GenerateGroups<>()) .setParallelism(parallelism) .name("Generate groups"); // t, u, d(t)+d(u) DataSet<Tuple3<K, K, IntValue>> twoPaths = groups .groupBy(0, 1) .sortGroup(2, Order.ASCENDING) .reduceGroup(new GenerateGroupPairs<>(groupSize)) .name("Generate group pairs"); // t, u, intersection, union DataSet<Result<K>> scores = twoPaths .groupBy(0, 1) .reduceGroup(new ComputeScores<>(unboundedScores, minimumScoreNumerator, minimumScoreDenominator, maximumScoreNumerator, maximumScoreDenominator)) .name("Compute scores"); if (mirrorResults) { scores = scores .flatMap(new MirrorResult<>()) .name("Mirror results"); } return scores; }
Implementation notes: The requirement that "K extends CopyableValue<K>" can be removed when Flink has a self-join which performs the skew distribution handled by GenerateGroupSpans / GenerateGroups / GenerateGroupPairs.
@Override public DataSet<Result<K>> runInternal(Graph<K, VV, EV> input) throws Exception { // u, v, w, bitmask DataSet<TriangleListing.Result<K>> triangles = input .run(new TriangleListing<K, VV, EV>() .setParallelism(parallelism)); // u, edge count DataSet<Tuple2<K, LongValue>> triangleVertices = triangles .flatMap(new SplitTriangles<>()) .name("Split triangle vertices"); // u, triangle count DataSet<Tuple2<K, LongValue>> vertexTriangleCount = triangleVertices .groupBy(0) .reduce(new CountTriangles<>()) .setCombineHint(CombineHint.HASH) .name("Count triangles") .setParallelism(parallelism); // u, deg(u) DataSet<Vertex<K, Degrees>> vertexDegree = input .run(new VertexDegrees<K, VV, EV>() .setIncludeZeroDegreeVertices(includeZeroDegreeVertices.get()) .setParallelism(parallelism)); // u, deg(u), triangle count return vertexDegree .leftOuterJoin(vertexTriangleCount) .where(0) .equalTo(0) .with(new JoinVertexDegreeWithTriangleCount<>()) .setParallelism(parallelism) .name("Clustering coefficient"); }
Implementation notes: The requirement that "K extends CopyableValue<K>" can be removed once it is removed from TriangleListing. CountVertices can be replaced by ".sum(1)" when Flink aggregators use code generation.
@Override protected void putVariables(Map<String, String> variables) { variables.put(ScopeFormat.SCOPE_JOB_ID, jobId.toString()); variables.put(ScopeFormat.SCOPE_JOB_NAME, jobName); }
@Override public void configure(Configuration parameters) { table = createTable(); if (table != null) { scan = getScanner(); } }
Creates a {@link Scan} object and opens the {@link HTable} connection. These are created here because they are needed in createInputSplits(), which is called before openInputFormat(). The connection is therefore opened in {@link #configure(Configuration)} and closed in {@link #closeInputFormat()}. @param parameters The configuration that is to be used @see Configuration
private HTable createTable() { LOG.info("Initializing HBaseConfiguration"); //use files found in the classpath org.apache.hadoop.conf.Configuration hConf = HBaseConfiguration.create(); try { return new HTable(hConf, getTableName()); } catch (Exception e) { LOG.error("Error instantiating a new HTable instance", e); } return null; }
Create an {@link HTable} instance and set it into this format.
public static <E> EmptyIterator<E> get() { @SuppressWarnings("unchecked") EmptyIterator<E> iter = (EmptyIterator<E>) INSTANCE; return iter; }
Gets a singleton instance of the empty iterator. @param <E> The type of the objects (not) returned by the iterator. @return An instance of the iterator.
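A brief usage sketch; the unchecked cast inside get() lets the single shared instance serve any element type:

// The same singleton instance can be handed out for any element type.
Iterator<String> noStrings = EmptyIterator.get();
assert !noStrings.hasNext();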
public static InternalType createInternalTypeFromTypeInfo(TypeInformation typeInfo) { InternalType type = TYPE_INFO_TO_INTERNAL_TYPE.get(typeInfo); if (type != null) { return type; } if (typeInfo instanceof CompositeType) { CompositeType compositeType = (CompositeType) typeInfo; return InternalTypes.createRowType( Stream.iterate(0, x -> x + 1).limit(compositeType.getArity()) .map((Function<Integer, TypeInformation>) compositeType::getTypeAt) .map(TypeConverters::createInternalTypeFromTypeInfo) .toArray(InternalType[]::new), compositeType.getFieldNames() ); } else if (typeInfo instanceof DecimalTypeInfo) { DecimalTypeInfo decimalType = (DecimalTypeInfo) typeInfo; return InternalTypes.createDecimalType(decimalType.precision(), decimalType.scale()); } else if (typeInfo instanceof PrimitiveArrayTypeInfo) { PrimitiveArrayTypeInfo arrayType = (PrimitiveArrayTypeInfo) typeInfo; return InternalTypes.createArrayType( createInternalTypeFromTypeInfo(arrayType.getComponentType())); } else if (typeInfo instanceof BasicArrayTypeInfo) { BasicArrayTypeInfo arrayType = (BasicArrayTypeInfo) typeInfo; return InternalTypes.createArrayType( createInternalTypeFromTypeInfo(arrayType.getComponentInfo())); } else if (typeInfo instanceof ObjectArrayTypeInfo) { ObjectArrayTypeInfo arrayType = (ObjectArrayTypeInfo) typeInfo; return InternalTypes.createArrayType( createInternalTypeFromTypeInfo(arrayType.getComponentInfo())); } else if (typeInfo instanceof MapTypeInfo) { MapTypeInfo mapType = (MapTypeInfo) typeInfo; return InternalTypes.createMapType( createInternalTypeFromTypeInfo(mapType.getKeyTypeInfo()), createInternalTypeFromTypeInfo(mapType.getValueTypeInfo())); } else if (typeInfo instanceof BinaryMapTypeInfo) { BinaryMapTypeInfo mapType = (BinaryMapTypeInfo) typeInfo; return InternalTypes.createMapType( mapType.getKeyType(), mapType.getValueType()); } else if (typeInfo instanceof BinaryArrayTypeInfo) { BinaryArrayTypeInfo arrayType = (BinaryArrayTypeInfo) typeInfo; return InternalTypes.createArrayType(arrayType.getElementType()); } else if (typeInfo instanceof BigDecimalTypeInfo) { BigDecimalTypeInfo decimalType = (BigDecimalTypeInfo) typeInfo; return new DecimalType(decimalType.precision(), decimalType.scale()); } else { return InternalTypes.createGenericType(typeInfo); } }
Create an {@link InternalType} from a {@link TypeInformation}. <p>Note: Information may be lost. For example, after a Pojo is converted to an InternalType, we no longer know that it is a Pojo and only see it as a Row. <p>Eg: {@link BasicTypeInfo#STRING_TYPE_INFO} => {@link InternalTypes#STRING}. {@link BasicTypeInfo#BIG_DEC_TYPE_INFO} => {@link DecimalType}. {@link RowTypeInfo} => {@link RowType}. {@link PojoTypeInfo} (CompositeType) => {@link RowType}. {@link TupleTypeInfo} (CompositeType) => {@link RowType}.
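As a hypothetical illustration of the CompositeType branch, converting a RowTypeInfo yields a RowType whose field types are converted recursively (variable names here are illustrative only):

// RowTypeInfo is a CompositeType, so it is mapped to an internal RowType.
RowTypeInfo rowInfo = new RowTypeInfo(
    BasicTypeInfo.STRING_TYPE_INFO,
    BasicTypeInfo.INT_TYPE_INFO);
InternalType internal = TypeConverters.createInternalTypeFromTypeInfo(rowInfo);
// "internal" is a RowType with field types [STRING, INT]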
public static TypeInformation createInternalTypeInfoFromInternalType(InternalType type) { TypeInformation typeInfo = INTERNAL_TYPE_TO_INTERNAL_TYPE_INFO.get(type); if (typeInfo != null) { return typeInfo; } if (type instanceof RowType) { RowType rowType = (RowType) type; return new BaseRowTypeInfo(rowType.getFieldTypes(), rowType.getFieldNames()); } else if (type instanceof ArrayType) { return new BinaryArrayTypeInfo(((ArrayType) type).getElementType()); } else if (type instanceof MapType) { MapType mapType = (MapType) type; return new BinaryMapTypeInfo(mapType.getKeyType(), mapType.getValueType()); } else if (type instanceof DecimalType) { DecimalType decimalType = (DecimalType) type; return new DecimalTypeInfo(decimalType.precision(), decimalType.scale()); } else if (type instanceof GenericType) { GenericType<?> genericType = (GenericType<?>) type; return new BinaryGenericTypeInfo<>(genericType); } else { throw new UnsupportedOperationException("Not support yet: " + type); } }
Create an internal {@link TypeInformation} from an {@link InternalType}. <p>eg: {@link InternalTypes#STRING} => {@link BinaryStringTypeInfo}. {@link RowType} => {@link BaseRowTypeInfo}.
@SuppressWarnings("unchecked") public static TypeInformation createExternalTypeInfoFromInternalType(InternalType type) { TypeInformation typeInfo = INTERNAL_TYPE_TO_EXTERNAL_TYPE_INFO.get(type); if (typeInfo != null) { return typeInfo; } if (type instanceof RowType) { RowType rowType = (RowType) type; return new RowTypeInfo(Arrays.stream(rowType.getFieldTypes()) .map(TypeConverters::createExternalTypeInfoFromInternalType) .toArray(TypeInformation[]::new), rowType.getFieldNames()); } else if (type instanceof ArrayType) { return ObjectArrayTypeInfo.getInfoFor( createExternalTypeInfoFromInternalType(((ArrayType) type).getElementType())); } else if (type instanceof MapType) { MapType mapType = (MapType) type; return new MapTypeInfo( createExternalTypeInfoFromInternalType(mapType.getKeyType()), createExternalTypeInfoFromInternalType(mapType.getValueType())); } else if (type instanceof MultisetType) { MultisetType multisetType = (MultisetType) type; return MultisetTypeInfo.getInfoFor( createExternalTypeInfoFromInternalType(multisetType.getElementType())); } else if (type instanceof DecimalType) { DecimalType decimalType = (DecimalType) type; return new BigDecimalTypeInfo(decimalType.precision(), decimalType.scale()); } else if (type instanceof GenericType) { GenericType genericType = (GenericType) type; return genericType.getTypeInfo(); } else { throw new UnsupportedOperationException("Not support yet: " + type); } }
Create an external {@link TypeInformation} from an {@link InternalType}. <p>eg: {@link InternalTypes#STRING} => {@link BasicTypeInfo#STRING_TYPE_INFO}. {@link RowType} => {@link RowTypeInfo}.
public static void main(String[] args) throws Exception { // Checking input parameters final ParameterTool params = ParameterTool.fromArgs(args); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); DataStream<Integer> trainingData = env.addSource(new FiniteTrainingDataSource()); DataStream<Integer> newData = env.addSource(new FiniteNewDataSource()); // build new model on every second of new data DataStream<Double[]> model = trainingData .assignTimestampsAndWatermarks(new LinearTimestamp()) .timeWindowAll(Time.of(5000, TimeUnit.MILLISECONDS)) .apply(new PartialModelBuilder()); // use partial model for newData DataStream<Integer> prediction = newData.connect(model).map(new Predictor()); // emit result if (params.has("output")) { prediction.writeAsText(params.get("output")); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); prediction.print(); } // execute program env.execute("Streaming Incremental Learning"); }
public DataSet<ST> closeWith(DataSet<ST> solutionSetDelta, DataSet<WT> newWorkset) { return new DeltaIterationResultSet<ST, WT>(initialSolutionSet.getExecutionEnvironment(), initialSolutionSet.getType(), initialWorkset.getType(), this, solutionSetDelta, newWorkset, keys, maxIterations); }
Closes the delta iteration. This method defines the end of the delta iteration's function. @param solutionSetDelta The delta for the solution set. The delta will be merged into the solution set at the end of each iteration. @param newWorkset The new workset (feedback data set) that will be fed back to the next iteration. @return The DataSet that represents the result of the iteration, after the computation has terminated. @see DataSet#iterateDelta(DataSet, int, int...)
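A minimal sketch of the surrounding delta-iteration pattern, assuming an existing DataSet<Tuple2<Long, Long>> named "initial" that is keyed on field 0 (the per-step logic is deliberately trivial here):

// Open a delta iteration with at most 100 supersteps, keyed on field 0.
DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
    initial.iterateDelta(initial, 100, 0);

// In a real job the delta and the next workset are derived from
// getWorkset() and getSolutionSet(); this placeholder simply passes records through.
DataSet<Tuple2<Long, Long>> nextWorkset = iteration.getWorkset().filter(t -> true);
DataSet<Tuple2<Long, Long>> solutionSetDelta = nextWorkset;

// closeWith() merges the delta into the solution set and feeds back the workset.
DataSet<Tuple2<Long, Long>> result = iteration.closeWith(solutionSetDelta, nextWorkset);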
public DeltaIteration<ST, WT> parallelism(int parallelism) { Preconditions.checkArgument(parallelism > 0 || parallelism == ExecutionConfig.PARALLELISM_DEFAULT, "The parallelism must be positive, or ExecutionConfig.PARALLELISM_DEFAULT (use default)."); this.parallelism = parallelism; return this; }
Sets the parallelism for the iteration. @param parallelism The parallelism. @return The iteration object, for function call chaining.
private DeltaIteration<ST, WT> setResources(ResourceSpec minResources, ResourceSpec preferredResources) { Preconditions.checkNotNull(minResources, "The min resources must be not null."); Preconditions.checkNotNull(preferredResources, "The preferred resources must be not null."); Preconditions.checkArgument(minResources.isValid() && preferredResources.isValid() && minResources.lessThanOrEqual(preferredResources), "The values in resources must be not less than 0 and the preferred resources must be greater than the min resources."); this.minResources = minResources; this.preferredResources = preferredResources; return this; }
Sets the minimum and preferred resources for the iteration. This overrides the default resources. The lower and upper resource limits will be considered by the dynamic resource resizing feature planned for the future. @param minResources The minimum resources for the iteration. @param preferredResources The preferred resources for the iteration. @return The iteration with set minimum and preferred resources.
private DeltaIteration<ST, WT> setResources(ResourceSpec resources) { Preconditions.checkNotNull(resources, "The resources must be not null."); Preconditions.checkArgument(resources.isValid(), "The values in resources must be not less than 0."); this.minResources = resources; this.preferredResources = resources; return this; }
Sets the resources for the iteration; the minimum and preferred resources are the same by default. The lower and upper resource limits will be considered by the dynamic resource resizing feature planned for the future. @param resources The resources for the iteration. @return The iteration with set minimum and preferred resources.
@PublicEvolving public DeltaIteration<ST, WT> registerAggregator(String name, Aggregator<?> aggregator) { this.aggregators.registerAggregator(name, aggregator); return this; }
Registers an {@link Aggregator} for the iteration. Aggregators can be used to maintain simple statistics during the iteration, such as number of elements processed. The aggregators compute global aggregates: After each iteration step, the values are globally aggregated to produce one aggregate that represents statistics across all parallel instances. The value of an aggregator can be accessed in the next iteration. <p>Aggregators can be accessed inside a function via the {@link org.apache.flink.api.common.functions.AbstractRichFunction#getIterationRuntimeContext()} method. @param name The name under which the aggregator is registered. @param aggregator The aggregator class. @return The DeltaIteration itself, to allow chaining function calls.
@PublicEvolving public <X extends Value> DeltaIteration<ST, WT> registerAggregationConvergenceCriterion( String name, Aggregator<X> aggregator, ConvergenceCriterion<X> convergenceCheck) { this.aggregators.registerAggregationConvergenceCriterion(name, aggregator, convergenceCheck); return this; }
Registers an {@link Aggregator} for the iteration together with a {@link ConvergenceCriterion}. For a general description of aggregators, see {@link #registerAggregator(String, Aggregator)} and {@link Aggregator}. At the end of each iteration, the convergence criterion takes the aggregator's global aggregate value and decides whether the iteration should terminate. A typical use case is to have an aggregator that sums up the total error of change in an iteration step and a convergence criterion that signals termination as soon as the aggregate value is below a certain threshold. @param name The name under which the aggregator is registered. @param aggregator The aggregator class. @param convergenceCheck The convergence criterion. @return The DeltaIteration itself, to allow chaining function calls.
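A hedged sketch of the threshold use case described above, using a LongSumAggregator; the aggregator name and threshold are illustrative, and "iteration" is assumed to be an existing DeltaIteration:

// Terminate once fewer than 10 elements changed in a superstep (hypothetical threshold).
iteration.registerAggregationConvergenceCriterion(
    "changed.elements",                     // hypothetical aggregator name
    new LongSumAggregator(),
    new ConvergenceCriterion<LongValue>() {
        @Override
        public boolean isConverged(int iteration, LongValue value) {
            return value.getValue() < 10;
        }
    });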
@Override public void run() { LOG.info("Starting to fetch from {}", this.partitions); // set up the config values try { // create the Kafka consumer that we actually use for fetching consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId); // replace earliest of latest starting offsets with actual offset values fetched from Kafka requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, partitions); LOG.info("Starting to consume {} partitions with consumer thread {}", partitions.size(), getName()); // Now, the actual work starts :-) int offsetOutOfRangeCount = 0; int reconnects = 0; while (running) { // ----------------------------------- partitions list maintenance ---------------------------- // check queue for new partitions to read from: List<KafkaTopicPartitionState<TopicAndPartition>> newPartitions = newPartitionsQueue.pollBatch(); if (newPartitions != null) { // found some new partitions for this thread's broker // the new partitions should already be assigned a starting offset checkAllPartitionsHaveDefinedStartingOffsets(newPartitions); // if the new partitions are to start from earliest or latest offsets, // we need to replace them with actual values from Kafka requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, newPartitions); // add the new partitions (and check they are not already in there) for (KafkaTopicPartitionState<TopicAndPartition> newPartition: newPartitions) { if (partitions.contains(newPartition)) { throw new IllegalStateException("Adding partition " + newPartition + " to subscribed partitions even though it is already subscribed"); } partitions.add(newPartition); } LOG.info("Adding {} new partitions to consumer thread {}", newPartitions.size(), getName()); LOG.debug("Partitions list: {}", newPartitions); } if (partitions.size() == 0) { if (newPartitionsQueue.close()) { // close succeeded. Closing thread running = false; LOG.info("Consumer thread {} does not have any partitions assigned anymore. Stopping thread.", getName()); // add the wake-up marker into the queue to make the main thread // immediately wake up and termination faster unassignedPartitions.add(MARKER); break; } else { // close failed: fetcher main thread concurrently added new partitions into the queue. // go to top of loop again and get the new partitions continue; } } // ----------------------------------- request / response with kafka ---------------------------- FetchRequestBuilder frb = new FetchRequestBuilder(); frb.clientId(clientId); frb.maxWait(maxWait); frb.minBytes(minBytes); for (KafkaTopicPartitionState<?> partition : partitions) { frb.addFetch( partition.getKafkaTopicPartition().getTopic(), partition.getKafkaTopicPartition().getPartition(), partition.getOffset() + 1, // request the next record fetchSize); } kafka.api.FetchRequest fetchRequest = frb.build(); LOG.debug("Issuing fetch request {}", fetchRequest); FetchResponse fetchResponse; try { fetchResponse = consumer.fetch(fetchRequest); } catch (Throwable cce) { //noinspection ConstantConditions if (cce instanceof ClosedChannelException) { LOG.warn("Fetch failed because of ClosedChannelException."); LOG.debug("Full exception", cce); // we don't know if the broker is overloaded or unavailable. // retry a few times, then return ALL partitions for new leader lookup if (++reconnects >= reconnectLimit) { LOG.warn("Unable to reach broker after {} retries. 
Returning all current partitions", reconnectLimit); for (KafkaTopicPartitionState<TopicAndPartition> fp: this.partitions) { unassignedPartitions.add(fp); } this.partitions.clear(); continue; // jump to top of loop: will close thread or subscribe to new partitions } try { consumer.close(); } catch (Throwable t) { LOG.warn("Error while closing consumer connection", t); } // delay & retry Thread.sleep(100); consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId); continue; // retry } else { throw cce; } } reconnects = 0; // ---------------------------------------- error handling ---------------------------- if (fetchResponse == null) { throw new IOException("Fetch from Kafka failed (request returned null)"); } if (fetchResponse.hasError()) { String exception = ""; List<KafkaTopicPartitionState<TopicAndPartition>> partitionsToGetOffsetsFor = new ArrayList<>(); // iterate over partitions to get individual error codes Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator(); boolean partitionsRemoved = false; while (partitionsIterator.hasNext()) { final KafkaTopicPartitionState<TopicAndPartition> fp = partitionsIterator.next(); short code = fetchResponse.errorCode(fp.getTopic(), fp.getPartition()); if (code == ErrorMapping.OffsetOutOfRangeCode()) { // we were asked to read from an out-of-range-offset (maybe set wrong in Zookeeper) // Kafka's high level consumer is resetting the offset according to 'auto.offset.reset' partitionsToGetOffsetsFor.add(fp); } else if (code == ErrorMapping.NotLeaderForPartitionCode() || code == ErrorMapping.LeaderNotAvailableCode() || code == ErrorMapping.BrokerNotAvailableCode() || code == ErrorMapping.UnknownCode()) { // the broker we are connected to is not the leader for the partition. LOG.warn("{} is not the leader of {}. Reassigning leader for partition", broker, fp); LOG.debug("Error code = {}", code); unassignedPartitions.add(fp); partitionsIterator.remove(); // unsubscribe the partition ourselves partitionsRemoved = true; } else if (code != ErrorMapping.NoError()) { exception += "\nException for " + fp.getTopic() + ":" + fp.getPartition() + ": " + ExceptionUtils.stringifyException(ErrorMapping.exceptionFor(code)); } } if (partitionsToGetOffsetsFor.size() > 0) { // safeguard against an infinite loop. if (offsetOutOfRangeCount++ > 3) { throw new RuntimeException("Found invalid offsets more than three times in partitions " + partitionsToGetOffsetsFor + " Exceptions: " + exception); } // get valid offsets for these partitions and try again. LOG.warn("The following partitions had an invalid offset: {}", partitionsToGetOffsetsFor); requestAndSetSpecificTimeOffsetsFromKafka(consumer, partitionsToGetOffsetsFor, invalidOffsetBehavior); LOG.warn("The new partition offsets are {}", partitionsToGetOffsetsFor); continue; // jump back to create a new fetch request. The offset has not been touched. } else if (partitionsRemoved) { continue; // create new fetch request } else { // partitions failed on an error throw new IOException("Error while fetching from broker '" + broker + "': " + exception); } } else { // successful fetch, reset offsetOutOfRangeCount. 
offsetOutOfRangeCount = 0; } // ----------------------------------- process fetch response ---------------------------- int messagesInFetch = 0; int deletedMessages = 0; Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator(); partitionsLoop: while (partitionsIterator.hasNext()) { final KafkaTopicPartitionState<TopicAndPartition> currentPartition = partitionsIterator.next(); final ByteBufferMessageSet messageSet = fetchResponse.messageSet( currentPartition.getTopic(), currentPartition.getPartition()); for (MessageAndOffset msg : messageSet) { if (running) { messagesInFetch++; final ByteBuffer payload = msg.message().payload(); final long offset = msg.offset(); if (offset <= currentPartition.getOffset()) { // we have seen this message already LOG.info("Skipping message with offset " + msg.offset() + " because we have seen messages until (including) " + currentPartition.getOffset() + " from topic/partition " + currentPartition.getTopic() + '/' + currentPartition.getPartition() + " already"); continue; } // If the message value is null, this represents a delete command for the message key. // Log this and pass it on to the client who might want to also receive delete messages. byte[] valueBytes; if (payload == null) { deletedMessages++; valueBytes = null; } else { valueBytes = new byte[payload.remaining()]; payload.get(valueBytes); } // put key into byte array byte[] keyBytes = null; int keySize = msg.message().keySize(); if (keySize >= 0) { // message().hasKey() is doing the same. We save one int deserialization ByteBuffer keyPayload = msg.message().key(); keyBytes = new byte[keySize]; keyPayload.get(keyBytes); } final T value = deserializer.deserialize( new ConsumerRecord<>( currentPartition.getTopic(), currentPartition.getPartition(), keyBytes, valueBytes, offset)); if (deserializer.isEndOfStream(value)) { // remove partition from subscribed partitions. partitionsIterator.remove(); continue partitionsLoop; } owner.emitRecord(value, currentPartition, offset); } else { // no longer running return; } } } LOG.debug("This fetch contained {} messages ({} deleted messages)", messagesInFetch, deletedMessages); } // end of fetch loop if (!newPartitionsQueue.close()) { throw new Exception("Bug: Cleanly leaving fetcher thread without having a closed queue."); } } catch (Throwable t) { // report to the fetcher's error handler errorHandler.reportError(t); } finally { if (consumer != null) { // closing the consumer should not fail the program try { consumer.close(); } catch (Throwable t) { LOG.error("Error while closing the Kafka simple consumer", t); } } } }
private static void requestAndSetSpecificTimeOffsetsFromKafka( SimpleConsumer consumer, List<KafkaTopicPartitionState<TopicAndPartition>> partitions, long whichTime) throws IOException { Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>(); for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) { requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(whichTime, 1)); } requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo); }
Request offsets before a specific time for a set of partitions, via a Kafka consumer. @param consumer The consumer connected to lead broker @param partitions The list of partitions we need offsets for @param whichTime The type of time we are requesting. -1 and -2 are special constants (See OffsetRequest)
private static void requestAndSetOffsetsFromKafka( SimpleConsumer consumer, List<KafkaTopicPartitionState<TopicAndPartition>> partitionStates, Map<TopicAndPartition, PartitionOffsetRequestInfo> partitionToRequestInfo) throws IOException { int retries = 0; OffsetResponse response; while (true) { kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest( partitionToRequestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId()); response = consumer.getOffsetsBefore(request); if (response.hasError()) { StringBuilder exception = new StringBuilder(); for (KafkaTopicPartitionState<TopicAndPartition> part : partitionStates) { short code; if ((code = response.errorCode(part.getTopic(), part.getPartition())) != ErrorMapping.NoError()) { exception.append("\nException for topic=").append(part.getTopic()) .append(" partition=").append(part.getPartition()).append(": ") .append(ExceptionUtils.stringifyException(ErrorMapping.exceptionFor(code))); } } if (++retries >= 3) { throw new IOException("Unable to get last offset for partitions " + partitionStates + ": " + exception.toString()); } else { LOG.warn("Unable to get last offset for partitions: Exception(s): {}", exception); } } else { break; // leave retry loop } } for (KafkaTopicPartitionState<TopicAndPartition> part: partitionStates) { // there will be offsets only for partitions that were requested for if (partitionToRequestInfo.containsKey(part.getKafkaPartitionHandle())) { final long offset = response.offsets(part.getTopic(), part.getPartition())[0]; // the offset returned is that of the next record to fetch. because our state reflects the latest // successfully emitted record, we subtract one part.setOffset(offset - 1); } } }
Request offsets from Kafka using the specified offset request information for each partition. The returned offsets are used to set the internal partition states. <p>This method retries three times if the response has an error. @param consumer The consumer connected to the lead broker @param partitionStates the partition states, which will be updated with the offsets fetched from Kafka @param partitionToRequestInfo map of each partition to its offset request info
@Override public void open(Configuration parameters) throws Exception { try { client = new Socket(hostIp, port); outputStream = client.getOutputStream(); streamWriter = new DataOutputViewStreamWrapper(outputStream); } catch (IOException e) { throw new IOException("Cannot connect to the client to send back the stream", e); } }
Initializes the socket connection to the server. @param parameters Configuration.
@Override public void close() throws Exception { try { if (outputStream != null) { outputStream.flush(); outputStream.close(); } // first regular attempt to cleanly close. Failing that will escalate if (client != null) { client.close(); } } catch (Exception e) { throw new IOException("Error while closing connection that streams data back to client at " + hostIp.toString() + ":" + port, e); } finally { // if we failed prior to closing the client, close it if (client != null) { try { client.close(); } catch (Throwable t) { // best effort to close, we do not care about an exception here any more } } } }
Closes the connection with the Socket server.
public static <T> CompletableFuture<T> retry( final Supplier<CompletableFuture<T>> operation, final int retries, final Executor executor) { final CompletableFuture<T> resultFuture = new CompletableFuture<>(); retryOperation(resultFuture, operation, retries, executor); return resultFuture; }
Retry the given operation the given number of times in case of a failure. @param operation to be executed @param retries number of retries in case the operation fails @param executor to use to run the futures @param <T> type of the result @return Future containing either the result of the operation or a {@link RetryException}
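A small usage sketch; queryBackend is an assumed method returning a CompletableFuture<String>, and a direct executor is used only to keep the example short:

// Retry the asynchronous call up to 3 times before failing with a RetryException.
CompletableFuture<String> result = FutureUtils.retry(
    () -> queryBackend(),   // assumed asynchronous operation
    3,                      // number of retries on failure
    Runnable::run);         // direct executor, for brevity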
private static <T> void retryOperation( final CompletableFuture<T> resultFuture, final Supplier<CompletableFuture<T>> operation, final int retries, final Executor executor) { if (!resultFuture.isDone()) { final CompletableFuture<T> operationFuture = operation.get(); operationFuture.whenCompleteAsync( (t, throwable) -> { if (throwable != null) { if (throwable instanceof CancellationException) { resultFuture.completeExceptionally(new RetryException("Operation future was cancelled.", throwable)); } else { if (retries > 0) { retryOperation( resultFuture, operation, retries - 1, executor); } else { resultFuture.completeExceptionally(new RetryException("Could not complete the operation. Number of retries " + "has been exhausted.", throwable)); } } } else { resultFuture.complete(t); } }, executor); resultFuture.whenComplete( (t, throwable) -> operationFuture.cancel(false)); } }
Helper method which retries the provided operation in case of a failure. @param resultFuture to complete @param operation to retry @param retries until giving up @param executor to run the futures @param <T> type of the future's result
public static <T> CompletableFuture<T> retryWithDelay( final Supplier<CompletableFuture<T>> operation, final int retries, final Time retryDelay, final Predicate<Throwable> retryPredicate, final ScheduledExecutor scheduledExecutor) { final CompletableFuture<T> resultFuture = new CompletableFuture<>(); retryOperationWithDelay( resultFuture, operation, retries, retryDelay, retryPredicate, scheduledExecutor); return resultFuture; }
Retry the given operation with the given delay in between failures. @param operation to retry @param retries number of retries @param retryDelay delay between retries @param retryPredicate Predicate to test whether an exception is retryable @param scheduledExecutor executor to be used for the retry operation @param <T> type of the result @return Future which retries the given operation a given amount of times and delays the retry in case of failures
public static <T> CompletableFuture<T> retryWithDelay( final Supplier<CompletableFuture<T>> operation, final int retries, final Time retryDelay, final ScheduledExecutor scheduledExecutor) { return retryWithDelay( operation, retries, retryDelay, (throwable) -> true, scheduledExecutor); }
Retry the given operation with the given delay in between failures. @param operation to retry @param retries number of retries @param retryDelay delay between retries @param scheduledExecutor executor to be used for the retry operation @param <T> type of the result @return Future which retries the given operation a given amount of times and delays the retry in case of failures
public static <T> CompletableFuture<T> retrySuccessfulWithDelay( final Supplier<CompletableFuture<T>> operation, final Time retryDelay, final Deadline deadline, final Predicate<T> acceptancePredicate, final ScheduledExecutor scheduledExecutor) { final CompletableFuture<T> resultFuture = new CompletableFuture<>(); retrySuccessfulOperationWithDelay( resultFuture, operation, retryDelay, deadline, acceptancePredicate, scheduledExecutor); return resultFuture; }
Retry the given operation with the given delay in between successful completions where the result does not match a given predicate. @param operation to retry @param retryDelay delay between retries @param deadline A deadline that specifies at what point we should stop retrying @param acceptancePredicate Predicate to test whether the result is acceptable @param scheduledExecutor executor to be used for the retry operation @param <T> type of the result @return Future which retries the given operation a given amount of times and delays the retry in case the predicate isn't matched
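A hedged sketch of polling until an acceptable result arrives; pollStatus and scheduledExecutor are assumed to exist, and the interval and deadline values are illustrative:

// Poll the status every second until it reports "FINISHED" or the 1-minute deadline expires.
CompletableFuture<String> finished = FutureUtils.retrySuccessfulWithDelay(
    () -> pollStatus(),                        // assumed asynchronous status lookup
    Time.seconds(1),
    Deadline.fromNow(Duration.ofMinutes(1)),
    status -> "FINISHED".equals(status),
    scheduledExecutor);                        // assumed ScheduledExecutor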
public static <T> CompletableFuture<T> orTimeout(CompletableFuture<T> future, long timeout, TimeUnit timeUnit) { return orTimeout(future, timeout, timeUnit, Executors.directExecutor()); }
Times the given future out after the timeout. @param future to time out @param timeout after which the given future is timed out @param timeUnit time unit of the timeout @param <T> type of the given future @return The timeout enriched future
public static <T> CompletableFuture<T> orTimeout( CompletableFuture<T> future, long timeout, TimeUnit timeUnit, Executor timeoutFailExecutor) { if (!future.isDone()) { final ScheduledFuture<?> timeoutFuture = Delayer.delay( () -> timeoutFailExecutor.execute(new Timeout(future)), timeout, timeUnit); future.whenComplete((T value, Throwable throwable) -> { if (!timeoutFuture.isDone()) { timeoutFuture.cancel(false); } }); } return future; }
Times the given future out after the timeout. @param future to time out @param timeout after which the given future is timed out @param timeUnit time unit of the timeout @param timeoutFailExecutor executor that will complete the future exceptionally after the timeout is reached @param <T> type of the given future @return The timeout enriched future
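A brief sketch, assuming an existing future named slotFuture and a main-thread executor; after the timeout the future is completed exceptionally with a TimeoutException:

// Fail the pending future after 10 seconds, completing it on the given executor.
FutureUtils.orTimeout(
    slotFuture,                 // assumed CompletableFuture<?>
    10, TimeUnit.SECONDS,
    mainThreadExecutor);        // assumed Executor that performs the timeout completion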
public static <T> T runIfNotDoneAndGet(RunnableFuture<T> future) throws ExecutionException, InterruptedException { if (null == future) { return null; } if (!future.isDone()) { future.run(); } return future.get(); }
Run the given {@code RunnableFuture} if it is not done, and then retrieves its result. @param future to run if not done and get @param <T> type of the result @return the result after running the future @throws ExecutionException if a problem occurred @throws InterruptedException if the current thread has been interrupted
public static CompletableFuture<Void> runAfterwards(CompletableFuture<?> future, RunnableWithException runnable) { return runAfterwardsAsync(future, runnable, Executors.directExecutor()); }
Run the given action after the completion of the given future. The given future can be completed normally or exceptionally. In case of an exceptional completion, the action's exception will be added to the initial exception. @param future to wait for its completion @param runnable action which is triggered after the future's completion @return Future which is completed after the action has completed. This future can contain an exception, if an error occurred in the given future or action.
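A short sketch; shutdownFuture and resource are assumed to exist, and the action runs regardless of whether the future completed normally or exceptionally:

// Close the resource once shutdown has finished; any exception from close()
// is attached to (or becomes) the result future's exception.
CompletableFuture<Void> closed = FutureUtils.runAfterwards(
    shutdownFuture,       // assumed CompletableFuture<?>
    resource::close);     // RunnableWithException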
public static CompletableFuture<Void> runAfterwardsAsync(CompletableFuture<?> future, RunnableWithException runnable) { return runAfterwardsAsync(future, runnable, ForkJoinPool.commonPool()); }
Run the given action after the completion of the given future. The given future can be completed normally or exceptionally. In case of an exceptional completion, the action's exception will be added to the initial exception. @param future to wait for its completion @param runnable action which is triggered after the future's completion @return Future which is completed after the action has completed. This future can contain an exception, if an error occurred in the given future or action.
public static CompletableFuture<Void> runAfterwardsAsync( CompletableFuture<?> future, RunnableWithException runnable, Executor executor) { final CompletableFuture<Void> resultFuture = new CompletableFuture<>(); future.whenCompleteAsync( (Object ignored, Throwable throwable) -> { try { runnable.run(); } catch (Throwable e) { throwable = ExceptionUtils.firstOrSuppressed(e, throwable); } if (throwable != null) { resultFuture.completeExceptionally(throwable); } else { resultFuture.complete(null); } }, executor); return resultFuture; }
Run the given action after the completion of the given future. The given future can be completed normally or exceptionally. In case of an exceptional completion, the action's exception will be added to the initial exception. @param future to wait for its completion @param runnable action which is triggered after the future's completion @param executor to run the given action @return Future which is completed after the action has completed. This future can contain an exception, if an error occurred in the given future or action.
public static CompletableFuture<Void> composeAfterwards( CompletableFuture<?> future, Supplier<CompletableFuture<?>> composedAction) { final CompletableFuture<Void> resultFuture = new CompletableFuture<>(); future.whenComplete( (Object outerIgnored, Throwable outerThrowable) -> { final CompletableFuture<?> composedActionFuture = composedAction.get(); composedActionFuture.whenComplete( (Object innerIgnored, Throwable innerThrowable) -> { if (innerThrowable != null) { resultFuture.completeExceptionally(ExceptionUtils.firstOrSuppressed(innerThrowable, outerThrowable)); } else if (outerThrowable != null) { resultFuture.completeExceptionally(outerThrowable); } else { resultFuture.complete(null); } }); }); return resultFuture; }
Run the given asynchronous action after the completion of the given future. The given future can be completed normally or exceptionally. In case of an exceptional completion, the asynchronous action's exception will be added to the initial exception. @param future to wait for its completion @param composedAction asynchronous action which is triggered after the future's completion @return Future which is completed after the asynchronous action has completed. This future can contain an exception if an error occurred in the given future or asynchronous action.
public static <T> ConjunctFuture<Collection<T>> combineAll(Collection<? extends CompletableFuture<? extends T>> futures) { checkNotNull(futures, "futures"); return new ResultConjunctFuture<>(futures); }
Creates a future that is complete once multiple other futures completed. The future fails (completes exceptionally) once one of the futures in the conjunction fails. Upon successful completion, the future returns the collection of the futures' results. <p>The ConjunctFuture gives access to how many Futures in the conjunction have already completed successfully, via {@link ConjunctFuture#getNumFuturesCompleted()}. @param futures The futures that make up the conjunction. No null entries are allowed. @return The ConjunctFuture that completes once all given futures are complete (or one fails).
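A small usage sketch, assuming two independent lookups that each return a CompletableFuture<Integer>:

// Collect both results; the conjunct future fails if either lookup fails.
FutureUtils.ConjunctFuture<Collection<Integer>> all =
    FutureUtils.combineAll(Arrays.asList(lookupA, lookupB));
all.thenAccept(results -> results.forEach(System.out::println));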
public static ConjunctFuture<Void> waitForAll(Collection<? extends CompletableFuture<?>> futures) { checkNotNull(futures, "futures"); return new WaitingConjunctFuture(futures); }
Creates a future that is complete once all of the given futures have completed. The future fails (completes exceptionally) once one of the given futures fails. <p>The ConjunctFuture gives access to how many Futures have already completed successfully, via {@link ConjunctFuture#getNumFuturesCompleted()}. @param futures The futures to wait on. No null entries are allowed. @return The WaitingFuture that completes once all given futures are complete (or one fails).
public static <T>CompletableFuture<T> completedExceptionally(Throwable cause) { CompletableFuture<T> result = new CompletableFuture<>(); result.completeExceptionally(cause); return result; }
Returns an exceptionally completed {@link CompletableFuture}. @param cause to complete the future with @param <T> type of the future @return An exceptionally completed CompletableFuture
public static <T> CompletableFuture<T> supplyAsync(SupplierWithException<T, ?> supplier, Executor executor) { return CompletableFuture.supplyAsync( () -> { try { return supplier.get(); } catch (Throwable e) { throw new CompletionException(e); } }, executor); }
Returns a future which is completed with the result of the {@link SupplierWithException}. @param supplier to provide the future's value @param executor to execute the supplier @param <T> type of the result @return Future which is completed with the value of the supplier
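A hedged sketch showing why the wrapper is useful: the supplier may throw a checked exception, which is rethrown as a CompletionException; the file path is purely hypothetical and a direct executor is used for brevity:

// Files.readAllBytes throws IOException, which a plain CompletableFuture.supplyAsync
// supplier could not declare; the wrapper converts it into a CompletionException.
CompletableFuture<byte[]> contents = FutureUtils.supplyAsync(
    () -> Files.readAllBytes(Paths.get("/tmp/state.bin")),   // hypothetical path
    Runnable::run);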
public static <T, U extends T> CompletableFuture<T> toJava(Future<U> scalaFuture) { final CompletableFuture<T> result = new CompletableFuture<>(); scalaFuture.onComplete(new OnComplete<U>() { @Override public void onComplete(Throwable failure, U success) { if (failure != null) { result.completeExceptionally(failure); } else { result.complete(success); } } }, Executors.directExecutionContext()); return result; }
Converts a Scala {@link Future} to a {@link CompletableFuture}. @param scalaFuture to convert to a Java 8 CompletableFuture @param <T> type of the future value @param <U> type of the original future @return Java 8 CompletableFuture
public static <IN, OUT> CompletableFuture<OUT> thenApplyAsyncIfNotDone( CompletableFuture<IN> completableFuture, Executor executor, Function<? super IN, ? extends OUT> applyFun) { return completableFuture.isDone() ? completableFuture.thenApply(applyFun) : completableFuture.thenApplyAsync(applyFun, executor); }
This function takes a {@link CompletableFuture} and a function to apply to this future. If the input future is already done, this function returns {@link CompletableFuture#thenApply(Function)}. Otherwise, the return value is {@link CompletableFuture#thenApplyAsync(Function, Executor)} with the given executor. @param completableFuture the completable future for which we want to apply. @param executor the executor to run the apply function if the future is not yet done. @param applyFun the function to apply. @param <IN> type of the input future. @param <OUT> type of the output future. @return a completable future that is applying the given function to the input future.
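A brief sketch of the intended usage pattern, assuming an existing nameFuture and an I/O executor; the same pattern applies to the other *IfNotDone helpers below:

// Apply synchronously if the future is already done, otherwise on the given executor.
CompletableFuture<Integer> length = FutureUtils.thenApplyAsyncIfNotDone(
    nameFuture,        // assumed CompletableFuture<String>
    ioExecutor,        // assumed Executor for the not-yet-done case
    String::length);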
public static <IN, OUT> CompletableFuture<OUT> thenComposeAsyncIfNotDone( CompletableFuture<IN> completableFuture, Executor executor, Function<? super IN, ? extends CompletionStage<OUT>> composeFun) { return completableFuture.isDone() ? completableFuture.thenCompose(composeFun) : completableFuture.thenComposeAsync(composeFun, executor); }
This function takes a {@link CompletableFuture} and a function to compose with this future. If the input future is already done, this function returns {@link CompletableFuture#thenCompose(Function)}. Otherwise, the return value is {@link CompletableFuture#thenComposeAsync(Function, Executor)} with the given executor. @param completableFuture the completable future for which we want to compose. @param executor the executor to run the compose function if the future is not yet done. @param composeFun the function to compose. @param <IN> type of the input future. @param <OUT> type of the output future. @return a completable future that is a composition of the input future and the function.
public static <IN> CompletableFuture<IN> whenCompleteAsyncIfNotDone( CompletableFuture<IN> completableFuture, Executor executor, BiConsumer<? super IN, ? super Throwable> whenCompleteFun) { return completableFuture.isDone() ? completableFuture.whenComplete(whenCompleteFun) : completableFuture.whenCompleteAsync(whenCompleteFun, executor); }
This function takes a {@link CompletableFuture} and a bi-consumer to call on completion of this future. If the input future is already done, this function returns {@link CompletableFuture#whenComplete(BiConsumer)}. Otherwise, the return value is {@link CompletableFuture#whenCompleteAsync(BiConsumer, Executor)} with the given executor. @param completableFuture the completable future for which we want to call #whenComplete. @param executor the executor to run the whenComplete function if the future is not yet done. @param whenCompleteFun the bi-consumer function to call when the future is completed. @param <IN> type of the input future. @return the new completion stage.
public static <IN> CompletableFuture<Void> thenAcceptAsyncIfNotDone( CompletableFuture<IN> completableFuture, Executor executor, Consumer<? super IN> consumer) { return completableFuture.isDone() ? completableFuture.thenAccept(consumer) : completableFuture.thenAcceptAsync(consumer, executor); }
This function takes a {@link CompletableFuture} and a consumer to accept the result of this future. If the input future is already done, this function returns {@link CompletableFuture#thenAccept(Consumer)}. Otherwise, the return value is {@link CompletableFuture#thenAcceptAsync(Consumer, Executor)} with the given executor. @param completableFuture the completable future for which we want to call #thenAccept. @param executor the executor to run the thenAccept function if the future is not yet done. @param consumer the consumer function to call when the future is completed. @param <IN> type of the input future. @return the new completion stage.
public static <IN, OUT> CompletableFuture<OUT> handleAsyncIfNotDone( CompletableFuture<IN> completableFuture, Executor executor, BiFunction<? super IN, Throwable, ? extends OUT> handler) { return completableFuture.isDone() ? completableFuture.handle(handler) : completableFuture.handleAsync(handler, executor); }
This function takes a {@link CompletableFuture} and a handler function for the result of this future. If the input future is already done, this function returns {@link CompletableFuture#handle(BiFunction)}. Otherwise, the return value is {@link CompletableFuture#handleAsync(BiFunction, Executor)} with the given executor. @param completableFuture the completable future for which we want to call #handle. @param executor the executor to run the handle function if the future is not yet done. @param handler the handler function to call when the future is completed. @param <IN> type of the handler input argument. @param <OUT> type of the handler return value. @return the new completion stage.
public static String normalizeStartupMode(StartupMode startupMode) { switch (startupMode) { case EARLIEST: return CONNECTOR_STARTUP_MODE_VALUE_EARLIEST; case LATEST: return CONNECTOR_STARTUP_MODE_VALUE_LATEST; case GROUP_OFFSETS: return CONNECTOR_STARTUP_MODE_VALUE_GROUP_OFFSETS; case SPECIFIC_OFFSETS: return CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS; } throw new IllegalArgumentException("Invalid startup mode."); }
utilities
public static void main(String[] args) throws Exception { final KafkaCollector[] collectors = new KafkaCollector[NUM_PARTITIONS]; // create the generator threads for (int i = 0; i < collectors.length; i++) { collectors[i] = new KafkaCollector(BROKER_ADDRESS, TOPIC, i); } StandaloneThreadedGenerator.runGenerator(collectors); }
Entry point to the Kafka data producer.
@Override public void configure(Configuration parameters) { // enforce sequential configuration() calls synchronized (CONFIGURE_MUTEX) { if (mapreduceInputFormat instanceof Configurable) { ((Configurable) mapreduceInputFormat).setConf(configuration); } } }
private void writeObject(ObjectOutputStream out) throws IOException { super.write(out); out.writeUTF(this.mapreduceInputFormat.getClass().getName()); out.writeUTF(this.keyClass.getName()); out.writeUTF(this.valueClass.getName()); this.configuration.write(out); }
public void start( @Nonnull JobMasterId jobMasterId, @Nonnull String newJobManagerAddress, @Nonnull ComponentMainThreadExecutor componentMainThreadExecutor) throws Exception { this.jobMasterId = jobMasterId; this.jobManagerAddress = newJobManagerAddress; this.componentMainThreadExecutor = componentMainThreadExecutor; scheduleRunAsync(this::checkIdleSlot, idleSlotTimeout); if (log.isDebugEnabled()) { scheduleRunAsync(this::scheduledLogStatus, STATUS_LOG_INTERVAL_MS, TimeUnit.MILLISECONDS); } }
Start the slot pool to accept RPC calls. @param jobMasterId The necessary leader id for running the job. @param newJobManagerAddress for the slot requests which are sent to the resource manager @param componentMainThreadExecutor The main thread executor for the job master's main thread.
@Override public void suspend() { componentMainThreadExecutor.assertRunningInMainThread(); log.info("Suspending SlotPool."); // cancel all pending allocations --> we can request these slots // again after we regained the leadership Set<AllocationID> allocationIds = pendingRequests.keySetB(); for (AllocationID allocationId : allocationIds) { resourceManagerGateway.cancelSlotRequest(allocationId); } // do not accept any requests jobMasterId = null; resourceManagerGateway = null; // Clear (but not release!) the available slots. The TaskManagers should re-register them // at the new leader JobManager/SlotPool clear(); }
Suspends this pool, meaning it has lost its authority to accept and distribute slots.
@Override public void connectToResourceManager(@Nonnull ResourceManagerGateway resourceManagerGateway) { this.resourceManagerGateway = checkNotNull(resourceManagerGateway); // work on all slots waiting for this connection for (PendingRequest pendingRequest : waitingForResourceManager.values()) { requestSlotFromResourceManager(resourceManagerGateway, pendingRequest); } // all sent off waitingForResourceManager.clear(); }
@Nonnull private CompletableFuture<AllocatedSlot> requestNewAllocatedSlotInternal( @Nonnull SlotRequestId slotRequestId, @Nonnull ResourceProfile resourceProfile, @Nonnull Time timeout) { componentMainThreadExecutor.assertRunningInMainThread(); final PendingRequest pendingRequest = new PendingRequest( slotRequestId, resourceProfile); // register request timeout FutureUtils .orTimeout( pendingRequest.getAllocatedSlotFuture(), timeout.toMilliseconds(), TimeUnit.MILLISECONDS, componentMainThreadExecutor) .whenComplete( (AllocatedSlot ignored, Throwable throwable) -> { if (throwable instanceof TimeoutException) { timeoutPendingSlotRequest(slotRequestId); } }); if (resourceManagerGateway == null) { stashRequestWaitingForResourceManager(pendingRequest); } else { requestSlotFromResourceManager(resourceManagerGateway, pendingRequest); } return pendingRequest.getAllocatedSlotFuture(); }
Requests a new slot with the given {@link ResourceProfile} from the ResourceManager. If there is currently no ResourceManager connected, then the request is stashed and sent once a new ResourceManager is connected. @param slotRequestId identifying the requested slot @param resourceProfile which the requested slot should fulfill @param timeout timeout before the slot allocation times out @return An {@link AllocatedSlot} future which is completed once the slot is offered to the {@link SlotPool}
@Override public void releaseSlot(@Nonnull SlotRequestId slotRequestId, @Nullable Throwable cause) { componentMainThreadExecutor.assertRunningInMainThread(); log.debug("Releasing slot [{}] because: {}", slotRequestId, cause != null ? cause.getMessage() : "null"); releaseSingleSlot(slotRequestId, cause); }
@Nullable private PendingRequest removePendingRequest(SlotRequestId requestId) { PendingRequest result = waitingForResourceManager.remove(requestId); if (result != null) { // sanity check assert !pendingRequests.containsKeyA(requestId) : "A pending requests should only be part of either " + "the pendingRequests or waitingForResourceManager but not both."; return result; } else { return pendingRequests.removeKeyA(requestId); } }
Checks whether there exists a pending request with the given slot request id and removes it from the internal data structures. @param requestId identifying the pending request @return pending request if there is one, otherwise null
private void tryFulfillSlotRequestOrMakeAvailable(AllocatedSlot allocatedSlot) { Preconditions.checkState(!allocatedSlot.isUsed(), "Provided slot is still in use."); final PendingRequest pendingRequest = pollMatchingPendingRequest(allocatedSlot); if (pendingRequest != null) { log.debug("Fulfilling pending slot request [{}] early with returned slot [{}]", pendingRequest.getSlotRequestId(), allocatedSlot.getAllocationId()); allocatedSlots.add(pendingRequest.getSlotRequestId(), allocatedSlot); pendingRequest.getAllocatedSlotFuture().complete(allocatedSlot); } else { log.debug("Adding returned slot [{}] to available slots", allocatedSlot.getAllocationId()); availableSlots.add(allocatedSlot, clock.relativeTimeMillis()); } }
Tries to fulfill a pending slot request with the given allocated slot, or adds the allocated slot to the set of available slots if no matching request is available. @param allocatedSlot which shall be returned
boolean offerSlot( final TaskManagerLocation taskManagerLocation, final TaskManagerGateway taskManagerGateway, final SlotOffer slotOffer) { componentMainThreadExecutor.assertRunningInMainThread(); // check if this TaskManager is valid final ResourceID resourceID = taskManagerLocation.getResourceID(); final AllocationID allocationID = slotOffer.getAllocationId(); if (!registeredTaskManagers.contains(resourceID)) { log.debug("Received outdated slot offering [{}] from unregistered TaskManager: {}", slotOffer.getAllocationId(), taskManagerLocation); return false; } // check whether we have already using this slot AllocatedSlot existingSlot; if ((existingSlot = allocatedSlots.get(allocationID)) != null || (existingSlot = availableSlots.get(allocationID)) != null) { // we need to figure out if this is a repeated offer for the exact same slot, // or another offer that comes from a different TaskManager after the ResourceManager // re-tried the request // we write this in terms of comparing slot IDs, because the Slot IDs are the identifiers of // the actual slots on the TaskManagers // Note: The slotOffer should have the SlotID final SlotID existingSlotId = existingSlot.getSlotId(); final SlotID newSlotId = new SlotID(taskManagerLocation.getResourceID(), slotOffer.getSlotIndex()); if (existingSlotId.equals(newSlotId)) { log.info("Received repeated offer for slot [{}]. Ignoring.", allocationID); // return true here so that the sender will get a positive acknowledgement to the retry // and mark the offering as a success return true; } else { // the allocation has been fulfilled by another slot, reject the offer so the task executor // will offer the slot to the resource manager return false; } } final AllocatedSlot allocatedSlot = new AllocatedSlot( allocationID, taskManagerLocation, slotOffer.getSlotIndex(), slotOffer.getResourceProfile(), taskManagerGateway); // check whether we have request waiting for this slot PendingRequest pendingRequest = pendingRequests.removeKeyB(allocationID); if (pendingRequest != null) { // we were waiting for this! allocatedSlots.add(pendingRequest.getSlotRequestId(), allocatedSlot); if (!pendingRequest.getAllocatedSlotFuture().complete(allocatedSlot)) { // we could not complete the pending slot future --> try to fulfill another pending request allocatedSlots.remove(pendingRequest.getSlotRequestId()); tryFulfillSlotRequestOrMakeAvailable(allocatedSlot); } else { log.debug("Fulfilled slot request [{}] with allocated slot [{}].", pendingRequest.getSlotRequestId(), allocationID); } } else { // we were actually not waiting for this: // - could be that this request had been fulfilled // - we are receiving the slots from TaskManagers after becoming leaders tryFulfillSlotRequestOrMakeAvailable(allocatedSlot); } // we accepted the request in any case. slot will be released after it idled for // too long and timed out return true; }
Slot offering by a TaskExecutor with an AllocationID. The AllocationID was originally generated by this pool and is transferred through the ResourceManager to the TaskManager. We use it to distinguish the different allocations we issued. A slot offering may be rejected if something mismatches or there is actually no pending request waiting for this slot (it may have been fulfilled by some other returned slot). @param taskManagerLocation location from which the offer comes @param taskManagerGateway TaskManager gateway @param slotOffer the offered slot @return True if we accept the offering
@Override
public Optional<ResourceID> failAllocation(final AllocationID allocationID, final Exception cause) {

	componentMainThreadExecutor.assertRunningInMainThread();

	final PendingRequest pendingRequest = pendingRequests.removeKeyB(allocationID);
	if (pendingRequest != null) {
		// request was still pending
		failPendingRequest(pendingRequest, cause);
		return Optional.empty();
	}
	else {
		return tryFailingAllocatedSlot(allocationID, cause);
	}

	// TODO: add some unit tests when the previous two are ready, the allocation may fail at any phase
}
Fails the specified allocation and releases the corresponding slot if we have one. This may be triggered by the JobManager when a slot allocation fails with an rpcTimeout, or by the TaskManager when it finds out something went wrong with the slot and decides to take it back. @param allocationID Represents the allocation which should be failed @param cause The cause of the failure @return Optional resource ID of the task executor if it has no more slots registered
@Override public boolean registerTaskManager(final ResourceID resourceID) { componentMainThreadExecutor.assertRunningInMainThread(); log.debug("Register new TaskExecutor {}.", resourceID); return registeredTaskManagers.add(resourceID); }
Registers a TaskManager with this pool; only slots coming from a registered TaskManager will be considered valid. This also provides a way for us to keep "dead" or "abnormal" TaskManagers out of this pool. @param resourceID The id of the TaskManager @return true if the TaskManager was not registered before, false otherwise
@Override public boolean releaseTaskManager(final ResourceID resourceId, final Exception cause) { componentMainThreadExecutor.assertRunningInMainThread(); if (registeredTaskManagers.remove(resourceId)) { releaseTaskManagerInternal(resourceId, cause); return true; } else { return false; } }
Unregisters a TaskManager from this pool; all related slots will be released and their tasks canceled. Called when we find that a TaskManager has become "dead" or "abnormal" and we decide not to use slots from it anymore. @param resourceId The id of the TaskManager @param cause for the releasing of the TaskManager
private void checkIdleSlot() { // The timestamp in SlotAndTimestamp is relative final long currentRelativeTimeMillis = clock.relativeTimeMillis(); final List<AllocatedSlot> expiredSlots = new ArrayList<>(availableSlots.size()); for (SlotAndTimestamp slotAndTimestamp : availableSlots.availableSlots.values()) { if (currentRelativeTimeMillis - slotAndTimestamp.timestamp > idleSlotTimeout.toMilliseconds()) { expiredSlots.add(slotAndTimestamp.slot); } } final FlinkException cause = new FlinkException("Releasing idle slot."); for (AllocatedSlot expiredSlot : expiredSlots) { final AllocationID allocationID = expiredSlot.getAllocationId(); if (availableSlots.tryRemove(allocationID) != null) { log.info("Releasing idle slot [{}].", allocationID); final CompletableFuture<Acknowledge> freeSlotFuture = expiredSlot.getTaskManagerGateway().freeSlot( allocationID, cause, rpcTimeout); FutureUtils.whenCompleteAsyncIfNotDone( freeSlotFuture, componentMainThreadExecutor, (Acknowledge ignored, Throwable throwable) -> { if (throwable != null) { if (registeredTaskManagers.contains(expiredSlot.getTaskManagerId())) { log.debug("Releasing slot [{}] of registered TaskExecutor {} failed. " + "Trying to fulfill a different slot request.", allocationID, expiredSlot.getTaskManagerId(), throwable); tryFulfillSlotRequestOrMakeAvailable(expiredSlot); } else { log.debug("Releasing slot [{}] failed and owning TaskExecutor {} is no " + "longer registered. Discarding slot.", allocationID, expiredSlot.getTaskManagerId()); } } }); } } scheduleRunAsync(this::checkIdleSlot, idleSlotTimeout); }
Checks the available slots and releases those that have been idle for longer than the configured idle slot timeout.
private void clear() { availableSlots.clear(); allocatedSlots.clear(); pendingRequests.clear(); waitingForResourceManager.clear(); registeredTaskManagers.clear(); }
Clear the internal state of the SlotPool.
protected void scheduleRunAsync(Runnable runnable, long delay, TimeUnit unit) { componentMainThreadExecutor.schedule(runnable, delay, unit); }
Executes the runnable in the main thread of the underlying RPC endpoint, after the given delay. @param runnable Runnable to be executed @param delay The delay after which the runnable will be executed @param unit The time unit of the delay
public static RestServerEndpointConfiguration fromConfiguration(Configuration config) throws ConfigurationException { Preconditions.checkNotNull(config); final String restAddress = Preconditions.checkNotNull(config.getString(RestOptions.ADDRESS), "%s must be set", RestOptions.ADDRESS.key()); final String restBindAddress = config.getString(RestOptions.BIND_ADDRESS); final String portRangeDefinition = config.getString(RestOptions.BIND_PORT); final SSLHandlerFactory sslHandlerFactory; if (SSLUtils.isRestSSLEnabled(config)) { try { sslHandlerFactory = SSLUtils.createRestServerSSLEngineFactory(config); } catch (Exception e) { throw new ConfigurationException("Failed to initialize SSLEngineFactory for REST server endpoint.", e); } } else { sslHandlerFactory = null; } final Path uploadDir = Paths.get( config.getString(WebOptions.UPLOAD_DIR, config.getString(WebOptions.TMP_DIR)), "flink-web-upload"); final int maxContentLength = config.getInteger(RestOptions.SERVER_MAX_CONTENT_LENGTH); final Map<String, String> responseHeaders = Collections.singletonMap( HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN, config.getString(WebOptions.ACCESS_CONTROL_ALLOW_ORIGIN)); return new RestServerEndpointConfiguration( restAddress, restBindAddress, portRangeDefinition, sslHandlerFactory, uploadDir, maxContentLength, responseHeaders); }
Creates and returns a new {@link RestServerEndpointConfiguration} from the given {@link Configuration}. @param config configuration from which the REST server endpoint configuration should be created @return REST server endpoint configuration @throws ConfigurationException if SSL was configured incorrectly
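A minimal, hedged sketch of assembling such a configuration from a Flink Configuration. The option values (address, port range, temporary directory) are illustrative assumptions, not defaults taken from this code; only options that the factory method above reads are set.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.configuration.WebOptions;
import org.apache.flink.runtime.rest.RestServerEndpointConfiguration;
import org.apache.flink.util.ConfigurationException;

public class RestServerConfigExample {
	public static void main(String[] args) throws ConfigurationException {
		Configuration config = new Configuration();

		// Illustrative values; RestOptions.ADDRESS must be set, the others have defaults.
		config.setString(RestOptions.ADDRESS, "localhost");
		config.setString(RestOptions.BIND_PORT, "8081-8090");
		config.setString(WebOptions.TMP_DIR, "/tmp/flink-web");

		RestServerEndpointConfiguration restConfig =
			RestServerEndpointConfiguration.fromConfiguration(config);
	}
}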
public void startQueryService(RpcService rpcService, ResourceID resourceID) { synchronized (lock) { Preconditions.checkState(!isShutdown(), "The metric registry has already been shut down."); try { metricQueryServiceRpcService = rpcService; queryService = MetricQueryService.createMetricQueryService(rpcService, resourceID, maximumFramesize); queryService.start(); } catch (Exception e) { LOG.warn("Could not start MetricDumpActor. No metrics will be submitted to the WebInterface.", e); } } }
Initializes the MetricQueryService. @param rpcService RpcService to create the MetricQueryService on @param resourceID resource ID used to disambiguate the actor name
@Override @Nullable public String getMetricQueryServiceGatewayRpcAddress() { if (queryService != null) { return queryService.getSelfGateway(MetricQueryServiceGateway.class).getAddress(); } else { return null; } }
Returns the address under which the {@link MetricQueryService} is reachable. @return address of the metric query service
public CompletableFuture<Void> shutdown() { synchronized (lock) { if (isShutdown) { return terminationFuture; } else { isShutdown = true; final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3); final Time gracePeriod = Time.seconds(1L); if (metricQueryServiceRpcService != null) { final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture = metricQueryServiceRpcService.stopService(); terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture); } Throwable throwable = null; for (MetricReporter reporter : reporters) { try { reporter.close(); } catch (Throwable t) { throwable = ExceptionUtils.firstOrSuppressed(t, throwable); } } reporters.clear(); if (throwable != null) { terminationFutures.add( FutureUtils.completedExceptionally( new FlinkException("Could not shut down the metric reporters properly.", throwable))); } final CompletableFuture<Void> executorShutdownFuture = ExecutorUtils.nonBlockingShutdown( gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS, executor); terminationFutures.add(executorShutdownFuture); FutureUtils .completeAll(terminationFutures) .whenComplete( (Void ignored, Throwable error) -> { if (error != null) { terminationFuture.completeExceptionally(error); } else { terminationFuture.complete(null); } }); return terminationFuture; } } }
Shuts down this registry and the associated {@link MetricReporter}. <p>NOTE: This operation is asynchronous and returns a future which is completed once the shutdown operation has been completed. @return Future which is completed once the {@link MetricRegistryImpl} is shut down.
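A short, hedged sketch of waiting for the asynchronous shutdown to finish; the registry instance and the 10-second bound are assumptions for illustration only.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.runtime.metrics.MetricRegistryImpl;

public class MetricRegistryShutdownExample {
	// Sketch only: the caller supplies an already created registry instance.
	static void shutDownAndWait(MetricRegistryImpl registry) throws Exception {
		CompletableFuture<Void> shutdownFuture = registry.shutdown();

		// Block until reporters, query service and executor have terminated
		// (the 10-second bound is illustrative, not a documented default).
		shutdownFuture.get(10, TimeUnit.SECONDS);
	}
}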
@Override public void register(Metric metric, String metricName, AbstractMetricGroup group) { synchronized (lock) { if (isShutdown()) { LOG.warn("Cannot register metric, because the MetricRegistry has already been shut down."); } else { if (reporters != null) { for (int i = 0; i < reporters.size(); i++) { MetricReporter reporter = reporters.get(i); try { if (reporter != null) { FrontMetricGroup front = new FrontMetricGroup<AbstractMetricGroup<?>>(i, group); reporter.notifyOfAddedMetric(metric, metricName, front); } } catch (Exception e) { LOG.warn("Error while registering metric.", e); } } } try { if (queryService != null) { queryService.addMetric(metricName, metric, group); } } catch (Exception e) { LOG.warn("Error while registering metric.", e); } try { if (metric instanceof View) { if (viewUpdater == null) { viewUpdater = new ViewUpdater(executor); } viewUpdater.notifyOfAddedView((View) metric); } } catch (Exception e) { LOG.warn("Error while registering metric.", e); } } } }
------------------------------------------------------------------------
public void shutDown() throws FlinkException { Exception exception = null; try { taskManagerStateStore.shutdown(); } catch (Exception e) { exception = e; } try { memoryManager.shutdown(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { ioManager.shutdown(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { networkEnvironment.shutdown(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { kvStateService.shutdown(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { taskSlotTable.stop(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { jobLeaderService.stop(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } taskEventDispatcher.clearAll(); if (exception != null) { throw new FlinkException("Could not properly shut down the TaskManager services.", exception); } }
Shuts the {@link TaskExecutor} services down.
public static TaskManagerServices fromConfiguration( TaskManagerServicesConfiguration taskManagerServicesConfiguration, TaskManagerMetricGroup taskManagerMetricGroup, ResourceID resourceID, Executor taskIOExecutor, long freeHeapMemoryWithDefrag, long maxJvmHeapMemory) throws Exception { // pre-start checks checkTempDirs(taskManagerServicesConfiguration.getTmpDirPaths()); final TaskEventDispatcher taskEventDispatcher = new TaskEventDispatcher(); final NetworkEnvironment network = new NetworkEnvironment( taskManagerServicesConfiguration.getNetworkConfig(), taskEventDispatcher, taskManagerMetricGroup); network.start(); final KvStateService kvStateService = KvStateService.fromConfiguration(taskManagerServicesConfiguration); kvStateService.start(); final TaskManagerLocation taskManagerLocation = new TaskManagerLocation( resourceID, taskManagerServicesConfiguration.getTaskManagerAddress(), network.getConnectionManager().getDataPort()); // this call has to happen strictly after the network stack has been initialized final MemoryManager memoryManager = createMemoryManager(taskManagerServicesConfiguration, freeHeapMemoryWithDefrag, maxJvmHeapMemory); // start the I/O manager, it will create some temp directories. final IOManager ioManager = new IOManagerAsync(taskManagerServicesConfiguration.getTmpDirPaths()); final BroadcastVariableManager broadcastVariableManager = new BroadcastVariableManager(); final List<ResourceProfile> resourceProfiles = new ArrayList<>(taskManagerServicesConfiguration.getNumberOfSlots()); for (int i = 0; i < taskManagerServicesConfiguration.getNumberOfSlots(); i++) { resourceProfiles.add(ResourceProfile.ANY); } final TimerService<AllocationID> timerService = new TimerService<>( new ScheduledThreadPoolExecutor(1), taskManagerServicesConfiguration.getTimerServiceShutdownTimeout()); final TaskSlotTable taskSlotTable = new TaskSlotTable(resourceProfiles, timerService); final JobManagerTable jobManagerTable = new JobManagerTable(); final JobLeaderService jobLeaderService = new JobLeaderService(taskManagerLocation, taskManagerServicesConfiguration.getRetryingRegistrationConfiguration()); final String[] stateRootDirectoryStrings = taskManagerServicesConfiguration.getLocalRecoveryStateRootDirectories(); final File[] stateRootDirectoryFiles = new File[stateRootDirectoryStrings.length]; for (int i = 0; i < stateRootDirectoryStrings.length; ++i) { stateRootDirectoryFiles[i] = new File(stateRootDirectoryStrings[i], LOCAL_STATE_SUB_DIRECTORY_ROOT); } final TaskExecutorLocalStateStoresManager taskStateManager = new TaskExecutorLocalStateStoresManager( taskManagerServicesConfiguration.isLocalRecoveryEnabled(), stateRootDirectoryFiles, taskIOExecutor); return new TaskManagerServices( taskManagerLocation, memoryManager, ioManager, network, kvStateService, broadcastVariableManager, taskSlotTable, jobManagerTable, jobLeaderService, taskStateManager, taskEventDispatcher); }
Creates and returns the task manager services. @param taskManagerServicesConfiguration task manager configuration @param taskManagerMetricGroup metric group of the task manager @param resourceID resource ID of the task manager @param taskIOExecutor executor for async IO operations @param freeHeapMemoryWithDefrag an estimate of the size of the free heap memory @param maxJvmHeapMemory the maximum JVM heap size @return task manager components @throws Exception
private static MemoryManager createMemoryManager( TaskManagerServicesConfiguration taskManagerServicesConfiguration, long freeHeapMemoryWithDefrag, long maxJvmHeapMemory) throws Exception { // computing the amount of memory to use depends on how much memory is available // it strictly needs to happen AFTER the network stack has been initialized // check if a value has been configured long configuredMemory = taskManagerServicesConfiguration.getConfiguredMemory(); MemoryType memType = taskManagerServicesConfiguration.getMemoryType(); final long memorySize; boolean preAllocateMemory = taskManagerServicesConfiguration.isPreAllocateMemory(); if (configuredMemory > 0) { if (preAllocateMemory) { LOG.info("Using {} MB for managed memory." , configuredMemory); } else { LOG.info("Limiting managed memory to {} MB, memory will be allocated lazily." , configuredMemory); } memorySize = configuredMemory << 20; // megabytes to bytes } else { // similar to #calculateNetworkBufferMemory(TaskManagerServicesConfiguration tmConfig) float memoryFraction = taskManagerServicesConfiguration.getMemoryFraction(); if (memType == MemoryType.HEAP) { // network buffers allocated off-heap -> use memoryFraction of the available heap: long relativeMemSize = (long) (freeHeapMemoryWithDefrag * memoryFraction); if (preAllocateMemory) { LOG.info("Using {} of the currently free heap space for managed heap memory ({} MB)." , memoryFraction , relativeMemSize >> 20); } else { LOG.info("Limiting managed memory to {} of the currently free heap space ({} MB), " + "memory will be allocated lazily." , memoryFraction , relativeMemSize >> 20); } memorySize = relativeMemSize; } else if (memType == MemoryType.OFF_HEAP) { // The maximum heap memory has been adjusted according to the fraction (see // calculateHeapSizeMB(long totalJavaMemorySizeMB, Configuration config)), i.e. // maxJvmHeap = jvmTotalNoNet - jvmTotalNoNet * memoryFraction = jvmTotalNoNet * (1 - memoryFraction) // directMemorySize = jvmTotalNoNet * memoryFraction long directMemorySize = (long) (maxJvmHeapMemory / (1.0 - memoryFraction) * memoryFraction); if (preAllocateMemory) { LOG.info("Using {} of the maximum memory size for managed off-heap memory ({} MB)." , memoryFraction, directMemorySize >> 20); } else { LOG.info("Limiting managed memory to {} of the maximum memory size ({} MB)," + " memory will be allocated lazily.", memoryFraction, directMemorySize >> 20); } memorySize = directMemorySize; } else { throw new RuntimeException("No supported memory type detected."); } } // now start the memory manager final MemoryManager memoryManager; try { memoryManager = new MemoryManager( memorySize, taskManagerServicesConfiguration.getNumberOfSlots(), taskManagerServicesConfiguration.getNetworkConfig().networkBufferSize(), memType, preAllocateMemory); } catch (OutOfMemoryError e) { if (memType == MemoryType.HEAP) { throw new Exception("OutOfMemory error (" + e.getMessage() + ") while allocating the TaskManager heap memory (" + memorySize + " bytes).", e); } else if (memType == MemoryType.OFF_HEAP) { throw new Exception("OutOfMemory error (" + e.getMessage() + ") while allocating the TaskManager off-heap memory (" + memorySize + " bytes).Try increasing the maximum direct memory (-XX:MaxDirectMemorySize)", e); } else { throw e; } } return memoryManager; }
Creates a {@link MemoryManager} from the given {@link TaskManagerServicesConfiguration}. @param taskManagerServicesConfiguration to create the memory manager from @param freeHeapMemoryWithDefrag an estimate of the size of the free heap memory @param maxJvmHeapMemory the maximum JVM heap size @return Memory manager @throws Exception
public static long calculateHeapSizeMB(long totalJavaMemorySizeMB, Configuration config) { Preconditions.checkArgument(totalJavaMemorySizeMB > 0); // subtract the Java memory used for network buffers (always off-heap) final long networkBufMB = NetworkEnvironmentConfiguration.calculateNetworkBufferMemory( totalJavaMemorySizeMB << 20, // megabytes to bytes config) >> 20; // bytes to megabytes final long remainingJavaMemorySizeMB = totalJavaMemorySizeMB - networkBufMB; // split the available Java memory between heap and off-heap final boolean useOffHeap = config.getBoolean(TaskManagerOptions.MEMORY_OFF_HEAP); final long heapSizeMB; if (useOffHeap) { long offHeapSize; String managedMemorySizeDefaultVal = TaskManagerOptions.MANAGED_MEMORY_SIZE.defaultValue(); if (!config.getString(TaskManagerOptions.MANAGED_MEMORY_SIZE).equals(managedMemorySizeDefaultVal)) { try { offHeapSize = MemorySize.parse(config.getString(TaskManagerOptions.MANAGED_MEMORY_SIZE), MEGA_BYTES).getMebiBytes(); } catch (IllegalArgumentException e) { throw new IllegalConfigurationException( "Could not read " + TaskManagerOptions.MANAGED_MEMORY_SIZE.key(), e); } } else { offHeapSize = Long.valueOf(managedMemorySizeDefaultVal); } if (offHeapSize <= 0) { // calculate off-heap section via fraction double fraction = config.getFloat(TaskManagerOptions.MANAGED_MEMORY_FRACTION); offHeapSize = (long) (fraction * remainingJavaMemorySizeMB); } ConfigurationParserUtils.checkConfigParameter(offHeapSize < remainingJavaMemorySizeMB, offHeapSize, TaskManagerOptions.MANAGED_MEMORY_SIZE.key(), "Managed memory size too large for " + networkBufMB + " MB network buffer memory and a total of " + totalJavaMemorySizeMB + " MB JVM memory"); heapSizeMB = remainingJavaMemorySizeMB - offHeapSize; } else { heapSizeMB = remainingJavaMemorySizeMB; } return heapSizeMB; }
Calculates the amount of heap memory to use (to set via <tt>-Xmx</tt> and <tt>-Xms</tt>) based on the total memory to use and the given configuration parameters. @param totalJavaMemorySizeMB overall available memory to use (heap and off-heap) @param config configuration object @return heap memory to use (in megabytes)
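A hedged usage sketch of the heap size calculation; the class name TaskManagerServices is assumed from the surrounding factory methods, and the 4096 MB total plus the off-heap settings are purely illustrative.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.runtime.taskexecutor.TaskManagerServices;

public class HeapSizeExample {
	public static void main(String[] args) {
		Configuration config = new Configuration();

		// Illustrative settings: off-heap managed memory with an explicit size.
		config.setBoolean(TaskManagerOptions.MEMORY_OFF_HEAP, true);
		config.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "1024m");

		// With 4096 MB of total JVM memory the result is roughly
		// 4096 MB - network buffer memory - 1024 MB of managed memory.
		long heapSizeMB = TaskManagerServices.calculateHeapSizeMB(4096, config);
		System.out.println("Heap size to set via -Xmx/-Xms: " + heapSizeMB + " MB");
	}
}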
private static void checkTempDirs(String[] tmpDirs) throws IOException {
	for (String dir : tmpDirs) {
		if (dir != null && !dir.equals("")) {
			File file = new File(dir);
			if (!file.exists()) {
				if (!file.mkdirs()) {
					throw new IOException("Temporary file directory " + file.getAbsolutePath() + " does not exist and could not be created.");
				}
			}
			if (!file.isDirectory()) {
				throw new IOException("Temporary file directory " + file.getAbsolutePath() + " is not a directory.");
			}
			if (!file.canWrite()) {
				throw new IOException("Temporary file directory " + file.getAbsolutePath() + " is not writable.");
			}

			if (LOG.isInfoEnabled()) {
				long totalSpaceGb = file.getTotalSpace() >> 30;
				long usableSpaceGb = file.getUsableSpace() >> 30;
				double usablePercentage = (double) usableSpaceGb / totalSpaceGb * 100;
				String path = file.getAbsolutePath();
				LOG.info(String.format("Temporary file directory '%s': total %d GB, " + "usable %d GB (%.2f%% usable)",
					path, totalSpaceGb, usableSpaceGb, usablePercentage));
			}
		} else {
			throw new IllegalArgumentException("Temporary file directory is null.");
		}
	}
}
Validates that all the directories denoted by the strings do actually exist or can be created, are proper directories (not files), and are writable. @param tmpDirs The array of directory paths to check. @throws IOException Thrown if any of the directories does not exist and cannot be created or is not writable or is a file, rather than a directory.
public static ExternalCatalog findAndCreateExternalCatalog(Descriptor descriptor) { Map<String, String> properties = descriptor.toProperties(); return TableFactoryService .find(ExternalCatalogFactory.class, properties) .createExternalCatalog(properties); }
Returns an external catalog matching the descriptor.
public static <T> TableSource<T> findAndCreateTableSource(Descriptor descriptor) { Map<String, String> properties = descriptor.toProperties(); TableSource tableSource; try { tableSource = TableFactoryService .find(TableSourceFactory.class, properties) .createTableSource(properties); } catch (Throwable t) { throw new TableException("findAndCreateTableSource failed.", t); } return tableSource; }
Returns a table source matching the descriptor.
public static <T> TableSink<T> findAndCreateTableSink(Descriptor descriptor) { Map<String, String> properties = descriptor.toProperties(); TableSink tableSink; try { tableSink = TableFactoryService .find(TableSinkFactory.class, properties) .createTableSink(properties); } catch (Throwable t) { throw new TableException("findAndCreateTableSink failed.", t); } return tableSink; }
Returns a table sink matching the descriptor.
public void setBroadcastVariable(String name, Operator<?> root) { if (name == null) { throw new IllegalArgumentException("The broadcast input name may not be null."); } if (root == null) { throw new IllegalArgumentException("The broadcast input root operator may not be null."); } this.broadcastInputs.put(name, root); }
Binds the result produced by a plan rooted at {@code root} to a variable used by the UDF wrapped in this operator. @param name The name under which the broadcast input is registered. @param root The root of the plan producing this input.
public <T> void setBroadcastVariables(Map<String, Operator<T>> inputs) { this.broadcastInputs.clear(); this.broadcastInputs.putAll(inputs); }
Clears all previous broadcast inputs and binds the given inputs as broadcast variables of this operator. @param inputs The {@code<name, root>} pairs to be set as broadcast inputs.
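At the user-facing DataSet API level, broadcast inputs are usually bound with withBroadcastSet and read inside a rich function; the sketch below uses that standard API and only assumes that it ultimately maps onto the operator-level binding described above.

import java.util.List;

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class BroadcastVariableExample {
	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		DataSet<Integer> toBroadcast = env.fromElements(1, 2, 3);

		env.fromElements("a", "b")
			.map(new RichMapFunction<String, String>() {
				@Override
				public String map(String value) throws Exception {
					// Access the broadcast input by the name it was bound under.
					List<Integer> numbers = getRuntimeContext().getBroadcastVariable("numbers");
					return value + numbers.size();
				}
			})
			.withBroadcastSet(toBroadcast, "numbers") // binds the plan rooted at 'toBroadcast'
			.print();
	}
}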
protected static <U> Class<U>[] asArray(Class<U> clazz) { @SuppressWarnings("unchecked") Class<U>[] array = new Class[] { clazz }; return array; }
Generic utility function that wraps a single class object into an array of that class type. @param <U> The type of the classes. @param clazz The class object to be wrapped. @return An array wrapping the class object.
protected static <U> Class<U>[] emptyClassArray() { @SuppressWarnings("unchecked") Class<U>[] array = new Class[0]; return array; }
Generic utility function that returns an empty class array. @param <U> The type of the classes. @return An empty array of type <tt>Class&lt;U&gt;</tt>.
public static <T0, T1> Tuple2<T0, T1> of(T0 value0, T1 value1) { return new Tuple2<>(value0, value1); }
Creates a new tuple and assigns the given values to the tuple's fields. This is more convenient than using the constructor, because the compiler can infer the generic type arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new Tuple3<Integer, Double, String>(n, x, s)}
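A minimal example of the factory method described above; the compiler infers both type arguments from the values.

import org.apache.flink.api.java.tuple.Tuple2;

public class TupleOfExample {
	public static void main(String[] args) {
		// Equivalent to: new Tuple2<Integer, String>(42, "answer")
		Tuple2<Integer, String> pair = Tuple2.of(42, "answer");
		System.out.println(pair.f0 + " -> " + pair.f1);
	}
}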
@Override public void update() { synchronized (this) { long currentTime = System.currentTimeMillis(); if (currentTime - lastUpdateTime > updateInterval) { lastUpdateTime = currentTime; fetchMetrics(); } } }
This method can be used to signal this MetricFetcher that the metrics are still in use and should be updated.
private void retrieveAndQueryMetrics(String queryServiceAddress) { LOG.debug("Retrieve metric query service gateway for {}", queryServiceAddress); final CompletableFuture<MetricQueryServiceGateway> queryServiceGatewayFuture = queryServiceRetriever.retrieveService(queryServiceAddress); queryServiceGatewayFuture.whenCompleteAsync( (MetricQueryServiceGateway queryServiceGateway, Throwable t) -> { if (t != null) { LOG.debug("Could not retrieve QueryServiceGateway.", t); } else { queryMetrics(queryServiceGateway); } }, executor); }
Retrieves and queries the specified QueryServiceGateway. @param queryServiceAddress address specifying the QueryServiceGateway
private void queryMetrics(final MetricQueryServiceGateway queryServiceGateway) { LOG.debug("Query metrics for {}.", queryServiceGateway.getAddress()); queryServiceGateway .queryMetrics(timeout) .whenCompleteAsync( (MetricDumpSerialization.MetricSerializationResult result, Throwable t) -> { if (t != null) { LOG.debug("Fetching metrics failed.", t); } else { metrics.addAll(deserializer.deserialize(result)); } }, executor); }
Query the metrics from the given QueryServiceGateway. @param queryServiceGateway to query for metrics
public static RelOptCluster create(RelOptPlanner planner, RexBuilder rexBuilder) { return new RelOptCluster(planner, rexBuilder.getTypeFactory(), rexBuilder, new AtomicInteger(0), new HashMap<String, RelNode>()); }
Creates a cluster.
@PublicEvolving public static <T, C> ObjectArrayTypeInfo<T, C> getInfoFor(Class<T> arrayClass, TypeInformation<C> componentInfo) { checkNotNull(arrayClass); checkNotNull(componentInfo); checkArgument(arrayClass.isArray(), "Class " + arrayClass + " must be an array."); return new ObjectArrayTypeInfo<T, C>(arrayClass, componentInfo); }
--------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked") @PublicEvolving public static <T, C> ObjectArrayTypeInfo<T, C> getInfoFor(TypeInformation<C> componentInfo) { checkNotNull(componentInfo); return new ObjectArrayTypeInfo<T, C>( (Class<T>)Array.newInstance(componentInfo.getTypeClass(), 0).getClass(), componentInfo); }
Creates a new {@link org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo} from a {@link TypeInformation} for the component type. <p> This must be used in cases where the complete type of the array is not available as a {@link java.lang.reflect.Type} or {@link java.lang.Class}.
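A brief example of building array type information from a component type only; BasicTypeInfo.STRING_TYPE_INFO is assumed to be available from Flink's standard type information utilities.

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo;

public class ObjectArrayTypeInfoExample {
	public static void main(String[] args) {
		// Type information for String[], derived from the component type alone.
		ObjectArrayTypeInfo<String[], String> arrayInfo =
			ObjectArrayTypeInfo.getInfoFor(BasicTypeInfo.STRING_TYPE_INFO);
		System.out.println(arrayInfo);
	}
}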
@Override public Expression getValueExpression() { return ifThenElse(equalTo(count, literal(0L)), nullOf(getResultType()), div(sum, count)); }
If all inputs are null, count will be 0 and we will get null after the division.
public BucketingSink<T> setFSConfig(Configuration config) { this.fsConfig = new Configuration(); fsConfig.addAll(config); return this; }
Specify a custom {@code Configuration} that will be used when creating the {@link FileSystem} for writing.
public BucketingSink<T> setFSConfig(org.apache.hadoop.conf.Configuration config) { this.fsConfig = new Configuration(); for (Map.Entry<String, String> entry : config) { fsConfig.setString(entry.getKey(), entry.getValue()); } return this; }
Specify a custom {@code Configuration} that will be used when creating the {@link FileSystem} for writing.
private void initFileSystem() throws IOException { if (fs == null) { Path path = new Path(basePath); fs = createHadoopFileSystem(path, fsConfig); } }
Create a file system with the user-defined {@code HDFS} configuration. @throws IOException
private boolean shouldRoll(BucketState<T> bucketState, long currentProcessingTime) throws IOException { boolean shouldRoll = false; int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask(); if (!bucketState.isWriterOpen) { shouldRoll = true; LOG.debug("BucketingSink {} starting new bucket.", subtaskIndex); } else { long writePosition = bucketState.writer.getPos(); if (writePosition > batchSize) { shouldRoll = true; LOG.debug( "BucketingSink {} starting new bucket because file position {} is above batch size {}.", subtaskIndex, writePosition, batchSize); } else { if (currentProcessingTime - bucketState.creationTime > batchRolloverInterval) { shouldRoll = true; LOG.debug( "BucketingSink {} starting new bucket because file is older than roll over interval {}.", subtaskIndex, batchRolloverInterval); } } } return shouldRoll; }
Returns {@code true} if the current {@code part-file} should be closed and a new one should be created. This happens if: <ol> <li>no file has been created yet for the task to write to, or</li> <li>the current file has reached the maximum bucket size, or</li> <li>the current file is older than the roll over interval.</li> </ol>
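A hedged sketch of the knobs that influence the rolling conditions above; the setter names are assumptions inferred from the batchSize and batchRolloverInterval fields used in this method, and the values are illustrative.

import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;

public class BucketingSinkRollingExample {
	public static void main(String[] args) {
		BucketingSink<String> sink = new BucketingSink<>("/tmp/flink-output");

		// Roll to a new part file once 128 MB have been written ...
		sink.setBatchSize(128L * 1024 * 1024);
		// ... or once the current part file is older than ten minutes.
		sink.setBatchRolloverInterval(10L * 60 * 1000);
	}
}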
private void closePartFilesByTime(long currentProcessingTime) throws Exception { synchronized (state.bucketStates) { for (Map.Entry<String, BucketState<T>> entry : state.bucketStates.entrySet()) { if ((entry.getValue().lastWrittenToTime < currentProcessingTime - inactiveBucketThreshold) || (entry.getValue().creationTime < currentProcessingTime - batchRolloverInterval)) { LOG.debug("BucketingSink {} closing bucket due to inactivity of over {} ms.", getRuntimeContext().getIndexOfThisSubtask(), inactiveBucketThreshold); closeCurrentPartFile(entry.getValue()); } } } }
Checks for inactive buckets, and closes them. Buckets are considered inactive if they have not been written to for a period greater than {@code inactiveBucketThreshold} ms. Buckets are also closed if they are older than {@code batchRolloverInterval} ms. This enables in-progress files to be moved to the pending state and be finalised on the next checkpoint.
private void openNewPartFile(Path bucketPath, BucketState<T> bucketState) throws Exception { closeCurrentPartFile(bucketState); if (!fs.exists(bucketPath)) { try { if (fs.mkdirs(bucketPath)) { LOG.debug("Created new bucket directory: {}", bucketPath); } } catch (IOException e) { throw new RuntimeException("Could not create new bucket path.", e); } } // The following loop tries different partCounter values in ascending order until it reaches the minimum // that is not yet used. This works since there is only one parallel subtask that tries names with this // subtask id. Otherwise we would run into concurrency issues here. This is aligned with the way we now // clean the base directory in case of rescaling. int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask(); Path partPath = assemblePartPath(bucketPath, subtaskIndex, bucketState.partCounter); while (fs.exists(partPath) || fs.exists(getPendingPathFor(partPath)) || fs.exists(getInProgressPathFor(partPath))) { bucketState.partCounter++; partPath = assemblePartPath(bucketPath, subtaskIndex, bucketState.partCounter); } // Record the creation time of the bucket bucketState.creationTime = processingTimeService.getCurrentProcessingTime(); // increase, so we don't have to check for this name next time bucketState.partCounter++; LOG.debug("Next part path is {}", partPath.toString()); bucketState.currentFile = partPath.toString(); Path inProgressPath = getInProgressPathFor(partPath); if (bucketState.writer == null) { bucketState.writer = writerTemplate.duplicate(); if (bucketState.writer == null) { throw new UnsupportedOperationException( "Could not duplicate writer. " + "Class '" + writerTemplate.getClass().getCanonicalName() + "' must implement the 'Writer.duplicate()' method." ); } } bucketState.writer.open(fs, inProgressPath); bucketState.isWriterOpen = true; }
Closes the current part file and opens a new one with a new bucket path, as returned by the {@link Bucketer}. If the bucket is not new, then this will create a new file with the same path as its predecessor, but with an increased rolling counter (see {@link BucketingSink}).
private void closeCurrentPartFile(BucketState<T> bucketState) throws Exception { if (bucketState.isWriterOpen) { bucketState.writer.close(); bucketState.isWriterOpen = false; } if (bucketState.currentFile != null) { Path currentPartPath = new Path(bucketState.currentFile); Path inProgressPath = getInProgressPathFor(currentPartPath); Path pendingPath = getPendingPathFor(currentPartPath); fs.rename(inProgressPath, pendingPath); LOG.debug("Moving in-progress bucket {} to pending file {}", inProgressPath, pendingPath); bucketState.pendingFiles.add(currentPartPath.toString()); bucketState.currentFile = null; } }
Closes the current part file and moves it from the in-progress state to the pending state.
private Method reflectTruncate(FileSystem fs) { // completely disable the check for truncate() because the check can be problematic // on some filesystem implementations if (!useTruncate) { return null; } Method m = null; if (fs != null) { Class<?> fsClass = fs.getClass(); try { m = fsClass.getMethod("truncate", Path.class, long.class); } catch (NoSuchMethodException ex) { LOG.debug("Truncate not found. Will write a file with suffix '{}' " + " and prefix '{}' to specify how many bytes in a bucket are valid.", validLengthSuffix, validLengthPrefix); return null; } // verify that truncate actually works Path testPath = new Path(basePath, UUID.randomUUID().toString()); try { try (FSDataOutputStream outputStream = fs.create(testPath)) { outputStream.writeUTF("hello"); } catch (IOException e) { LOG.error("Could not create file for checking if truncate works.", e); throw new RuntimeException( "Could not create file for checking if truncate works. " + "You can disable support for truncate() completely via " + "BucketingSink.setUseTruncate(false).", e); } try { m.invoke(fs, testPath, 2); } catch (IllegalAccessException | InvocationTargetException e) { LOG.debug("Truncate is not supported.", e); m = null; } } finally { try { fs.delete(testPath, false); } catch (IOException e) { LOG.error("Could not delete truncate test file.", e); throw new RuntimeException("Could not delete truncate test file. " + "You can disable support for truncate() completely via " + "BucketingSink.setUseTruncate(false).", e); } } } return m; }
Gets the truncate() call using reflection. <p><b>NOTE:</b> This code comes from Flume.
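The error messages above already point at the escape hatch for file systems where the reflective truncate() probe misbehaves; a minimal sketch of disabling it (the base path is an illustrative assumption):

import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;

public class DisableTruncateExample {
	public static void main(String[] args) {
		BucketingSink<String> sink = new BucketingSink<>("/tmp/flink-output");

		// Skip the reflective truncate() check entirely; on restore, valid-length
		// marker files are written instead to record how many bytes are valid.
		sink.setUseTruncate(false);
	}
}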
public static FileSystem createHadoopFileSystem( Path path, @Nullable Configuration extraUserConf) throws IOException { // try to get the Hadoop File System via the Flink File Systems // that way we get the proper configuration final org.apache.flink.core.fs.FileSystem flinkFs = org.apache.flink.core.fs.FileSystem.getUnguardedFileSystem(path.toUri()); final FileSystem hadoopFs = (flinkFs instanceof HadoopFileSystem) ? ((HadoopFileSystem) flinkFs).getHadoopFileSystem() : null; // fast path: if the Flink file system wraps Hadoop anyways and we need no extra config, // then we use it directly if (extraUserConf == null && hadoopFs != null) { return hadoopFs; } else { // we need to re-instantiate the Hadoop file system, because we either have // a special config, or the Path gave us a Flink FS that is not backed by // Hadoop (like file://) final org.apache.hadoop.conf.Configuration hadoopConf; if (hadoopFs != null) { // have a Hadoop FS but need to apply extra config hadoopConf = hadoopFs.getConf(); } else { // the Path gave us a Flink FS that is not backed by Hadoop (like file://) // we need to get access to the Hadoop file system first // we access the Hadoop FS in Flink, which carries the proper // Hadoop configuration. we should get rid of this once the bucketing sink is // properly implemented against Flink's FS abstraction URI genericHdfsUri = URI.create("hdfs://localhost:12345/"); org.apache.flink.core.fs.FileSystem accessor = org.apache.flink.core.fs.FileSystem.getUnguardedFileSystem(genericHdfsUri); if (!(accessor instanceof HadoopFileSystem)) { throw new IOException( "Cannot instantiate a Hadoop file system to access the Hadoop configuration. " + "FS for hdfs:// is " + accessor.getClass().getName()); } hadoopConf = ((HadoopFileSystem) accessor).getHadoopFileSystem().getConf(); } // finalize the configuration final org.apache.hadoop.conf.Configuration finalConf; if (extraUserConf == null) { finalConf = hadoopConf; } else { finalConf = new org.apache.hadoop.conf.Configuration(hadoopConf); for (String key : extraUserConf.keySet()) { finalConf.set(key, extraUserConf.getString(key, null)); } } // we explicitly re-instantiate the file system here in order to make sure // that the configuration is applied. URI fsUri = path.toUri(); final String scheme = fsUri.getScheme(); final String authority = fsUri.getAuthority(); if (scheme == null && authority == null) { fsUri = FileSystem.getDefaultUri(finalConf); } else if (scheme != null && authority == null) { URI defaultUri = FileSystem.getDefaultUri(finalConf); if (scheme.equals(defaultUri.getScheme()) && defaultUri.getAuthority() != null) { fsUri = defaultUri; } } final Class<? extends FileSystem> fsClass = FileSystem.getFileSystemClass(fsUri.getScheme(), finalConf); final FileSystem fs; try { fs = fsClass.newInstance(); } catch (Exception e) { throw new IOException("Cannot instantiate the Hadoop file system", e); } fs.initialize(fsUri, finalConf); // We don't perform checksums on Hadoop's local filesystem and use the raw filesystem. // Otherwise buffers are not flushed entirely during checkpointing which results in data loss. if (fs instanceof LocalFileSystem) { return ((LocalFileSystem) fs).getRaw(); } return fs; } }
------------------------------------------------------------------------
public void tryAdd(AbstractCheckpointStats checkpoint) { // Don't add in progress checkpoints as they will be replaced by their // completed/failed version eventually. if (cache != null && checkpoint != null && !checkpoint.getStatus().isInProgress()) { cache.put(checkpoint.getCheckpointId(), checkpoint); } }
Try to add the checkpoint to the cache. @param checkpoint Checkpoint to be added.
static void writeField(DataOutputView out, Field field) throws IOException { Class<?> declaringClass = field.getDeclaringClass(); out.writeUTF(declaringClass.getName()); out.writeUTF(field.getName()); }
Writes a field to the given {@link DataOutputView}. <p>This write method avoids Java serialization, by writing only the classname of the field's declaring class and the field name. The written field can be read using {@link #readField(DataInputView, ClassLoader)}. @param out the output view to write to. @param field the field to write.
static Field readField(DataInputView in, ClassLoader userCodeClassLoader) throws IOException { Class<?> declaringClass = InstantiationUtil.resolveClassByName(in, userCodeClassLoader); String fieldName = in.readUTF(); return getField(fieldName, declaringClass); }
Reads a field from the given {@link DataInputView}. <p>This read method avoids Java serialization by reading the classname of the field's declaring class and dynamically loading it. The field is then looked up by name via reflection. @param in the input view to read from. @param userCodeClassLoader the user classloader. @return the read field.
public static void gracefulShutdown(long timeout, TimeUnit unit, ExecutorService... executorServices) { for (ExecutorService executorService: executorServices) { executorService.shutdown(); } boolean wasInterrupted = false; final long endTime = unit.toMillis(timeout) + System.currentTimeMillis(); long timeLeft = unit.toMillis(timeout); boolean hasTimeLeft = timeLeft > 0L; for (ExecutorService executorService: executorServices) { if (wasInterrupted || !hasTimeLeft) { executorService.shutdownNow(); } else { try { if (!executorService.awaitTermination(timeLeft, TimeUnit.MILLISECONDS)) { LOG.warn("ExecutorService did not terminate in time. Shutting it down now."); executorService.shutdownNow(); } } catch (InterruptedException e) { LOG.warn("Interrupted while shutting down executor services. Shutting all " + "remaining ExecutorServices down now.", e); executorService.shutdownNow(); wasInterrupted = true; Thread.currentThread().interrupt(); } timeLeft = endTime - System.currentTimeMillis(); hasTimeLeft = timeLeft > 0L; } } }
Gracefully shuts down the given {@link ExecutorService}s. The call waits up to the given timeout for all ExecutorServices to terminate. If they do not terminate within this time, they will be shut down hard. @param timeout to wait for the termination of all ExecutorServices @param unit of the timeout @param executorServices to shut down
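A short usage sketch; ExecutorUtils is assumed to be the utility class hosting this method, as suggested by the ExecutorUtils.nonBlockingShutdown call earlier in this section.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.flink.util.ExecutorUtils;

public class GracefulShutdownExample {
	public static void main(String[] args) {
		ExecutorService workers = Executors.newFixedThreadPool(2);
		ExecutorService singleThread = Executors.newSingleThreadExecutor();

		// Wait up to 10 seconds in total; anything still running afterwards
		// is shut down hard via shutdownNow().
		ExecutorUtils.gracefulShutdown(10L, TimeUnit.SECONDS, workers, singleThread);
	}
}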