@Nonnull static StateMetaInfoReader getReader(int readVersion) { switch (readVersion) { case CURRENT_STATE_META_INFO_SNAPSHOT_VERSION: return CurrentReaderImpl.INSTANCE; case 5: return V5ReaderImpl.INSTANCE; default: throw new IllegalArgumentException("Unsupported read version for state meta info: " + readVersion); } }
Returns a reader for {@link StateMetaInfoSnapshot} for the given read version. @param readVersion the format version to read. @return the requested reader.
@Override public <T> ValueState<T> getState(ValueStateDescriptor<T> stateProperties) { KeyedStateStore keyedStateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties); stateProperties.initializeSerializerUnlessSet(getExecutionConfig()); return keyedStateStore.getState(stateProperties); }
------------------------------------------------------------------------
public static <T extends SpecificRecord> AvroDeserializationSchema<T> forSpecific(Class<T> tClass) { return new AvroDeserializationSchema<>(tClass, null); }
Creates {@link AvroDeserializationSchema} that produces classes that were generated from an Avro schema. @param tClass class of record to be produced @return deserialization schema that produces instances of {@code tClass}
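A minimal usage sketch; "User" is a hypothetical Avro-generated SpecificRecord class, and the Kafka line is only illustrative of where the schema is typically plugged in:
// User is a hypothetical class generated from an Avro schema (implements SpecificRecord).
AvroDeserializationSchema<User> schema = AvroDeserializationSchema.forSpecific(User.class);
// The schema can then be handed to a source, e.g. a Kafka consumer:
// new FlinkKafkaConsumer<>("user-topic", schema, kafkaProperties);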
@Override public RocksDBRestoreResult restore() throws Exception { if (restoreStateHandles == null || restoreStateHandles.isEmpty()) { return null; } final KeyedStateHandle theFirstStateHandle = restoreStateHandles.iterator().next(); boolean isRescaling = (restoreStateHandles.size() > 1 || !Objects.equals(theFirstStateHandle.getKeyGroupRange(), keyGroupRange)); if (isRescaling) { restoreWithRescaling(restoreStateHandles); } else { restoreWithoutRescaling(theFirstStateHandle); } return new RocksDBRestoreResult(this.db, defaultColumnFamilyHandle, nativeMetricMonitor, lastCompletedCheckpointId, backendUID, restoredSstFiles); }
Root method that branches for different implementations of {@link KeyedStateHandle}.
private void restoreWithoutRescaling(KeyedStateHandle keyedStateHandle) throws Exception { if (keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle) { IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle = (IncrementalRemoteKeyedStateHandle) keyedStateHandle; restorePreviousIncrementalFilesStatus(incrementalRemoteKeyedStateHandle); restoreFromRemoteState(incrementalRemoteKeyedStateHandle); } else if (keyedStateHandle instanceof IncrementalLocalKeyedStateHandle) { IncrementalLocalKeyedStateHandle incrementalLocalKeyedStateHandle = (IncrementalLocalKeyedStateHandle) keyedStateHandle; restorePreviousIncrementalFilesStatus(incrementalLocalKeyedStateHandle); restoreFromLocalState(incrementalLocalKeyedStateHandle); } else { throw new BackendBuildingException("Unexpected state handle type, " + "expected " + IncrementalRemoteKeyedStateHandle.class + " or " + IncrementalLocalKeyedStateHandle.class + ", but found " + keyedStateHandle.getClass()); } }
Recovery from a single remote or local incremental state handle without rescaling.
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception { // Prepare for restore with rescaling KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial( restoreStateHandles, keyGroupRange); // Init base DB instance if (initialHandle != null) { restoreStateHandles.remove(initialHandle); initDBWithRescaling(initialHandle); } else { openDB(); } // Transfer remaining key-groups from temporary instance into base DB byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes]; RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes); byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes]; RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes); for (KeyedStateHandle rawStateHandle : restoreStateHandles) { if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) { throw new IllegalStateException("Unexpected state handle type, " + "expected " + IncrementalRemoteKeyedStateHandle.class + ", but found " + rawStateHandle.getClass()); } Path temporaryRestoreInstancePath = new Path(instanceBasePath.getAbsolutePath() + UUID.randomUUID().toString()); try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle( (IncrementalRemoteKeyedStateHandle) rawStateHandle, temporaryRestoreInstancePath); RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db)) { List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors; List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles; // iterating only the requested descriptors automatically skips the default column family handle for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) { ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i); ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle( null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i)) .columnFamilyHandle; try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(tmpRestoreDBInfo.db, tmpColumnFamilyHandle)) { iterator.seek(startKeyGroupPrefixBytes); while (iterator.isValid()) { if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) { writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value()); } else { // Since the iterator will visit the record according to the sorted order, // we can just break here. break; } iterator.next(); } } // releases native iterator resources } } finally { cleanUpPathQuietly(temporaryRestoreInstancePath); } } }
Recovery from multiple incremental states with rescaling. For rescaling, this method creates a temporary RocksDB instance for a key-groups shard. All contents from the temporary instance are copied into the real restore instance and then the temporary instance is discarded.
private List<ColumnFamilyDescriptor> createAndRegisterColumnFamilyDescriptors( List<StateMetaInfoSnapshot> stateMetaInfoSnapshots, boolean registerTtlCompactFilter) { List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(stateMetaInfoSnapshots.size()); for (StateMetaInfoSnapshot stateMetaInfoSnapshot : stateMetaInfoSnapshots) { RegisteredStateMetaInfoBase metaInfoBase = RegisteredStateMetaInfoBase.fromMetaInfoSnapshot(stateMetaInfoSnapshot); ColumnFamilyDescriptor columnFamilyDescriptor = RocksDBOperationUtils.createColumnFamilyDescriptor( metaInfoBase, columnFamilyOptionsFactory, registerTtlCompactFilter ? ttlCompactFiltersManager : null); columnFamilyDescriptors.add(columnFamilyDescriptor); } return columnFamilyDescriptors; }
This method recreates and registers all {@link ColumnFamilyDescriptor} from Flink's state meta data snapshot.
private void restoreInstanceDirectoryFromPath(Path source, String instanceRocksDBPath) throws IOException { FileSystem fileSystem = source.getFileSystem(); final FileStatus[] fileStatuses = fileSystem.listStatus(source); if (fileStatuses == null) { throw new IOException("Cannot list file statuses. Directory " + source + " does not exist."); } for (FileStatus fileStatus : fileStatuses) { final Path filePath = fileStatus.getPath(); final String fileName = filePath.getName(); File restoreFile = new File(source.getPath(), fileName); File targetFile = new File(instanceRocksDBPath, fileName); if (fileName.endsWith(SST_FILE_SUFFIX)) { // hardlink'ing the immutable sst-files. Files.createLink(targetFile.toPath(), restoreFile.toPath()); } else { // true copy for all other files. Files.copy(restoreFile.toPath(), targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING); } } }
This recreates the new working directory of the recovered RocksDB instance and links/copies the contents from a local state.
private KeyedBackendSerializationProxy<K> readMetaData(StreamStateHandle metaStateHandle) throws Exception { FSDataInputStream inputStream = null; try { inputStream = metaStateHandle.openInputStream(); cancelStreamRegistry.registerCloseable(inputStream); DataInputView in = new DataInputViewStreamWrapper(inputStream); return readMetaData(in); } finally { if (cancelStreamRegistry.unregisterCloseable(inputStream)) { inputStream.close(); } } }
Reads Flink's state meta data file from the state handle.
public static void clipDBWithKeyGroupRange( @Nonnull RocksDB db, @Nonnull List<ColumnFamilyHandle> columnFamilyHandles, @Nonnull KeyGroupRange targetKeyGroupRange, @Nonnull KeyGroupRange currentKeyGroupRange, @Nonnegative int keyGroupPrefixBytes) throws RocksDBException { final byte[] beginKeyGroupBytes = new byte[keyGroupPrefixBytes]; final byte[] endKeyGroupBytes = new byte[keyGroupPrefixBytes]; if (currentKeyGroupRange.getStartKeyGroup() < targetKeyGroupRange.getStartKeyGroup()) { RocksDBKeySerializationUtils.serializeKeyGroup( currentKeyGroupRange.getStartKeyGroup(), beginKeyGroupBytes); RocksDBKeySerializationUtils.serializeKeyGroup( targetKeyGroupRange.getStartKeyGroup(), endKeyGroupBytes); deleteRange(db, columnFamilyHandles, beginKeyGroupBytes, endKeyGroupBytes); } if (currentKeyGroupRange.getEndKeyGroup() > targetKeyGroupRange.getEndKeyGroup()) { RocksDBKeySerializationUtils.serializeKeyGroup( targetKeyGroupRange.getEndKeyGroup() + 1, beginKeyGroupBytes); RocksDBKeySerializationUtils.serializeKeyGroup( currentKeyGroupRange.getEndKeyGroup() + 1, endKeyGroupBytes); deleteRange(db, columnFamilyHandles, beginKeyGroupBytes, endKeyGroupBytes); } }
Clips the db instance to the target key group range using {@link RocksDB#delete(ColumnFamilyHandle, byte[])}. @param db the RocksDB instance to be clipped. @param columnFamilyHandles the column families in the db instance. @param targetKeyGroupRange the target key group range. @param currentKeyGroupRange the key group range of the db instance. @param keyGroupPrefixBytes Number of bytes required to prefix the key groups.
private static void deleteRange( RocksDB db, List<ColumnFamilyHandle> columnFamilyHandles, byte[] beginKeyBytes, byte[] endKeyBytes) throws RocksDBException { for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { try (RocksIteratorWrapper iteratorWrapper = RocksDBOperationUtils.getRocksIterator(db, columnFamilyHandle); RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db)) { iteratorWrapper.seek(beginKeyBytes); while (iteratorWrapper.isValid()) { final byte[] currentKey = iteratorWrapper.key(); if (beforeThePrefixBytes(currentKey, endKeyBytes)) { writeBatchWrapper.remove(columnFamilyHandle, currentKey); } else { break; } iteratorWrapper.next(); } } } }
Deletes the records that fall into [beginKeyBytes, endKeyBytes) of the db. @param db the target db to be clipped. @param columnFamilyHandles the column families to be clipped. @param beginKeyBytes the begin key bytes @param endKeyBytes the end key bytes
public static boolean beforeThePrefixBytes(@Nonnull byte[] bytes, @Nonnull byte[] prefixBytes) { final int prefixLength = prefixBytes.length; for (int i = 0; i < prefixLength; ++i) { int r = (char) prefixBytes[i] - (char) bytes[i]; if (r != 0) { return r > 0; } } return false; }
Checks whether {@code bytes} comes before {@code prefixBytes} in character order.
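A small sketch of the comparison, using the class name under which this helper is referenced in the restore code above; only the leading prefixBytes.length bytes are compared:
byte[] stopPrefix = {0, 7};
// true: at index 1, 5 < 7, so the key is before the prefix
boolean before = RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(new byte[]{0, 5, 42}, stopPrefix);
// false: the leading bytes are equal to the prefix, which does not count as "before"
boolean same = RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(new byte[]{0, 7, 99}, stopPrefix);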
@Nullable public static KeyedStateHandle chooseTheBestStateHandleForInitial( @Nonnull Collection<KeyedStateHandle> restoreStateHandles, @Nonnull KeyGroupRange targetKeyGroupRange) { KeyedStateHandle bestStateHandle = null; double bestScore = 0; for (KeyedStateHandle rawStateHandle : restoreStateHandles) { double handleScore = STATE_HANDLE_EVALUATOR.apply(rawStateHandle, targetKeyGroupRange); if (handleScore > bestScore) { bestStateHandle = rawStateHandle; bestScore = handleScore; } } return bestStateHandle; }
Chooses the best state handle according to the {@link #STATE_HANDLE_EVALUATOR} to initialize the base db. @param restoreStateHandles The candidate state handles. @param targetKeyGroupRange The target key group range. @return The best candidate or null if no candidate was a good fit.
public DoubleParameter setDefaultValue(double defaultValue) { super.setDefaultValue(defaultValue); if (hasMinimumValue) { if (minimumValueInclusive) { Util.checkParameter(defaultValue >= minimumValue, "Default value (" + defaultValue + ") must be greater than or equal to minimum (" + minimumValue + ")"); } else { Util.checkParameter(defaultValue > minimumValue, "Default value (" + defaultValue + ") must be greater than minimum (" + minimumValue + ")"); } } if (hasMaximumValue) { if (maximumValueInclusive) { Util.checkParameter(defaultValue <= maximumValue, "Default value (" + defaultValue + ") must be less than or equal to maximum (" + maximumValue + ")"); } else { Util.checkParameter(defaultValue < maximumValue, "Default value (" + defaultValue + ") must be less than maximum (" + maximumValue + ")"); } } return this; }
Set the default value. @param defaultValue the default value @return this
public DoubleParameter setMinimumValue(double minimumValue, boolean inclusive) { if (hasDefaultValue) { if (inclusive) { Util.checkParameter(minimumValue <= defaultValue, "Minimum value (" + minimumValue + ") must be less than or equal to default (" + defaultValue + ")"); } else { Util.checkParameter(minimumValue < defaultValue, "Minimum value (" + minimumValue + ") must be less than default (" + defaultValue + ")"); } } else if (hasMaximumValue) { if (inclusive && maximumValueInclusive) { Util.checkParameter(minimumValue <= maximumValue, "Minimum value (" + minimumValue + ") must be less than or equal to maximum (" + maximumValue + ")"); } else { Util.checkParameter(minimumValue < maximumValue, "Minimum value (" + minimumValue + ") must be less than maximum (" + maximumValue + ")"); } } this.hasMinimumValue = true; this.minimumValue = minimumValue; this.minimumValueInclusive = inclusive; return this; }
Set the minimum value. The minimum value is an acceptable value if and only if inclusive is set to true. @param minimumValue the minimum value @param inclusive whether the minimum value is a valid value @return this
public DoubleParameter setMaximumValue(double maximumValue, boolean inclusive) { if (hasDefaultValue) { if (inclusive) { Util.checkParameter(maximumValue >= defaultValue, "Maximum value (" + maximumValue + ") must be greater than or equal to default (" + defaultValue + ")"); } else { Util.checkParameter(maximumValue > defaultValue, "Maximum value (" + maximumValue + ") must be greater than default (" + defaultValue + ")"); } } else if (hasMinimumValue) { if (inclusive && minimumValueInclusive) { Util.checkParameter(maximumValue >= minimumValue, "Maximum value (" + maximumValue + ") must be greater than or equal to minimum (" + minimumValue + ")"); } else { Util.checkParameter(maximumValue > minimumValue, "Maximum value (" + maximumValue + ") must be greater than minimum (" + minimumValue + ")"); } } this.hasMaximumValue = true; this.maximumValue = maximumValue; this.maximumValueInclusive = inclusive; return this; }
Set the maximum value. The maximum value is an acceptable value if and only if inclusive is set to true. @param maximumValue the maximum value @param inclusive whether the maximum value is a valid value @return this
@Override protected void processRecord(Tuple2<Boolean, Row> change) { synchronized (resultLock) { // wait if the buffer is full if (changeRecordBuffer.size() >= CHANGE_RECORD_BUFFER_SIZE) { try { resultLock.wait(); } catch (InterruptedException e) { // ignore } } else { changeRecordBuffer.add(change); } } }
--------------------------------------------------------------------------------------------
public void addBroadcastSetForSumFunction(String name, DataSet<?> data) { this.bcVarsSum.add(new Tuple2<>(name, data)); }
Adds a data set as a broadcast set to the sum function. @param name The name under which the broadcast data is available in the sum function. @param data The data set to be broadcast.
public void addBroadcastSetForApplyFunction(String name, DataSet<?> data) { this.bcVarsApply.add(new Tuple2<>(name, data)); }
Adds a data set as a broadcast set to the apply function. @param name The name under which the broadcast data is available in the apply function. @param data The data set to be broadcast.
@Override protected GroupCombineOperatorBase<?, OUT, ?> translateToDataFlow(Operator<IN> input) { String name = getName() != null ? getName() : "GroupCombine at " + defaultName; // distinguish between grouped reduce and non-grouped reduce if (grouper == null) { // non grouped reduce UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType()); GroupCombineOperatorBase<IN, OUT, GroupCombineFunction<IN, OUT>> po = new GroupCombineOperatorBase<>(function, operatorInfo, new int[0], name); po.setInput(input); // the parallelism for a non grouped reduce can only be 1 po.setParallelism(1); return po; } if (grouper.getKeys() instanceof SelectorFunctionKeys) { @SuppressWarnings("unchecked") SelectorFunctionKeys<IN, ?> selectorKeys = (SelectorFunctionKeys<IN, ?>) grouper.getKeys(); if (grouper instanceof SortedGrouping) { SortedGrouping<IN> sortedGrouping = (SortedGrouping<IN>) grouper; SelectorFunctionKeys<IN, ?> sortKeys = sortedGrouping.getSortSelectionFunctionKey(); Ordering groupOrder = sortedGrouping.getGroupOrdering(); PlanUnwrappingSortedGroupCombineOperator<IN, OUT, ?, ?> po = translateSelectorFunctionSortedReducer(selectorKeys, sortKeys, groupOrder, function, getResultType(), name, input); po.setParallelism(this.getParallelism()); return po; } else { PlanUnwrappingGroupCombineOperator<IN, OUT, ?> po = translateSelectorFunctionReducer( selectorKeys, function, getResultType(), name, input); po.setParallelism(this.getParallelism()); return po; } } else if (grouper.getKeys() instanceof Keys.ExpressionKeys) { int[] logicalKeyPositions = grouper.getKeys().computeLogicalKeyPositions(); UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType()); GroupCombineOperatorBase<IN, OUT, GroupCombineFunction<IN, OUT>> po = new GroupCombineOperatorBase<>(function, operatorInfo, logicalKeyPositions, name); po.setInput(input); po.setParallelism(getParallelism()); // set group order if (grouper instanceof SortedGrouping) { SortedGrouping<IN> sortedGrouper = (SortedGrouping<IN>) grouper; int[] sortKeyPositions = sortedGrouper.getGroupSortKeyPositions(); Order[] sortOrders = sortedGrouper.getGroupSortOrders(); Ordering o = new Ordering(); for (int i = 0; i < sortKeyPositions.length; i++) { o.appendOrdering(sortKeyPositions[i], null, sortOrders[i]); } po.setGroupOrder(o); } return po; } else { throw new UnsupportedOperationException("Unrecognized key type."); } }
--------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked") private static <IN, OUT, K> PlanUnwrappingGroupCombineOperator<IN, OUT, K> translateSelectorFunctionReducer( SelectorFunctionKeys<IN, ?> rawKeys, GroupCombineFunction<IN, OUT> function, TypeInformation<OUT> outputType, String name, Operator<IN> input) { final SelectorFunctionKeys<IN, K> keys = (SelectorFunctionKeys<IN, K>) rawKeys; TypeInformation<Tuple2<K, IN>> typeInfoWithKey = KeyFunctions.createTypeWithKey(keys); Operator<Tuple2<K, IN>> keyedInput = KeyFunctions.appendKeyExtractor(input, keys); PlanUnwrappingGroupCombineOperator<IN, OUT, K> reducer = new PlanUnwrappingGroupCombineOperator<>(function, keys, name, outputType, typeInfoWithKey); reducer.setInput(keyedInput); return reducer; }
--------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked") public <X> StreamRecord<X> replace(X element) { this.value = (T) element; return (StreamRecord<X>) this; }
Replace the currently stored value by the given new value. This returns a StreamElement with the generic type parameter that matches the new value while keeping the old timestamp. @param element Element to set in this stream value @return Returns the StreamElement with replaced value
@SuppressWarnings("unchecked") public <X> StreamRecord<X> replace(X value, long timestamp) { this.timestamp = timestamp; this.value = (T) value; this.hasTimestamp = true; return (StreamRecord<X>) this; }
Replace the currently stored value by the given new value and the currently stored timestamp with the new timestamp. This returns a StreamElement with the generic type parameter that matches the new value. @param value The new value to wrap in this StreamRecord @param timestamp The new timestamp in milliseconds @return Returns the StreamElement with replaced value
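A minimal sketch of how the two replace variants re-use the record object while keeping or overriding the timestamp; it assumes the usual StreamRecord(value, timestamp) constructor:
StreamRecord<String> record = new StreamRecord<>("hello", 42L);
// Same object, new payload type, timestamp 42 is kept:
StreamRecord<Integer> length = record.replace("hello".length());
// Same object again, new payload and a new timestamp:
StreamRecord<Integer> restamped = length.replace(7, 100L);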
public StreamRecord<T> copy(T valueCopy) { StreamRecord<T> copy = new StreamRecord<>(valueCopy); copy.timestamp = this.timestamp; copy.hasTimestamp = this.hasTimestamp; return copy; }
Creates a copy of this stream record. Uses the copied value as the value for the new record, i.e., only copies timestamp fields.
public void copyTo(T valueCopy, StreamRecord<T> target) { target.value = valueCopy; target.timestamp = this.timestamp; target.hasTimestamp = this.hasTimestamp; }
Copies this record into the new stream record. Uses the copied value as the value for the new record, i.e., only copies timestamp fields.
@Override public void onEvent(TaskEvent event) { if (event instanceof TerminationEvent) { terminationSignaled = true; } else if (event instanceof AllWorkersDoneEvent) { AllWorkersDoneEvent wde = (AllWorkersDoneEvent) event; aggregatorNames = wde.getAggregatorNames(); aggregates = wde.getAggregates(userCodeClassLoader); } else { throw new IllegalArgumentException("Unknown event type."); } latch.countDown(); }
Barrier will release the waiting thread if an event occurs.
public static MesosTaskManagerParameters create(Configuration flinkConfig) { List<ConstraintEvaluator> constraints = parseConstraints(flinkConfig.getString(MESOS_CONSTRAINTS_HARD_HOSTATTR)); // parse the common parameters ContaineredTaskManagerParameters containeredParameters = ContaineredTaskManagerParameters.create( flinkConfig, flinkConfig.getInteger(MESOS_RM_TASKS_MEMORY_MB), flinkConfig.getInteger(MESOS_RM_TASKS_SLOTS)); double cpus = flinkConfig.getDouble(MESOS_RM_TASKS_CPUS); if (cpus <= 0.0) { cpus = Math.max(containeredParameters.numSlots(), 1.0); } int gpus = flinkConfig.getInteger(MESOS_RM_TASKS_GPUS); if (gpus < 0) { throw new IllegalConfigurationException(MESOS_RM_TASKS_GPUS.key() + " cannot be negative"); } int disk = flinkConfig.getInteger(MESOS_RM_TASKS_DISK_MB); // parse the containerization parameters String imageName = flinkConfig.getString(MESOS_RM_CONTAINER_IMAGE_NAME); ContainerType containerType; String containerTypeString = flinkConfig.getString(MESOS_RM_CONTAINER_TYPE); switch (containerTypeString) { case MESOS_RESOURCEMANAGER_TASKS_CONTAINER_TYPE_MESOS: containerType = ContainerType.MESOS; break; case MESOS_RESOURCEMANAGER_TASKS_CONTAINER_TYPE_DOCKER: containerType = ContainerType.DOCKER; if (imageName == null || imageName.length() == 0) { throw new IllegalConfigurationException(MESOS_RM_CONTAINER_IMAGE_NAME.key() + " must be specified for docker container type"); } break; default: throw new IllegalConfigurationException("invalid container type: " + containerTypeString); } Option<String> containerVolOpt = Option.<String>apply(flinkConfig.getString(MESOS_RM_CONTAINER_VOLUMES)); Option<String> dockerParamsOpt = Option.<String>apply(flinkConfig.getString(MESOS_RM_CONTAINER_DOCKER_PARAMETERS)); Option<String> uriParamsOpt = Option.<String>apply(flinkConfig.getString(MESOS_TM_URIS)); boolean dockerForcePullImage = flinkConfig.getBoolean(MESOS_RM_CONTAINER_DOCKER_FORCE_PULL_IMAGE); List<Protos.Volume> containerVolumes = buildVolumes(containerVolOpt); List<Protos.Parameter> dockerParameters = buildDockerParameters(dockerParamsOpt); List<String> uris = buildUris(uriParamsOpt); //obtain Task Manager Host Name from the configuration Option<String> taskManagerHostname = Option.apply(flinkConfig.getString(MESOS_TM_HOSTNAME)); //obtain command-line from the configuration String tmCommand = flinkConfig.getString(MESOS_TM_CMD); Option<String> tmBootstrapCommand = Option.apply(flinkConfig.getString(MESOS_TM_BOOTSTRAP_CMD)); return new MesosTaskManagerParameters( cpus, gpus, disk, containerType, Option.apply(imageName), containeredParameters, containerVolumes, dockerParameters, dockerForcePullImage, constraints, tmCommand, tmBootstrapCommand, taskManagerHostname, uris); }
Create the Mesos TaskManager parameters. @param flinkConfig the TM configuration.
public static List<Protos.Volume> buildVolumes(Option<String> containerVolumes) { if (containerVolumes.isEmpty()) { return Collections.emptyList(); } else { String[] volumeSpecifications = containerVolumes.get().split(","); List<Protos.Volume> volumes = new ArrayList<>(volumeSpecifications.length); for (String volumeSpecification : volumeSpecifications) { if (!volumeSpecification.trim().isEmpty()) { Protos.Volume.Builder volume = Protos.Volume.newBuilder(); volume.setMode(Protos.Volume.Mode.RW); String[] parts = volumeSpecification.split(":"); switch (parts.length) { case 1: volume.setContainerPath(parts[0]); break; case 2: try { Protos.Volume.Mode mode = Protos.Volume.Mode.valueOf(parts[1].trim().toUpperCase()); volume.setMode(mode) .setContainerPath(parts[0]); } catch (IllegalArgumentException e) { volume.setHostPath(parts[0]) .setContainerPath(parts[1]); } break; case 3: Protos.Volume.Mode mode = Protos.Volume.Mode.valueOf(parts[2].trim().toUpperCase()); volume.setMode(mode) .setHostPath(parts[0]) .setContainerPath(parts[1]); break; default: throw new IllegalArgumentException("volume specification is invalid, given: " + volumeSpecification); } volumes.add(volume.build()); } } return volumes; } }
Used to build volume specs for Mesos. This allows for mounting additional volumes into a container. @param containerVolumes a comma-delimited optional string of [host_path:]container_path[:RO|RW] that defines mount points for a container volume. If null or an empty string, an empty list is returned.
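A sketch of the three accepted specification shapes; the paths are made up for illustration:
List<Protos.Volume> volumes = MesosTaskManagerParameters.buildVolumes(
    Option.apply("/data,/host/logs:/logs,/host/conf:/conf:RO"));
// "/data"               -> container path only, mode defaults to RW
// "/host/logs:/logs"    -> host path and container path, mode RW
// "/host/conf:/conf:RO" -> host path, container path, explicit read-only mode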
public static List<String> buildUris(Option<String> uris) { if (uris.isEmpty()) { return Collections.emptyList(); } else { List<String> urisList = new ArrayList<>(); for (String uri : uris.get().split(",")) { urisList.add(uri.trim()); } return urisList; } }
Build a list of URIs for providing custom artifacts to Mesos tasks. @param uris a comma delimited optional string listing artifact URIs
public static <T extends RestfulGateway> Optional<StaticFileServerHandler<T>> tryLoadWebContent( GatewayRetriever<? extends T> leaderRetriever, Time timeout, File tmpDir) throws IOException { if (isFlinkRuntimeWebInClassPath()) { return Optional.of(new StaticFileServerHandler<>( leaderRetriever, timeout, tmpDir)); } else { return Optional.empty(); } }
Checks whether the flink-runtime-web dependency is available and if so returns a StaticFileServerHandler which can serve the static file contents. @param leaderRetriever to be used by the StaticFileServerHandler @param timeout for lookup requests @param tmpDir to be used by the StaticFileServerHandler to store temporary files @param <T> type of the gateway to retrieve @return StaticFileServerHandler if flink-runtime-web is in the classpath; otherwise Optional.empty() @throws IOException if we cannot create the StaticFileServerHandler
public static WebMonitorExtension loadWebSubmissionExtension( GatewayRetriever<? extends DispatcherGateway> leaderRetriever, Time timeout, Map<String, String> responseHeaders, CompletableFuture<String> localAddressFuture, java.nio.file.Path uploadDir, Executor executor, Configuration configuration) throws FlinkException { if (isFlinkRuntimeWebInClassPath()) { try { final Constructor<?> webSubmissionExtensionConstructor = Class .forName("org.apache.flink.runtime.webmonitor.WebSubmissionExtension") .getConstructor( Configuration.class, GatewayRetriever.class, Map.class, CompletableFuture.class, java.nio.file.Path.class, Executor.class, Time.class); return (WebMonitorExtension) webSubmissionExtensionConstructor.newInstance( configuration, leaderRetriever, responseHeaders, localAddressFuture, uploadDir, executor, timeout); } catch (ClassNotFoundException | NoSuchMethodException | InstantiationException | InvocationTargetException | IllegalAccessException e) { throw new FlinkException("Could not load web submission extension.", e); } } else { throw new FlinkException("The module flink-runtime-web could not be found in the class path. Please add " + "this jar in order to enable web based job submission."); } }
Loads the {@link WebMonitorExtension} which enables web submission. @param leaderRetriever to retrieve the leader @param timeout for asynchronous requests @param responseHeaders for the web submission handlers @param localAddressFuture of the underlying REST server endpoint @param uploadDir where the web submission handler store uploaded jars @param executor to run asynchronous operations @param configuration used to instantiate the web submission extension @return Web submission extension @throws FlinkException if the web submission extension could not be loaded
public static Path validateAndNormalizeUri(URI archiveDirUri) { final String scheme = archiveDirUri.getScheme(); final String path = archiveDirUri.getPath(); // some validity checks if (scheme == null) { throw new IllegalArgumentException("The scheme (hdfs://, file://, etc) is null. " + "Please specify the file system scheme explicitly in the URI."); } if (path == null) { throw new IllegalArgumentException("The path to store the job archive data in is null. " + "Please specify a directory path for archiving the job data."); } return new Path(archiveDirUri); }
Checks and normalizes the given URI. This method first checks the validity of the URI (scheme and path are not null) and then normalizes the URI to a path. @param archiveDirUri The URI to check and normalize. @return A normalized URI as a Path. @throws IllegalArgumentException Thrown, if the URI misses scheme or path.
public static MetricRegistryConfiguration fromConfiguration(Configuration configuration) { ScopeFormats scopeFormats; try { scopeFormats = ScopeFormats.fromConfig(configuration); } catch (Exception e) { LOG.warn("Failed to parse scope format, using default scope formats", e); scopeFormats = ScopeFormats.fromConfig(new Configuration()); } char delim; try { delim = configuration.getString(MetricOptions.SCOPE_DELIMITER).charAt(0); } catch (Exception e) { LOG.warn("Failed to parse delimiter, using default delimiter.", e); delim = '.'; } final long maximumFrameSize = AkkaRpcServiceUtils.extractMaximumFramesize(configuration); // padding to account for serialization overhead final long messageSizeLimitPadding = 256; return new MetricRegistryConfiguration(scopeFormats, delim, maximumFrameSize - messageSizeLimitPadding); }
Create a metric registry configuration object from the given {@link Configuration}. @param configuration to generate the metric registry configuration from @return Metric registry configuration generated from the configuration
@Override public Iterator<T> sample(final Iterator<T> input) { if (fraction == 0) { return emptyIterable; } return new SampledIterator<T>() { T currentElement; int currentCount = 0; @Override public boolean hasNext() { if (currentCount > 0) { return true; } else { samplingProcess(); if (currentCount > 0) { return true; } else { return false; } } } @Override public T next() { if (currentCount <= 0) { samplingProcess(); } currentCount--; return currentElement; } public int poisson_ge1(double p) { // sample 'k' from Poisson(p), conditioned to k >= 1. double q = Math.pow(Math.E, -p); // simulate a poisson trial such that k >= 1. double t = q + (1 - q) * random.nextDouble(); int k = 1; // continue standard poisson generation trials. t = t * random.nextDouble(); while (t > q) { k++; t = t * random.nextDouble(); } return k; } private void skipGapElements(int num) { // skip the elements that occurrence number is zero. int elementCount = 0; while (input.hasNext() && elementCount < num) { currentElement = input.next(); elementCount++; } } private void samplingProcess() { if (fraction <= THRESHOLD) { double u = Math.max(random.nextDouble(), EPSILON); int gap = (int) (Math.log(u) / -fraction); skipGapElements(gap); if (input.hasNext()) { currentElement = input.next(); currentCount = poisson_ge1(fraction); } } else { while (input.hasNext()) { currentElement = input.next(); currentCount = poissonDistribution.sample(); if (currentCount > 0) { break; } } } } }; }
Samples the input elements; for each input element, generates its count following a Poisson distribution. @param input Elements to be sampled. @return The sampled result, which is lazily computed upon the input elements.
private static Calendar parseDateFormat(String s, DateFormat dateFormat, TimeZone tz, ParsePosition pp) { if (tz == null) { tz = DEFAULT_ZONE; } Calendar ret = Calendar.getInstance(tz, Locale.ROOT); dateFormat.setCalendar(ret); dateFormat.setLenient(false); final Date d = dateFormat.parse(s, pp); if (null == d) { return null; } ret.setTime(d); ret.setTimeZone(UTC_ZONE); return ret; }
Parses a string using {@link SimpleDateFormat} and a given pattern. This method parses a string at the specified parse position and if successful, updates the parse position to the index after the last character used. The parsing is strict and rejects out-of-range values such as month 13 or day 32. @param s string to be parsed @param dateFormat Date format @param tz time zone in which to interpret string. Defaults to the Java default time zone @param pp position to start parsing from @return a Calendar initialized with the parsed value, or null if parsing failed. If returned, the Calendar is configured to the UTC time zone.
public static Calendar parseDateFormat(String s, DateFormat dateFormat, TimeZone tz) { ParsePosition pp = new ParsePosition(0); Calendar ret = parseDateFormat(s, dateFormat, tz, pp); if (pp.getIndex() != s.length()) { // Didn't consume entire string - not good return null; } return ret; }
Parses a string using {@link SimpleDateFormat} and a given pattern. The entire string must match the pattern specified. @param s string to be parsed @param dateFormat Date format @param tz time zone in which to interpret string. Defaults to the Java default time zone @return a Calendar initialized with the parsed value, or null if parsing failed. If returned, the Calendar is configured to the UTC time zone.
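A short sketch of the full-match requirement, combining it with newDateFormat from the same utility class (assumes the usual java.text/java.util imports):
SimpleDateFormat format = newDateFormat("yyyy-MM-dd");
TimeZone utc = TimeZone.getTimeZone("UTC");
Calendar parsed  = parseDateFormat("2019-03-04", format, utc);        // non-null, calendar configured to UTC
Calendar partial = parseDateFormat("2019-03-04 extra", format, utc);  // null: trailing characters were not consumed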
public static PrecisionTime parsePrecisionDateTimeLiteral(String s, DateFormat dateFormat, TimeZone tz, int maxPrecision) { final ParsePosition pp = new ParsePosition(0); final Calendar cal = parseDateFormat(s, dateFormat, tz, pp); if (cal == null) { return null; // Invalid date/time format } // Note: the Java SimpleDateFormat 'S' treats any number after // the decimal as milliseconds. That means 12:00:00.9 has 9 // milliseconds and 12:00:00.9999 has 9999 milliseconds. int p = 0; String secFraction = ""; if (pp.getIndex() < s.length()) { // Check to see if rest is decimal portion if (s.charAt(pp.getIndex()) != '.') { return null; } // Skip decimal sign pp.setIndex(pp.getIndex() + 1); // Parse decimal portion if (pp.getIndex() < s.length()) { secFraction = s.substring(pp.getIndex()); if (!secFraction.matches("\\d+")) { return null; } NumberFormat nf = NumberFormat.getIntegerInstance(Locale.ROOT); Number num = nf.parse(s, pp); if ((num == null) || (pp.getIndex() != s.length())) { // Invalid decimal portion return null; } // Determine precision - only support prec 3 or lower // (milliseconds) Higher precisions are quietly rounded away p = secFraction.length(); if (maxPrecision >= 0) { // If there is a maximum precision, ignore subsequent digits p = Math.min(maxPrecision, p); secFraction = secFraction.substring(0, p); } // Calculate milliseconds String millis = secFraction; if (millis.length() > 3) { millis = secFraction.substring(0, 3); } while (millis.length() < 3) { millis = millis + "0"; } int ms = Integer.valueOf(millis); cal.add(Calendar.MILLISECOND, ms); } } assert pp.getIndex() == s.length(); return new PrecisionTime(cal, secFraction, p); }
Parses a string using {@link SimpleDateFormat} and a given pattern, and if present, parses a fractional seconds component. The fractional seconds component must begin with a decimal point ('.') followed by numeric digits. The precision is rounded to a maximum of 3 digits of fractional seconds precision (to obtain milliseconds). @param s string to be parsed @param dateFormat Date format @param tz time zone in which to interpret string. Defaults to the local time zone @return a {@link DateTimeUtils.PrecisionTime PrecisionTime} initialized with the parsed value, or null if parsing failed. The PrecisionTime contains a GMT Calendar and a precision.
public static SimpleDateFormat newDateFormat(String format) { SimpleDateFormat sdf = new SimpleDateFormat(format, Locale.ROOT); sdf.setLenient(false); return sdf; }
Creates a new date formatter with Farrago specific options. Farrago parsing is strict and does not allow values such as day 0, month 13, etc. @param format {@link SimpleDateFormat} pattern
public static String unixDateToString(int date) { final StringBuilder buf = new StringBuilder(10); unixDateToString(buf, date); return buf.toString(); }
Helper for CAST({date} AS VARCHAR(n)).
private static int parseFraction(String v, int multiplier) { int r = 0; for (int i = 0; i < v.length(); i++) { char c = v.charAt(i); int x = c < '0' || c > '9' ? 0 : (c - '0'); r += multiplier * x; if (multiplier < 10) { // We're at the last digit. Check for rounding. if (i + 1 < v.length() && v.charAt(i + 1) >= '5') { ++r; } break; } multiplier /= 10; } return r; }
Parses a fraction, multiplying the first character by {@code multiplier}, the second character by {@code multiplier / 10}, the third character by {@code multiplier / 100}, and so forth. <p>For example, {@code parseFraction("1234", 100)} yields {@code 123}.
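parseFraction is private, so the calls below are illustrative traces of the logic above rather than client code:
int a = parseFraction("1234", 100); // 1*100 + 2*10 + 3*1 = 123; the dropped digit '4' is below '5', so no round-up
int b = parseFraction("1235", 100); // 123, then the dropped digit '5' rounds the result up to 124
int c = parseFraction("9", 100);    // 900: a single digit is scaled by the full multiplier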
private static long firstMondayOfFirstWeek(int year) { final long janFirst = ymdToJulian(year, 1, 1); final long janFirstDow = floorMod(janFirst + 1, 7); // sun=0, sat=6 return janFirst + (11 - janFirstDow) % 7 - 3; }
Returns the first day of the first week of a year. Per ISO-8601 it is the Monday of the week that contains Jan 4, or equivalently, it is a Monday between Dec 29 and Jan 4. Sometimes it is in the year before the given year.
public static int unixTimeExtract(TimeUnitRange range, int time) { assert time >= 0; assert time < MILLIS_PER_DAY; switch (range) { case HOUR: return time / (int) MILLIS_PER_HOUR; case MINUTE: final int minutes = time / (int) MILLIS_PER_MINUTE; return minutes % 60; case SECOND: final int seconds = time / (int) MILLIS_PER_SECOND; return seconds % 60; default: throw new ValidationException("unit " + range + " can not be applied to time variable"); } }
Extracts a time unit from a time value (milliseconds since midnight).
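A worked example for a time-of-day value expressed in milliseconds since midnight:
int time = ((10 * 60 + 37) * 60 + 45) * 1000 + 123;        // 10:37:45.123 -> 38_265_123 ms
int hour   = unixTimeExtract(TimeUnitRange.HOUR, time);    // 10
int minute = unixTimeExtract(TimeUnitRange.MINUTE, time);  // 37
int second = unixTimeExtract(TimeUnitRange.SECOND, time);  // 45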
public static long addMonths(long timestamp, int m) { final long millis = DateTimeUtils.floorMod(timestamp, DateTimeUtils.MILLIS_PER_DAY); timestamp -= millis; final long x = addMonths((int) (timestamp / DateTimeUtils.MILLIS_PER_DAY), m); return x * DateTimeUtils.MILLIS_PER_DAY + millis; }
Adds a given number of months to a timestamp, represented as the number of milliseconds since the epoch.
public static int addMonths(int date, int m) { int y0 = (int) DateTimeUtils.unixDateExtract(TimeUnitRange.YEAR, date); int m0 = (int) DateTimeUtils.unixDateExtract(TimeUnitRange.MONTH, date); int d0 = (int) DateTimeUtils.unixDateExtract(TimeUnitRange.DAY, date); int y = m / 12; y0 += y; m0 += m - y * 12; int last = lastDay(y0, m0); if (d0 > last) { d0 = 1; if (++m0 > 12) { m0 = 1; ++y0; } } return DateTimeUtils.ymdToUnixDate(y0, m0, d0); }
Adds a given number of months to a date, represented as the number of days since the epoch.
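A worked trace of the day-overflow handling in the snippet above; note that in this implementation an overflowing day rolls forward to the first of the next month rather than clamping to the last valid day:
int jan31 = DateTimeUtils.ymdToUnixDate(2019, 1, 31);
int result = addMonths(jan31, 1);
// 2019-02-31 does not exist (February 2019 has 28 days), so d0 is reset to 1 and the month advances:
// result == DateTimeUtils.ymdToUnixDate(2019, 3, 1), i.e. 2019-03-01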
public static int subtractMonths(int date0, int date1) { if (date0 < date1) { return -subtractMonths(date1, date0); } // Start with an estimate. // Since no month has more than 31 days, the estimate is <= the true value. int m = (date0 - date1) / 31; for (;;) { int date2 = addMonths(date1, m); if (date2 >= date0) { return m; } int date3 = addMonths(date1, m + 1); if (date3 > date0) { return m; } ++m; } }
Finds the number of months between two dates, each represented as the number of days since the epoch.
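A worked example of the estimate-and-advance loop:
int mar15 = DateTimeUtils.ymdToUnixDate(2019, 3, 15);
int jan15 = DateTimeUtils.ymdToUnixDate(2019, 1, 15);
// The dates are 59 days apart, so the initial estimate is 59 / 31 = 1; the loop then advances it to the true value:
int months = subtractMonths(mar15, jan15); // 2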
public static SingleInputSemanticProperties addSourceFieldOffset(SingleInputSemanticProperties props, int numInputFields, int offset) { SingleInputSemanticProperties offsetProps = new SingleInputSemanticProperties(); if (props.getReadFields(0) != null) { FieldSet offsetReadFields = new FieldSet(); for (int r : props.getReadFields(0)) { offsetReadFields = offsetReadFields.addField(r + offset); } offsetProps.addReadFields(offsetReadFields); } for (int s = 0; s < numInputFields; s++) { FieldSet targetFields = props.getForwardingTargetFields(0, s); for (int t : targetFields) { offsetProps.addForwardedField(s + offset, t); } } return offsetProps; }
Creates SemanticProperties by adding an offset to each input field index of the given SemanticProperties. @param props The SemanticProperties to which the offset is added. @param numInputFields The original number of fields of the input. @param offset The offset that is added to each input field index. @return New SemanticProperties with added offset.
public static DualInputSemanticProperties addSourceFieldOffsets(DualInputSemanticProperties props, int numInputFields1, int numInputFields2, int offset1, int offset2) { DualInputSemanticProperties offsetProps = new DualInputSemanticProperties(); // add offset to read fields on first input if (props.getReadFields(0) != null) { FieldSet offsetReadFields = new FieldSet(); for (int r : props.getReadFields(0)) { offsetReadFields = offsetReadFields.addField(r + offset1); } offsetProps.addReadFields(0, offsetReadFields); } // add offset to read fields on second input if (props.getReadFields(1) != null) { FieldSet offsetReadFields = new FieldSet(); for (int r : props.getReadFields(1)) { offsetReadFields = offsetReadFields.addField(r + offset2); } offsetProps.addReadFields(1, offsetReadFields); } // add offset to forward fields on first input for (int s = 0; s < numInputFields1; s++) { FieldSet targetFields = props.getForwardingTargetFields(0, s); for (int t : targetFields) { offsetProps.addForwardedField(0, s + offset1, t); } } // add offset to forward fields on second input for (int s = 0; s < numInputFields2; s++) { FieldSet targetFields = props.getForwardingTargetFields(1, s); for (int t : targetFields) { offsetProps.addForwardedField(1, s + offset2, t); } } return offsetProps; }
Creates SemanticProperties by adding offsets to each input field index of the given SemanticProperties. @param props The SemanticProperties to which the offset is added. @param numInputFields1 The original number of fields of the first input. @param numInputFields2 The original number of fields of the second input. @param offset1 The offset that is added to each input field index of the first input. @param offset2 The offset that is added to each input field index of the second input. @return New SemanticProperties with added offsets.
private static boolean areFieldsCompatible(String sourceField, TypeInformation<?> inType, String targetField, TypeInformation<?> outType, boolean throwException) { try { // get source type information TypeInformation<?> sourceType = getExpressionTypeInformation(sourceField, inType); // get target type information TypeInformation<?> targetType = getExpressionTypeInformation(targetField, outType); return sourceType.equals(targetType); } catch (InvalidFieldReferenceException e) { if (throwException) { throw e; } else { return false; } } }
//////////////////// UTIL METHODS ///////////////////////////////
public static MemorySize parse(String text, MemoryUnit defaultUnit) throws IllegalArgumentException { if (!hasUnit(text)) { return parse(text + defaultUnit.getUnits()[0]); } return parse(text); }
Parses the given string with a default unit. @param text The string to parse. @param defaultUnit The default unit to use if the expression does not specify one. @return The parsed MemorySize. @throws IllegalArgumentException Thrown, if the expression cannot be parsed.
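A small sketch of the default-unit behavior, assuming the MemoryUnit constants referenced in parseBytes below:
MemorySize heap = MemorySize.parse("64", MemoryUnit.MEGA_BYTES);   // no unit given -> treated as 64 megabytes
MemorySize page = MemorySize.parse("64kb", MemoryUnit.MEGA_BYTES); // explicit unit wins -> 64 kibibytes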
public static long parseBytes(String text) throws IllegalArgumentException { checkNotNull(text, "text"); final String trimmed = text.trim(); checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string"); final int len = trimmed.length(); int pos = 0; char current; while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') { pos++; } final String number = trimmed.substring(0, pos); final String unit = trimmed.substring(pos).trim().toLowerCase(Locale.US); if (number.isEmpty()) { throw new NumberFormatException("text does not start with a number"); } final long value; try { value = Long.parseLong(number); // this throws a NumberFormatException on overflow } catch (NumberFormatException e) { throw new IllegalArgumentException("The value '" + number + "' cannot be represented as 64bit number (numeric overflow)."); } final long multiplier; if (unit.isEmpty()) { multiplier = 1L; } else { if (matchesAny(unit, BYTES)) { multiplier = 1L; } else if (matchesAny(unit, KILO_BYTES)) { multiplier = 1024L; } else if (matchesAny(unit, MEGA_BYTES)) { multiplier = 1024L * 1024L; } else if (matchesAny(unit, GIGA_BYTES)) { multiplier = 1024L * 1024L * 1024L; } else if (matchesAny(unit, TERA_BYTES)) { multiplier = 1024L * 1024L * 1024L * 1024L; } else { throw new IllegalArgumentException("Memory size unit '" + unit + "' does not match any of the recognized units: " + MemoryUnit.getAllUnits()); } } final long result = value * multiplier; // check for overflow if (result / multiplier != value) { throw new IllegalArgumentException("The value '" + text + "' cannot be represented as 64bit number of bytes (numeric overflow)."); } return result; }
Parses the given string as bytes. The supported expressions are listed under {@link MemorySize}. @param text The string to parse @return The parsed size, in bytes. @throws IllegalArgumentException Thrown, if the expression cannot be parsed.
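A few worked conversions, assuming the usual short unit strings ("kb", "g") are among the recognized units:
long a = MemorySize.parseBytes("1024");  // no unit -> bytes: 1024
long b = MemorySize.parseBytes("10 kb"); // 10 * 1024 = 10_240
long c = MemorySize.parseBytes("2g");    // 2 * 1024^3 = 2_147_483_648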
@Override public final void flatMap(IN value, Collector<OUT> out) throws Exception { for (Iterator<OUT> iter = flatMap(value); iter.hasNext(); ) { out.collect(iter.next()); } }
Delegates calls to the {@link #flatMap(Object)} method.
@Override public EdgeMetrics<K, VV, EV> run(Graph<K, VV, EV> input) throws Exception { super.run(input); // s, t, (d(s), d(t)) DataSet<Edge<K, Tuple3<EV, Degrees, Degrees>>> edgeDegreesPair = input .run(new EdgeDegreesPair<K, VV, EV>() .setParallelism(parallelism)); // s, d(s), count of (u, v) where deg(u) < deg(v) or (deg(u) == deg(v) and u < v) DataSet<Tuple3<K, Degrees, LongValue>> edgeStats = edgeDegreesPair .flatMap(new EdgeStats<>()) .setParallelism(parallelism) .name("Edge stats") .groupBy(0, 1) .reduceGroup(new ReduceEdgeStats<>()) .setParallelism(parallelism) .name("Reduce edge stats") .groupBy(0) .reduce(new SumEdgeStats<>()) .setCombineHint(CombineHint.HASH) .setParallelism(parallelism) .name("Sum edge stats"); edgeMetricsHelper = new EdgeMetricsHelper<>(); edgeStats .output(edgeMetricsHelper) .setParallelism(parallelism) .name("Edge metrics"); return this; }
Implementation notes: <p>Use aggregator to replace SumEdgeStats when aggregators are rewritten to use a hash-combineable hashable-reduce. <p>Use distinct to replace ReduceEdgeStats when the combiner can be disabled with a sorted-reduce forced.
@Override protected void putVariables(Map<String, String> variables) { variables.put(ScopeFormat.asVariable(this.key), value); }
------------------------------------------------------------------------
public void open(final MutableObjectIterator<BT> buildSide, final MutableObjectIterator<PT> probeSide) throws IOException { open(buildSide, probeSide, false); }
Opens the hash join. This method reads the build-side input and constructs the initial hash table, gradually spilling partitions that do not fit into memory. @param buildSide Build side input. @param probeSide Probe side input. @throws IOException Thrown, if an I/O problem occurs while spilling a partition.
public void open(final MutableObjectIterator<BT> buildSide, final MutableObjectIterator<PT> probeSide, boolean buildOuterJoin) throws IOException { this.buildSideOuterJoin = buildOuterJoin; // sanity checks if (!this.closed.compareAndSet(true, false)) { throw new IllegalStateException("Hash Join cannot be opened, because it is currently not closed."); } // grab the write behind buffers first for (int i = this.numWriteBehindBuffers; i > 0; --i) { this.writeBehindBuffers.add(this.availableMemory.remove(this.availableMemory.size() - 1)); } // open builds the initial table by consuming the build-side input this.currentRecursionDepth = 0; buildInitialTable(buildSide); // the first prober is the probe-side input this.probeIterator = new ProbeIterator<PT>(probeSide, this.probeSideSerializer.createInstance()); // the bucket iterator can remain constant over the time this.bucketIterator = new HashBucketIterator<BT, PT>(this.buildSideSerializer, this.recordComparator, probedSet, buildOuterJoin); }
Opens the hash join. This method reads the build-side input and constructs the initial hash table, gradually spilling partitions that do not fit into memory. @param buildSide Build side input. @param probeSide Probe side input. @param buildOuterJoin Whether outer join on build side. @throws IOException Thrown, if an I/O problem occurs while spilling a partition.
public void close() { // make sure that we close only once if (!this.closed.compareAndSet(false, true)) { return; } // clear the iterators, so the next call to next() will notice this.bucketIterator = null; this.probeIterator = null; // release the table structure releaseTable(); // clear the memory in the partitions clearPartitions(); // clear the current probe side channel, if there is one if (this.currentSpilledProbeSide != null) { try { this.currentSpilledProbeSide.closeAndDelete(); } catch (Throwable t) { LOG.warn("Could not close and delete the temp file for the current spilled partition probe side.", t); } } // clear the partitions that are still to be done (that have files on disk) for (int i = 0; i < this.partitionsPending.size(); i++) { final HashPartition<BT, PT> p = this.partitionsPending.get(i); p.clearAllMemory(this.availableMemory); } // return the write-behind buffers for (int i = 0; i < this.numWriteBehindBuffers + this.writeBehindBuffersAvailable; i++) { try { this.availableMemory.add(this.writeBehindBuffers.take()); } catch (InterruptedException iex) { throw new RuntimeException("Hashtable closing was interrupted"); } } this.writeBehindBuffersAvailable = 0; }
Closes the hash table. This effectively releases all internal structures and closes all open files and removes them. The call to this method is valid both as a cleanup after the complete inputs were properly processed, and as a cancellation call, which cleans up all resources that are currently held by the hash join.
protected void buildInitialTable(final MutableObjectIterator<BT> input) throws IOException { // create the partitions final int partitionFanOut = getPartitioningFanOutNoEstimates(this.availableMemory.size()); if (partitionFanOut > MAX_NUM_PARTITIONS) { throw new RuntimeException("Hash join partitions estimate exceeds maximum number of partitions."); } createPartitions(partitionFanOut, 0); // set up the table structure. the write behind buffers are taken away, as are one buffer per partition final int numBuckets = getInitialTableSize(this.availableMemory.size(), this.segmentSize, partitionFanOut, this.avgRecordLen); initTable(numBuckets, (byte) partitionFanOut); final TypeComparator<BT> buildTypeComparator = this.buildSideComparator; BT record = this.buildSideSerializer.createInstance(); // go over the complete input and insert every element into the hash table while (this.running && ((record = input.next(record)) != null)) { final int hashCode = hash(buildTypeComparator.hash(record), 0); insertIntoTable(record, hashCode); } if (!this.running) { return; } // finalize the partitions for (int i = 0; i < this.partitionsBeingBuilt.size(); i++) { HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i); p.finalizeBuildPhase(this.ioManager, this.currentEnumerator, this.writeBehindBuffers); } }
Creates the initial hash table. This method sets up partitions, hash index, and inserts the data from the given iterator. @param input The iterator with the build side data. @throws IOException Thrown, if an element could not be fetched and deserialized from the iterator, or if serialization fails.
protected void clearPartitions() { for (int i = this.partitionsBeingBuilt.size() - 1; i >= 0; --i) { final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i); try { p.clearAllMemory(this.availableMemory); } catch (Exception e) { LOG.error("Error during partition cleanup.", e); } } this.partitionsBeingBuilt.clear(); }
This method clears all partitions currently residing (partially) in memory. It releases all memory and deletes all spilled partitions. <p> This method is intended for a hard cleanup in the case that the join is aborted.
protected void releaseTable() { // set the counters back this.numBuckets = 0; if (this.buckets != null) { for (MemorySegment bucket : this.buckets) { this.availableMemory.add(bucket); } this.buckets = null; } }
Releases the table (the array of buckets) and returns the occupied memory segments to the list of free segments.
protected int spillPartition() throws IOException { // find the largest partition ArrayList<HashPartition<BT, PT>> partitions = this.partitionsBeingBuilt; int largestNumBlocks = 0; int largestPartNum = -1; for (int i = 0; i < partitions.size(); i++) { HashPartition<BT, PT> p = partitions.get(i); if (p.isInMemory() && p.getNumOccupiedMemorySegments() > largestNumBlocks) { largestNumBlocks = p.getNumOccupiedMemorySegments(); largestPartNum = i; } } final HashPartition<BT, PT> p = partitions.get(largestPartNum); if (useBloomFilters) { buildBloomFilterForBucketsInPartition(largestPartNum, p); } // spill the partition int numBuffersFreed = p.spillPartition(this.availableMemory, this.ioManager, this.currentEnumerator.next(), this.writeBehindBuffers); this.writeBehindBuffersAvailable += numBuffersFreed; // grab as many buffers as are available directly MemorySegment currBuff; while (this.writeBehindBuffersAvailable > 0 && (currBuff = this.writeBehindBuffers.poll()) != null) { this.availableMemory.add(currBuff); this.writeBehindBuffersAvailable--; } return largestPartNum; }
Selects a partition and spills it. The number of the spilled partition is returned. @return The number of the spilled partition.
final void buildBloomFilterForBucket(int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) { final int count = bucket.getShort(bucketInSegmentPos + HEADER_COUNT_OFFSET); if (count <= 0) { return; } int[] hashCodes = new int[count]; // As the hashcode and bloom filter occupy same bytes, so we read all hashcode out at first and then write back to bloom filter. for (int i = 0; i < count; i++) { hashCodes[i] = bucket.getInt(bucketInSegmentPos + BUCKET_HEADER_LENGTH + i * HASH_CODE_LEN); } this.bloomFilter.setBitsLocation(bucket, bucketInSegmentPos + BUCKET_HEADER_LENGTH); for (int hashCode : hashCodes) { this.bloomFilter.addHash(hashCode); } buildBloomFilterForExtraOverflowSegments(bucketInSegmentPos, bucket, p); }
Sets all the bucket memory except the bucket header as the bit set of the bloom filter, and uses the hash codes of the build-side records to build the bloom filter.
final void ensureNumBuffersReturned(final int minRequiredAvailable) { if (minRequiredAvailable > this.availableMemory.size() + this.writeBehindBuffersAvailable) { throw new IllegalArgumentException("More buffers requested available than totally available."); } try { while (this.availableMemory.size() < minRequiredAvailable) { this.availableMemory.add(this.writeBehindBuffers.take()); this.writeBehindBuffersAvailable--; } } catch (InterruptedException iex) { throw new RuntimeException("Hash Join was interrupted."); } }
This method makes sure that at least a certain number of memory segments is in the list of free segments. Free memory can be in the list of free segments, or in the return-queue where segments used to write behind are put. The number of segments that are in that return-queue, but are actually reclaimable is tracked. This method makes sure at least a certain number of buffers is reclaimed. @param minRequiredAvailable The minimum number of buffers that needs to be reclaimed.
public static int getNumWriteBehindBuffers(int numBuffers) { int numIOBufs = (int) (Math.log(numBuffers) / Math.log(4) - 1.5); return numIOBufs > 6 ? 6 : numIOBufs; }
Determines the number of buffers to be used for asynchronous write behind. It is currently computed as the logarithm of the number of buffers to the base 4, minus 1.5, truncated to an integer. The number of write behind buffers is capped at six. @param numBuffers The number of available buffers. @return The number of buffers to be used for asynchronous write behind.
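For a feel of the sizing, a small, self-contained sketch of the same formula (plain Java; the class name and the sample buffer counts are chosen here for illustration only):

public class WriteBehindBufferSizing {
    public static void main(String[] args) {
        for (int numBuffers : new int[] {32, 128, 512, 4096, 100_000}) {
            // same formula as above: log4(numBuffers) - 1.5, truncated, capped at 6
            int numIOBufs = (int) (Math.log(numBuffers) / Math.log(4) - 1.5);
            System.out.println(numBuffers + " buffers -> " + Math.min(numIOBufs, 6) + " write-behind buffers");
        }
    }
}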
public static int hash(int code, int level) { final int rotation = level * 11; code = Integer.rotateLeft(code, rotation); return MathUtils.jenkinsHash(code); }
The level parameter is needed so that we can have different hash functions when we recursively apply the partitioning, so that the working set eventually fits into memory.
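A minimal sketch of why the level matters: rotating the code by a level-dependent amount before the final mix yields a different hash family per recursion level, so records that collide at one level spread out at the next. The finalizer below is a simple stand-in for the actual jenkinsHash, not the real implementation:

public class LevelHashDemo {
    // stand-in avalanche mix; the real code delegates to MathUtils.jenkinsHash
    static int mix(int h) {
        h ^= (h >>> 16);
        h *= 0x85ebca6b;
        h ^= (h >>> 13);
        return h;
    }

    static int hash(int code, int level) {
        // level-dependent rotation, as in the method above
        return mix(Integer.rotateLeft(code, level * 11));
    }

    public static void main(String[] args) {
        int code = 42;
        System.out.println(hash(code, 0) + " vs " + hash(code, 1)); // different values per level
    }
}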
@Override
public int getNumberLeaves() {
    while (true) {
        try {
            int result = 0;
            for (Slot slot : subSlots) {
                result += slot.getNumberLeaves();
            }
            return result;
        }
        catch (ConcurrentModificationException e) {
            // ignore and retry
        }
    }
}
------------------------------------------------------------------------
SimpleSlot allocateSubSlot(AbstractID groupId) { if (isAlive()) { SimpleSlot slot = new SimpleSlot( getOwner(), getTaskManagerLocation(), subSlots.size(), getTaskManagerGateway(), this, groupId); subSlots.add(slot); return slot; } else { return null; } }
Creates a new sub slot if the slot is not dead yet. <b>NOTE:</b> This method is not synchronized and must only be called from the slot's assignment group instance to guarantee synchronization. @param groupId The ID to identify tasks which can be deployed in this sub slot. @return The new sub slot if the shared slot is still alive, otherwise null.
SharedSlot allocateSharedSlot(AbstractID groupId){ if (isAlive()) { SharedSlot slot = new SharedSlot( getOwner(), getTaskManagerLocation(), subSlots.size(), getTaskManagerGateway(), assignmentGroup, this, groupId); subSlots.add(slot); return slot; } else { return null; } }
Creates a new sub slot if the slot is not dead yet. NOTE: This method is not synchronized and must only be called from the slot's assignment group instance to guarantee synchronization. @param groupId The ID to identify tasks which can be deployed in this sub slot. @return The new sub slot if the shared slot is still alive, otherwise null.
int removeDisposedChildSlot(Slot slot) { if (!slot.isReleased() || !subSlots.remove(slot)) { throw new IllegalArgumentException(); } return subSlots.size(); }
Removes the given slot from this shared slot. This method should only be called through this shared slot's {@link SlotSharingGroupAssignment}. @param slot The slot to be removed from the set of sub slots. @return The number of remaining sub slots.
static <IN, BucketID> Bucket<IN, BucketID> getNew( final RecoverableWriter fsWriter, final int subtaskIndex, final BucketID bucketId, final Path bucketPath, final long initialPartCounter, final PartFileWriter.PartFileFactory<IN, BucketID> partFileFactory, final RollingPolicy<IN, BucketID> rollingPolicy) { return new Bucket<>(fsWriter, subtaskIndex, bucketId, bucketPath, initialPartCounter, partFileFactory, rollingPolicy); }
Creates a new empty {@code Bucket}. @param fsWriter the filesystem-specific {@link RecoverableWriter}. @param subtaskIndex the index of the subtask creating the bucket. @param bucketId the identifier of the bucket, as returned by the {@link BucketAssigner}. @param bucketPath the path to where the part files for the bucket will be written to. @param initialPartCounter the initial counter for the part files of the bucket. @param partFileFactory the {@link PartFileWriter.PartFileFactory} used to create part file writers. @param rollingPolicy the {@link RollingPolicy} that determines when a part file is rolled. @param <IN> the type of input elements to the sink. @param <BucketID> the type of the identifier of the bucket, as returned by the {@link BucketAssigner}. @return The new Bucket.
static <IN, BucketID> Bucket<IN, BucketID> restore( final RecoverableWriter fsWriter, final int subtaskIndex, final long initialPartCounter, final PartFileWriter.PartFileFactory<IN, BucketID> partFileFactory, final RollingPolicy<IN, BucketID> rollingPolicy, final BucketState<BucketID> bucketState) throws IOException { return new Bucket<>(fsWriter, subtaskIndex, initialPartCounter, partFileFactory, rollingPolicy, bucketState); }
Restores a {@code Bucket} from the state included in the provided {@link BucketState}. @param fsWriter the filesystem-specific {@link RecoverableWriter}. @param subtaskIndex the index of the subtask creating the bucket. @param initialPartCounter the initial counter for the part files of the bucket. @param partFileFactory the {@link PartFileWriter.PartFileFactory} used to create part file writers. @param rollingPolicy the {@link RollingPolicy} that determines when a part file is rolled. @param bucketState the initial state of the restored bucket. @param <IN> the type of input elements to the sink. @param <BucketID> the type of the identifier of the bucket, as returned by the {@link BucketAssigner}. @return The restored Bucket.
public JobWithJars getPlanWithoutJars() throws ProgramInvocationException { if (isUsingProgramEntryPoint()) { return new JobWithJars(getPlan(), Collections.<URL>emptyList(), classpaths, userCodeClassLoader); } else { throw new ProgramInvocationException("Cannot create a " + JobWithJars.class.getSimpleName() + " for a program that is using the interactive mode.", getPlan().getJobId()); } }
Returns the plan without the required jars when the files are already provided by the cluster. @return The plan without attached jar files. @throws ProgramInvocationException Thrown, if the plan cannot be created or the program uses the interactive mode.
public JobWithJars getPlanWithJars() throws ProgramInvocationException { if (isUsingProgramEntryPoint()) { return new JobWithJars(getPlan(), getAllLibraries(), classpaths, userCodeClassLoader); } else { throw new ProgramInvocationException("Cannot create a " + JobWithJars.class.getSimpleName() + " for a program that is using the interactive mode.", getPlan().getJobId()); } }
Returns the plan with all required jars. @return The plan with attached jar files. @throws ProgramInvocationException Thrown, if the plan cannot be created or the program uses the interactive mode.
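A hedged usage sketch, assuming these methods belong to PackagedProgram as in older Flink releases; the jar path and entry-point class are placeholders, and both calls can throw ProgramInvocationException:

import java.io.File;
import org.apache.flink.client.program.JobWithJars;
import org.apache.flink.client.program.PackagedProgram;

PackagedProgram program =
    new PackagedProgram(new File("/path/to/user-job.jar"), "com.example.MyJob");

// plan plus the nested libraries extracted from the jar
JobWithJars withJars = program.getPlanWithJars();

// plan only, when the cluster already provides the jar files
JobWithJars withoutJars = program.getPlanWithoutJars();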
public List<URL> getAllLibraries() { List<URL> libs = new ArrayList<URL>(this.extractedTempLibraries.size() + 1); if (jarFile != null) { libs.add(jarFile); } for (File tmpLib : this.extractedTempLibraries) { try { libs.add(tmpLib.getAbsoluteFile().toURI().toURL()); } catch (MalformedURLException e) { throw new RuntimeException("URL is invalid. This should not happen.", e); } } return libs; }
Returns all provided libraries needed to run the program.
private Plan getPlan() throws ProgramInvocationException { if (this.plan == null) { Thread.currentThread().setContextClassLoader(this.userCodeClassLoader); this.plan = createPlanFromProgram(this.program, this.args); } return this.plan; }
Returns the plan as generated from the Pact Assembler. @return The program's plan. @throws ProgramInvocationException Thrown, if an error occurred in the program while creating the program's {@link Plan}.
private static Plan createPlanFromProgram(Program program, String[] options) throws ProgramInvocationException { try { return program.getPlan(options); } catch (Throwable t) { throw new ProgramInvocationException("Error while calling the program: " + t.getMessage(), t); } }
Takes the jar described by the given file and invokes its pact assembler class to assemble a plan. The assembler class name is either passed through a parameter, or it is read from the manifest of the jar. The assembler is handed the given options for its assembly. @param program The program to create the plan for. @param options The options for the assembler. @return The plan created by the program. @throws ProgramInvocationException Thrown, if an error occurred in the user-provided pact assembler.
public static List<File> extractContainedLibraries(URL jarFile) throws ProgramInvocationException {
    Random rnd = new Random();

    JarFile jar = null;
    try {
        jar = new JarFile(new File(jarFile.toURI()));
        final List<JarEntry> containedJarFileEntries = new ArrayList<JarEntry>();

        Enumeration<JarEntry> entries = jar.entries();
        while (entries.hasMoreElements()) {
            JarEntry entry = entries.nextElement();
            String name = entry.getName();

            if (name.length() > 8 && name.startsWith("lib/") && name.endsWith(".jar")) {
                containedJarFileEntries.add(entry);
            }
        }

        if (containedJarFileEntries.isEmpty()) {
            return Collections.emptyList();
        }
        else {
            // go over all contained jar files
            final List<File> extractedTempLibraries = new ArrayList<File>(containedJarFileEntries.size());
            final byte[] buffer = new byte[4096];

            boolean incomplete = true;

            try {
                for (int i = 0; i < containedJarFileEntries.size(); i++) {
                    final JarEntry entry = containedJarFileEntries.get(i);
                    String name = entry.getName();
                    // '/' as in case of zip, jar
                    // java.util.zip.ZipEntry#isDirectory always looks only for '/' not for File.separator
                    name = name.replace('/', '_');

                    File tempFile;
                    try {
                        tempFile = File.createTempFile(rnd.nextInt(Integer.MAX_VALUE) + "_", name);
                        tempFile.deleteOnExit();
                    }
                    catch (IOException e) {
                        throw new ProgramInvocationException(
                            "An I/O error occurred while creating temporary file to extract nested library '" +
                                entry.getName() + "'.", e);
                    }

                    extractedTempLibraries.add(tempFile);

                    // copy the temp file contents to a temporary File
                    OutputStream out = null;
                    InputStream in = null;
                    try {
                        out = new FileOutputStream(tempFile);
                        in = new BufferedInputStream(jar.getInputStream(entry));

                        int numRead = 0;
                        while ((numRead = in.read(buffer)) != -1) {
                            out.write(buffer, 0, numRead);
                        }
                    }
                    catch (IOException e) {
                        throw new ProgramInvocationException("An I/O error occurred while extracting nested library '" +
                            entry.getName() + "' to temporary file '" + tempFile.getAbsolutePath() + "'.");
                    }
                    finally {
                        if (out != null) {
                            out.close();
                        }
                        if (in != null) {
                            in.close();
                        }
                    }
                }

                incomplete = false;
            }
            finally {
                if (incomplete) {
                    deleteExtractedLibraries(extractedTempLibraries);
                }
            }

            return extractedTempLibraries;
        }
    }
    catch (Throwable t) {
        throw new ProgramInvocationException("Unknown I/O error while extracting contained jar files.", t);
    }
    finally {
        if (jar != null) {
            try {
                jar.close();
            } catch (Throwable t) {}
        }
    }
}
Takes all JAR files that are contained in this program's JAR file and extracts them to the system's temp directory. @return The file names of the extracted temporary files. @throws ProgramInvocationException Thrown, if the extraction process failed.
private CodecFactory getCompressionCodec(Map<String, String> conf) { if (getBoolean(conf, CONF_COMPRESS, false)) { int deflateLevel = getInt(conf, CONF_DEFLATE_LEVEL, CodecFactory.DEFAULT_DEFLATE_LEVEL); int xzLevel = getInt(conf, CONF_XZ_LEVEL, CodecFactory.DEFAULT_XZ_LEVEL); String outputCodec = conf.get(CONF_COMPRESS_CODEC); if (DataFileConstants.DEFLATE_CODEC.equals(outputCodec)) { return CodecFactory.deflateCodec(deflateLevel); } else if (DataFileConstants.XZ_CODEC.equals(outputCodec)) { return CodecFactory.xzCodec(xzLevel); } else { return CodecFactory.fromString(outputCodec); } } return CodecFactory.nullCodec(); }
This is derived from AvroOutputFormatBase.getCompressionCodec(..).
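The same fallback logic can be exercised directly against Avro's CodecFactory. A short illustration only; the configuration keys read by the method above are not shown, and the compression levels are arbitrary:

import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileConstants;

// deflate and xz honor an explicit level, other codecs are resolved by name,
// and "no compression" maps to the null codec
CodecFactory deflate = CodecFactory.deflateCodec(6);
CodecFactory xz      = CodecFactory.xzCodec(6);
CodecFactory snappy  = CodecFactory.fromString(DataFileConstants.SNAPPY_CODEC);
CodecFactory none    = CodecFactory.nullCodec();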
public List<Protos.Resource> takeScalar(String resourceName, double amount, Set<String> roles) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Allocating {} {}", amount, resourceName);
    }

    List<Protos.Resource> result = new ArrayList<>(1);
    for (ListIterator<Protos.Resource> i = resources.listIterator(); i.hasNext();) {
        if (amount <= EPSILON) {
            break;
        }

        // take from next available scalar resource that is unreserved or reserved for an applicable role
        Protos.Resource available = i.next();
        if (!resourceName.equals(available.getName()) || !available.hasScalar()) {
            continue;
        }
        if (!UNRESERVED_ROLE.equals(available.getRole()) && !roles.contains(available.getRole())) {
            continue;
        }

        double amountToTake = Math.min(available.getScalar().getValue(), amount);
        Protos.Resource taken = available.toBuilder().setScalar(Protos.Value.Scalar.newBuilder().setValue(amountToTake)).build();
        amount -= amountToTake;
        result.add(taken);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Taking {} from {}", amountToTake, Utils.toString(available));
        }

        // keep remaining amount (if any)
        double remaining = available.getScalar().getValue() - taken.getScalar().getValue();
        if (remaining > EPSILON) {
            i.set(available.toBuilder().setScalar(Protos.Value.Scalar.newBuilder().setValue(remaining)).build());
        }
        else {
            i.remove();
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Allocated: {}, unsatisfied: {}", Utils.toString(result), amount);
    }
    return result;
}
Takes some amount of scalar resources (e.g. cpus, mem). @param resourceName the name of the scalar resource to take. @param amount the (approximate) amount to take from the available quantity. @param roles the roles to accept. @return the resources that were taken.
public List<Protos.Resource> takeRanges(String resourceName, int amount, Set<String> roles) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Allocating {} {}", amount, resourceName);
    }

    List<Protos.Resource> result = new ArrayList<>(1);
    for (ListIterator<Protos.Resource> i = resources.listIterator(); i.hasNext();) {
        if (amount <= 0) {
            break;
        }

        // take from next available range resource that is unreserved or reserved for an applicable role
        Protos.Resource available = i.next();
        if (!resourceName.equals(available.getName()) || !available.hasRanges()) {
            continue;
        }
        if (!UNRESERVED_ROLE.equals(available.getRole()) && !roles.contains(available.getRole())) {
            continue;
        }

        List<Protos.Value.Range> takenRanges = new ArrayList<>();
        List<Protos.Value.Range> remainingRanges = new ArrayList<>(available.getRanges().getRangeList());
        for (ListIterator<Protos.Value.Range> j = remainingRanges.listIterator(); j.hasNext();) {
            if (amount <= 0) {
                break;
            }

            // take from next available range (note: ranges are inclusive)
            Protos.Value.Range availableRange = j.next();
            long amountToTake = Math.min(availableRange.getEnd() - availableRange.getBegin() + 1, amount);
            Protos.Value.Range takenRange = availableRange.toBuilder().setEnd(availableRange.getBegin() + amountToTake - 1).build();
            amount -= amountToTake;
            takenRanges.add(takenRange);

            // keep remaining range (if any)
            long remaining = availableRange.getEnd() - takenRange.getEnd();
            if (remaining > 0) {
                j.set(availableRange.toBuilder().setBegin(takenRange.getEnd() + 1).build());
            }
            else {
                j.remove();
            }
        }

        Protos.Resource taken = available.toBuilder().setRanges(Protos.Value.Ranges.newBuilder().addAllRange(takenRanges)).build();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Taking {} from {}", Utils.toString(taken.getRanges()), Utils.toString(available));
        }
        result.add(taken);

        // keep remaining ranges (if any)
        if (remainingRanges.size() > 0) {
            i.set(available.toBuilder().setRanges(Protos.Value.Ranges.newBuilder().addAllRange(remainingRanges)).build());
        }
        else {
            i.remove();
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Allocated: {}, unsatisfied: {}", Utils.toString(result), amount);
    }
    return result;
}
Takes some amount of range resources (e.g. ports). @param resourceName the name of the range resource to take. @param amount the number of values to take from the available range(s). @param roles the roles to accept. @return the resources that were taken.
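For context, a Mesos range resource such as a port range is built as sketched below; the class that holds takeScalar/takeRanges is not shown here and its name is not assumed, and the port numbers are arbitrary:

import org.apache.mesos.Protos;

Protos.Resource ports = Protos.Resource.newBuilder()
    .setName("ports")
    .setType(Protos.Value.Type.RANGES)
    .setRanges(Protos.Value.Ranges.newBuilder()
        .addRange(Protos.Value.Range.newBuilder().setBegin(31000).setEnd(31009))) // 10 ports, inclusive
    .build();

// takeRanges("ports", 3, roles) against such an offer would take [31000, 31002]
// and leave [31003, 31009] available for later allocations.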
static <T> PojoSerializerSnapshotData<T> createFrom( Class<T> pojoClass, Field[] fields, TypeSerializer<?>[] fieldSerializers, LinkedHashMap<Class<?>, TypeSerializer<?>> registeredSubclassSerializers, Map<Class<?>, TypeSerializer<?>> nonRegisteredSubclassSerializers) { final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots = new LinkedOptionalMap<>(fields.length); for (int i = 0; i < fields.length; i++) { Field field = fields[i]; String fieldName = (field == null) ? getDummyNameForMissingField(i) : field.getName(); fieldSerializerSnapshots.put(fieldName, field, TypeSerializerUtils.snapshotBackwardsCompatible(fieldSerializers[i])); } LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>> registeredSubclassSerializerSnapshots = new LinkedHashMap<>(registeredSubclassSerializers.size()); registeredSubclassSerializers.forEach((k, v) -> registeredSubclassSerializerSnapshots.put(k, TypeSerializerUtils.snapshotBackwardsCompatible(v))); Map<Class<?>, TypeSerializerSnapshot<?>> nonRegisteredSubclassSerializerSnapshots = new HashMap<>(nonRegisteredSubclassSerializers.size()); nonRegisteredSubclassSerializers.forEach((k, v) -> nonRegisteredSubclassSerializerSnapshots.put(k, TypeSerializerUtils.snapshotBackwardsCompatible(v))); return new PojoSerializerSnapshotData<>( pojoClass, fieldSerializerSnapshots, optionalMapOf(registeredSubclassSerializerSnapshots, Class::getName), optionalMapOf(nonRegisteredSubclassSerializerSnapshots, Class::getName)); }
Creates a {@link PojoSerializerSnapshotData} from configuration of a {@link PojoSerializer}. <p>This factory method is meant to be used in regular write paths, i.e. when taking a snapshot of the {@link PojoSerializer}. All registered subclass classes, and non-registered subclass classes are all present. Some POJO fields may be absent, if the originating {@link PojoSerializer} was a restored one with already missing fields, and was never replaced by a new {@link PojoSerializer} (i.e. because the serialized old data was never accessed).
static <T> PojoSerializerSnapshotData<T> createFrom(DataInputView in, ClassLoader userCodeClassLoader) throws IOException { return PojoSerializerSnapshotData.readSnapshotData(in, userCodeClassLoader); }
Creates a {@link PojoSerializerSnapshotData} from serialized data stream. <p>This factory method is meant to be used in regular read paths, i.e. when reading back a snapshot of the {@link PojoSerializer}. POJO fields, registered subclass classes, and non-registered subclass classes may no longer be present anymore.
static <T> PojoSerializerSnapshotData<T> createFrom( Class<T> pojoClass, Field[] fields, TypeSerializerSnapshot<?>[] existingFieldSerializerSnapshots, LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>> existingRegisteredSubclassSerializerSnapshots, Map<Class<?>, TypeSerializerSnapshot<?>> existingNonRegisteredSubclassSerializerSnapshots) { final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots = new LinkedOptionalMap<>(fields.length); for (int i = 0; i < fields.length; i++) { Field field = fields[i]; String fieldName = (field == null) ? getDummyNameForMissingField(i) : field.getName(); fieldSerializerSnapshots.put(fieldName, field, existingFieldSerializerSnapshots[i]); } return new PojoSerializerSnapshotData<>( pojoClass, fieldSerializerSnapshots, optionalMapOf(existingRegisteredSubclassSerializerSnapshots, Class::getName), optionalMapOf(existingNonRegisteredSubclassSerializerSnapshots, Class::getName)); }
Creates a {@link PojoSerializerSnapshotData} from existing snapshotted configuration of a {@link PojoSerializer}.
void writeSnapshotData(DataOutputView out) throws IOException { out.writeUTF(pojoClass.getName()); writeOptionalMap(out, fieldSerializerSnapshots, PojoFieldUtils::writeField, TypeSerializerSnapshot::writeVersionedSnapshot); writeOptionalMap(out, registeredSubclassSerializerSnapshots, NoOpWriter.noopWriter(), TypeSerializerSnapshot::writeVersionedSnapshot); writeOptionalMap(out, nonRegisteredSubclassSerializerSnapshots, NoOpWriter.noopWriter(), TypeSerializerSnapshot::writeVersionedSnapshot); }
---------------------------------------------------------------------------------------------
@Override public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) { final String fullName = group.getMetricIdentifier(metricName, this); synchronized (this) { if (metric instanceof Counter) { counters.put((Counter) metric, fullName); registry.register(fullName, new FlinkCounterWrapper((Counter) metric)); } else if (metric instanceof Gauge) { gauges.put((Gauge<?>) metric, fullName); registry.register(fullName, FlinkGaugeWrapper.fromGauge((Gauge<?>) metric)); } else if (metric instanceof Histogram) { Histogram histogram = (Histogram) metric; histograms.put(histogram, fullName); if (histogram instanceof DropwizardHistogramWrapper) { registry.register(fullName, ((DropwizardHistogramWrapper) histogram).getDropwizardHistogram()); } else { registry.register(fullName, new FlinkHistogramWrapper(histogram)); } } else if (metric instanceof Meter) { Meter meter = (Meter) metric; meters.put(meter, fullName); if (meter instanceof DropwizardMeterWrapper) { registry.register(fullName, ((DropwizardMeterWrapper) meter).getDropwizardMeter()); } else { registry.register(fullName, new FlinkMeterWrapper(meter)); } } else { log.warn("Cannot add metric of type {}. This indicates that the reporter " + "does not support this metric type.", metric.getClass().getName()); } } }
------------------------------------------------------------------------
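As a usage note for the wrapper types referenced above: a Codahale histogram can be registered with Flink through DropwizardHistogramWrapper. A minimal sketch, assuming the metric group is obtained inside a rich function; the reservoir size and metric name are arbitrary:

import com.codahale.metrics.SlidingWindowReservoir;
import org.apache.flink.dropwizard.metrics.DropwizardHistogramWrapper;
import org.apache.flink.metrics.Histogram;
import org.apache.flink.metrics.MetricGroup;

// inside a rich function: MetricGroup metricGroup = getRuntimeContext().getMetricGroup();
com.codahale.metrics.Histogram dropwizardHistogram =
    new com.codahale.metrics.Histogram(new SlidingWindowReservoir(500));

Histogram histogram = metricGroup.histogram(
    "myHistogram", new DropwizardHistogramWrapper(dropwizardHistogram));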
@Override
public void report() {
    // we do not need to lock here, because the dropwizard registry is
    // internally a concurrent map
    @SuppressWarnings("rawtypes")
    final SortedMap<String, com.codahale.metrics.Gauge> gauges = registry.getGauges();
    final SortedMap<String, com.codahale.metrics.Counter> counters = registry.getCounters();
    final SortedMap<String, com.codahale.metrics.Histogram> histograms = registry.getHistograms();
    final SortedMap<String, com.codahale.metrics.Meter> meters = registry.getMeters();
    final SortedMap<String, com.codahale.metrics.Timer> timers = registry.getTimers();

    this.reporter.report(gauges, counters, histograms, meters, timers);
}
------------------------------------------------------------------------
public static TaskManagerServicesConfiguration fromConfiguration(
        Configuration configuration,
        long maxJvmHeapMemory,
        InetAddress remoteAddress,
        boolean localCommunication) {

    final String[] tmpDirs = ConfigurationUtils.parseTempDirectories(configuration);
    String[] localStateRootDir = ConfigurationUtils.parseLocalStateDirectories(configuration);
    if (localStateRootDir.length == 0) {
        // default to temp dirs.
        localStateRootDir = tmpDirs;
    }

    boolean localRecoveryMode = configuration.getBoolean(CheckpointingOptions.LOCAL_RECOVERY);

    final NetworkEnvironmentConfiguration networkConfig = NetworkEnvironmentConfiguration.fromConfiguration(
        configuration,
        maxJvmHeapMemory,
        localCommunication,
        remoteAddress);

    final QueryableStateConfiguration queryableStateConfig =
        QueryableStateConfiguration.fromConfiguration(configuration);

    boolean preAllocateMemory = configuration.getBoolean(TaskManagerOptions.MANAGED_MEMORY_PRE_ALLOCATE);

    long timerServiceShutdownTimeout = AkkaUtils.getTimeout(configuration).toMillis();

    final RetryingRegistrationConfiguration retryingRegistrationConfiguration =
        RetryingRegistrationConfiguration.fromConfiguration(configuration);

    return new TaskManagerServicesConfiguration(
        remoteAddress,
        tmpDirs,
        localStateRootDir,
        localRecoveryMode,
        networkConfig,
        queryableStateConfig,
        ConfigurationParserUtils.getSlot(configuration),
        ConfigurationParserUtils.getManagedMemorySize(configuration),
        ConfigurationParserUtils.getMemoryType(configuration),
        preAllocateMemory,
        ConfigurationParserUtils.getManagedMemoryFraction(configuration),
        timerServiceShutdownTimeout,
        retryingRegistrationConfiguration,
        ConfigurationUtils.getSystemResourceMetricsProbingInterval(configuration));
}
Utility method to extract TaskManager config parameters from the configuration and to sanity check them. @param configuration The configuration. @param maxJvmHeapMemory The maximum JVM heap size, in bytes. @param remoteAddress The IP address under which the TaskManager will be accessible. @param localCommunication True, to skip initializing the network stack. Use only in cases where only one task manager runs. @return TaskManagerServicesConfiguration that wraps InstanceConnectionInfo, NetworkEnvironmentConfiguration, etc.
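A hedged sketch of calling this factory; the option constants are assumed to be the usual Flink config options, and the heap size and address values are arbitrary:

import java.net.InetAddress;
import org.apache.flink.configuration.CheckpointingOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;

Configuration config = new Configuration();
config.setString(CoreOptions.TMP_DIRS, "/tmp/flink-tmp");     // temp directories
config.setBoolean(CheckpointingOptions.LOCAL_RECOVERY, true); // enable local recovery

TaskManagerServicesConfiguration servicesConfig =
    TaskManagerServicesConfiguration.fromConfiguration(
        config,
        512L * 1024 * 1024,               // max JVM heap, in bytes
        InetAddress.getLoopbackAddress(), // address the TaskManager is reachable under
        true);                            // local communication only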
void onConsumedPartition(ResultPartition partition) {
    final ResultPartition previous;

    LOG.debug("Received consume notification from {}.", partition);

    synchronized (registeredPartitions) {
        previous = registeredPartitions.remove(partition.getPartitionId());
    }

    // Release the partition if it was successfully removed
    if (partition == previous) {
        partition.release();

        LOG.debug("Released {}.", partition);
    }
}
------------------------------------------------------------------------
public final void streamBufferWithGroups(Iterator<IN1> iterator1, Iterator<IN2> iterator2, Collector<OUT> c) { SingleElementPushBackIterator<IN1> i1 = new SingleElementPushBackIterator<>(iterator1); SingleElementPushBackIterator<IN2> i2 = new SingleElementPushBackIterator<>(iterator2); try { int size; if (i1.hasNext() || i2.hasNext()) { while (true) { int sig = in.readInt(); switch (sig) { case SIGNAL_BUFFER_REQUEST_G0: if (i1.hasNext()) { size = sender.sendBuffer1(i1); sendWriteNotification(size, i1.hasNext()); } break; case SIGNAL_BUFFER_REQUEST_G1: if (i2.hasNext()) { size = sender.sendBuffer2(i2); sendWriteNotification(size, i2.hasNext()); } break; case SIGNAL_FINISHED: return; case SIGNAL_ERROR: try { outPrinter.join(); } catch (InterruptedException e) { outPrinter.interrupt(); } try { errorPrinter.join(); } catch (InterruptedException e) { errorPrinter.interrupt(); } throw new RuntimeException( "External process for task " + function.getRuntimeContext().getTaskName() + " terminated prematurely due to an error." + msg); default: receiver.collectBuffer(c, sig); sendReadConfirmation(); break; } } } } catch (SocketTimeoutException ignored) { throw new RuntimeException("External process for task " + function.getRuntimeContext().getTaskName() + " stopped responding." + msg); } catch (Exception e) { throw new RuntimeException("Critical failure for task " + function.getRuntimeContext().getTaskName() + ". " + msg.get(), e); } }
Sends all values contained in both iterators to the external process and collects all results. @param iterator1 first input stream @param iterator2 second input stream @param c collector
public TableOperation create( SetTableOperationType type, TableOperation left, TableOperation right, boolean all) { failIfStreaming(type, all); validateSetOperation(type, left, right); return new SetTableOperation(left, right, type, all); }
Creates a valid algebraic operation. @param type type of operation to create @param left first relational operation of the operation @param right second relational operation of the operation @param all flag defining how duplicates should be handled @return the created algebraic operation
@Override protected Union<T> translateToDataFlow(Operator<T> input1, Operator<T> input2) { return new Union<T>(input1, input2, unionLocationName); }
Returns the BinaryNodeTranslation of the Union. @param input1 The first input of the union, as a common API operator. @param input2 The second input of the union, as a common API operator. @return The common API union operator.
@Override
protected void flush() {
    // The Kafka 0.8 producer doesn't support flushing, we wait here
    // until all pending records are confirmed
    synchronized (pendingRecordsLock) {
        while (pendingRecords > 0) {
            try {
                pendingRecordsLock.wait();
            } catch (InterruptedException e) {
                // this can be interrupted when the Task has been cancelled.
                // by throwing an exception, we ensure that this checkpoint doesn't get confirmed
                throw new RuntimeException("Flushing got interrupted while checkpointing", e);
            }
        }
    }
}
---------------------------------------------------------------------
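The other half of this handshake is the producer callback that decrements the pending counter and wakes the flusher. A minimal, self-contained sketch of the wait/notify pattern, independent of Kafka; all names below are chosen for illustration:

public class PendingRecordsFlusher {
    private final Object lock = new Object();
    private long pendingRecords;

    // called when a record is handed to the producer
    void recordSent() {
        synchronized (lock) {
            pendingRecords++;
        }
    }

    // called from the producer callback when a record is confirmed
    void recordAcknowledged() {
        synchronized (lock) {
            if (--pendingRecords == 0) {
                lock.notifyAll();
            }
        }
    }

    // blocks until every in-flight record has been confirmed
    void flush() throws InterruptedException {
        synchronized (lock) {
            while (pendingRecords > 0) {
                lock.wait();
            }
        }
    }
}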
public <T> T getFieldNotNull(int pos){ T field = getField(pos); if (field != null) { return field; } else { throw new NullFieldException(pos); } }
Gets the field at the specified position, throws NullFieldException if the field is null. Used for comparing key fields. @param pos The position of the field, zero indexed. @return The field at the specified position. @throws IndexOutOfBoundsException Thrown, if the position is negative, or equal to, or larger than the number of fields. @throws NullFieldException Thrown, if the field at pos is null.
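A short illustration with a Flink tuple (the values are arbitrary):

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.types.NullFieldException;

Tuple2<String, Integer> t = Tuple2.of("key", null);
String key = t.getFieldNotNull(0);   // returns "key"
try {
    Integer value = t.getFieldNotNull(1);
} catch (NullFieldException e) {
    // field 1 is null, so the access fails fast instead of returning null
}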
@SuppressWarnings("unchecked") public static Class<? extends Tuple> getTupleClass(int arity) { if (arity < 0 || arity > MAX_ARITY) { throw new IllegalArgumentException("The tuple arity must be in [0, " + MAX_ARITY + "]."); } return (Class<? extends Tuple>) CLASSES[arity]; }
Gets the class corresponding to the tuple of the given arity (dimensions). For example, {@code getTupleClass(3)} will return the {@code Tuple3.class}. @param arity The arity of the tuple class to get. @return The tuple class with the given arity.
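A quick illustration (arity 3 is arbitrary):

import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple3;

Class<? extends Tuple> clazz = Tuple.getTupleClass(3);
assert clazz == Tuple3.class;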
public static Tuple newInstance(int arity) { switch (arity) { case 0: return Tuple0.INSTANCE; case 1: return new Tuple1(); case 2: return new Tuple2(); case 3: return new Tuple3(); case 4: return new Tuple4(); case 5: return new Tuple5(); case 6: return new Tuple6(); case 7: return new Tuple7(); case 8: return new Tuple8(); case 9: return new Tuple9(); case 10: return new Tuple10(); case 11: return new Tuple11(); case 12: return new Tuple12(); case 13: return new Tuple13(); case 14: return new Tuple14(); case 15: return new Tuple15(); case 16: return new Tuple16(); case 17: return new Tuple17(); case 18: return new Tuple18(); case 19: return new Tuple19(); case 20: return new Tuple20(); case 21: return new Tuple21(); case 22: return new Tuple22(); case 23: return new Tuple23(); case 24: return new Tuple24(); case 25: return new Tuple25(); default: throw new IllegalArgumentException("The tuple arity must be in [0, " + MAX_ARITY + "]."); } }
GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator.
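A small sketch combining this factory with setField (positions and values are arbitrary):

import org.apache.flink.api.java.tuple.Tuple;

Tuple t = Tuple.newInstance(2);   // a Tuple2 with null fields
t.setField("hello", 0);
t.setField(42, 1);
System.out.println(t.getArity()); // prints 2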
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();

    // may be null if this serializer was deserialized from an older version
    if (this.values == null) {
        this.values = enumClass.getEnumConstants();

        this.valueToOrdinal = new EnumMap<>(this.enumClass);
        int i = 0;
        for (T value : values) {
            this.valueToOrdinal.put(value, i++);
        }
    }
}
--------------------------------------------------------------------------------------------
public final void streamBufferWithoutGroups(Iterator<IN> iterator, Collector<OUT> c) { SingleElementPushBackIterator<IN> i = new SingleElementPushBackIterator<>(iterator); try { int size; if (i.hasNext()) { while (true) { int sig = in.readInt(); switch (sig) { case SIGNAL_BUFFER_REQUEST: if (i.hasNext()) { size = sender.sendBuffer(i); sendWriteNotification(size, i.hasNext()); } else { throw new RuntimeException("External process requested data even though none is available."); } break; case SIGNAL_FINISHED: return; case SIGNAL_ERROR: try { outPrinter.join(); } catch (InterruptedException e) { outPrinter.interrupt(); } try { errorPrinter.join(); } catch (InterruptedException e) { errorPrinter.interrupt(); } throw new RuntimeException( "External process for task " + function.getRuntimeContext().getTaskName() + " terminated prematurely due to an error." + msg); default: receiver.collectBuffer(c, sig); sendReadConfirmation(); break; } } } } catch (SocketTimeoutException ignored) { throw new RuntimeException("External process for task " + function.getRuntimeContext().getTaskName() + " stopped responding." + msg.get()); } catch (Exception e) { throw new RuntimeException("Critical failure for task " + function.getRuntimeContext().getTaskName() + ". " + msg.get(), e); } }
Sends all values contained in the iterator to the external process and collects all results. @param iterator input stream @param c collector
public TaskMetricGroup addTaskForJob(
        final JobID jobId,
        final String jobName,
        final JobVertexID jobVertexId,
        final ExecutionAttemptID executionAttemptId,
        final String taskName,
        final int subtaskIndex,
        final int attemptNumber) {
    Preconditions.checkNotNull(jobId);

    String resolvedJobName = jobName == null || jobName.isEmpty()
        ? jobId.toString()
        : jobName;

    // we cannot strictly lock both our map modification and the job group modification
    // because it might lead to a deadlock
    while (true) {
        // get or create a jobs metric group
        TaskManagerJobMetricGroup currentJobGroup;
        synchronized (this) {
            currentJobGroup = jobs.get(jobId);

            if (currentJobGroup == null || currentJobGroup.isClosed()) {
                currentJobGroup = new TaskManagerJobMetricGroup(registry, this, jobId, resolvedJobName);
                jobs.put(jobId, currentJobGroup);
            }
        }

        // try to add another task. this may fail if we found a pre-existing job metrics
        // group and it is closed concurrently
        TaskMetricGroup taskGroup = currentJobGroup.addTask(
            jobVertexId,
            executionAttemptId,
            taskName,
            subtaskIndex,
            attemptNumber);

        if (taskGroup != null) {
            // successfully added the next task
            return taskGroup;
        }

        // else fall through the loop
    }
}
------------------------------------------------------------------------
@Override protected void putVariables(Map<String, String> variables) { variables.put(ScopeFormat.SCOPE_HOST, hostname); variables.put(ScopeFormat.SCOPE_TASKMANAGER_ID, taskManagerId); }
------------------------------------------------------------------------
@VisibleForTesting protected <K, V> KafkaProducer<K, V> getKafkaProducer(Properties props) { return new KafkaProducer<>(props); }
Used for testing only.
@Override
public void open(Configuration configuration) {
    producer = getKafkaProducer(this.producerConfig);

    RuntimeContext ctx = getRuntimeContext();

    if (null != flinkKafkaPartitioner) {
        if (flinkKafkaPartitioner instanceof FlinkKafkaDelegatePartitioner) {
            ((FlinkKafkaDelegatePartitioner) flinkKafkaPartitioner).setPartitions(
                getPartitionsByTopic(this.defaultTopicId, this.producer));
        }
        flinkKafkaPartitioner.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
    }

    LOG.info("Starting FlinkKafkaProducer ({}/{}) to produce into default topic {}",
        ctx.getIndexOfThisSubtask() + 1, ctx.getNumberOfParallelSubtasks(), defaultTopicId);

    // register Kafka metrics to Flink accumulators
    if (!Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = this.producer.metrics();

        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            final MetricGroup kafkaMetricGroup = getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
            for (Map.Entry<MetricName, ? extends Metric> metric: metrics.entrySet()) {
                kafkaMetricGroup.gauge(metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
            }
        }
    }

    if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        flushOnCheckpoint = false;
    }

    if (logFailuresOnly) {
        callback = new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e != null) {
                    LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                }
                acknowledgeMessage();
            }
        };
    }
    else {
        callback = new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && asyncException == null) {
                    asyncException = exception;
                }
                acknowledgeMessage();
            }
        };
    }
}
Initializes the connection to Kafka.