name (string, length 12 to 178) | code_snippet (string, length 8 to 36.5k) | score (float64, 3.26 to 3.68)
---|---|---|
flink_BlobServerConnection_writeErrorToStream_rdh | /**
* Writes to the output stream the error return code, and the given exception in serialized
* form.
*
* @param out
 * The output stream to write to.
* @param t
* The exception to send.
* @throws IOException
* Thrown, if the output stream could not be written to.
*/
private static void writeErrorToStream(OutputStream out, Throwable t) throws IOException {
byte[] bytes = InstantiationUtil.serializeObject(t);
out.write(RETURN_ERROR);
writeLength(bytes.length, out);
out.write(bytes);
} | 3.26 |
flink_KubernetesEntrypointUtils_loadConfiguration_rdh | /**
 * For a non-HA cluster, {@link JobManagerOptions#ADDRESS} has to be set to the Kubernetes service name
 * on the client side. See {@link KubernetesClusterDescriptor#deployClusterInternal}. So the
 * TaskManager will use the service address to contact the JobManager. For an HA cluster, {@link JobManagerOptions#ADDRESS} will be set to the pod IP address. The TaskManager uses ZooKeeper
 * or another high-availability service to find the address of the JobManager.
*
* @return Updated configuration
*/
static Configuration loadConfiguration(Configuration dynamicParameters) {
final String configDir = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR);
Preconditions.checkNotNull(configDir, "Flink configuration directory (%s) in environment should not be null!", ConfigConstants.ENV_FLINK_CONF_DIR);
final Configuration configuration = GlobalConfiguration.loadConfiguration(configDir, dynamicParameters);
if (KubernetesUtils.isHostNetwork(configuration)) {
configuration.setString(RestOptions.BIND_PORT, "0");
configuration.setInteger(JobManagerOptions.PORT, 0);
configuration.setString(BlobServerOptions.PORT, "0");
configuration.setString(HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE, "0");
configuration.setString(TaskManagerOptions.RPC_PORT, "0");
}
if (HighAvailabilityMode.isHighAvailabilityModeActivated(configuration)) {
final String ipAddress = System.getenv().get(Constants.ENV_FLINK_POD_IP_ADDRESS);
Preconditions.checkState(ipAddress != null, "JobManager ip address environment variable %s not set", Constants.ENV_FLINK_POD_IP_ADDRESS);
configuration.setString(JobManagerOptions.ADDRESS, ipAddress);
configuration.setString(RestOptions.ADDRESS, ipAddress);
}
return configuration;
} | 3.26 |
flink_StateBootstrapTransformation_getMaxParallelism_rdh | /**
*
* @return The max parallelism for this operator.
*/
int getMaxParallelism(int globalMaxParallelism) {
return operatorMaxParallelism.orElse(globalMaxParallelism);
} | 3.26 |
flink_StateBootstrapTransformation_writeOperatorState_rdh | /**
*
* @param operatorID
* The operator id for the stream operator.
* @param stateBackend
* The state backend for the job.
* @param config
* Additional configurations applied to the bootstrap stream tasks.
* @param globalMaxParallelism
* Global max parallelism set for the savepoint.
* @param savepointPath
* The path where the savepoint will be written.
* @return The operator subtask states for this bootstrap transformation.
*/
DataStream<OperatorState> writeOperatorState(OperatorID operatorID, StateBackend stateBackend, Configuration config, int globalMaxParallelism, Path savepointPath) {
int localMaxParallelism = getMaxParallelism(globalMaxParallelism);
return writeOperatorSubtaskStates(operatorID, stateBackend, config, savepointPath, localMaxParallelism).transform("reduce(OperatorSubtaskState)", TypeInformation.of(OperatorState.class), new GroupReduceOperator<>(new OperatorSubtaskStateReducer(operatorID, localMaxParallelism))).forceNonParallel();
} | 3.26 |
flink_RocksDBMemoryControllerUtils_calculateRocksDBMutableLimit_rdh | /**
* Calculate {@code mutable_limit_} as RocksDB calculates it in <a
* href="https://github.com/dataArtisans/frocksdb/blob/FRocksDB-5.17.2/memtable/write_buffer_manager.cc#L54">
* here</a>.
*
* @param bufferSize
* write buffer size
* @return mutableLimit
*/
static long calculateRocksDBMutableLimit(long bufferSize) {
return (bufferSize * 7) / 8;
} | 3.26 |
flink_RocksDBMemoryControllerUtils_validateArenaBlockSize_rdh | /**
* RocksDB starts flushing the active memtable constantly in the case when the arena block size
* is greater than mutable limit (as calculated in {@link #calculateRocksDBMutableLimit(long)}).
*
* <p>This happens because in such a case the check <a
* href="https://github.com/dataArtisans/frocksdb/blob/958f191d3f7276ae59b270f9db8390034d549ee0/include/rocksdb/write_buffer_manager.h#L47">
* here</a> is always true.
*
* <p>This method checks that arena block size is smaller than mutable limit.
*
* @param arenaBlockSize
* Arena block size
* @param mutableLimit
* mutable limit
* @return whether arena block size is sensible
*/
@VisibleForTesting
static boolean validateArenaBlockSize(long arenaBlockSize, long mutableLimit) {
return arenaBlockSize <= mutableLimit;
} | 3.26 |
flink_RocksDBMemoryControllerUtils_m0_rdh | /**
* Allocate memory controllable RocksDB shared resources.
*
* @param totalMemorySize
* The total memory limit size.
* @param writeBufferRatio
* The ratio of total memory which is occupied by write buffer manager.
* @param highPriorityPoolRatio
* The high priority pool ratio of cache.
* @param factory
 * creates the Write Buffer Manager and the Block Cache
* @return memory controllable RocksDB shared resources.
*/
public static RocksDBSharedResources m0(long totalMemorySize, double writeBufferRatio, double highPriorityPoolRatio, boolean usingPartitionedIndexFilters, RocksDBMemoryFactory factory) {
long calculatedCacheCapacity = RocksDBMemoryControllerUtils.calculateActualCacheCapacity(totalMemorySize, writeBufferRatio);
final Cache cache = factory.createCache(calculatedCacheCapacity, highPriorityPoolRatio);
long writeBufferManagerCapacity = RocksDBMemoryControllerUtils.calculateWriteBufferManagerCapacity(totalMemorySize, writeBufferRatio);
final WriteBufferManager wbm = factory.createWriteBufferManager(writeBufferManagerCapacity, cache);
LOG.debug("Allocated RocksDB shared resources, calculatedCacheCapacity: {}, highPriorityPoolRatio: {}, writeBufferManagerCapacity: {}, usingPartitionedIndexFilters: {}", calculatedCacheCapacity, highPriorityPoolRatio, writeBufferManagerCapacity, usingPartitionedIndexFilters);
return new RocksDBSharedResources(cache, wbm, writeBufferManagerCapacity, usingPartitionedIndexFilters);
} | 3.26 |
flink_RocksDBMemoryControllerUtils_calculateWriteBufferManagerCapacity_rdh | /**
 * Calculate the actual memory capacity of the write buffer manager, which would be shared among
 * RocksDB instance(s). The formula used here is derived in the doc of {@link #calculateActualCacheCapacity(long, double)}.
*
* @param totalMemorySize
* Total off-heap memory size reserved for RocksDB instance(s).
* @param writeBufferRatio
* The ratio of total memory size which would be reserved for write
* buffer manager and its over-capacity part.
* @return The actual calculated write buffer manager capacity.
*/
@VisibleForTesting
static long calculateWriteBufferManagerCapacity(long totalMemorySize, double writeBufferRatio) {
return ((long) (((2 * totalMemorySize) * writeBufferRatio) / 3));
} | 3.26 |
flink_RocksDBMemoryControllerUtils_calculateActualCacheCapacity_rdh | /**
 * Calculate the actual memory capacity of the cache, which would be shared among RocksDB
 * instance(s). We introduce this method because: a) We cannot create a cache with a strict
 * capacity limit until FLINK-15532 is resolved. b) Setting aside the memory usage of blocks
 * pinned by RocksDB iterators, which is difficult to calculate and only happens when we
 * iterate over entries in RocksDBMapState, the memory overuse mainly comes from at most half
 * of the write buffer usage (see <a
 * href="https://github.com/dataArtisans/frocksdb/blob/958f191d3f7276ae59b270f9db8390034d549ee0/include/rocksdb/write_buffer_manager.h#L51">the
 * flush implementation of the write buffer manager</a>). Thus, we have the four equations below:
 *
 * <ul>
 *   <li>write_buffer_manager_memory = 1.5 * write_buffer_manager_capacity
 *   <li>write_buffer_manager_memory = total_memory_size * write_buffer_ratio
 *   <li>write_buffer_manager_memory + other_part = total_memory_size
 *   <li>write_buffer_manager_capacity + other_part = cache_capacity
 * </ul>
 *
 * <p>from which we deduce:
 *
 * <ul>
 *   <li>cache_capacity = (3 - write_buffer_ratio) * total_memory_size / 3
 *   <li>write_buffer_manager_capacity = 2 * total_memory_size * write_buffer_ratio / 3
 * </ul>
*
* @param totalMemorySize
* Total off-heap memory size reserved for RocksDB instance(s).
* @param writeBufferRatio
* The ratio of total memory size which would be reserved for write
* buffer manager and its over-capacity part.
* @return The actual calculated cache capacity.
*/
@VisibleForTesting
public static long calculateActualCacheCapacity(long totalMemorySize, double writeBufferRatio) {
return ((long) (((3 - writeBufferRatio) * totalMemorySize) / 3));
} | 3.26 |
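A quick way to sanity-check the two formulas derived above is to plug in sample numbers. The sketch below is illustrative only: the 300 MiB budget and 0.5 ratio are made-up values, and the helper methods simply restate the formulas shown in this table rather than calling the Flink class.

// Illustrative only: verifies that the derived formulas are consistent for sample inputs.
public class RocksDBCapacityFormulaCheck {
    static long cacheCapacity(long totalMemorySize, double writeBufferRatio) {
        return (long) (((3 - writeBufferRatio) * totalMemorySize) / 3);
    }

    static long writeBufferManagerCapacity(long totalMemorySize, double writeBufferRatio) {
        return (long) (((2 * totalMemorySize) * writeBufferRatio) / 3);
    }

    public static void main(String[] args) {
        long total = 300L << 20;        // assume 300 MiB reserved for RocksDB
        double ratio = 0.5;             // assume write_buffer_ratio = 0.5
        long cache = cacheCapacity(total, ratio);                 // 250 MiB
        long wbm = writeBufferManagerCapacity(total, ratio);      // 100 MiB
        long wbmMemory = (long) (1.5 * wbm);                      // 150 MiB = total * ratio
        long otherPart = total - wbmMemory;                       // 150 MiB
        // write_buffer_manager_capacity + other_part == cache_capacity
        System.out.println((wbm + otherPart) == cache);           // prints: true
    }
}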
flink_RocksDBMemoryControllerUtils_calculateRocksDBDefaultArenaBlockSize_rdh | /**
* Calculate the default arena block size as RocksDB calculates it in <a
* href="https://github.com/dataArtisans/frocksdb/blob/49bc897d5d768026f1eb816d960c1f2383396ef4/db/column_family.cc#L196-L201">
* here</a>.
*
* @return the default arena block size
* @param writeBufferSize
* the write buffer size (bytes)
*/
static long calculateRocksDBDefaultArenaBlockSize(long writeBufferSize) {
long arenaBlockSize = writeBufferSize / 8;
// Align up to 4k
final long align = 4 * 1024;
return (((arenaBlockSize + align) - 1) / align) * align;
} | 3.26 |
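The three arena/memtable helpers above fit together as follows. This is a standalone sketch with made-up sizes (a 64 MiB memtable and a 100 MiB write buffer manager capacity) that restates the formulas shown in the rows above rather than calling the Flink class.

// Illustrative only: default arena block size vs. the 7/8 mutable limit check.
public class ArenaBlockSizeCheck {
    public static void main(String[] args) {
        long writeBufferSize = 64L << 20;                 // assume a 64 MiB memtable
        long wbmCapacity = 100L << 20;                    // assume 100 MiB write buffer manager capacity

        // calculateRocksDBDefaultArenaBlockSize: writeBufferSize / 8, aligned up to 4 KiB
        long arenaBlockSize = writeBufferSize / 8;
        long align = 4 * 1024;
        arenaBlockSize = ((arenaBlockSize + align - 1) / align) * align;   // 8 MiB

        // calculateRocksDBMutableLimit: 7/8 of the write buffer manager capacity
        long mutableLimit = (wbmCapacity * 7) / 8;                         // 87.5 MiB

        // validateArenaBlockSize: the arena block size must not exceed the mutable limit,
        // otherwise RocksDB would flush the active memtable constantly.
        System.out.println(arenaBlockSize <= mutableLimit);               // prints: true
    }
}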
flink_SortedMapTypeInfo_getTypeClass_rdh | // ------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public Class<SortedMap<K, V>> getTypeClass() {
return ((Class<SortedMap<K, V>>) (Class<?>) (SortedMap.class));
} | 3.26 |
flink_MetadataSerializers_getSerializer_rdh | /**
* Returns the {@link MetadataSerializer} for the given savepoint version.
*
* @param version
* Savepoint version to get serializer for
* @return Savepoint for the given version
* @throws IllegalArgumentException
* If unknown savepoint version
*/
public static MetadataSerializer getSerializer(int version) {
MetadataSerializer serializer = SERIALIZERS.get(version);
if (serializer != null) {
return serializer;
} else {
throw new IllegalArgumentException("Unrecognized checkpoint version number: " + version);
}
} | 3.26 |
flink_StateTable_remove_rdh | /**
* Removes the mapping for the composite of active key and given namespace. This method should
* be preferred over {@link #removeAndGetOld(N)} when the caller is not interested in the old
* state.
*
* @param namespace
* the namespace of the mapping to remove. Not null.
*/
public void remove(N namespace) {
remove(keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace);
}
/**
* Removes the mapping for the composite of active key and given namespace, returning the state
* that was found under the entry.
*
* @param namespace
* the namespace of the mapping to remove. Not null.
* @return the state of the removed mapping or {@code null} | 3.26 |
flink_StateTable_put_rdh | // Snapshot / Restore -------------------------------------------------------------------------
public void put(K key, int keyGroup, N namespace, S state) {
checkKeyNamespacePreconditions(key, namespace);
getMapForKeyGroup(keyGroup).put(key, namespace, state);
} | 3.26 |
flink_StateTable_getState_rdh | // ------------------------------------------------------------------------
// access to maps
// ------------------------------------------------------------------------
/**
* Returns the internal data structure.
 */
@VisibleForTesting
public StateMap<K, N, S>[] getState() {
return keyGroupedStateMaps;
} | 3.26 |
flink_StateTable_size_rdh | /**
* Returns the total number of entries in this {@link StateTable}. This is the sum of both
* sub-tables.
*
* @return the number of entries in this {@link StateTable}.
*/
public int size() {
int count = 0;
for (StateMap<K, N, S> stateMap : keyGroupedStateMaps) {
count += stateMap.size();
}
return count;
} | 3.26 |
flink_StateTable_m0_rdh | /**
* Applies the given {@link StateTransformationFunction} to the state (1st input argument),
* using the given value as second input argument. The result of {@link StateTransformationFunction#apply(Object, Object)} is then stored as the new state. This
* function is basically an optimization for get-update-put pattern.
*
* @param namespace
* the namespace. Not null.
* @param value
* the value to use in transforming the state. Can be null.
* @param transformation
* the transformation function.
* @throws Exception
* if some exception happens in the transformation function.
 */
public <T> void m0(N namespace, T value, StateTransformationFunction<S, T> transformation) throws Exception {
K key = keyContext.getCurrentKey();
checkKeyNamespacePreconditions(key, namespace);
int keyGroup = keyContext.getCurrentKeyGroupIndex();
getMapForKeyGroup(keyGroup).transform(key, namespace, value, transformation);
} | 3.26 |
flink_StateTable_isEmpty_rdh | // Main interface methods of StateTable -------------------------------------------------------
/**
* Returns whether this {@link StateTable} is empty.
*
* @return {@code true} if this {@link StateTable} has no elements, {@code false} otherwise.
* @see #size()
*/
public boolean isEmpty() {
return size() == 0;
} | 3.26 |
flink_StateTable_containsKey_rdh | /**
* Returns whether this table contains a mapping for the composite of active key and given
* namespace.
*
* @param namespace
* the namespace in the composite key to search for. Not null.
* @return {@code true} if this map contains the specified key/namespace composite key, {@code false} otherwise.
*/
public boolean containsKey(N namespace) {
return containsKey(keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace);} | 3.26 |
flink_StateTable_getKeySerializer_rdh | // Meta data setter / getter and toString -----------------------------------------------------
public TypeSerializer<K> getKeySerializer() {
return keySerializer;} | 3.26 |
flink_StateTable_get_rdh | // ------------------------------------------------------------------------
private S get(K key, int keyGroupIndex, N namespace) {
checkKeyNamespacePreconditions(key, namespace);
return getMapForKeyGroup(keyGroupIndex).get(key, namespace);
} | 3.26 |
flink_StateTable_indexToOffset_rdh | /**
* Translates a key-group id to the internal array offset.
*/
private int indexToOffset(int index) {
return index - getKeyGroupOffset();
} | 3.26 |
flink_StateTable_sizeOfNamespace_rdh | // For testing --------------------------------------------------------------------------------
@VisibleForTesting
public int sizeOfNamespace(Object namespace) {
int count = 0;
for (StateMap<K, N, S> stateMap : keyGroupedStateMaps) {
count += stateMap.sizeOfNamespace(namespace);
}
return count;
} | 3.26 |
flink_SqlTimestampParser_parseField_rdh | /**
* Static utility to parse a field of type Timestamp from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException
 * Thrown when the value cannot be parsed because the text
 * does not represent a correct number.
*/
public static final Timestamp parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if ((limitedLen > 0) && (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[(startPos + limitedLen) - 1]))) {
throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Timestamp.valueOf(str);
} | 3.26 |
flink_DefaultScheduler_getNumberOfRestarts_rdh | // ------------------------------------------------------------------------
// SchedulerNG
// ------------------------------------------------------------------------
@Override
protected long getNumberOfRestarts() {
return f0.getNumberOfRestarts();
} | 3.26 |
flink_DefaultScheduler_allocateSlotsAndDeploy_rdh | // ------------------------------------------------------------------------
// SchedulerOperations
// ------------------------------------------------------------------------
@Override
public void allocateSlotsAndDeploy(final List<ExecutionVertexID> verticesToDeploy) {
final Map<ExecutionVertexID, ExecutionVertexVersion> requiredVersionByVertex = executionVertexVersioner.recordVertexModifications(verticesToDeploy);
final List<Execution> executionsToDeploy = verticesToDeploy.stream().map(this::getCurrentExecutionOfVertex).collect(Collectors.toList());
executionDeployer.allocateSlotsAndDeploy(executionsToDeploy, requiredVersionByVertex);
} | 3.26 |
flink_OrcNoHiveColumnarRowInputFormat_createPartitionedFormat_rdh | /**
 * Create a partitioned {@link OrcColumnarRowInputFormat}, where the partition columns can be
 * generated from the split.
*/
public static <SplitT extends FileSourceSplit> OrcColumnarRowInputFormat<?, SplitT> createPartitionedFormat(Configuration hadoopConfig, RowType tableType, List<String> partitionKeys, PartitionFieldExtractor<SplitT> extractor, int[] selectedFields, List<OrcFilters.Predicate> conjunctPredicates, int batchSize, Function<RowType, TypeInformation<RowData>> rowTypeInfoFactory) {
// TODO FLINK-25113 all this partition keys code should be pruned from the orc format,
// because now FileSystemTableSource uses FileInfoExtractorBulkFormat for reading partition
// keys.
String[] tableFieldNames = tableType.getFieldNames().toArray(new String[0]);
LogicalType[] tableFieldTypes = tableType.getChildren().toArray(new LogicalType[0]);
List<String> orcFieldNames = getNonPartNames(tableFieldNames, partitionKeys);
int[] orcSelectedFields = getSelectedOrcFields(tableFieldNames, selectedFields, orcFieldNames);
ColumnBatchFactory<VectorizedRowBatch, SplitT> batchGenerator = (SplitT split,VectorizedRowBatch rowBatch) -> {
// create and initialize the row batch
ColumnVector[] vectors = new ColumnVector[selectedFields.length];
for (int i = 0; i < vectors.length; i++) {
String name = tableFieldNames[selectedFields[i]];
LogicalType v8 = tableFieldTypes[selectedFields[i]];
vectors[i] = (partitionKeys.contains(name)) ? createFlinkVectorFromConstant(v8, extractor.extract(split, name, v8), batchSize) : createFlinkVector(rowBatch.cols[orcFieldNames.indexOf(name)]);
}
return new VectorizedColumnBatch(vectors);
};
return new OrcColumnarRowInputFormat<>(new OrcNoHiveShim(), hadoopConfig, convertToOrcTypeWithPart(tableFieldNames, tableFieldTypes, partitionKeys), orcSelectedFields, conjunctPredicates, batchSize, batchGenerator, rowTypeInfoFactory.apply(new RowType(Arrays.stream(selectedFields).mapToObj(i -> tableType.getFields().get(i)).collect(Collectors.toList()))));
} | 3.26 |
flink_LocalityAwareSplitAssigner_getNext_rdh | // --------------------------------------------------------------------------------------------
@Override
public Optional<FileSourceSplit> getNext(@Nullable String host) {
// for a null host, we always return a remote split
if (StringUtils.isNullOrWhitespaceOnly(host)) {
final Optional<FileSourceSplit> split = getRemoteSplit();
if (split.isPresent()) {
LOG.info("Assigning split to non-localized request: {}", split);
}
return split;
}
host = normalizeHostName(host);
// for any non-null host, we take the list of non-null splits
final LocatableSplitChooser v2 = localPerHost.computeIfAbsent(host, theHost -> buildChooserForHost(theHost, unassigned));
final SplitWithInfo localSplit = v2.getNextUnassignedMinLocalCountSplit(unassigned);
if (localSplit != null) {
checkState(unassigned.remove(localSplit), "Selected split has already been assigned. This should not happen!");
LOG.info("Assigning local split to requesting host '{}': {}", host, localSplit.getSplit());
localAssignments.inc();
return Optional.of(localSplit.getSplit());
}
// we did not find a local split, return a remote split
final Optional<FileSourceSplit> remoteSplit = getRemoteSplit();
if (remoteSplit.isPresent()) {
LOG.info("Assigning remote split to requesting host '{}': {}", host, remoteSplit);
}
return remoteSplit;
} | 3.26 |
flink_LocalityAwareSplitAssigner_getNextUnassignedMinLocalCountSplit_rdh | /**
* Retrieves a LocatableInputSplit with minimum local count. InputSplits which have already
* been assigned (i.e., which are not contained in the provided set) are filtered out. The
* returned input split is NOT removed from the provided set.
*
* @param unassignedSplits
* Set of unassigned input splits.
* @return An input split with minimum local count or null if all splits have been assigned.
*/
@Nullable
SplitWithInfo getNextUnassignedMinLocalCountSplit(Set<SplitWithInfo> unassignedSplits) {
if (splits.size() == 0) {
return null;
}
do {
elementCycleCount--;
// take first split of the list
SplitWithInfo v18 = splits.pollFirst();
if (unassignedSplits.contains(v18)) {
int localCount = v18.getLocalCount();
// still unassigned, check local count
if (localCount > minLocalCount) {
// re-insert at end of the list and continue to look for split with smaller
// local count
splits.offerLast(v18);
// check and update second smallest local count
if ((nextMinLocalCount == (-1)) || (v18.getLocalCount() < nextMinLocalCount)) {
nextMinLocalCount = v18.getLocalCount();
}
v18 = null;
}
} else {
// split was already assigned
v18 = null;
}
if (elementCycleCount == 0) {
// one full cycle, but no split with min local count found
// update minLocalCnt and element cycle count for next pass over the splits
minLocalCount = nextMinLocalCount;
nextMinLocalCount = -1;
elementCycleCount = splits.size();
}
if (v18 != null) {
// found a split to assign
return v18;
}
} while (elementCycleCount > 0);
// no split left
return null;
} | 3.26 |
flink_LocalityAwareSplitAssigner_addInputSplit_rdh | /**
* Adds a single input split.
*/
void addInputSplit(SplitWithInfo split) {
int localCount = split.getLocalCount();
if (minLocalCount == (-1)) {
// first split to add
this.minLocalCount = localCount;
this.elementCycleCount = 1;
this.splits.offerFirst(split);
} else if (localCount < minLocalCount) {
// split with new min local count
this.nextMinLocalCount = this.minLocalCount;
this.minLocalCount = localCount;
// all other splits have more local host than this one
this.elementCycleCount = 1;
splits.offerFirst(split);
} else if (localCount == minLocalCount) {
this.elementCycleCount++;
this.splits.offerFirst(split);
} else {
if (localCount < nextMinLocalCount) {
nextMinLocalCount = localCount;
}
splits.offerLast(split);
}
} | 3.26 |
flink_ProcessFunction_onTimer_rdh | /**
* Called when a timer set using {@link TimerService} fires.
*
* @param timestamp
* The timestamp of the firing timer.
* @param ctx
* An {@link OnTimerContext} that allows querying the timestamp of the firing timer,
* querying the {@link TimeDomain} of the firing timer and getting a {@link TimerService}
* for registering timers and querying the time. The context is only valid during the
* invocation of this method, do not store it.
* @param out
* The collector for returning result values.
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
public void onTimer(long timestamp, OnTimerContext ctx, Collector<O> out) throws Exception {
} | 3.26 |
flink_ChainedReduceCombineDriver_m0_rdh | // ------------------------------------------------------------------------
@Override
public Function m0() {
return reducer;
} | 3.26 |
flink_NettyShuffleMetricFactory_registerLegacyNetworkMetrics_rdh | /**
* Registers legacy network metric groups before shuffle service refactoring.
*
* <p>Registers legacy metric groups if shuffle service implementation is original default one.
*
* @deprecated should be removed in future
*/
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public static void registerLegacyNetworkMetrics(boolean isDetailedMetrics, MetricGroup metricGroup, ResultPartitionWriter[] producedPartitions, InputGate[] inputGates) {
checkNotNull(metricGroup);
checkNotNull(producedPartitions);
checkNotNull(inputGates);
// add metrics for buffers
final MetricGroup buffersGroup = metricGroup.addGroup(METRIC_GROUP_BUFFERS_DEPRECATED);
// similar to MetricUtils.instantiateNetworkMetrics() but inside this IOMetricGroup
// (metricGroup)
final MetricGroup networkGroup = metricGroup.addGroup(METRIC_GROUP_NETWORK_DEPRECATED);
final MetricGroup outputGroup = networkGroup.addGroup(METRIC_GROUP_OUTPUT);
final MetricGroup inputGroup = networkGroup.addGroup(METRIC_GROUP_INPUT);
ResultPartition[] resultPartitions = Arrays.copyOf(producedPartitions,
producedPartitions.length, ResultPartition[].class);
registerOutputMetrics(isDetailedMetrics, outputGroup, buffersGroup, resultPartitions);
SingleInputGate[] singleInputGates = Arrays.copyOf(inputGates, inputGates.length, SingleInputGate[].class);
registerInputMetrics(isDetailedMetrics, inputGroup, buffersGroup, singleInputGates);
} | 3.26 |
flink_YearMonthIntervalType_getResolutionFormat_rdh | // --------------------------------------------------------------------------------------------
private String getResolutionFormat() {
switch (resolution) {
case YEAR :
return YEAR_FORMAT;
case YEAR_TO_MONTH :
return YEAR_TO_MONTH_FORMAT;
case MONTH :
return MONTH_FORMAT;
default :
throw new UnsupportedOperationException();
}
} | 3.26 |
flink_RexNodeJsonDeserializer_deserializeSqlOperator_rdh | // --------------------------------------------------------------------------------------------
/**
* Logic shared with {@link AggregateCallJsonDeserializer}.
*/
static SqlOperator deserializeSqlOperator(JsonNode jsonNode, SerdeContext serdeContext) {
final SqlSyntax syntax;
if (jsonNode.has(FIELD_NAME_SYNTAX)) {
syntax = serializableToCalcite(SqlSyntax.class, jsonNode.required(FIELD_NAME_SYNTAX).asText());
} else {
syntax = SqlSyntax.FUNCTION;
}
if (jsonNode.has(FIELD_NAME_INTERNAL_NAME)) {
return deserializeInternalFunction(jsonNode.required(FIELD_NAME_INTERNAL_NAME).asText(), syntax, serdeContext);
} else if (jsonNode.has(FIELD_NAME_CATALOG_NAME)) { return deserializeCatalogFunction(jsonNode, syntax, serdeContext);
} else if (jsonNode.has(FIELD_NAME_CLASS)) {
return deserializeFunctionClass(jsonNode, serdeContext);
} else if (jsonNode.has(FIELD_NAME_SYSTEM_NAME)) {
return deserializeSystemFunction(jsonNode.required(FIELD_NAME_SYSTEM_NAME).asText(), syntax, serdeContext);
} else if (jsonNode.has(FIELD_NAME_SQL_KIND)) {
return deserializeInternalFunction(syntax, SqlKind.valueOf(jsonNode.get(FIELD_NAME_SQL_KIND).asText()));
} else {
throw new TableException("Invalid function call.");
}
} | 3.26 |
flink_IterationSynchronizationSinkTask_invoke_rdh | // --------------------------------------------------------------------------------------------
@Override
public void invoke() throws Exception {
this.headEventReader = new MutableRecordReader<>(getEnvironment().getInputGate(0), getEnvironment().getTaskManagerInfo().getTmpDirectories());
TaskConfig taskConfig = new TaskConfig(getTaskConfiguration());
// store all aggregators
this.aggregators = new HashMap<>();
for (AggregatorWithName<?> aggWithName : taskConfig.getIterationAggregators(getUserCodeClassLoader())) {
aggregators.put(aggWithName.getName(), aggWithName.getAggregator());
}
// store the aggregator convergence criterion
if (taskConfig.usesConvergenceCriterion()) {
convergenceCriterion = taskConfig.getConvergenceCriterion(getUserCodeClassLoader());
convergenceAggregatorName = taskConfig.getConvergenceCriterionAggregatorName();
Preconditions.checkNotNull(convergenceAggregatorName);
}
// store the default aggregator convergence criterion
if (taskConfig.usesImplicitConvergenceCriterion()) {
implicitConvergenceCriterion = taskConfig.getImplicitConvergenceCriterion(getUserCodeClassLoader());
implicitConvergenceAggregatorName = taskConfig.getImplicitConvergenceCriterionAggregatorName();
Preconditions.checkNotNull(implicitConvergenceAggregatorName);}
maxNumberOfIterations = taskConfig.getNumberOfIterations();
// set up the event handler
int numEventsTillEndOfSuperstep = taskConfig.getNumberOfEventsUntilInterruptInIterativeGate(0);
eventHandler = new SyncEventHandler(numEventsTillEndOfSuperstep, aggregators, getEnvironment().getUserCodeClassLoader().asClassLoader());
headEventReader.registerTaskEventListener(eventHandler, WorkerDoneEvent.class);
IntValue dummy = new IntValue();
while (!terminationRequested()) {
if (log.isInfoEnabled()) {
log.info(formatLogString(("starting iteration [" + currentIteration) + "]"));}
// this call listens for events until the end-of-superstep is reached
readHeadEventChannel(dummy);
if (log.isInfoEnabled()) {
log.info(formatLogString(("finishing iteration [" + currentIteration) + "]"));
}
if (checkForConvergence()) {
if (log.isInfoEnabled()) {
log.info(formatLogString(("signaling that all workers are to terminate in iteration [" + currentIteration) + "]"));
}
requestTermination();
sendToAllWorkers(new TerminationEvent());
} else {
if (log.isInfoEnabled()) {
log.info(formatLogString(("signaling that all workers are done in iteration [" + currentIteration) + "]"));
}
AllWorkersDoneEvent allWorkersDoneEvent = new AllWorkersDoneEvent(aggregators);
sendToAllWorkers(allWorkersDoneEvent);
// reset all aggregators
for (Aggregator<?> agg : aggregators.values()) {
agg.reset();}
currentIteration++;
}
}
} | 3.26 |
flink_IterationSynchronizationSinkTask_terminationRequested_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean terminationRequested() {
return terminated.get();
} | 3.26 |
flink_Watermark_getTimestamp_rdh | /**
* Returns the timestamp associated with this {@link Watermark} in milliseconds.
*/
public long getTimestamp() {
return timestamp;
} | 3.26 |
flink_Watermark_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
return (this == o) || (((o != null) && (o.getClass() == Watermark.class)) && (((Watermark) (o)).timestamp == timestamp));
} | 3.26 |
flink_GeneratingIteratorSourceReader_convert_rdh | // ------------------------------------------------------------------------
@Override
protected O convert(E value) {
try {
return generatorFunction.map(value);
} catch (Exception e) {
String message = String.format("A user-provided generator function threw an exception on this input: %s",
value.toString());
throw new FlinkRuntimeException(message, e);
}
} | 3.26 |
flink_PartitionCommitPolicy_validatePolicyChain_rdh | /**
* Validate commit policy.
*/
static void validatePolicyChain(boolean isEmptyMetastore, String policyKind) {
if (policyKind != null) {
String[] policyStrings = policyKind.split(",");
for (String policy : policyStrings) {
if (isEmptyMetastore && METASTORE.equalsIgnoreCase(policy)) {
throw new ValidationException(("Can not configure a 'metastore' partition commit" + " policy for a file system table. You can only configure 'metastore'") + " partition commit policy for a hive table.");
}
}
} } | 3.26 |
flink_StringUtils_byteToHexString_rdh | /**
* Given an array of bytes it will convert the bytes to a hex string representation of the
* bytes.
*
* @param bytes
* the bytes to convert in a hex string
* @return hex string representation of the byte array
 */
public static String byteToHexString(final byte[] bytes) {
return byteToHexString(bytes, 0, bytes.length);
} | 3.26 |
flink_StringUtils_readString_rdh | /**
* Reads a non-null String from the given input.
*
* @param in
* The input to read from
* @return The deserialized String
* @throws IOException
* Thrown, if the reading or the deserialization fails.
*/
public static String readString(DataInputView in) throws IOException {
return StringValue.readString(in);
} | 3.26 |
flink_StringUtils_writeString_rdh | /**
* Writes a String to the given output. The written string can be read with {@link #readString(DataInputView)}.
*
* @param str
* The string to write
* @param out
* The output to write to
* @throws IOException
* Thrown, if the writing or the serialization fails.
*/
public static void writeString(@Nonnull String str, DataOutputView out) throws IOException {
checkNotNull(str);
StringValue.writeString(str, out);
} | 3.26 |
flink_StringUtils_readNullableString_rdh | /**
* Reads a String from the given input. The string may be null and must have been written with
* {@link #writeNullableString(String, DataOutputView)}.
*
* @param in
* The input to read from.
* @return The deserialized string, or null.
* @throws IOException
* Thrown, if the reading or the deserialization fails.
*/
@Nullable
public static String readNullableString(DataInputView in) throws IOException {
if (in.readBoolean()) {
return readString(in);
} else {
return null;
}
} | 3.26 |
flink_StringUtils_isNullOrWhitespaceOnly_rdh | /**
* Checks if the string is null, empty, or contains only whitespace characters. A whitespace
* character is defined via {@link Character#isWhitespace(char)}.
*
* @param str
* The string to check
* @return True, if the string is null or blank, false otherwise.
*/
public static boolean isNullOrWhitespaceOnly(String str) {
if ((str == null) || (str.length() == 0)) {
return true;
}
final int len = str.length();
for (int i = 0; i < len; i++) {
if (!Character.isWhitespace(str.charAt(i))) {
return false;}
}
return true;
} | 3.26 |
flink_StringUtils_toQuotedListString_rdh | /**
* Generates a string containing a comma-separated list of values in double-quotes. Uses
* lower-cased values returned from {@link Object#toString()} method for each element in the
* given array. Null values are skipped.
*
* @param values
* array of elements for the list
* @return The string with quoted list of elements
 */
public static String toQuotedListString(Object[] values) {
return Arrays.stream(values).filter(Objects::nonNull).map(v -> v.toString().toLowerCase()).collect(Collectors.joining(", ", "\"", "\""));
} | 3.26 |
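Worth noting about the joining collector above: the double-quote prefix and suffix wrap the whole joined result in a single pair of quotes rather than quoting each element individually. A small standalone illustration (the sample values are made up):

import java.util.Arrays;
import java.util.Objects;
import java.util.stream.Collectors;

public class QuotedListExample {
    public static void main(String[] args) {
        Object[] values = {"FIRST", null, "Second"};
        // Same stream pipeline as in toQuotedListString above.
        String result = Arrays.stream(values)
                .filter(Objects::nonNull)
                .map(v -> v.toString().toLowerCase())
                .collect(Collectors.joining(", ", "\"", "\""));
        System.out.println(result);   // prints: "first, second"
    }
}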
flink_StringUtils_showControlCharacters_rdh | /**
* Replaces control characters by their escape-coded version. For example, if the string
* contains a line break character ('\n'), this character will be replaced by the two characters
* backslash '\' and 'n'. As a consequence, the resulting string will not contain any more
* control characters.
*
* @param str
* The string in which to replace the control characters.
* @return The string with the replaced characters.
*/
public static String showControlCharacters(String str) {
int len = str.length();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < len; i += 1) {
char c = str.charAt(i);
switch (c) {
case '\b' :
sb.append("\\b");
break;
case '\t' :
sb.append("\\t");
break;
case '\n' :
sb.append("\\n");
break;
case '\f' :
sb.append("\\f");
break;
case '\r' :
sb.append("\\r");
break;
default :
sb.append(c);
}
}
return sb.toString();
} | 3.26 |
flink_StringUtils_writeNullableString_rdh | /**
* Writes a String to the given output. The string may be null. The written string can be read
 * with {@link #readNullableString(DataInputView)}.
*
* @param str
* The string to write, or null.
* @param out
* The output to write to.
* @throws IOException
* Thrown, if the writing or the serialization fails.
*/
public static void writeNullableString(@Nullable String str, DataOutputView out) throws IOException {
if (str != null) {
out.writeBoolean(true);
writeString(str, out);
} else {
out.writeBoolean(false);
}
} | 3.26 |
flink_StringUtils_getRandomString_rdh | /**
* Creates a random string with a length within the given interval. The string contains only
* characters that can be represented as a single code point.
*
* @param rnd
* The random used to create the strings.
* @param minLength
* The minimum string length.
* @param maxLength
* The maximum string length (inclusive).
* @param minValue
* The minimum character value to occur.
* @param maxValue
* The maximum character value to occur.
* @return A random String.
*/
public static String getRandomString(Random rnd, int minLength, int maxLength, char minValue, char maxValue) {
int len = rnd.nextInt((maxLength - minLength) + 1) + minLength;
char[] data = new char[len];
int diff = (maxValue - minValue) + 1;
for (int i = 0; i < data.length; i++) {
data[i] = ((char) (rnd.nextInt(diff) + minValue));
}
return new String(data);
} | 3.26 |
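A minimal usage sketch for the method above, assuming it lives in Flink's org.apache.flink.util.StringUtils; the bounds and character range below are arbitrary:

import java.util.Random;

import org.apache.flink.util.StringUtils;   // assumed package of the class shown above

public class RandomStringUsage {
    public static void main(String[] args) {
        Random rnd = new Random();
        // Length drawn uniformly from [8, 16]; characters drawn uniformly from ['a', 'z'].
        String s = StringUtils.getRandomString(rnd, 8, 16, 'a', 'z');
        System.out.println(s.length() + ": " + s);
    }
}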
flink_StaticFileServerHandler_checkFileValidity_rdh | /**
* Checks various conditions for file access. If all checks pass this method returns, and
* processing of the request may continue. If any check fails this method throws a {@link RestHandlerException}, and further processing of the request must be limited to sending an
* error response.
*/
public static void checkFileValidity(File file, File rootPath, Logger logger) throws IOException, RestHandlerException {
// this check must be done first to prevent probing for arbitrary files
if (!file.getCanonicalFile().toPath().startsWith(rootPath.toPath())) {
if (logger.isDebugEnabled()) {
logger.debug("Requested path {} points outside the root directory.", file.getAbsolutePath());
}
throw new RestHandlerException("Forbidden.", FORBIDDEN);
}
if ((!file.exists()) || file.isHidden()) {
if (logger.isDebugEnabled()) {
logger.debug("Requested path {} cannot be found.", file.getAbsolutePath());
}
throw new RestHandlerException("File not found.", NOT_FOUND);
}
if (file.isDirectory() || (!file.isFile())) {
if (logger.isDebugEnabled()) {
logger.debug("Requested path {} does not point to a file.", file.getAbsolutePath());
}
throw new RestHandlerException("File not found.", METHOD_NOT_ALLOWED);
}
} | 3.26 |
flink_StaticFileServerHandler_sendNotModified_rdh | // ------------------------------------------------------------------------
// Utilities to encode headers and responses
// ------------------------------------------------------------------------
/**
* Send the "304 Not Modified" response. This response can be used when the file timestamp is
* the same as what the browser is sending up.
*
* @param ctx
* The channel context to write the response to.
*/
public static void sendNotModified(ChannelHandlerContext ctx) {
FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, NOT_MODIFIED);
setDateHeader(response);
// close the connection as soon as the error message is sent.
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
} | 3.26 |
flink_StaticFileServerHandler_respondToRequest_rdh | /**
* Response when running with leading JobManager.
*/
private void respondToRequest(ChannelHandlerContext ctx, HttpRequest request, String requestPath) throws IOException, ParseException, URISyntaxException, RestHandlerException {
// convert to absolute path
final File file = new File(rootPath, requestPath);
if (!file.exists()) {
// file does not exist. Try to load it with the classloader
ClassLoader cl = StaticFileServerHandler.class.getClassLoader();
try (InputStream resourceStream = cl.getResourceAsStream("web" + requestPath)) {
boolean success = false;
try {
if (resourceStream != null) {
URL root = cl.getResource("web");
URL requested = cl.getResource("web" + requestPath);
if ((root != null) && (requested != null)) {
URI rootURI = new URI(root.getPath()).normalize();
URI v9 = new URI(requested.getPath()).normalize();
// Check that we don't load anything from outside of the
// expected scope.
if (!rootURI.relativize(v9).equals(v9)) {
logger.debug("Loading missing file from classloader: {}", requestPath);
// ensure that directory to file exists.
file.getParentFile().mkdirs();
Files.copy(resourceStream, file.toPath());
success = true;}
}
}
} catch (Throwable t) {
logger.error("error while responding", t);
} finally {
if (!success) {
logger.debug("Unable to load requested file {} from classloader", requestPath);
throw new NotFoundException(String.format("Unable to load requested file %s.", requestPath));
}
}
}
}
checkFileValidity(file, rootPath, logger);
// cache validation
final String ifModifiedSince = request.headers().get(IF_MODIFIED_SINCE);
if ((ifModifiedSince != null) && (!ifModifiedSince.isEmpty())) {
SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US);
Date ifModifiedSinceDate = dateFormatter.parse(ifModifiedSince);
// Only compare up to the second because the datetime format we send to the client
// does not have milliseconds
long ifModifiedSinceDateSeconds = ifModifiedSinceDate.getTime() / 1000;
long fileLastModifiedSeconds = file.lastModified() / 1000;
if (ifModifiedSinceDateSeconds == fileLastModifiedSeconds) {
if (logger.isDebugEnabled()) {
logger.debug(("Responding 'NOT MODIFIED' for file '" + file.getAbsolutePath()) + '\'');
}
sendNotModified(ctx);
return;
}
}
if (logger.isDebugEnabled()) {
logger.debug(("Responding with file '" + file.getAbsolutePath()) + '\'');
}
// Don't need to close this manually. Netty's DefaultFileRegion will take care of it.
final RandomAccessFile raf;
try {
raf = new RandomAccessFile(file, "r");
} catch (FileNotFoundException e) {
if (logger.isDebugEnabled()) {
logger.debug("Could not find file {}.", file.getAbsolutePath());
}
throw new NotFoundException("File not found.");
}
try {
long fileLength = raf.length();
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
setContentTypeHeader(response, file);
setDateAndCacheHeaders(response, file);
if (HttpUtil.isKeepAlive(request)) {
response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE);
}
HttpUtil.setContentLength(response, fileLength);
// write the initial line and the header.
ctx.write(response);
// write the content.
ChannelFuture lastContentFuture;
if (ctx.pipeline().get(SslHandler.class) == null) {
ctx.write(new DefaultFileRegion(raf.getChannel(), 0, fileLength), ctx.newProgressivePromise());
lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
lastContentFuture = ctx.writeAndFlush(new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)), ctx.newProgressivePromise());
// HttpChunkedInput will write the end marker (LastHttpContent) for us.
}
// close the connection, if no keep-alive is needed
if (!HttpUtil.isKeepAlive(request)) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
} catch (Exception e) {
raf.close();
logger.error("Failed to serve file.", e);
throw new RestHandlerException("Internal server error.", INTERNAL_SERVER_ERROR);
}
} | 3.26 |
flink_StaticFileServerHandler_setContentTypeHeader_rdh | /**
* Sets the content type header for the HTTP Response.
*
* @param response
* HTTP response
* @param file
* file to extract content type
*/
public static void setContentTypeHeader(HttpResponse response, File file) {
String mimeType = MimeTypes.getMimeTypeForFileName(file.getName());
String mimeFinal = (mimeType != null) ? mimeType : MimeTypes.getDefaultMimeType();
response.headers().set(CONTENT_TYPE, mimeFinal);
} | 3.26 |
flink_StaticFileServerHandler_setDateAndCacheHeaders_rdh | /**
* Sets the "date" and "cache" headers for the HTTP Response.
*
* @param response
* The HTTP response object.
* @param fileToCache
* File to extract the modification timestamp from.
 */
public static void setDateAndCacheHeaders(HttpResponse response, File fileToCache) {
SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US);
dateFormatter.setTimeZone(GMT_TIMEZONE);
// date header
Calendar time = new GregorianCalendar();
response.headers().set(DATE, dateFormatter.format(time.getTime()));
// cache headers
time.add(Calendar.SECOND, HTTP_CACHE_SECONDS);
response.headers().set(EXPIRES, dateFormatter.format(time.getTime()));
response.headers().set(CACHE_CONTROL, "private, max-age=" + HTTP_CACHE_SECONDS);
response.headers().set(LAST_MODIFIED, dateFormatter.format(new Date(fileToCache.lastModified())));
} | 3.26 |
flink_StaticFileServerHandler_respondAsLeader_rdh | // ------------------------------------------------------------------------
// Responses to requests
// ------------------------------------------------------------------------
@Override
protected void respondAsLeader(ChannelHandlerContext channelHandlerContext, RoutedRequest routedRequest, T gateway) throws Exception {
final HttpRequest request = routedRequest.getRequest();
final String requestPath;
// make sure we request the "index.html" in case there is a directory request
if (routedRequest.getPath().endsWith("/")) {
requestPath = routedRequest.getPath() + "index.html";
} else {
requestPath = routedRequest.getPath();
}
try {
respondToRequest(channelHandlerContext, request, requestPath);
} catch (RestHandlerException rhe) {
HandlerUtils.sendErrorResponse(channelHandlerContext,
routedRequest.getRequest(), new ErrorResponseBody(rhe.getMessage()), rhe.getHttpResponseStatus(), responseHeaders);
}
} | 3.26 |
flink_StaticFileServerHandler_setDateHeader_rdh | /**
* Sets the "date" header for the HTTP response.
*
* @param response
* HTTP response
*/
public static void setDateHeader(FullHttpResponse response) {
SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US);
dateFormatter.setTimeZone(GMT_TIMEZONE);
Calendar time = new GregorianCalendar();
response.headers().set(DATE, dateFormatter.format(time.getTime()));
} | 3.26 |
flink_WritableTypeInfo_getWritableTypeInfo_rdh | // --------------------------------------------------------------------------------------------
@PublicEvolving
static <T extends Writable> TypeInformation<T> getWritableTypeInfo(Class<T> typeClass) {
if (Writable.class.isAssignableFrom(typeClass) && (!typeClass.equals(Writable.class))) {
return new WritableTypeInfo<T>(typeClass);
} else {
throw new InvalidTypesException("The given class is no subclass of " +
Writable.class.getName());
}
} | 3.26 |
flink_ContextEnvironment_setAsContext_rdh | // --------------------------------------------------------------------------------------------
public static void setAsContext(final PipelineExecutorServiceLoader executorServiceLoader, final Configuration configuration, final ClassLoader userCodeClassLoader, final boolean enforceSingleJobExecution, final boolean suppressSysout) {
ExecutionEnvironmentFactory factory = () -> new ContextEnvironment(executorServiceLoader, configuration, userCodeClassLoader, enforceSingleJobExecution, suppressSysout);
initializeContextEnvironment(factory);
} | 3.26 |
flink_PlannerTypeInferenceUtilImpl_getValidationErrorMessage_rdh | /**
* Return the validation error message of this {@link PlannerExpression} or return the
 * validation error message of its children if it passes the validation. Return empty if all
* validation succeeded.
*/
private Optional<String> getValidationErrorMessage(PlannerExpression plannerCall) {
ValidationResult validationResult = plannerCall.validateInput();
if (validationResult instanceof ValidationFailure) {
return Optional.of(((ValidationFailure) (validationResult)).message());
} else {
for (Expression plannerExpression : plannerCall.getChildren()) {
Optional<String> errorMessage = getValidationErrorMessage(((PlannerExpression) (plannerExpression)));
if (errorMessage.isPresent()) {
return errorMessage;
}
}
}
return Optional.empty();
} | 3.26 |
flink_RocksDBMapState_get_rdh | // ------------------------------------------------------------------------
// MapState Implementation
// ------------------------------------------------------------------------
@Override
public UV get(UK userKey) throws IOException, RocksDBException {
byte[] rawKeyBytes = serializeCurrentKeyWithGroupAndNamespacePlusUserKey(userKey, userKeySerializer);
byte[] rawValueBytes = backend.db.get(columnFamily, rawKeyBytes);
return rawValueBytes == null ? null : deserializeUserValue(dataInputView, rawValueBytes, userValueSerializer);
} | 3.26 |
flink_RocksDBMapState_deserializeUserKey_rdh | // ------------------------------------------------------------------------
// Serialization Methods
// ------------------------------------------------------------------------
private static <UK> UK deserializeUserKey(DataInputDeserializer dataInputView, int userKeyOffset, byte[] rawKeyBytes, TypeSerializer<UK> keySerializer) throws IOException {
dataInputView.setBuffer(rawKeyBytes, userKeyOffset, rawKeyBytes.length - userKeyOffset);
return keySerializer.deserialize(dataInputView);
} | 3.26 |
flink_TaskExecutorLocalStateStoresManager_retainLocalStateForAllocations_rdh | /**
* Retains the given set of allocations. All other allocations will be released.
*
* @param allocationsToRetain
*/
public void retainLocalStateForAllocations(Set<AllocationID> allocationsToRetain) {
final Collection<AllocationID> allocationIds = findStoredAllocations();
allocationIds.stream().filter(allocationId -> !allocationsToRetain.contains(allocationId)).forEach(this::releaseLocalStateForAllocationId);
} | 3.26 |
flink_TaskExecutorLocalStateStoresManager_cleanupAllocationBaseDirs_rdh | /**
* Deletes the base dirs for this allocation id (recursively).
*/
private void cleanupAllocationBaseDirs(AllocationID allocationID) {
// clear the base dirs for this allocation id.
File[] allocationDirectories = allocationBaseDirectories(allocationID);
for (File directory : allocationDirectories) {
try {
FileUtils.deleteFileOrDirectory(directory);
} catch (IOException e) {
LOG.warn("Exception while deleting local state directory for allocation id {}.", allocationID, e);
}
}
} | 3.26 |
flink_GenericMetricGroup_makeScopeComponents_rdh | // ------------------------------------------------------------------------
private static String[] makeScopeComponents(AbstractMetricGroup parent, String name) {
if (parent != null) {
String[] parentComponents = parent.getScopeComponents();
if ((parentComponents != null) && (parentComponents.length > 0)) {
String[] parts = new String[parentComponents.length + 1];
System.arraycopy(parentComponents, 0, parts, 0, parentComponents.length);
parts[parts.length - 1] = name;
return parts;
}
}
return new String[]{ name };
} | 3.26 |
flink_AbstractBytesHashMap_appendRecord_rdh | // ----------------------- Append -----------------------
public int appendRecord(LookupInfo<K, BinaryRowData> lookupInfo, BinaryRowData value) throws IOException {
final long oldLastPosition = outView.getCurrentOffset();
// serialize the key into the BytesHashMap record area
int skip = keySerializer.serializeToPages(lookupInfo.getKey(), outView);
long v7 = oldLastPosition + skip;
// serialize the value into the BytesHashMap record area
valueSerializer.serializeToPages(value, outView);
if (v7 > Integer.MAX_VALUE) {
LOG.warn("We can't handle key area with more than Integer.MAX_VALUE bytes," + " because the pointer is a integer.");
throw new EOFException();
}
return ((int) (v7));
} | 3.26 |
flink_AbstractBytesHashMap_free_rdh | /**
*
* @param reservedRecordMemory
* reserved fixed memory or not.
*/
public void free(boolean reservedRecordMemory) {
recordArea.release();
destructiveIterator = null;
super.free(reservedRecordMemory);
} | 3.26 |
flink_AbstractBytesHashMap_getVariableLength_rdh | // ----------------------- Private methods -----------------------
static int getVariableLength(LogicalType[] types) {
int length = 0;
for (LogicalType type : types) {
if (!BinaryRowData.isInFixedLengthPart(type)) {
// find a better way of computing generic type field variable-length
// right now we use a small value assumption
length += 16;
}
}
return length;
} | 3.26 |
flink_AbstractBytesHashMap_m0_rdh | /**
* Returns an iterator for iterating over the entries of this map.
*/
@SuppressWarnings("WeakerAccess")
public KeyValueIterator<K, BinaryRowData> m0(boolean requiresCopy) {
if (destructiveIterator != null) {
throw new IllegalArgumentException("DestructiveIterator is not null, so this method can't be invoke!");
}
return ((RecordArea) (recordArea)).entryIterator(requiresCopy);
} | 3.26 |
flink_AbstractBytesHashMap_getNumKeys_rdh | // ----------------------- Abstract Interface -----------------------
@Override
public long getNumKeys() {
return numElements;
} | 3.26 |
flink_AbstractBytesHashMap_reset_rdh | /**
* reset the map's record and bucket area's memory segments for reusing.
*/
public void reset() {
// reset the record segments.
recordArea.reset();
destructiveIterator = null;
super.reset();
} | 3.26 |
flink_AbstractBytesHashMap_entryIterator_rdh | // ----------------------- Iterator -----------------------
private KeyValueIterator<K, BinaryRowData> entryIterator(boolean requiresCopy) {
return new EntryIterator(requiresCopy);
} | 3.26 |
flink_HiveGenericUDTF_setCollector_rdh | // Will only take effect after calling open()
@VisibleForTesting
protected final void setCollector(Collector collector) {
function.setCollector(collector);
} | 3.26 |
flink_TimestampsAndWatermarks_createMainOutput_rdh | /**
 * Creates the ReaderOutput for the source reader, which internally runs the timestamp extraction
* and watermark generation.
*/
ReaderOutput<T> createMainOutput(PushingAsyncDataInput.DataOutput<T> output, WatermarkUpdateListener watermarkCallback) {
} | 3.26 |
flink_TimestampsAndWatermarks_createProgressiveEventTimeLogic_rdh | // ------------------------------------------------------------------------
// factories
// ------------------------------------------------------------------------
static <E> TimestampsAndWatermarks<E> createProgressiveEventTimeLogic(WatermarkStrategy<E> watermarkStrategy, MetricGroup metrics, ProcessingTimeService timeService, long periodicWatermarkIntervalMillis) {
final TimestampsAndWatermarksContext context = new TimestampsAndWatermarksContext(metrics);
final TimestampAssigner<E> timestampAssigner = watermarkStrategy.createTimestampAssigner(context);
return new ProgressiveTimestampsAndWatermarks<>(timestampAssigner, watermarkStrategy, context, timeService, Duration.ofMillis(periodicWatermarkIntervalMillis));
} | 3.26 |
flink_TaskManagerSlotInformation_isMatchingRequirement_rdh | /**
* Returns true if the required {@link ResourceProfile} can be fulfilled by this slot.
*
* @param required
* resources
 * @return true if this slot can fulfill the resource requirements
*/
default boolean isMatchingRequirement(ResourceProfile required) {
return getResourceProfile().isMatching(required);
} | 3.26 |
flink_TaskMetricGroup_putVariables_rdh | // ------------------------------------------------------------------------
// Component Metric Group Specifics
// ------------------------------------------------------------------------
@Override
protected void putVariables(Map<String, String> variables) {
variables.put(ScopeFormat.SCOPE_TASK_VERTEX_ID, vertexId.toString());
variables.put(ScopeFormat.SCOPE_TASK_NAME, taskName);
variables.put(ScopeFormat.SCOPE_TASK_ATTEMPT_ID, executionId.toString());
variables.put(ScopeFormat.SCOPE_TASK_ATTEMPT_NUM, String.valueOf(attemptNumber));
variables.put(ScopeFormat.SCOPE_TASK_SUBTASK_INDEX, String.valueOf(f0));
} | 3.26 |
flink_TaskMetricGroup_parent_rdh | // ------------------------------------------------------------------------
// properties
// ------------------------------------------------------------------------
public final TaskManagerJobMetricGroup parent() {
return parent;
} | 3.26 |
flink_TaskMetricGroup_getOrAddOperator_rdh | // operators and cleanup
// ------------------------------------------------------------------------
public InternalOperatorMetricGroup getOrAddOperator(String operatorName) {
return getOrAddOperator(OperatorID.fromJobVertexID(vertexId), operatorName);
} | 3.26 |
flink_NetworkBufferAllocator_allocatePooledNetworkBuffer_rdh | /**
* Allocates a pooled network buffer for the specific input channel.
*
* @param receiverId
* The id of the requested input channel.
* @return The pooled network buffer.
*/
@Nullable
Buffer allocatePooledNetworkBuffer(InputChannelID receiverId) {
Buffer buffer = null;
RemoteInputChannel inputChannel = networkClientHandler.getInputChannel(receiverId);
// If the input channel has been released, we cannot allocate buffer and the received
// message will be discarded.
if (inputChannel != null) {
buffer = inputChannel.requestBuffer();
}
return buffer;} | 3.26 |
flink_NetworkBufferAllocator_allocateUnPooledNetworkBuffer_rdh | /**
* Allocates an un-pooled network buffer with the specific size.
*
* @param size
* The requested buffer size.
* @param dataType
* The data type this buffer represents.
* @return The un-pooled network buffer.
*/
Buffer allocateUnPooledNetworkBuffer(int size, Buffer.DataType dataType) {
checkArgument(size > 0, "Illegal buffer size, must be positive.");
byte[] byteArray = new byte[size];
MemorySegment memSeg = MemorySegmentFactory.wrap(byteArray);
return new NetworkBuffer(memSeg, FreeingBufferRecycler.INSTANCE, dataType);
} | 3.26 |
flink_ClearJoinHintsWithInvalidPropagationShuttle_getInvalidJoinHint_rdh | /**
* Get the invalid join hint in this node.
*
 * <p>The invalid join hint meets the following requirements:
 *
 * <p>1. Its hint name is the same as that of the join hint that needs to be removed.
 *
 * <p>2. The length of this hint is the same as the length of propagating the removed
 * join hint.
 *
 * <p>3. The inherited path of this hint matches the inherited path of the removed
 * join hint.
*
* @param hints
* all hints
* @return return the invalid join hint if exists, else return empty
*/
private Optional<RelHint> getInvalidJoinHint(List<RelHint> hints) {
for (RelHint hint : hints) {
if (hint.hintName.equals(joinHintNeedRemove.hintName) && isMatchInvalidInheritPath(new ArrayList<>(currentInheritPath), hint.inheritPath)) {
return Optional.of(hint);
}
}
return Optional.empty();
} | 3.26 |
flink_BinaryExternalSorter_run_rdh | /**
* Implements exception handling and delegates to go().
*/
public void run() {
try {
go();
} catch (Throwable t) {
internalHandleException(new IOException("Thread '" + getName() + "' terminated due to an exception: " + t.getMessage(), t));
}
} | 3.26 |
flink_BinaryExternalSorter_setResultIterator_rdh | // ------------------------------------------------------------------------
// Inter-Thread Communication
// ------------------------------------------------------------------------
/**
* Sets the result iterator. By setting the result iterator, all threads that are waiting for
* the result iterator are notified and will obtain it.
*
* @param iterator
* The result iterator to set.
*/
private void setResultIterator(MutableObjectIterator<BinaryRowData> iterator) {
synchronized(this.iteratorLock) {
// set the result iterator only if no exception has occurred
if (this.iteratorException == null) {
this.iterator = iterator;
this.iteratorLock.notifyAll();
}
}
} | 3.26 |
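The consumer side of this handoff is the usual wait/notify pattern: block on the lock until either a result or an exception has been published. The following self-contained sketch shows that pattern in generic Java; it is not the sorter's actual getIterator implementation, and all names are invented.

import java.io.IOException;

final class ResultHandoff<T> {
    private final Object lock = new Object();
    private T result;
    private IOException failure;

    void publish(T value) {
        synchronized (lock) {
            if (failure == null) {
                result = value;
                lock.notifyAll(); // wake every thread blocked in await()
            }
        }
    }

    void fail(IOException cause) {
        synchronized (lock) {
            if (failure == null) {
                failure = cause;
                lock.notifyAll();
            }
        }
    }

    T await() throws IOException, InterruptedException {
        synchronized (lock) {
            while (result == null && failure == null) {
                lock.wait(); // lock is released while waiting, reacquired after notifyAll()
            }
            if (failure != null) {
                throw failure;
            }
            return result;
        }
    }
}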
flink_BinaryExternalSorter_go_rdh | /**
* Entry point of the thread.
*/
public void go() throws IOException {
final Queue<CircularElement> cache = new ArrayDeque<>();
CircularElement element;
boolean cacheOnly = false;
// ------------------- In-Memory Cache ------------------------
// fill cache
while (isRunning()) {
// take next currWriteBuffer from queue
try {
element = this.f2.spill.take();
} catch (InterruptedException iex) {
throw new IOException("The spilling thread was interrupted.");
}
if (element == SPILLING_MARKER) {
break;
} else if (element == EOF_MARKER) {
cacheOnly = true;
break;
}
cache.add(element);
}
// check whether the thread was canceled
if (!isRunning()) {
return;
}
// ------------------- In-Memory Merge ------------------------
if (cacheOnly) {
List<MutableObjectIterator<BinaryRowData>> iterators = new ArrayList<>(cache.size());
for (CircularElement cached : cache) {
iterators.add(cached.buffer.getIterator());
}
// set lazy iterator
List<BinaryRowData> reusableEntries = new ArrayList<>();
for (int i = 0; i < iterators.size(); i++) {
reusableEntries.add(serializer.createInstance());
}
setResultIterator(iterators.isEmpty() ? EmptyMutableObjectIterator.get() : iterators.size() == 1 ? iterators.get(0) : new BinaryMergeIterator<>(iterators, reusableEntries, comparator::compare));
releaseEmptyBuffers();
// signal merging thread to exit (because there is nothing to merge externally)
this.f2.merge.add(FINAL_MERGE_MARKER);
return;
}
// ------------------- Spilling Phase ------------------------
final FileIOChannel.Enumerator enumerator = this.ioManager.createChannelEnumerator();
// loop as long as the thread is marked alive and we do not see the final
// currWriteBuffer
while (isRunning()) {
try {
element = cache.isEmpty() ? f2.spill.take() : cache.poll();
} catch (InterruptedException iex) {
if (isRunning()) {
LOG.error("Spilling thread was interrupted (without being shut down) while grabbing a buffer. " + "Retrying to grab buffer...");
continue;
} else {
return;
}
}
// check if we are still running
if (!isRunning()) {
return;
}
// check if this is the end-of-work buffer
if (element == EOF_MARKER) {
break;
}
if (element.buffer.getOccupancy() > 0) {
// open next channel
FileIOChannel.ID channel = enumerator.next();
channelManager.addChannel(channel);
AbstractChannelWriterOutputView output = null;
int bytesInLastBuffer;
int blockCount;
try {
numSpillFiles++;
output = FileChannelUtil.createOutputView(ioManager, channel, compressionEnabled, compressionCodecFactory, compressionBlockSize, memorySegmentSize);
element.buffer.writeToOutput(output);
spillInBytes += output.getNumBytes();
spillInCompressedBytes += output.getNumCompressedBytes();
bytesInLastBuffer = output.close();
blockCount = output.getBlockCount();
LOG.info("here spill the {}th sort buffer data with {} bytes and {} compressed bytes", numSpillFiles, spillInBytes, spillInCompressedBytes);
} catch (IOException e) {
if (output != null) {
output.close();
output.getChannel().deleteChannel();
}
throw e;
}
// pass spill file meta to merging thread
this.f2.merge.add(new ChannelWithMeta(channel, blockCount, bytesInLastBuffer));
}
// pass empty sort-buffer to reading thread
element.buffer.reset();
this.f2.empty.add(element);
}
// clear the sort buffers, as both sorting and spilling threads are done.
releaseSortMemory();
// signal merging thread to begin the final merge
this.f2.merge.add(FINAL_MERGE_MARKER);
// Spilling thread done.
} | 3.26 |
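The control flow above hinges on sentinel elements (SPILLING_MARKER, EOF_MARKER, FINAL_MERGE_MARKER) passed through blocking queues, i.e. the classic poison-pill handoff between a producer and a worker thread. A self-contained sketch of that idea with plain java.util.concurrent types, not the sorter's own classes:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PoisonPillDemo {
    // sentinel instance; compared by identity, exactly like the sorter's markers
    private static final String EOF_MARKER = "<EOF>";

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        Thread worker = new Thread(() -> {
            try {
                while (true) {
                    String element = queue.take();           // blocks until an element arrives
                    if (element == EOF_MARKER) {
                        break;                               // end-of-work signal: leave the loop
                    }
                    System.out.println("spill " + element);  // stand-in for writing a sort buffer
                }
            } catch (InterruptedException ignored) {
            }
        });
        worker.start();
        queue.put("buffer-1");
        queue.put("buffer-2");
        queue.put(EOF_MARKER);                               // tell the worker no more data is coming
        worker.join();
    }
}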
flink_BinaryExternalSorter_internalHandleException_rdh | /**
* Internally handles an exception and makes sure that this method returns without a
* problem.
*
* @param ioex
* The exception to handle.
*/
private void internalHandleException(IOException ioex) {
if (!isRunning()) {
// discard any exception that occurs after the thread is killed.
return;
}
if (this.exceptionHandler != null) {
try {
this.exceptionHandler.handleException(ioex);
} catch (Throwable ignored) {
}
}
} | 3.26 |
flink_BinaryExternalSorter_startThreads_rdh | // ------------------------------------------------------------------------
// Factory Methods
// ------------------------------------------------------------------------
/**
* Starts all the threads that are used by this sorter.
*/
public void startThreads() {
if (this.sortThread != null) {
this.sortThread.start();
}
if (this.spillThread != null) {
this.spillThread.start();
}
if (this.mergeThread != null) {
this.mergeThread.start();
}
} | 3.26 |
flink_BinaryExternalSorter_close_rdh | /**
* Shuts down all the threads initiated by this sorter. Also releases all previously allocated
* memory, if it has not yet been released by the threads, and closes and deletes all channels
* (removing the temporary files).
*
* <p>The threads are set to exit directly, but depending on their operation, it may take a
* while to actually happen. The sorting thread will for example not finish before the current
 * batch is sorted. This method attempts to wait for the working threads to exit. If it is
 * interrupted, however, it returns immediately, and there is no guarantee how long the
 * threads continue to exist and occupy resources afterwards.
*/
@Override
public void close() {
// check if the sorter has been closed before
synchronized(this) {
if (this.closed) {
return;
}
// mark as closed
this.closed = true;
}
// from here on, the code is in a try block, because even though errors might be thrown
// in this block, we need to make sure that all the memory is released.
try {
// if the result iterator has not been obtained yet, set the exception
synchronized(this.iteratorLock) {
if (this.iteratorException == null) {
this.iteratorException = new IOException("The sorter has been closed.");
this.iteratorLock.notifyAll();
}
}
// stop all the threads
if (this.sortThread != null) {
try {
this.sortThread.shutdown();
} catch (Throwable t) {
LOG.error("Error shutting down sorter thread: " + t.getMessage(), t);
}
}
if (this.spillThread != null) {
try {
this.spillThread.shutdown();
} catch (Throwable t) {
LOG.error("Error shutting down spilling thread: " + t.getMessage(), t);
}
}
if (this.mergeThread != null) {
try {
this.mergeThread.shutdown();
} catch (Throwable t) {
LOG.error("Error shutting down merging thread: " + t.getMessage(), t);
}
}
try {
if (this.sortThread != null) {
this.sortThread.join();
this.sortThread = null;
}
if (this.spillThread != null) {
this.spillThread.join();
this.spillThread = null;
}
if (this.mergeThread != null) {
this.mergeThread.join();
this.mergeThread = null;
}
} catch (InterruptedException iex) {
LOG.debug("Closing of sort/merger was interrupted. " + "The reading/sorting/spilling/merging threads may still be working.", iex);
}
} finally {
releaseSortMemory();
// Eliminate object references for MemorySegments.
circularQueues = null;
currWriteBuffer = null;
iterator = null;
merger.close();
channelManager.close();
}
} | 3.26 |
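The shutdown choreography above (flag each thread, then join it, tolerating interruption) is a common pattern worth isolating. A self-contained sketch with a plain thread and a volatile flag, not the sorter's own thread classes:

public class ShutdownSketch {
    private volatile boolean running = true;
    private Thread worker;

    public void start() {
        worker = new Thread(() -> {
            while (running) {
                // one unit of work per iteration; the flag is checked between units
            }
        });
        worker.start();
    }

    public void close() {
        running = false;                    // ask the thread to exit at its next check
        if (worker != null) {
            try {
                worker.join();              // wait for it to actually terminate
                worker = null;
            } catch (InterruptedException iex) {
                // give up waiting, as the sorter does: the thread may linger for a while
                Thread.currentThread().interrupt();
            }
        }
    }
}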
flink_BinaryExternalSorter_setResultIteratorException_rdh | /**
* Reports an exception to all threads that are waiting for the result iterator.
*
* @param ioex
* The exception to be reported to the threads that wait for the result iterator.
*/
private void setResultIteratorException(IOException ioex) {
synchronized(this.iteratorLock) {
if (this.iteratorException == null) {
this.iteratorException = ioex;
this.iteratorLock.notifyAll();
}
}
} | 3.26 |
flink_SingleLogicalSlot_release_rdh | // -------------------------------------------------------------------------
// AllocatedSlot.Payload implementation
// -------------------------------------------------------------------------
/**
* A release of the payload by the {@link AllocatedSlot} triggers a release of the payload of
* the logical slot.
*
* @param cause
* of the payload release
*/
@Override
public void release(Throwable cause) {
if (STATE_UPDATER.compareAndSet(this, State.ALIVE, State.RELEASING)) {
signalPayloadRelease(cause);
}
markReleased();
releaseFuture.complete(null);
} | 3.26 |
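The STATE_UPDATER.compareAndSet call is what makes the release idempotent and race-free: only the thread that wins the ALIVE to RELEASING transition performs the payload release. A self-contained sketch of that pattern with AtomicReferenceFieldUpdater, using invented names rather than the slot's actual fields:

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

public class Releasable {
    enum State { ALIVE, RELEASING, RELEASED }

    // the updated field must be volatile and is referenced by name
    private volatile State state = State.ALIVE;

    private static final AtomicReferenceFieldUpdater<Releasable, State> STATE_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(Releasable.class, State.class, "state");

    public void release(Throwable cause) {
        // only the first caller observes ALIVE and runs the actual release work
        if (STATE_UPDATER.compareAndSet(this, State.ALIVE, State.RELEASING)) {
            System.out.println("releasing payload: " + cause.getMessage());
        }
        state = State.RELEASED; // every caller converges on the terminal state
    }
}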
flink_StateTableByKeyGroupReaders_readerForVersion_rdh | /**
* Creates a new StateTableByKeyGroupReader that inserts de-serialized mappings into the given
* table, using the de-serialization algorithm that matches the given version.
*
* @param <K>
* type of key.
* @param <N>
* type of namespace.
* @param <S>
* type of state.
* @param stateTable
* the {@link StateTable} into which de-serialized mappings are inserted.
* @param version
* version for the de-serialization algorithm.
* @return the appropriate reader.
*/
public static <K, N, S> StateSnapshotKeyGroupReader readerForVersion(StateTable<K, N, S> stateTable, int version) {
switch (version) {
case 1 :
return new StateTableByKeyGroupReaderV1<>(stateTable);
case 2 :
case 3 :
case 4 :
case 5 :
case 6 :
return m0(stateTable);
default :
throw new IllegalArgumentException("Unknown version: " + version);
}
} | 3.26 |
flink_CanalJsonFormatFactory_validateEncodingFormatOptions_rdh | /**
* Validator for canal encoding format.
*/
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
} | 3.26 |
flink_CanalJsonFormatFactory_validateDecodingFormatOptions_rdh | /**
* Validator for canal decoding format.
*/
private static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions);
} | 3.26 |
flink_DistinctType_newBuilder_rdh | /**
* Creates a builder for a {@link DistinctType}.
*/
public static DistinctType.Builder newBuilder(ObjectIdentifier objectIdentifier, LogicalType sourceType) {
return new DistinctType.Builder(objectIdentifier, sourceType);
} | 3.26 |
flink_JarHandlerUtils_getProgramArgs_rdh | /**
* Parse program arguments in jar run or plan request.
*/
private static <R extends JarRequestBody, M extends MessageParameters> List<String> getProgramArgs(HandlerRequest<R> request, Logger log) throws RestHandlerException {
JarRequestBody requestBody = request.getRequestBody();
@SuppressWarnings("deprecation")
List<String> programArgs = tokenizeArguments(fromRequestBodyOrQueryParameter(emptyToNull(requestBody.getProgramArguments()), () -> getQueryParameter(request, ProgramArgsQueryParameter.class), null, log));
List<String> v12 = fromRequestBodyOrQueryParameter(requestBody.getProgramArgumentsList(), () -> request.getQueryParameter(ProgramArgQueryParameter.class), null, log);
if (!v12.isEmpty()) {
if (!programArgs.isEmpty()) {
throw new RestHandlerException("Confusing request: programArgs and programArgsList are specified, please, use only programArgsList", HttpResponseStatus.BAD_REQUEST);
}
return v12;
} else {
return programArgs;
}
} | 3.26 |
flink_JarHandlerUtils_tokenizeArguments_rdh | /**
* Takes program arguments as a single string, and splits them into a list of string.
*
* <pre>
* tokenizeArguments("--foo bar") = ["--foo" "bar"]
* tokenizeArguments("--foo \"bar baz\"") = ["--foo" "bar baz"]
* tokenizeArguments("--foo 'bar baz'") = ["--foo" "bar baz"]
* tokenizeArguments(null) = []
* </pre>
*
* <strong>WARNING: </strong>This method does not respect escaped quotes.
*/
@VisibleForTesting
static List<String> tokenizeArguments(@Nullable final String args) {
if (args == null) {
return Collections.emptyList();
}
final Matcher matcher = ARGUMENTS_TOKENIZE_PATTERN.matcher(args);
final List<String> tokens = new ArrayList<>();
while (matcher.find()) {
tokens.add(matcher.group().trim().replace("\"", "").replace("'", ""));
}
return tokens;
} | 3.26 |
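A hedged, self-contained re-creation of the documented behaviour: the regex below is an assumption (the snippet's ARGUMENTS_TOKENIZE_PATTERN is not shown), but it reproduces the Javadoc examples, including the caveat that escaped quotes are not respected.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TokenizeSketch {
    // assumed pattern: a double-quoted chunk, a single-quoted chunk, or a bare token
    private static final Pattern TOKEN = Pattern.compile("\"[^\"]*\"|'[^']*'|\\S+");

    static List<String> tokenize(String args) {
        List<String> tokens = new ArrayList<>();
        Matcher matcher = TOKEN.matcher(args);
        while (matcher.find()) {
            tokens.add(matcher.group().replace("\"", "").replace("'", ""));
        }
        return tokens;
    }

    public static void main(String[] args) {
        System.out.println(tokenize("--foo bar"));         // [--foo, bar]
        System.out.println(tokenize("--foo \"bar baz\"")); // [--foo, bar baz]
        System.out.println(tokenize("--foo 'bar baz'"));   // [--foo, bar baz]
    }
}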
flink_StreamContextEnvironment_collectNotAllowedConfigurations_rdh | /**
* Collects programmatic configuration changes.
*
* <p>Configuration is spread across instances of {@link Configuration} and POJOs (e.g. {@link ExecutionConfig}), so we need to have logic for comparing both. For supporting wildcards, the
* first can be accomplished by simply removing keys, the latter by setting equal fields before
* comparison.
*/
private Collection<String> collectNotAllowedConfigurations() {
if (programConfigEnabled) {
return Collections.emptyList();
}
final List<String> errors = new ArrayList<>();
final Configuration clusterConfigMap = new Configuration(clusterConfiguration);
// Removal must happen on Configuration objects (not instances of Map)
// to also ignore map-typed config options with prefix key notation
removeProgramConfigWildcards(clusterConfigMap);
checkMainConfiguration(clusterConfigMap, errors);
checkCheckpointConfig(clusterConfigMap, errors);
checkExecutionConfig(clusterConfigMap, errors);
return errors;
} | 3.26 |
flink_StreamContextEnvironment_checkNotAllowedConfigurations_rdh | // --------------------------------------------------------------------------------------------
// Program Configuration Validation
// --------------------------------------------------------------------------------------------
private void checkNotAllowedConfigurations() throws MutatedConfigurationException {
final Collection<String> errorMessages = collectNotAllowedConfigurations();
if (!errorMessages.isEmpty()) {
throw new MutatedConfigurationException(errorMessages);
}
} | 3.26 |
flink_StreamContextEnvironment_setAsContext_rdh | // --------------------------------------------------------------------------------------------
public static void setAsContext(final PipelineExecutorServiceLoader executorServiceLoader, final Configuration clusterConfiguration,
final ClassLoader userCodeClassLoader, final boolean enforceSingleJobExecution, final boolean suppressSysout) {
final StreamExecutionEnvironmentFactory factory = envInitConfig -> {
final boolean programConfigEnabled = clusterConfiguration.get(DeploymentOptions.PROGRAM_CONFIG_ENABLED);
final List<String> programConfigWildcards = clusterConfiguration.get(DeploymentOptions.PROGRAM_CONFIG_WILDCARDS);
final Configuration v11 = new Configuration();
v11.addAll(clusterConfiguration);
v11.addAll(envInitConfig);
return new StreamContextEnvironment(executorServiceLoader, clusterConfiguration, v11, userCodeClassLoader, enforceSingleJobExecution, suppressSysout, programConfigEnabled, programConfigWildcards);
};
initializeContextEnvironment(factory);
} | 3.26 |
flink_SerializationSchema_open_rdh | /**
 * Initialization method for the schema. It is called before the actual working method {@link #serialize(Object)} and is thus suitable for one-time setup work.
*
* <p>The provided {@link InitializationContext} can be used to access additional features such
* as e.g. registering user metrics.
*
* @param context
* Contextual information that can be used during initialization.
*/
@PublicEvolving
default void open(InitializationContext context) throws Exception {
} | 3.26 |
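A hedged example of the one-time setup this hook is meant for: registering a user metric through the provided context. The class and the metric name are invented for illustration; the calls assume the public SerializationSchema and MetricGroup interfaces.

import java.nio.charset.StandardCharsets;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.metrics.Counter;

public class CountingStringSchema implements SerializationSchema<String> {
    private transient Counter recordsSerialized; // hypothetical metric

    @Override
    public void open(InitializationContext context) {
        // one-time setup, executed before the first serialize() call
        recordsSerialized = context.getMetricGroup().counter("recordsSerialized");
    }

    @Override
    public byte[] serialize(String element) {
        recordsSerialized.inc();
        return element.getBytes(StandardCharsets.UTF_8);
    }
}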