name | code_snippet | score
---|---|---|
flink_FileOutputFormat_initDefaultsFromConfiguration_rdh
|
/**
* Initialize defaults for output format. Needs to be a static method because it is configured
* for local cluster execution.
*
* @param configuration
* The configuration to load defaults from
*/
public static void initDefaultsFromConfiguration(Configuration configuration) {
final boolean overwrite = configuration.getBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE);
DEFAULT_WRITE_MODE = (overwrite) ? WriteMode.OVERWRITE : WriteMode.NO_OVERWRITE;
final boolean alwaysCreateDirectory = configuration.getBoolean(CoreOptions.FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY);
DEFAULT_OUTPUT_DIRECTORY_MODE = (alwaysCreateDirectory) ? OutputDirectoryMode.ALWAYS : OutputDirectoryMode.f0;
}
| 3.26 |
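A minimal usage sketch, assuming the standard Flink `Configuration` API and the two option keys referenced above; it shows how the defaults read by `initDefaultsFromConfiguration(...)` might be toggled before local execution:

// Hedged sketch: toggle the defaults consumed by initDefaultsFromConfiguration(...)
Configuration config = new Configuration();
config.setBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE, true); // existing files may be overwritten
config.setBoolean(CoreOptions.FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY, false); // single-parallelism output stays a single file
FileOutputFormat.initDefaultsFromConfiguration(config);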
flink_FileOutputFormat_configure_rdh
|
// ----------------------------------------------------------------
@Override
public void configure(Configuration parameters) {
// get the output file path, if it was not yet set
if (this.outputFilePath == null) {
// get the file parameter
String filePath = parameters.getString(FILE_PARAMETER_KEY, null);
            if (filePath == null) {
                throw new IllegalArgumentException("The output path has been specified neither via constructor/setters" + ", nor via the Configuration.");
            }
try {
this.outputFilePath = new Path(filePath);
} catch (RuntimeException rex) {
throw new RuntimeException("Could not create a valid URI from the given file path name: " + rex.getMessage());
}
}
        // check whether the modes have not been set, and use the defaults in that case
if (this.writeMode == null) {
this.writeMode = DEFAULT_WRITE_MODE;
}
if (this.outputDirectoryMode == null) {
            this.outputDirectoryMode = DEFAULT_OUTPUT_DIRECTORY_MODE;
}
}
| 3.26 |
flink_FileOutputFormat_initializeGlobal_rdh
|
/**
* Initialization of the distributed file system if it is used.
*
* @param parallelism
* The task parallelism.
*/
@Override
public void initializeGlobal(int parallelism) throws IOException {
final Path path = getOutputFilePath();
        final FileSystem fs = path.getFileSystem();
        // only distributed file systems can be initialized at start-up time.
if (fs.isDistributedFS()) {
final WriteMode writeMode = getWriteMode();
final OutputDirectoryMode outDirMode = getOutputDirectoryMode();
if ((parallelism == 1) && (outDirMode == OutputDirectoryMode.f0)) {
// output is not written in parallel and should be written to a single file.
// prepare distributed output path
                if (!fs.initOutPathDistFS(path, writeMode, false)) {
                    // output preparation failed! Cancel task.
throw new IOException("Output path could not be initialized.");
}
            } else {
                // output should be written to a directory
                // only distributed file systems can be initialized at start-up time.
                if (!fs.initOutPathDistFS(path, writeMode, true)) {
                    throw new IOException("Output directory could not be created.");
                }
            }
}
}
| 3.26 |
flink_FlinkS3PrestoFileSystem_deleteObject_rdh
|
/**
* Deletes the object referenced by the passed {@code path}. This method is used to work around
* the fact that Presto doesn't allow us to differentiate between deleting a non-existing object
* and some other errors. Therefore, a final check for existence is necessary in case of an
* error or false return value.
*
* @param path
* The path referring to the object that shall be deleted.
* @throws IOException
* if an error occurred while deleting the file other than the {@code path}
* referring to a non-empty directory.
*/
private void deleteObject(Path path) throws IOException {
boolean success = true;
IOException actualException = null;
try {
// empty directories will cause this method to fail as well - checking for their
// existence afterwards is a workaround to cover this use-case
success = super.delete(path, false);
} catch (IOException e) {
actualException = e;
}
        if ((!success) || (actualException != null)) {
            if (exists(path)) {
throw Optional.ofNullable(actualException).orElse(new IOException(path.getPath() + " could not be deleted for unknown reasons."));
}
}
}
| 3.26 |
flink_Bucket_getNew_rdh
|
// --------------------------- Static Factory Methods -----------------------------
/**
* Creates a new empty {@code Bucket}.
*
* @param subtaskIndex
* the index of the subtask creating the bucket.
* @param bucketId
* the identifier of the bucket, as returned by the {@link BucketAssigner}.
* @param bucketPath
* the path to where the part files for the bucket will be written to.
* @param initialPartCounter
* the initial counter for the part files of the bucket.
* @param bucketWriter
* the {@link BucketWriter} used to write part files in the bucket.
* @param rollingPolicy
* the policy based on which a bucket rolls its currently open part file
* and opens a new one.
* @param fileListener
 * the listener notified about the status of the bucket's files.
* @param <IN>
* the type of input elements to the sink.
* @param <BucketID>
* the type of the identifier of the bucket, as returned by the {@link BucketAssigner}
* @param outputFileConfig
* the part file configuration.
* @return The new Bucket.
*/
    static <IN, BucketID> Bucket<IN, BucketID> getNew(
            final int subtaskIndex,
            final BucketID bucketId,
            final Path bucketPath,
            final long initialPartCounter,
            final BucketWriter<IN, BucketID> bucketWriter,
            final RollingPolicy<IN, BucketID> rollingPolicy,
            @Nullable final FileLifeCycleListener<BucketID> fileListener,
            final OutputFileConfig outputFileConfig) {
return new Bucket<>(subtaskIndex, bucketId, bucketPath, initialPartCounter, bucketWriter, rollingPolicy, fileListener, outputFileConfig);
}
| 3.26 |
flink_Bucket_assembleNewPartPath_rdh
|
/**
 * Constructs a new PartPath and increments the partCounter.
*/
    private Path assembleNewPartPath() {
        long currentPartCounter = partCounter++;
return new Path(bucketPath, ((((outputFileConfig.getPartPrefix() + '-') + subtaskIndex) + '-') + currentPartCounter) + outputFileConfig.getPartSuffix());
}
| 3.26 |
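A small sketch of the naming behavior of assembleNewPartPath(); the OutputFileConfig builder shown here is an assumption about its API, and the resulting names follow the prefix-subtask-counter-suffix pattern built above:

// Hedged sketch: with this config, a bucket on subtask 3 with partCounter 17 would assemble
// a path like <bucketPath>/part-3-17.ext
OutputFileConfig fileConfig = OutputFileConfig.builder()
        .withPartPrefix("part")
        .withPartSuffix(".ext")
        .build();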
flink_Bucket_getPendingFileRecoverablesPerCheckpoint_rdh
|
// --------------------------- Testing Methods -----------------------------
@VisibleForTesting
    Map<Long, List<InProgressFileWriter.PendingFileRecoverable>> getPendingFileRecoverablesPerCheckpoint() {
        return pendingFileRecoverablesPerCheckpoint;
}
| 3.26 |
flink_Bucket_restore_rdh
|
/**
* Restores a {@code Bucket} from the state included in the provided {@link BucketState}.
*
* @param subtaskIndex
* the index of the subtask creating the bucket.
* @param initialPartCounter
* the initial counter for the part files of the bucket.
* @param bucketWriter
* the {@link BucketWriter} used to write part files in the bucket.
* @param rollingPolicy
* the policy based on which a bucket rolls its currently open part file
* and opens a new one.
* @param bucketState
* the initial state of the restored bucket.
* @param fileListener
 * the listener notified about the status of the bucket's files.
* @param <IN>
* the type of input elements to the sink.
* @param <BucketID>
* the type of the identifier of the bucket, as returned by the {@link BucketAssigner}
* @param outputFileConfig
* the part file configuration.
* @return The restored Bucket.
*/
    static <IN, BucketID> Bucket<IN, BucketID> restore(
            final int subtaskIndex,
            final long initialPartCounter,
            final BucketWriter<IN, BucketID> bucketWriter,
            final RollingPolicy<IN, BucketID> rollingPolicy,
            final BucketState<BucketID> bucketState,
            @Nullable final FileLifeCycleListener<BucketID> fileListener,
            final OutputFileConfig outputFileConfig) throws IOException {
return new Bucket<>(subtaskIndex, initialPartCounter, bucketWriter, rollingPolicy, bucketState, fileListener, outputFileConfig);
}
| 3.26 |
flink_FlinkRelMdCollation_window_rdh
|
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Window}'s collation.
*
* <p>A Window projects the fields of its input first, followed by the output from each of its
* windows. Assuming (quite reasonably) that the implementation does not re-order its input
* rows, then any collations of its input are preserved.
*/
public static List<RelCollation> window(RelMetadataQuery mq, RelNode input, ImmutableList<Window.Group> groups) {
return mq.collations(input);
}
| 3.26 |
flink_FlinkRelMdCollation_mergeJoin_rdh
|
/**
* Helper method to determine a {@link Join}'s collation assuming that it uses a merge-join
* algorithm.
*
* <p>If the inputs are sorted on other keys <em>in addition to</em> the join key, the result
* preserves those collations too.
*/
public static List<RelCollation> mergeJoin(RelMetadataQuery mq, RelNode left, RelNode right, ImmutableIntList leftKeys, ImmutableIntList rightKeys) {
final ImmutableList.Builder<RelCollation> builder = ImmutableList.builder();
final ImmutableList<RelCollation> leftCollations = mq.collations(left);
assert RelCollations.contains(leftCollations, leftKeys) : "cannot merge join: left input is not sorted on left keys";
builder.addAll(leftCollations);
final ImmutableList<RelCollation> rightCollations = mq.collations(right);
assert RelCollations.contains(rightCollations, rightKeys) : "cannot merge join: right input is not sorted on right keys";
final int v34 = left.getRowType().getFieldCount();
for (RelCollation collation : rightCollations) {
builder.add(RelCollations.shift(collation, v34));
}
return builder.build();
}
| 3.26 |
flink_FlinkRelMdCollation_table_rdh
|
// Helper methods
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.TableScan}'s collation.
*/
public static List<RelCollation> table(RelOptTable table) {
        // Behavior change since CALCITE-4215: the default collation list is null.
// In Flink, the default is an empty list.
List<RelCollation> collations = table.getCollationList();
return collations == null ? Collections.emptyList() : collations;
}
| 3.26 |
flink_FlinkRelMdCollation_filter_rdh
|
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Filter}'s collation.
*/
public static List<RelCollation> filter(RelMetadataQuery mq, RelNode input) {
return mq.collations(input);
}
| 3.26 |
flink_FlinkRelMdCollation_values_rdh
|
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Values}'s collation.
*
* <p>We actually under-report the collations. A Values with 0 or 1 rows - an edge case, but
* legitimate and very common - is ordered by every permutation of every subset of the columns.
*
* <p>So, our algorithm aims to:
*
* <ul>
* <li>produce at most N collations (where N is the number of columns);
* <li>make each collation as long as possible;
* <li>do not repeat combinations already emitted - if we've emitted {@code (a, b)} do not
* later emit {@code (b, a)};
* <li>probe the actual values and make sure that each collation is consistent with the data
* </ul>
*
* <p>So, for an empty Values with 4 columns, we would emit {@code (a, b, c, d), (b, c, d), (c,
* d), (d)}.
*/
public static List<RelCollation> values(RelMetadataQuery mq, RelDataType rowType, ImmutableList<ImmutableList<RexLiteral>> tuples) {
Util.discard(mq);// for future use
final List<RelCollation> list = new ArrayList<>();
final int n = rowType.getFieldCount();
final List<Pair<RelFieldCollation, Ordering<List<RexLiteral>>>> pairs = new ArrayList<>();
outer : for (int i = 0; i < n; i++) {
pairs.clear();
for (int j = i; j < n; j++) {
            final RelFieldCollation fieldCollation = new RelFieldCollation(j);
Ordering<List<RexLiteral>> comparator = comparator(fieldCollation);
Ordering<List<RexLiteral>> ordering;
if (pairs.isEmpty()) {
ordering = comparator;
} else {
ordering = Util.last(pairs).right.compound(comparator);
}
pairs.add(Pair.of(fieldCollation, ordering));
if (!ordering.isOrdered(tuples)) {
if (j == i) {
continue outer;
}
pairs.remove(pairs.size() - 1);
}
}
if (!pairs.isEmpty()) {
list.add(RelCollations.of(Pair.left(pairs)));
}
}
return list;
}
| 3.26 |
flink_FlinkRelMdCollation_project_rdh
|
/**
* Helper method to determine a {@link Project}'s collation.
*/
public static List<RelCollation> project(RelMetadataQuery mq, RelNode input, List<? extends RexNode> projects) {
final SortedSet<RelCollation> collations = new TreeSet<>();
final List<RelCollation> inputCollations = mq.collations(input);
if ((inputCollations == null) || inputCollations.isEmpty()) {
return ImmutableList.of();
}
final Multimap<Integer, Integer> targets = LinkedListMultimap.create();
final Map<Integer, SqlMonotonicity> targetsWithMonotonicity = new HashMap<>();
for (Ord<RexNode> v20 : Ord.<RexNode>zip(projects)) {
if (v20.e instanceof RexInputRef) {
                targets.put(((RexInputRef) (v20.e)).getIndex(), v20.i);
            } else if (v20.e instanceof RexCall) {
final RexCall call = ((RexCall) (v20.e));
final RexCallBinding binding = RexCallBinding.create(input.getCluster().getTypeFactory(), call, inputCollations);
targetsWithMonotonicity.put(v20.i, call.getOperator().getMonotonicity(binding));
}
}
final List<RelFieldCollation> fieldCollations = new ArrayList<>();
loop : for (RelCollation ic : inputCollations) {
if (ic.getFieldCollations().isEmpty()) {
continue;
}
fieldCollations.clear();
for (RelFieldCollation ifc : ic.getFieldCollations()) {
final Collection<Integer> integers = targets.get(ifc.getFieldIndex());
if (integers.isEmpty()) {
continue loop;// cannot do this collation
}
fieldCollations.add(ifc.withFieldIndex(integers.iterator().next()));
}
assert !fieldCollations.isEmpty();
collations.add(RelCollations.of(fieldCollations));
}
final List<RelFieldCollation> fieldCollationsForRexCalls = new ArrayList<>();
for (Map.Entry<Integer, SqlMonotonicity> entry : targetsWithMonotonicity.entrySet()) {
final SqlMonotonicity value = entry.getValue();
switch (value) {
case NOT_MONOTONIC :
case CONSTANT :
break;
default :
fieldCollationsForRexCalls.add(new RelFieldCollation(entry.getKey(), RelFieldCollation.Direction.of(value)));
break;
}
}
if (!fieldCollationsForRexCalls.isEmpty()) {
collations.add(RelCollations.of(fieldCollationsForRexCalls));
}
return ImmutableList.copyOf(collations);
}
| 3.26 |
flink_FlinkRelMdCollation_sort_rdh
|
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Sort}'s collation.
*/
public static List<RelCollation> sort(RelCollation collation) {
return ImmutableList.of(collation);
}
| 3.26 |
flink_FlinkRelMdCollation_enumerableHashJoin_rdh
|
/**
* Returns the collation of {@link EnumerableHashJoin} based on its inputs and the join type.
*/
    public static List<RelCollation> enumerableHashJoin(
            RelMetadataQuery mq, RelNode left, RelNode right, JoinRelType joinType) {
        if (joinType == JoinRelType.SEMI) {
            return enumerableSemiJoin(mq, left, right);
} else {
return enumerableJoin0(mq, left, right, joinType);
}
}
| 3.26 |
flink_FlinkRelMdCollation_getDef_rdh
|
// ~ Methods ----------------------------------------------------------------
public MetadataDef<BuiltInMetadata.Collation> getDef() {
return Collation.DEF;
}
| 3.26 |
flink_FlinkRelMdCollation_enumerableNestedLoopJoin_rdh
|
/**
* Returns the collation of {@link EnumerableNestedLoopJoin} based on its inputs and the join
* type.
*/
public static List<RelCollation> enumerableNestedLoopJoin(RelMetadataQuery mq, RelNode left, RelNode right, JoinRelType joinType) {
return enumerableJoin0(mq, left, right, joinType);
}
| 3.26 |
flink_FlinkRelMdCollation_match_rdh
|
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Match}'s collation.
*/
public static List<RelCollation> match(RelMetadataQuery mq, RelNode input, RelDataType rowType, RexNode pattern, boolean strictStart,
boolean strictEnd, Map<String, RexNode> patternDefinitions, Map<String, RexNode> measures, RexNode after, Map<String, ? extends SortedSet<String>> subsets, boolean allRows, ImmutableBitSet partitionKeys, RelCollation orderKeys, RexNode interval) {
return mq.collations(input);
}
| 3.26 |
flink_FlinkRelMdCollation_collations_rdh
|
/**
* Catch-all implementation for {@link BuiltInMetadata.Collation#collations()}, invoked using
* reflection, for any relational expression not handled by a more specific method.
*
* <p>{@link org.apache.calcite.rel.core.Union}, {@link org.apache.calcite.rel.core.Intersect},
* {@link org.apache.calcite.rel.core.Minus}, {@link org.apache.calcite.rel.core.Join}, {@link org.apache.calcite.rel.core.Correlate} do not in general return sorted results (but
* implementations using particular algorithms may).
*
* @param rel
* Relational expression
* @return Relational expression's collations
* @see org.apache.calcite.rel.metadata.RelMetadataQuery#collations(RelNode)
*/
public ImmutableList<RelCollation> collations(RelNode rel, RelMetadataQuery mq) {
return ImmutableList.of();
}
| 3.26 |
flink_FlinkRelMdCollation_snapshot_rdh
|
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Snapshot}'s collation.
*/
public static List<RelCollation> snapshot(RelMetadataQuery mq, RelNode input) {
return mq.collations(input);
}
| 3.26 |
flink_FlinkRelMdCollation_calc_rdh
|
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Calc}'s collation.
*/
public static List<RelCollation> calc(RelMetadataQuery mq, RelNode input, RexProgram program) {
final List<RexNode> projects = program.getProjectList().stream().map(program::expandLocalRef).collect(Collectors.toList());
return project(mq, input, projects);
}
| 3.26 |
flink_CheckpointStorageWorkerView_toFileMergingStorage_rdh
|
/**
 * Returns {@link org.apache.flink.runtime.state.filesystem.FsMergingCheckpointStorageAccess} if
 * file merging is enabled; otherwise, returns itself. File merging is supported by subclasses of
 * {@link org.apache.flink.runtime.state.filesystem.AbstractFsCheckpointStorageAccess}.
*/
default CheckpointStorageWorkerView toFileMergingStorage(FileMergingSnapshotManager mergingSnapshotManager, Environment environment) throws IOException {
return this;
}
| 3.26 |
flink_RpcServiceUtils_createRandomName_rdh
|
/**
* Creates a random name of the form prefix_X, where X is an increasing number.
*
* @param prefix
* Prefix string to prepend to the monotonically increasing name offset number
* @return A random name of the form prefix_X where X is an increasing number
*/
public static String createRandomName(String prefix) {
Preconditions.checkNotNull(prefix, "Prefix must not be null.");
long nameOffset;
// obtain the next name offset by incrementing it atomically
do {
nameOffset = nextNameOffset.get();
        } while (!nextNameOffset.compareAndSet(nameOffset, nameOffset + 1L));
return (prefix + '_') + nameOffset;
}
| 3.26 |
flink_RpcServiceUtils_createWildcardName_rdh
|
/**
* Creates a wildcard name symmetric to {@link #createRandomName(String)}.
*
* @param prefix
* prefix of the wildcard name
* @return wildcard name starting with the prefix
*/
public static String createWildcardName(String prefix) {
return prefix + "_*";
}
| 3.26 |
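A brief, illustrative pairing of the two helpers above (the concrete suffix numbers depend on how many names were created before):

// Hedged sketch: consecutive names share the prefix, so the wildcard form addresses all of them
String first = RpcServiceUtils.createRandomName("taskmanager");    // e.g. "taskmanager_0"
String second = RpcServiceUtils.createRandomName("taskmanager");   // e.g. "taskmanager_1"
String all = RpcServiceUtils.createWildcardName("taskmanager");    // "taskmanager_*"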
flink_ModuleManager_useModules_rdh
|
/**
 * Enables modules for use, in the declared name order. Loaded modules that do not appear in the
 * given names become unused.
*
* @param names
* module names to be used
* @throws ValidationException
* when module names contain an unloaded name
*/
public void useModules(String... names) {
        checkNotNull(names, "names cannot be null");
        Set<String> deduplicateNames = new HashSet<>();
for (String name : names) {
if (!loadedModules.containsKey(name)) {
throw new ValidationException(String.format("No module with name '%s' exists", name));
}
if (!deduplicateNames.add(name)) {
throw new ValidationException(String.format("Module '%s' appears more than once", name));
}
}
usedModules.clear();
usedModules.addAll(Arrays.asList(names));
}
| 3.26 |
flink_ModuleManager_loadModule_rdh
|
/**
 * Loads a module under a unique name. Modules are kept in loaded order; a newly loaded module is
 * inserted before the unused modules and is enabled for use by default.
*
* @param name
* name of the module
* @param module
* the module instance
* @throws ValidationException
* when there already exists a module with the same name
*/
public void loadModule(String name, Module module) {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(name), "name cannot be null or empty string");
checkNotNull(module, "module cannot be null");
if (loadedModules.containsKey(name)) {
throw new ValidationException(String.format("A module with name '%s' already exists", name));
} else {
usedModules.add(name);
loadedModules.put(name, module);
LOG.info("Loaded module '{}' from class {}", name, module.getClass().getName());
}
}
| 3.26 |
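A hedged sketch of the load/use/unload life cycle described above; the no-argument ModuleManager constructor and the pre-loaded "core" module are assumptions, and hiveModule stands in for any hypothetical Module instance:

ModuleManager moduleManager = new ModuleManager();   // assumed to pre-load the "core" module
moduleManager.loadModule("hive", hiveModule);        // hiveModule: hypothetical Module instance
moduleManager.useModules("hive", "core");            // functions now resolve against "hive" first
moduleManager.unloadModule("hive");                  // removed from both loaded and used modules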
flink_ModuleManager_unloadModule_rdh
|
/**
* Unload a module with given name.
*
* @param name
* name of the module
* @throws ValidationException
* when there is no module with the given name
*/
public void unloadModule(String name) {
if (loadedModules.containsKey(name)) {
loadedModules.remove(name);
boolean used = usedModules.remove(name);
LOG.info("Unloaded an {} module '{}'", used ? "used" : "unused", name);
} else {
throw new ValidationException(String.format("No module with name '%s' exists", name));
}
}
| 3.26 |
flink_ModuleManager_getFunctionDefinition_rdh
|
/**
 * Gets an optional {@link FunctionDefinition} by a given name. The function is resolved against
 * the modules in the used order, and the first match is returned. If no module contains a match,
 * an empty optional is returned.
*
* <p>It includes hidden functions even though not listed in {@link #listFunctions()}.
*
* @param name
* name of the function
* @return an optional of {@link FunctionDefinition}
*/
public Optional<FunctionDefinition> getFunctionDefinition(String name) {
for (String moduleName : usedModules) {
if (loadedModules.get(moduleName).listFunctions(true).stream().anyMatch(name::equalsIgnoreCase)) {
LOG.debug("Got FunctionDefinition '{}' from '{}' module.", name, moduleName);
return loadedModules.get(moduleName).getFunctionDefinition(name);
}
        }
        LOG.debug("Cannot find FunctionDefinition '{}' from any loaded modules.", name);
return Optional.empty();
}
| 3.26 |
flink_ModuleManager_getFactory_rdh
|
/**
* Returns the first factory found in the loaded modules given a selector.
*
* <p>Modules are checked in the order in which they have been loaded. The first factory
* returned by a module will be used. If no loaded module provides a factory, {@link Optional#empty()} is returned.
*/
@SuppressWarnings("unchecked")
public <T extends Factory> Optional<T> getFactory(Function<Module, Optional<T>> selector) {
for (final String moduleName : usedModules) {
final Optional<T> factory = selector.apply(loadedModules.get(moduleName));
if (factory.isPresent()) {
return factory;
}
}
return Optional.empty();
}
| 3.26 |
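A hedged sketch of calling getFactory(...) with a method reference as the selector; Module#getTableSourceFactory is assumed to exist with this Optional-returning shape:

Optional<DynamicTableSourceFactory> sourceFactory =
        moduleManager.getFactory(Module::getTableSourceFactory);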
flink_ModuleManager_listFunctions_rdh
|
/**
* Get names of all functions from used modules. It excludes hidden functions.
*
* @return a set of function names of used modules
*/
public Set<String> listFunctions() {
return usedModules.stream().map(name -> loadedModules.get(name).listFunctions(false)).flatMap(Collection::stream).collect(Collectors.toSet());
}
| 3.26 |
flink_ModuleManager_listFullModules_rdh
|
/**
 * Gets all loaded modules together with their use status. Used modules are returned in
 * resolution order.
*
* @return a list of module entries with module name and use status
*/
public List<ModuleEntry> listFullModules() {
// keep the order for used modules
List<ModuleEntry> moduleEntries = usedModules.stream().map(name -> new ModuleEntry(name, true)).collect(Collectors.toList());
loadedModules.keySet().stream().filter(name -> !usedModules.contains(name)).forEach(name -> moduleEntries.add(new ModuleEntry(name, false)));
return moduleEntries;
}
| 3.26 |
flink_AbstractMetricGroup_counter_rdh
|
// -----------------------------------------------------------------------------------------------------------------
// Metrics
// -----------------------------------------------------------------------------------------------------------------
@Override
public Counter counter(String name) {
return counter(name, new SimpleCounter());
}
| 3.26 |
flink_AbstractMetricGroup_putVariables_rdh
|
/**
* Enters all variables specific to this {@link AbstractMetricGroup} and their associated values
* into the map.
*
* @param variables
* map to enter variables and their values into
*/
protected void putVariables(Map<String, String> variables) {
}
| 3.26 |
flink_AbstractMetricGroup_addMetric_rdh
|
/**
* Adds the given metric to the group and registers it at the registry, if the group is not yet
* closed, and if no metric with the same name has been registered before.
*
* @param name
* the name to register the metric under
* @param metric
* the metric to register
*/
protected void addMetric(String name, Metric metric) {
if (metric == null) {
LOG.warn("Ignoring attempted registration of a metric due to being null for name {}.", name);
return;
}
// add the metric only if the group is still open
synchronized(this) {
if (!closed) {
// immediately put without a 'contains' check to optimize the common case (no
// collision)
// collisions are resolved later
Metric prior = f0.put(name, metric);
// check for collisions with other metric names
if (prior == null) {
// no other metric with this name yet
if (groups.containsKey(name)) {
                    // we warn here, rather than failing, because metrics are tools that should
                    // not fail the program when used incorrectly
                    LOG.warn((("Name collision: Adding a metric with the same name as a metric subgroup: '" + name) + "'. Metric might not get properly reported. ") + Arrays.toString(scopeComponents));
                }
                registry.register(metric, name, this);
            } else {
                // we had a collision. put back the original value
                f0.put(name, prior);
                // we warn here, rather than failing, because metrics are tools that should not
                // fail the program when used incorrectly
                LOG.warn((("Name collision: Group already contains a Metric with the name '" + name) + "'. Metric will not be reported.") + Arrays.toString(scopeComponents));
            }
}
}
}
| 3.26 |
flink_AbstractMetricGroup_getMetricIdentifier_rdh
|
/**
* Returns the fully qualified metric name using the configured delimiter for the reporter with
* the given index, for example {@code "host-7.taskmanager-2.window_word_count.my-mapper.metricName"}.
*
* @param metricName
* metric name
* @param filter
* character filter which is applied to the scope components if not null.
* @param reporterIndex
* index of the reporter whose delimiter should be used
* @param delimiter
* delimiter to use
* @return fully qualified metric name
*/
public String getMetricIdentifier(String metricName, CharacterFilter filter, int reporterIndex, char delimiter) {
Preconditions.checkNotNull(filter);
metricName = filter.filterCharacters(metricName);
if ((scopeStrings.length == 0) || ((reporterIndex < 0) || (reporterIndex >= scopeStrings.length))) {
return (ScopeFormat.concat(filter, delimiter, scopeComponents) + delimiter) + metricName;
} else {
if (scopeStrings[reporterIndex] == null) {
scopeStrings[reporterIndex] = ScopeFormat.concat(filter, delimiter, scopeComponents);
}
return (scopeStrings[reporterIndex] + delimiter) + metricName;
        }
    }
| 3.26 |
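A small sketch of how an identifier might be assembled through this method; the metricGroup instance and the pass-through lambda filter are illustrative, and a negative reporterIndex falls into the unscoped branch above:

String id = metricGroup.getMetricIdentifier("numRecordsIn", s -> s, -1, '.');
// e.g. "host-7.taskmanager-2.window_word_count.my-mapper.numRecordsIn"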
flink_AbstractMetricGroup_addGroup_rdh
|
// ------------------------------------------------------------------------
// Groups
// ------------------------------------------------------------------------
@Override
public MetricGroup addGroup(String name) {
return addGroup(name, ChildType.GENERIC);
}
| 3.26 |
flink_AbstractMetricGroup_getLogicalScope_rdh
|
/**
* Returns the logical scope of this group, for example {@code "taskmanager.job.task"}.
*
* @param filter
* character filter which is applied to the scope components
* @return logical scope
 */
    public String getLogicalScope(CharacterFilter filter) {
return getLogicalScope(filter, registry.getDelimiter());
}
/**
* Returns the logical scope of this group, for example {@code "taskmanager.job.task"}
| 3.26 |
flink_AbstractMetricGroup_close_rdh
|
// ------------------------------------------------------------------------
// Closing
// ------------------------------------------------------------------------
public void close() {
synchronized(this) {
if (!closed) {
closed = true;
// close all subgroups
                for (AbstractMetricGroup group : groups.values()) {
group.close();
}
groups.clear();
// un-register all directly contained metrics
                for (Map.Entry<String, Metric> metric : f0.entrySet()) {
                    registry.unregister(metric.getValue(), metric.getKey(), this);
                }
f0.clear();
}
}
}
| 3.26 |
flink_AbstractMetricGroup_getQueryServiceMetricInfo_rdh
|
/**
* Returns the metric query service scope for this group.
*
* @param filter
* character filter
* @return query service scope
*/
public QueryScopeInfo getQueryServiceMetricInfo(CharacterFilter filter) {
        if (queryServiceScopeInfo == null) {
            queryServiceScopeInfo = createQueryServiceMetricInfo(filter);
}
return queryServiceScopeInfo;
}
| 3.26 |
flink_AbstractMetricGroup_m0_rdh
|
/**
* Returns the logical scope of this group, for example {@code "taskmanager.job.task"}.
*
* @param filter
* character filter which is applied to the scope components
* @param delimiter
* delimiter to use for concatenating scope components
* @param reporterIndex
* index of the reporter
* @return logical scope
*/
String m0(CharacterFilter filter, char delimiter, int reporterIndex) {
if ((logicalScopeStrings.length == 0) || ((reporterIndex < 0) || (reporterIndex >= logicalScopeStrings.length))) {
return createLogicalScope(filter, delimiter);
} else {
if (logicalScopeStrings[reporterIndex] == null) {
logicalScopeStrings[reporterIndex] = createLogicalScope(filter, delimiter);
}
return logicalScopeStrings[reporterIndex];
}
}
| 3.26 |
flink_ProtoUtils_createUserDefinedDataStreamFunctionProto_rdh
|
// ------------------------------------------------------------------------
// DataStream API related utilities
// ------------------------------------------------------------------------
    public static UserDefinedDataStreamFunction createUserDefinedDataStreamFunctionProto(
            DataStreamPythonFunctionInfo dataStreamPythonFunctionInfo,
            RuntimeContext runtimeContext,
            Map<String, String> internalParameters,
            boolean inBatchExecutionMode,
            boolean isMetricEnabled,
            boolean isProfileEnabled,
            boolean hasSideOutput,
            int stateCacheSize,
            int mapStateReadCacheSize,
            int mapStateWriteCacheSize) {
FlinkFnApi.UserDefinedDataStreamFunction.Builder builder = FlinkFnApi.UserDefinedDataStreamFunction.newBuilder();
builder.setFunctionType(UserDefinedDataStreamFunction.FunctionType.forNumber(dataStreamPythonFunctionInfo.getFunctionType()));
        builder.setRuntimeContext(
                UserDefinedDataStreamFunction.RuntimeContext.newBuilder()
                        .setTaskName(runtimeContext.getTaskName())
                        .setTaskNameWithSubtasks(runtimeContext.getTaskNameWithSubtasks())
                        .setNumberOfParallelSubtasks(runtimeContext.getNumberOfParallelSubtasks())
                        .setMaxNumberOfParallelSubtasks(runtimeContext.getMaxNumberOfParallelSubtasks())
                        .setIndexOfThisSubtask(runtimeContext.getIndexOfThisSubtask())
                        .setAttemptNumber(runtimeContext.getAttemptNumber())
                        .addAllJobParameters(
                                runtimeContext.getExecutionConfig().getGlobalJobParameters().toMap().entrySet().stream()
                                        .map(entry -> FlinkFnApi.JobParameter.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build())
                                        .collect(Collectors.toList()))
                        .addAllJobParameters(
                                internalParameters.entrySet().stream()
                                        .map(entry -> FlinkFnApi.JobParameter.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build())
                                        .collect(Collectors.toList()))
                        .setInBatchExecutionMode(inBatchExecutionMode)
                        .build());
builder.setPayload(ByteString.copyFrom(dataStreamPythonFunctionInfo.getPythonFunction().getSerializedPythonFunction()));
builder.setMetricEnabled(isMetricEnabled);
        builder.setProfileEnabled(isProfileEnabled);
        builder.setHasSideOutput(hasSideOutput);
        builder.setStateCacheSize(stateCacheSize);
builder.setMapStateReadCacheSize(mapStateReadCacheSize);
        builder.setMapStateWriteCacheSize(mapStateWriteCacheSize);
        return builder.build();
}
| 3.26 |
flink_ProtoUtils_parseStateTtlConfigFromProto_rdh
|
// ------------------------------------------------------------------------
// State related utilities
// ------------------------------------------------------------------------
public static StateTtlConfig parseStateTtlConfigFromProto(FlinkFnApi.StateDescriptor.StateTTLConfig stateTTLConfigProto) {
StateTtlConfig.Builder builder = StateTtlConfig.newBuilder(Time.milliseconds(stateTTLConfigProto.getTtl())).setUpdateType(parseUpdateTypeFromProto(stateTTLConfigProto.getUpdateType())).setStateVisibility(parseStateVisibilityFromProto(stateTTLConfigProto.getStateVisibility())).setTtlTimeCharacteristic(parseTtlTimeCharacteristicFromProto(stateTTLConfigProto.getTtlTimeCharacteristic()));
        FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies cleanupStrategiesProto = stateTTLConfigProto.getCleanupStrategies();
if (!cleanupStrategiesProto.getIsCleanupInBackground()) {
builder.disableCleanupInBackground();
}
        for (FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.MapStrategiesEntry mapStrategiesEntry : cleanupStrategiesProto.getStrategiesList()) {
FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.Strategies strategyProto = mapStrategiesEntry.getStrategy();
if (strategyProto == Strategies.FULL_STATE_SCAN_SNAPSHOT) {
builder.cleanupFullSnapshot();
} else if (strategyProto == Strategies.INCREMENTAL_CLEANUP) {
FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.IncrementalCleanupStrategy incrementalCleanupStrategyProto = mapStrategiesEntry.getIncrementalCleanupStrategy();
builder.cleanupIncrementally(incrementalCleanupStrategyProto.getCleanupSize(), incrementalCleanupStrategyProto.getRunCleanupForEveryRecord());
}
else if (strategyProto == Strategies.ROCKSDB_COMPACTION_FILTER) {
FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategyProto = mapStrategiesEntry.getRocksdbCompactFilterCleanupStrategy();
builder.cleanupInRocksdbCompactFilter(rocksdbCompactFilterCleanupStrategyProto.getQueryTimeAfterNumEntries());
}
}
return builder.build();
}
| 3.26 |
flink_ProtoUtils_createUserDefinedFunctionsProto_rdh
|
// function utilities
public static UserDefinedFunctions createUserDefinedFunctionsProto(RuntimeContext runtimeContext, PythonFunctionInfo[] userDefinedFunctions, boolean isMetricEnabled, boolean isProfileEnabled) {
FlinkFnApi.UserDefinedFunctions.Builder builder = FlinkFnApi.UserDefinedFunctions.newBuilder();
for (PythonFunctionInfo userDefinedFunction : userDefinedFunctions) {
builder.addUdfs(createUserDefinedFunctionProto(userDefinedFunction));
}
builder.setMetricEnabled(isMetricEnabled);
builder.setProfileEnabled(isProfileEnabled);
builder.addAllJobParameters(runtimeContext.getExecutionConfig().getGlobalJobParameters().toMap().entrySet().stream().map(entry -> FlinkFnApi.JobParameter.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build()).collect(Collectors.toList()));
return builder.build();
}
| 3.26 |
flink_AbstractCatalogStore_close_rdh
|
/**
* Closes the catalog store.
*/
@Override
public void close() {
        isOpen = false;
}
| 3.26 |
flink_AbstractCatalogStore_checkOpenState_rdh
|
/**
* Checks whether the catalog store is currently open.
*
* @throws IllegalStateException
* if the store is closed
*/
protected void checkOpenState() {
        Preconditions.checkState(isOpen, "CatalogStore is not opened yet.");
    }
| 3.26 |
flink_GenericWriteAheadSink_saveHandleInState_rdh
|
/**
* Called when a checkpoint barrier arrives. It closes any open streams to the backend and marks
* them as pending for committing to the external, third-party storage system.
*
* @param checkpointId
* the id of the latest received checkpoint.
* @throws IOException
* in case something went wrong when handling the stream to the backend.
*/
    private void saveHandleInState(final long checkpointId, final long timestamp) throws Exception {
        // only add handle if a new OperatorState was created since the last snapshot
if (out != null) {
int subtaskIdx = getRuntimeContext().getIndexOfThisSubtask();
StreamStateHandle handle = out.closeAndGetHandle();
PendingCheckpoint pendingCheckpoint = new PendingCheckpoint(checkpointId, subtaskIdx, timestamp, handle);
if (pendingCheckpoints.contains(pendingCheckpoint)) {
// we already have a checkpoint stored for that ID that may have been partially
// written,
// so we discard this "alternate version" and use the stored checkpoint
handle.discardState();
            } else {
                pendingCheckpoints.add(pendingCheckpoint);
}
out = null;
}
}
| 3.26 |
flink_GenericWriteAheadSink_cleanRestoredHandles_rdh
|
/**
* Called at {@link #open()} to clean-up the pending handle list. It iterates over all restored
* pending handles, checks which ones are already committed to the outside storage system and
* removes them from the list.
*/
    private void cleanRestoredHandles() throws Exception {
        synchronized (pendingCheckpoints) {
Iterator<PendingCheckpoint> pendingCheckpointIt = pendingCheckpoints.iterator();
while (pendingCheckpointIt.hasNext()) {
PendingCheckpoint pendingCheckpoint = pendingCheckpointIt.next();
if (committer.isCheckpointCommitted(pendingCheckpoint.subtaskId, pendingCheckpoint.checkpointId)) {
pendingCheckpoint.stateHandle.discardState();
pendingCheckpointIt.remove();
                }
            }
}
}
| 3.26 |
flink_Tuple5_setFields_rdh
|
/**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4) {
        this.f0 = f0;
        this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
}
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4), where the
* individual fields are the value returned by calling {@link Object#toString}
| 3.26 |
flink_Tuple5_of_rdh
|
/**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4> Tuple5<T0, T1, T2, T3, T4> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4) {
return new Tuple5<>(f0, f1, f2, f3, f4);
}
| 3.26 |
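A short usage example of the factory method shown above, combined with setFields(...) from the earlier snippet:

Tuple5<Integer, String, Double, Long, Boolean> t = Tuple5.of(1, "a", 2.0, 3L, true);
t.setFields(2, "b", 4.0, 6L, false); // replaces all five fields in place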
flink_Tuple5_equals_rdh
|
/**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple5)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple5 tuple = ((Tuple5) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
            return false;
}
        if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
            return false;
        }
return true;
}
| 3.26 |
flink_AbstractPartialSolutionNode_copyEstimates_rdh
|
// --------------------------------------------------------------------------------------------
protected void copyEstimates(OptimizerNode node) {
this.estimatedNumRecords = node.estimatedNumRecords;
this.estimatedOutputSize = node.estimatedOutputSize;
}
| 3.26 |
flink_AbstractPartialSolutionNode_isOnDynamicPath_rdh
|
// --------------------------------------------------------------------------------------------
public boolean isOnDynamicPath() {
        return true;
}
| 3.26 |
flink_ReOpenableHashPartition_restorePartitionBuffers_rdh
|
/**
* This method is called every time a multi-match hash map is opened again for a new probe
* input.
*
* @param ioManager
* @param availableMemory
* @throws IOException
*/
void restorePartitionBuffers(IOManager ioManager, List<MemorySegment> availableMemory)
throws IOException {
final BulkBlockChannelReader reader = ioManager.createBulkBlockChannelReader(this.initialBuildSideChannel, availableMemory, this.initialPartitionBuffersCount);
reader.close();
final List<MemorySegment> partitionBuffersFromDisk = reader.getFullSegments();
        this.partitionBuffers = ((MemorySegment[]) (partitionBuffersFromDisk.toArray(new MemorySegment[partitionBuffersFromDisk.size()])));
        this.overflowSegments = new MemorySegment[2];
this.numOverflowSegments = 0;
this.nextOverflowBucket = 0;
this.isRestored = true;
}
| 3.26 |
flink_ReOpenableHashPartition_spillInMemoryPartition_rdh
|
/**
* Spills this partition to disk. This method is invoked once after the initial open() method
*
* @return Number of memorySegments in the writeBehindBuffers!
*/
int spillInMemoryPartition(FileIOChannel.ID targetChannel, IOManager ioManager, LinkedBlockingQueue<MemorySegment> writeBehindBuffers) throws IOException {
this.initialPartitionBuffersCount = partitionBuffers.length;// for ReOpenableHashMap
this.initialBuildSideChannel = targetChannel;
initialBuildSideWriter = ioManager.createBlockChannelWriter(targetChannel, writeBehindBuffers);
final int numSegments = this.partitionBuffers.length;
for (int i = 0; i < numSegments; i++) {
initialBuildSideWriter.writeBlock(partitionBuffers[i]);
}
this.partitionBuffers = null;
initialBuildSideWriter.close();
// num partitions are now in the writeBehindBuffers. We propagate this information back
        return numSegments;
    }
| 3.26 |
flink_SortMergeSubpartitionReader_unsynchronizedGetNumberOfQueuedBuffers_rdh
|
// suppress warning as this method is only for unsafe purpose.
@SuppressWarnings("FieldAccessNotGuarded")
@Override
public int unsynchronizedGetNumberOfQueuedBuffers() {
return buffersRead.size();
}
| 3.26 |
flink_HiveParserProjectWindowTrimmer_trimProjectWindow_rdh
|
/**
* Remove the redundant nodes from the project node which contains over window node.
*
* @param selectProject
* the project node contains selected fields in top of the project node
* with window
* @param projectWithWindow
* the project node which contains windows in the end of project
* expressions.
* @return the new project node after trimming
*/
public static RelNode trimProjectWindow(Project selectProject, Project projectWithWindow, Map<RelNode, HiveParserRowResolver> relToRowResolver, Map<RelNode, Map<String, Integer>> relToHiveColNameCalcitePosMap) {
// get the over window nodes
List<RexOver> rexOverList = projectWithWindow.getProjects().stream().filter(node -> node instanceof RexOver).map(node -> ((RexOver) (node))).collect(Collectors.toList());
// the fields size excluding the over window field in the project node with window
int windowInputColumn = projectWithWindow.getProjects().size() - rexOverList.size();
// find all field referred by over window and select project node
final ImmutableBitSet beReferred = findReference(selectProject, rexOverList, windowInputColumn);
// If all the input columns are referred,
// it is impossible to trim anyone of them out
if (beReferred.cardinality() == windowInputColumn) {
return selectProject;
}
// Keep only the fields which are referred and the over window field
final List<RexNode> exps = new ArrayList<>();
final RelDataTypeFactory.Builder builder = projectWithWindow.getCluster().getTypeFactory().builder();
final List<RelDataTypeField> rowTypeWindowInput = projectWithWindow.getRowType().getFieldList();
// add index for referred field
List<Integer> remainIndexInProjectWindow = new ArrayList<>(beReferred.asList());
// add index for the over window field
remainIndexInProjectWindow.addAll(IntStream.range(windowInputColumn, projectWithWindow.getProjects().size()).boxed().collect(Collectors.toList()));
        for (int v7 : remainIndexInProjectWindow) {
exps.add(projectWithWindow.getProjects().get(v7));
builder.add(rowTypeWindowInput.get(v7));
}
// As the un-referred columns are trimmed,
// the indices specified in select project would need to be adjusted
final RexShuttle indexAdjustment = new RexShuttle() {
@Override
public RexNode visitInputRef(RexInputRef inputRef) {
final int newIndex = getAdjustedIndex(inputRef.getIndex(), beReferred, windowInputColumn);
return new RexInputRef(newIndex, inputRef.getType());
}
};
// adjust the top select project node
final List<RexNode> topProjExps = indexAdjustment.visitList(selectProject.getProjects());
// create a project with the project trimmed
LogicalProject trimmedProject = LogicalProject.create(projectWithWindow.getInput(), Collections.emptyList(), exps, builder.build());
// put row resolver for newly trimmed project node
HiveParserRowResolver v12 = relToRowResolver.remove(projectWithWindow);
if (v12 != null) {
HiveParserRowResolver newProjectRR = new HiveParserRowResolver();
List<ColumnInfo> v14 = v12.getColumnInfos();
for (int index : remainIndexInProjectWindow) {
newProjectRR.put(v14.get(index).getTabAlias(), v14.get(index).getAlias(), v14.get(index));
}
relToRowResolver.put(trimmedProject, newProjectRR);
relToHiveColNameCalcitePosMap.remove(projectWithWindow);
relToHiveColNameCalcitePosMap.put(trimmedProject, buildHiveToCalciteColumnMap(newProjectRR));
}
// create new project with adjusted field ref
RelNode newProject = LogicalProject.create(trimmedProject, Collections.emptyList(), topProjExps, selectProject.getRowType());
// put row resolver for newly project node
relToRowResolver.put(newProject, relToRowResolver.remove(selectProject));
relToHiveColNameCalcitePosMap.put(newProject, relToHiveColNameCalcitePosMap.remove(selectProject));
return newProject;
}
| 3.26 |
flink_ShortSummaryAggregator_max_rdh
|
/**
* Like Math.max() except for shorts.
*/
public static Short max(Short a, Short b) {
return a >= b ? a : b;
}
| 3.26 |
flink_ShortSummaryAggregator_min_rdh
|
/**
* Like Math.min() except for shorts.
*/
    public static Short min(Short a, Short b) {
return a <= b ? a : b;
}
| 3.26 |
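A tiny usage example of the two helpers above; note that the arguments are boxed Short values and must be non-null:

Short smaller = ShortSummaryAggregator.min((short) 3, (short) 7); // 3
Short larger = ShortSummaryAggregator.max((short) 3, (short) 7);  // 7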
flink_RocksDBHeapTimersFullRestoreOperation_restore_rdh
|
/**
* Restores all key-groups data that is referenced by the passed state handles.
*/
@Override
public RocksDBRestoreResult restore() throws IOException, StateMigrationException, RocksDBException {
rocksHandle.openDB();
        try (ThrowingIterator<SavepointRestoreResult> restore = savepointRestoreOperation.restore()) {
while (restore.hasNext()) {
applyRestoreResult(restore.next());
}
}
        return new RocksDBRestoreResult(this.rocksHandle.getDb(), this.rocksHandle.getDefaultColumnFamilyHandle(), this.rocksHandle.getNativeMetricMonitor(), -1, null, null);
    }
| 3.26 |
flink_RocksDBHeapTimersFullRestoreOperation_restoreKVStateData_rdh
|
/**
* Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state
* handle.
*/
private void restoreKVStateData(ThrowingIterator<KeyGroup> keyGroups, Map<Integer, ColumnFamilyHandle> columnFamilies, Map<Integer, HeapPriorityQueueSnapshotRestoreWrapper<?>> restoredPQStates) throws IOException, RocksDBException, StateMigrationException {
// for all key-groups in the current state handle...
        try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize)) {
            HeapPriorityQueueSnapshotRestoreWrapper<HeapPriorityQueueElement> restoredPQ = null;
ColumnFamilyHandle handle = null;
while (keyGroups.hasNext()) {
KeyGroup keyGroup = keyGroups.next();
try (ThrowingIterator<KeyGroupEntry> groupEntries = keyGroup.getKeyGroupEntries()) {
                    int oldKvStateId = -1;
                    while (groupEntries.hasNext()) {
KeyGroupEntry groupEntry = groupEntries.next();
int kvStateId = groupEntry.getKvStateId();
if (kvStateId != oldKvStateId) {
oldKvStateId = kvStateId;
handle = columnFamilies.get(kvStateId);
restoredPQ = getRestoredPQ(restoredPQStates, kvStateId);
}
if (restoredPQ != null) {
restoreQueueElement(restoredPQ, groupEntry);
} else if (handle != null) {
writeBatchWrapper.put(handle, groupEntry.getKey(), groupEntry.getValue());
} else {
throw new IllegalStateException("Unknown state id: " + kvStateId);
}
}
}
}
}
}
| 3.26 |
flink_OperatorInfo_getIds_rdh
|
// ------------------------------------------------------------------------
// utils
// ------------------------------------------------------------------------
static Collection<OperatorID> getIds(Collection<? extends OperatorInfo> infos) {
return infos.stream().map(OperatorInfo::operatorId).collect(Collectors.toList());
}
| 3.26 |
flink_Plugin_configure_rdh
|
/**
* Optional method for plugins to pick up settings from the configuration.
*
* @param config
* The configuration to apply to the plugin.
*/
default void configure(Configuration config) {
}
| 3.26 |
flink_KvStateService_start_rdh
|
// --------------------------------------------------------------------------------------------
// Start and shut down methods
// --------------------------------------------------------------------------------------------
public void start() {
synchronized(lock) {
Preconditions.checkState(!isShutdown, "The KvStateService has already been shut down.");
LOG.info("Starting the kvState service and its components.");
if (kvStateServer != null) {
try {
kvStateServer.start();
} catch (Throwable ie) {
kvStateServer.shutdown();
kvStateServer = null;
LOG.error("Failed to start the Queryable State Data Server.", ie);
}
}
if (kvStateClientProxy != null) {
try {
kvStateClientProxy.start();
} catch (Throwable ie) {
kvStateClientProxy.shutdown();
kvStateClientProxy = null;
LOG.error("Failed to start the Queryable State Client Proxy.", ie);
}
}
}
}
| 3.26 |
flink_KvStateService_getKvStateRegistry_rdh
|
// --------------------------------------------------------------------------------------------
// Getter/Setter
// --------------------------------------------------------------------------------------------
public KvStateRegistry getKvStateRegistry() {
return kvStateRegistry;
}
| 3.26 |
flink_KvStateService_m0_rdh
|
// --------------------------------------------------------------------------------------------
// Static factory methods for kvState service
// --------------------------------------------------------------------------------------------
/**
* Creates and returns the KvState service.
*
* @param taskManagerServicesConfiguration
* task manager configuration
* @return service for kvState related components
*/
public static KvStateService m0(TaskManagerServicesConfiguration taskManagerServicesConfiguration) {
KvStateRegistry kvStateRegistry = new KvStateRegistry();
QueryableStateConfiguration qsConfig = taskManagerServicesConfiguration.getQueryableStateConfig();
KvStateClientProxy v2 = null;
KvStateServer kvStateServer = null;
if (qsConfig != null) {
            int numProxyServerNetworkThreads = (qsConfig.numProxyServerThreads() == 0) ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numProxyServerThreads();
int numProxyServerQueryThreads = (qsConfig.numProxyQueryThreads() == 0) ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numProxyQueryThreads();
v2 = QueryableStateUtils.createKvStateClientProxy(taskManagerServicesConfiguration.getExternalAddress(), qsConfig.getProxyPortRange(), numProxyServerNetworkThreads, numProxyServerQueryThreads, new DisabledKvStateRequestStats());
int numStateServerNetworkThreads = (qsConfig.numStateServerThreads() == 0) ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numStateServerThreads();
int numStateServerQueryThreads = (qsConfig.numStateQueryThreads() == 0) ? taskManagerServicesConfiguration.getNumberOfSlots() : qsConfig.numStateQueryThreads();
kvStateServer = QueryableStateUtils.createKvStateServer(taskManagerServicesConfiguration.getExternalAddress(), qsConfig.getStateServerPortRange(), numStateServerNetworkThreads, numStateServerQueryThreads, kvStateRegistry, new DisabledKvStateRequestStats());
}
        return new KvStateService(kvStateRegistry, kvStateServer, v2);
}
| 3.26 |
flink_CoGroupNode_getOperator_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the operator for this CoGroup node.
*
* @return The CoGroup operator.
*/
@Override
public CoGroupOperatorBase<?, ?, ?, ?> getOperator() {
return ((CoGroupOperatorBase<?, ?, ?, ?>) (super.getOperator()));
}
| 3.26 |
flink_FunctionCatalogOperatorTable_verifyFunctionKind_rdh
|
/**
* Verifies which kinds of functions are allowed to be returned from the catalog given the
* context information.
*/
    private boolean verifyFunctionKind(
            @Nullable SqlFunctionCategory category, ContextResolvedFunction resolvedFunction) {
final FunctionDefinition definition = resolvedFunction.getDefinition();
// built-in functions without implementation are handled separately
if (definition instanceof BuiltInFunctionDefinition) {
final BuiltInFunctionDefinition builtInFunction = ((BuiltInFunctionDefinition) (definition));
            if (!builtInFunction.hasRuntimeImplementation()) {
                return false;
}
}
final FunctionKind kind = definition.getKind();
if (kind == FunctionKind.TABLE) {
return true;
        } else if (((kind == FunctionKind.SCALAR) || (kind == FunctionKind.AGGREGATE)) || (kind == FunctionKind.TABLE_AGGREGATE)) {
if ((category != null) && category.isTableFunction()) {
throw new ValidationException(String.format("Function '%s' cannot be used as a table function.", resolvedFunction));
}
return true;
}
return false;
}
| 3.26 |
flink_HadoopInputSplit_m0_rdh
|
// ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public InputSplit m0() {
return mapreduceInputSplit;
}
| 3.26 |
flink_HadoopInputSplit_writeObject_rdh
|
// ------------------------------------------------------------------------
// Serialization
// ------------------------------------------------------------------------
    private void writeObject(ObjectOutputStream out) throws IOException {
// serialize the parent fields and the final fields
out.defaultWriteObject();
// write the input split
((Writable) (mapreduceInputSplit)).write(out);
}
| 3.26 |
flink_TaskKvStateRegistry_unregisterAll_rdh
|
/**
* Unregisters all registered KvState instances from the KvStateRegistry.
*/
public void unregisterAll() {
for (KvStateInfo kvState : registeredKvStates) {
registry.unregisterKvState(jobId, jobVertexId, kvState.keyGroupRange, kvState.registrationName, kvState.f0);
}
}
| 3.26 |
flink_TaskKvStateRegistry_registerKvState_rdh
|
/**
* Registers the KvState instance at the KvStateRegistry.
*
* @param keyGroupRange
* Key group range the KvState instance belongs to
* @param registrationName
* The registration name (not necessarily the same as the KvState name
* defined in the state descriptor used to create the KvState instance)
* @param kvState
 * The KvState instance to register
*/
public void registerKvState(KeyGroupRange keyGroupRange, String registrationName, InternalKvState<?, ?, ?> kvState, ClassLoader userClassLoader) {
KvStateID kvStateId = registry.registerKvState(jobId, jobVertexId, keyGroupRange, registrationName, kvState, userClassLoader);
registeredKvStates.add(new KvStateInfo(keyGroupRange, registrationName, kvStateId));
}
| 3.26 |
flink_DCounter_getMetricValue_rdh
|
/**
* Returns the count of events since the last report.
*
* @return the number of events since the last retrieval
*/
@Override
    public Number getMetricValue() {
long currentCount = counter.getCount();
long difference = currentCount - f0;
currentReportCount = currentCount;
return difference;
}
| 3.26 |
flink_JavaFieldPredicates_ofType_rdh
|
/**
* Match the {@link Class} of the {@link JavaField}.
*
 * @return A {@link DescribedPredicate} returning true, if and only if the tested {@link JavaField} has the type identified by the given {@code fqClassName}.
*/
    public static DescribedPredicate<JavaField> ofType(String fqClassName) {
String className = getClassSimpleNameFromFqName(fqClassName);
return DescribedPredicate.describe("of type " + className, field -> field.getType().getName().equals(fqClassName));
}
| 3.26 |
flink_JavaFieldPredicates_isPublic_rdh
|
/**
* Fine-grained predicates focus on the JavaField.
*
* <p>NOTE: it is recommended to use methods that accept fully qualified class names instead of
* {@code Class} objects to reduce the risks of introducing circular dependencies between the
* project submodules.
 */
public class JavaFieldPredicates {
/**
* Match the public modifier of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if and only if the tested {@link JavaField} has the public modifier.
*/
public static DescribedPredicate<JavaField> isPublic() {
return DescribedPredicate.describe("public", field -> field.getModifiers().contains(JavaModifier.PUBLIC));
}
| 3.26 |
flink_JavaFieldPredicates_isStatic_rdh
|
/**
* Match the static modifier of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if and only if the tested {@link JavaField} has the static modifier.
*/
public static DescribedPredicate<JavaField> isStatic() {
return DescribedPredicate.describe("static", field -> field.getModifiers().contains(JavaModifier.STATIC));
}
| 3.26 |
flink_JavaFieldPredicates_annotatedWith_rdh
|
/**
* Match the single Annotation of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if and only if the tested {@link JavaField} has exactly the given Annotation {@code annotationType}.
*/
public static DescribedPredicate<JavaField> annotatedWith(Class<? extends Annotation> annotationType) {
return matchAnnotationType(annotationType.getSimpleName(), annotation -> annotation.getRawType().isEquivalentTo(annotationType));
}
/**
* Match the single Annotation of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if the tested {@link JavaField} is
annotated with the annotation identified by the fully qualified name {@code fqAnnotationTypeName}
| 3.26 |
flink_JavaFieldPredicates_isNotStatic_rdh
|
/**
* Match none static modifier of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if and only if the tested {@link JavaField} has no static modifier.
*/
public static DescribedPredicate<JavaField> isNotStatic() {
return DescribedPredicate.describe("not static", field -> !field.getModifiers().contains(JavaModifier.STATIC));
}
/**
* Match the final modifier of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if and only if the tested {@link JavaField}
| 3.26 |
flink_JavaFieldPredicates_isAssignableTo_rdh
|
/**
* Match the {@link Class} of the {@link JavaField}'s assignability.
*
* @param clazz
* the Class type to check for assignability
* @return a {@link DescribedPredicate} that returns {@code true}, if the respective {@link JavaField} is assignable to the supplied {@code clazz}.
*/
public static DescribedPredicate<JavaField> isAssignableTo(Class<?> clazz) {
return DescribedPredicate.describe("is assignable to " +
clazz.getSimpleName(), field -> field.getRawType().isAssignableTo(clazz));
}
| 3.26 |
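A hedged sketch of how these field predicates can be combined into an ArchUnit rule. It assumes the JavaFieldPredicates and Conditions classes shown in this list are on the classpath (their imports are omitted because their packages are not shown here), and it relies on ArchUnit's DescribedPredicate.and(...) and ArchRuleDefinition.fields() syntax; the ConfigOption rule itself is only an illustration, not a rule taken from Flink.

import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.fields;

import com.tngtech.archunit.base.DescribedPredicate;
import com.tngtech.archunit.core.domain.JavaField;
import com.tngtech.archunit.lang.ArchRule;

public class ConfigOptionFieldRules {
    // fields must be both public and static to pass the condition
    private static final DescribedPredicate<JavaField> IS_PUBLIC_STATIC =
            JavaFieldPredicates.isPublic().and(JavaFieldPredicates.isStatic());

    // every field typed as ConfigOption has to be declared public static
    public static final ArchRule CONFIG_OPTIONS_ARE_PUBLIC_STATIC =
            fields().that(JavaFieldPredicates.ofType("org.apache.flink.configuration.ConfigOption"))
                    .should(Conditions.fulfill(IS_PUBLIC_STATIC));
}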
flink_CatalogCalciteSchema_getSubSchema_rdh
|
/**
* Look up a sub-schema (database) by the given sub-schema name.
*
* @param schemaName
* name of sub-schema to look up
* @return the sub-schema with a given database name, or null
*/
@Override
public Schema getSubSchema(String schemaName) {
if (catalogManager.schemaExists(catalogName, schemaName)) {
if (getSchemaVersion().isPresent()) {
return new DatabaseCalciteSchema(catalogName, schemaName, catalogManager, isStreamingMode)
        .snapshot(getSchemaVersion().get());
} else {
return new DatabaseCalciteSchema(catalogName, schemaName, catalogManager, isStreamingMode);
}
} else {
return null;
}
}
| 3.26 |
flink_ProducerMergedPartitionFileReader_lazyInitializeFileChannel_rdh
|
/**
* Initialize the file channel in a lazy manner, which can reduce usage of the file descriptor
* resource.
*/
private void lazyInitializeFileChannel() {
if (fileChannel == null) {
try {
fileChannel = FileChannel.open(dataFilePath, StandardOpenOption.READ);
} catch (IOException e) {
ExceptionUtils.rethrow(e, "Failed to open file channel.");
}
}
}
| 3.26 |
flink_LastDatedValueFunction_accumulate_rdh
|
/**
* Generic runtime method that is called with different kinds of instances for {@code input}, depending on the actual call in the query.
*/
public void accumulate(Accumulator<T> acc, T input, LocalDate date) {
if ((input != null) && ((acc.date == null) || date.isAfter(acc.date))) {
acc.value = input;
acc.date = date;
}
}
| 3.26 |
flink_LastDatedValueFunction_getTypeInference_rdh
|
// Planning
// --------------------------------------------------------------------------------------------
/**
* Declares the {@link TypeInference} of this function. It specifies:
*
* <ul>
* <li>which argument types are supported when calling this function,
* <li>which {@link DataType#getConversionClass()} should be used when calling the JVM method
* {@link #accumulate(Accumulator, Object, LocalDate)} during runtime,
* <li>a similar strategy how to derive an accumulator type,
* <li>and a similar strategy how to derive the output type.
* </ul>
*/
@Override
public TypeInference getTypeInference(DataTypeFactory typeFactory) {
return TypeInference.newBuilder()
        // accept a signature (ANY, DATE), both with default conversion classes;
        // the input type strategy is mostly used to produce nicer validation exceptions
        // during planning, implementers can decide to skip it if they are fine with failing
        // at a later stage during code generation when the runtime method is checked
        .inputTypeStrategy(InputTypeStrategies.sequence(InputTypeStrategies.ANY, InputTypeStrategies.explicit(DataTypes.DATE())))
        // let the accumulator data type depend on the first input argument
        .accumulatorTypeStrategy(callContext -> {
            final DataType argDataType = callContext.getArgumentDataTypes().get(0);
            final DataType accDataType = DataTypes.STRUCTURED(Accumulator.class, DataTypes.FIELD("value", argDataType), DataTypes.FIELD("date", DataTypes.DATE()));
            return Optional.of(accDataType);
        })
        // let the output data type depend on the first input argument
        .outputTypeStrategy(callContext -> {
            final DataType argDataType = callContext.getArgumentDataTypes().get(0);
            final DataType outputDataType = DataTypes.ROW(DataTypes.FIELD("value", argDataType), DataTypes.FIELD("date", DataTypes.DATE()));
            return Optional.of(outputDataType);
        })
        .build();
}
| 3.26 |
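A hedged end-to-end sketch of using the aggregate function from SQL. Only the registration of LastDatedValueFunction itself follows the snippets above; the function name, the inline VALUES table, and the column names are made up for the example.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class LastDatedValueExample {
    public static void main(String[] args) {
        TableEnvironment env = TableEnvironment.create(EnvironmentSettings.inBatchMode());

        // register the aggregate function under a name of our choosing
        env.createTemporarySystemFunction("LastDatedValue", LastDatedValueFunction.class);

        // per customer, return the most recently ordered product together with its order date
        env.executeSql(
                        "SELECT customer_id, LastDatedValue(product, order_date) AS last_order "
                                + "FROM (VALUES (1, 'apples', DATE '2024-03-01'), "
                                + "             (1, 'pears', DATE '2024-05-01'), "
                                + "             (2, 'cherries', DATE '2024-04-01')) "
                                + "AS Orders(customer_id, product, order_date) "
                                + "GROUP BY customer_id")
                .print();
    }
}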
flink_DynamicEventTimeSessionWindows_mergeWindows_rdh
|
/**
* Merge overlapping {@link TimeWindow}s.
*/
@Override
public void mergeWindows(Collection<TimeWindow> windows, MergeCallback<TimeWindow> c) {
TimeWindow.mergeWindows(windows, c);
}
| 3.26 |
flink_DynamicEventTimeSessionWindows_withDynamicGap_rdh
|
/**
* Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
* based on the element timestamp.
*
* @param sessionWindowTimeGapExtractor
* The extractor to use to extract the time gap from the
* input elements
* @return The policy.
*/
public static <T> DynamicEventTimeSessionWindows<T> withDynamicGap(SessionWindowTimeGapExtractor<T> sessionWindowTimeGapExtractor) {
return new DynamicEventTimeSessionWindows<>(sessionWindowTimeGapExtractor);
}
| 3.26 |
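A hedged DataStream usage sketch for the dynamic-gap assigner. The element type Tuple3 of (userId, value, gapMillis) and the aggregation are made up; the assigner and extractor calls follow the factory method above. In a real job the input would additionally need timestamps and watermarks assigned, since this is an event-time assigner.

import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.DynamicEventTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.assigners.SessionWindowTimeGapExtractor;

public class DynamicSessionExample {
    // elements are (userId, value, preferredGapMillis)
    public static DataStream<Tuple3<String, Long, Long>> sessionize(
            DataStream<Tuple3<String, Long, Long>> events) {
        return events
                .keyBy(e -> e.f0)
                // each element decides how long its session may stay idle before it closes
                .window(DynamicEventTimeSessionWindows.withDynamicGap(
                        (SessionWindowTimeGapExtractor<Tuple3<String, Long, Long>>) element -> element.f2))
                .sum(1);
    }
}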
flink_TaskManagerOptions_loadFromConfiguration_rdh
|
/**
* Loads the {@link TaskManagerOptions#TASK_MANAGER_LOAD_BALANCE_MODE} from the given {@link Configuration}, staying
* compatible with the legacy {@link ClusterOptions#EVENLY_SPREAD_OUT_SLOTS_STRATEGY} option.
*/
public static TaskManagerLoadBalanceMode loadFromConfiguration(@Nonnull Configuration configuration) {
Optional<TaskManagerLoadBalanceMode> taskManagerLoadBalanceModeOptional = configuration.getOptional(TaskManagerOptions.TASK_MANAGER_LOAD_BALANCE_MODE);
if (taskManagerLoadBalanceModeOptional.isPresent()) {
return taskManagerLoadBalanceModeOptional.get();
}
boolean evenlySpreadOutSlots = configuration.getBoolean(ClusterOptions.EVENLY_SPREAD_OUT_SLOTS_STRATEGY);
return evenlySpreadOutSlots ? TaskManagerLoadBalanceMode.SLOTS : TaskManagerOptions.TASK_MANAGER_LOAD_BALANCE_MODE.defaultValue();
}
| 3.26 |
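A small sketch of the fallback behaviour, assuming (as the references in the snippet suggest) that TaskManagerLoadBalanceMode is the enum nested in TaskManagerOptions in recent Flink versions; the configuration values are illustrative.

import org.apache.flink.configuration.ClusterOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.configuration.TaskManagerOptions.TaskManagerLoadBalanceMode;

public class LoadBalanceModeSketch {
    public static void main(String[] args) {
        // only the legacy option is set: the method falls back to it and resolves to SLOTS
        Configuration legacyOnly = new Configuration();
        legacyOnly.setBoolean(ClusterOptions.EVENLY_SPREAD_OUT_SLOTS_STRATEGY, true);
        TaskManagerLoadBalanceMode fromLegacy = TaskManagerLoadBalanceMode.loadFromConfiguration(legacyOnly);

        // once the new option is set explicitly, it takes precedence over the legacy flag
        Configuration explicit = new Configuration(legacyOnly);
        explicit.set(TaskManagerOptions.TASK_MANAGER_LOAD_BALANCE_MODE, TaskManagerLoadBalanceMode.NONE);
        TaskManagerLoadBalanceMode fromNewOption = TaskManagerLoadBalanceMode.loadFromConfiguration(explicit);

        System.out.println(fromLegacy + " / " + fromNewOption); // SLOTS / NONE
    }
}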
flink_StateAssignmentOperation_extractIntersectingState_rdh
|
/**
* Extracts certain key group ranges from the given state handles and adds them to the
* collector.
*/
@VisibleForTesting
public static void extractIntersectingState(Collection<? extends KeyedStateHandle> originalSubtaskStateHandles, KeyGroupRange rangeToExtract, List<KeyedStateHandle> extractedStateCollector) {
for (KeyedStateHandle keyedStateHandle : originalSubtaskStateHandles) {
if (keyedStateHandle != null) {
KeyedStateHandle intersectedKeyedStateHandle = keyedStateHandle.getIntersection(rangeToExtract);
if (intersectedKeyedStateHandle != null) {
extractedStateCollector.add(intersectedKeyedStateHandle);
}
}
}
}
| 3.26 |
flink_StateAssignmentOperation_checkParallelismPreconditions_rdh
|
/**
* Verifies conditions in regards to parallelism and maxParallelism that must be met when
* restoring state.
*
* @param operatorState
* state to restore
* @param executionJobVertex
* task for which the state should be restored
*/
private static void checkParallelismPreconditions(OperatorState operatorState, ExecutionJobVertex executionJobVertex) {
// ---------------------------------------- max parallelism preconditions ----------------------------------------
if (operatorState.getMaxParallelism() < executionJobVertex.getParallelism()) {
throw new IllegalStateException(((((("The state for task " + executionJobVertex.getJobVertexId()) +
" can not be restored. The maximum parallelism (") + operatorState.getMaxParallelism()) + ") of the restored state is lower than the configured parallelism (") + executionJobVertex.getParallelism()) + "). Please reduce the parallelism of the task to be lower or equal to the maximum parallelism.");
}
// check that the number of key groups has not changed, or whether we need to override it to
// satisfy the restored state
if (operatorState.getMaxParallelism() != executionJobVertex.getMaxParallelism()) {
if (executionJobVertex.canRescaleMaxParallelism(operatorState.getMaxParallelism())) {
LOG.debug("Rescaling maximum parallelism for JobVertex {} from {} to {}", executionJobVertex.getJobVertexId(), executionJobVertex.getMaxParallelism(), operatorState.getMaxParallelism());
executionJobVertex.setMaxParallelism(operatorState.getMaxParallelism());
} else {
// if the max parallelism cannot be rescaled, we complain on mismatch
throw new IllegalStateException(((((((("The maximum parallelism (" + operatorState.getMaxParallelism())
+ ") with which the latest ") + "checkpoint of the execution job vertex ") + executionJobVertex) + " has been taken and the current maximum parallelism (") + executionJobVertex.getMaxParallelism()) + ") changed. This ") + "is currently not supported.");
}
}
}
| 3.26 |
flink_StateAssignmentOperation_createKeyGroupPartitions_rdh
|
/**
* Groups the available set of key groups into key group partitions. A key group partition is
* the set of key groups which is assigned to the same task. Each set of the returned list
* constitutes a key group partition.
*
* <p><b>IMPORTANT</b>: The assignment of key groups to partitions has to be in sync with the
* KeyGroupStreamPartitioner.
*
* @param numberKeyGroups
* Number of available key groups (indexed from 0 to numberKeyGroups - 1)
* @param parallelism
* Parallelism to generate the key group partitioning for
* @return List of key group partitions
*/
public static List<KeyGroupRange> createKeyGroupPartitions(int numberKeyGroups, int parallelism) {
Preconditions.checkArgument(numberKeyGroups >= parallelism);
List<KeyGroupRange> result = new ArrayList<>(parallelism);
for (int i = 0; i < parallelism; ++i) {
result.add(KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex(numberKeyGroups, parallelism, i));
}
return result;
}
| 3.26 |
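A tiny sketch showing what the partitioning looks like for concrete numbers; the expected ranges follow from the even split of 128 key groups over 4 subtasks. The import paths are assumed to be the usual Flink runtime packages.

import java.util.List;
import org.apache.flink.runtime.checkpoint.StateAssignmentOperation;
import org.apache.flink.runtime.state.KeyGroupRange;

public class KeyGroupPartitionSketch {
    public static void main(String[] args) {
        // 128 key groups distributed over 4 subtasks -> 4 contiguous ranges of 32 key groups each
        List<KeyGroupRange> partitions = StateAssignmentOperation.createKeyGroupPartitions(128, 4);
        for (KeyGroupRange range : partitions) {
            System.out.println(range.getStartKeyGroup() + " .. " + range.getEndKeyGroup());
        }
        // expected output: 0..31, 32..63, 64..95, 96..127
    }
}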
flink_StateAssignmentOperation_getRawKeyedStateHandles_rdh
|
/**
* Collect {@link KeyGroupsStateHandle rawKeyedStateHandles} which have intersection with given
* {@link KeyGroupRange} from {@link TaskState operatorState}.
*
* @param operatorState
* all state handles of an operator
* @param subtaskKeyGroupRange
* the KeyGroupRange of a subtask
* @return all rawKeyedStateHandles which have intersection with given KeyGroupRange
*/
public static List<KeyedStateHandle> getRawKeyedStateHandles(OperatorState operatorState, KeyGroupRange subtaskKeyGroupRange) {
final int parallelism = operatorState.getParallelism();
List<KeyedStateHandle> extractedKeyedStateHandles = null;
for (int i = 0; i < parallelism; i++) {
if (operatorState.getState(i) != null) {
Collection<KeyedStateHandle> rawKeyedState = operatorState.getState(i).getRawKeyedState();
if (extractedKeyedStateHandles == null) {
extractedKeyedStateHandles = new ArrayList<>(parallelism * rawKeyedState.size());
}
extractIntersectingState(rawKeyedState, subtaskKeyGroupRange, extractedKeyedStateHandles);
}
}
return extractedKeyedStateHandles != null ? extractedKeyedStateHandles : emptyList();
}
| 3.26 |
flink_StateAssignmentOperation_reAssignSubKeyedStates_rdh
|
// TODO rewrite based on operator id
private Tuple2<List<KeyedStateHandle>, List<KeyedStateHandle>> reAssignSubKeyedStates(OperatorState operatorState, List<KeyGroupRange> keyGroupPartitions, int subTaskIndex, int newParallelism, int oldParallelism) {
List<KeyedStateHandle> subManagedKeyedState;
List<KeyedStateHandle> subRawKeyedState;
if (newParallelism == oldParallelism) {
if (operatorState.getState(subTaskIndex) != null) {
subManagedKeyedState = operatorState.getState(subTaskIndex).getManagedKeyedState().asList();
subRawKeyedState = operatorState.getState(subTaskIndex).getRawKeyedState().asList();
} else {
subManagedKeyedState = emptyList();
subRawKeyedState = emptyList();
}
} else {
subManagedKeyedState = getManagedKeyedStateHandles(operatorState, keyGroupPartitions.get(subTaskIndex));
subRawKeyedState = getRawKeyedStateHandles(operatorState, keyGroupPartitions.get(subTaskIndex));
}
if (subManagedKeyedState.isEmpty() && subRawKeyedState.isEmpty()) {
return new Tuple2<>(emptyList(), emptyList());
} else {
return new Tuple2<>(subManagedKeyedState, subRawKeyedState);
}
}
| 3.26 |
flink_StateAssignmentOperation_getManagedKeyedStateHandles_rdh
|
/**
* Collect {@link KeyGroupsStateHandle managedKeyedStateHandles} which have intersection with
* given {@link KeyGroupRange} from {@link TaskState operatorState}.
*
* @param operatorState
* all state handles of an operator
* @param subtaskKeyGroupRange
* the KeyGroupRange of a subtask
* @return all managedKeyedStateHandles which have intersection with given KeyGroupRange
*/
public static List<KeyedStateHandle> getManagedKeyedStateHandles(OperatorState operatorState, KeyGroupRange subtaskKeyGroupRange) {
final int parallelism = operatorState.getParallelism();
List<KeyedStateHandle> subtaskKeyedStateHandles = null;
for (int i = 0; i < parallelism; i++) {
if (operatorState.getState(i) != null) {
Collection<KeyedStateHandle> keyedStateHandles = operatorState.getState(i).getManagedKeyedState();
if (subtaskKeyedStateHandles == null) {
subtaskKeyedStateHandles = new ArrayList<>(parallelism * keyedStateHandles.size());
}
extractIntersectingState(keyedStateHandles, subtaskKeyGroupRange, subtaskKeyedStateHandles);
}
}
return subtaskKeyedStateHandles != null ? subtaskKeyedStateHandles : emptyList();
}
| 3.26 |
flink_StateAssignmentOperation_applyRepartitioner_rdh
|
/**
* Repartitions the given operator state using the given {@link OperatorStateRepartitioner} with
* respect to the new parallelism.
*
* @param opStateRepartitioner
* partitioner to use
* @param chainOpParallelStates
* state to repartition
* @param oldParallelism
* parallelism with which the state is currently partitioned
* @param newParallelism
* parallelism with which the state should be partitioned
* @return repartitioned state
*/
// TODO rewrite based on operator id
public static <T> List<List<T>> applyRepartitioner(OperatorStateRepartitioner<T> opStateRepartitioner, List<List<T>> chainOpParallelStates, int oldParallelism, int newParallelism) {
if (chainOpParallelStates == null) {
return emptyList();
}
return opStateRepartitioner.repartitionState(chainOpParallelStates, oldParallelism, newParallelism);
}
| 3.26 |
flink_StateAssignmentOperation_m1_rdh
|
/**
* Verifies that all operator states can be mapped to an execution job vertex.
*
* @param allowNonRestoredState
* if false an exception will be thrown if a state could not be
* mapped
* @param operatorStates
* operator states to map
* @param tasks
* task to map to
*/
private static void m1(boolean allowNonRestoredState, Map<OperatorID, OperatorState> operatorStates, Set<ExecutionJobVertex> tasks) {
Set<OperatorID> allOperatorIDs = new HashSet<>();
for (ExecutionJobVertex executionJobVertex : tasks) {
for (OperatorIDPair operatorIDPair : executionJobVertex.getOperatorIDs()) {
allOperatorIDs.add(operatorIDPair.getGeneratedOperatorID());
operatorIDPair.getUserDefinedOperatorID().ifPresent(allOperatorIDs::add);
}
}
for (Map.Entry<OperatorID, OperatorState> operatorGroupStateEntry : operatorStates.entrySet()) {
// ---------------------------------------- find operator for state ----------------------------------------
if (!allOperatorIDs.contains(operatorGroupStateEntry.getKey())) {
OperatorState operatorState = operatorGroupStateEntry.getValue();
if (allowNonRestoredState) {
LOG.info("Skipped checkpoint state for operator {}.", operatorState.getOperatorID());
} else {
throw new IllegalStateException("There is no operator for the state " + operatorState.getOperatorID());
}
}
}
}
| 3.26 |
flink_Conditions_fulfill_rdh
|
/**
* Generic condition to check fulfillment of a predicate.
*/
public static <T extends HasName> ArchCondition<T> fulfill(DescribedPredicate<T> predicate) {
return new ArchCondition<T>(predicate.getDescription()) {
@Override
public void check(T item, ConditionEvents events) {
if (!predicate.test(item)) {
final String message = String.format("%s does not satisfy: %s", item.getName(), predicate.getDescription()); events.add(SimpleConditionEvent.violated(item, message));
}
}
};
}
| 3.26 |
flink_Conditions_haveLeafExceptionTypes_rdh
|
/**
* Tests leaf exception types of a method against the given predicate.
*
* <p>See {@link #haveLeafTypes(DescribedPredicate)} for details.
*/
public static ArchCondition<JavaMethod> haveLeafExceptionTypes(DescribedPredicate<JavaClass> typePredicate) {
return new ArchCondition<JavaMethod>("have leaf exception types" + typePredicate.getDescription()) {
@Override
public void check(JavaMethod method, ConditionEvents events) {
final List<JavaClass> leafArgumentTypes = method.getExceptionTypes().stream().flatMap(argumentType -> getLeafTypes(argumentType).stream()).collect(Collectors.toList());
for (JavaClass leafType : leafArgumentTypes) {
if (!isJavaClass(leafType)) {
continue;
}
if (!typePredicate.test(leafType)) {
final String message = String.format("%s: Exception leaf type %s does not satisfy: %s", method.getFullName(), leafType.getName(), typePredicate.getDescription());
events.add(SimpleConditionEvent.violated(method, message));
}
}
}
};
}
| 3.26 |
flink_Conditions_haveLeafReturnTypes_rdh
|
/**
* Tests leaf return types of a method against the given predicate.
*
* <p>See {@link #haveLeafTypes(DescribedPredicate)} for details.
*/
public static ArchCondition<JavaMethod> haveLeafReturnTypes(DescribedPredicate<JavaClass> typePredicate) {
return new ArchCondition<JavaMethod>("have leaf return types" + typePredicate.getDescription()) {
@Override
public void check(JavaMethod method, ConditionEvents events) {
for (JavaClass leafType : getLeafTypes(method.getReturnType())) {
if (!isJavaClass(leafType)) {
continue;
}
if (!typePredicate.test(leafType)) {
final String message = String.format("%s: Returned leaf type %s does not satisfy: %s", method.getFullName(), leafType.getName(), typePredicate.getDescription());
events.add(SimpleConditionEvent.violated(method, message));
}
}
}
};
}
| 3.26 |
flink_Conditions_haveLeafArgumentTypes_rdh
|
/**
* Tests leaf argument types of a method against the given predicate.
*
* <p>See {@link #haveLeafTypes(DescribedPredicate)} for details.
*/
public static ArchCondition<JavaMethod> haveLeafArgumentTypes(DescribedPredicate<JavaClass> typePredicate) {
return new ArchCondition<JavaMethod>("have leaf argument types" + typePredicate.getDescription()) {
@Override
public void check(JavaMethod method, ConditionEvents events) {
final List<JavaClass> leafArgumentTypes = method.getParameterTypes().stream().flatMap(argumentType -> getLeafTypes(argumentType).stream()).collect(Collectors.toList());
for (JavaClass leafType : leafArgumentTypes) {
if (!isJavaClass(leafType)) {
continue;
}
if (!typePredicate.test(leafType)) {
final String message = String.format("%s: Argument leaf type %s does not satisfy: %s", method.getFullName(), leafType.getName(), typePredicate.getDescription());
events.add(SimpleConditionEvent.violated(method, message));
}
}
}
};
}
| 3.26 |
flink_Conditions_haveLeafTypes_rdh
|
/**
* Tests leaf types of a method against the given predicate.
*
* <p>Given some {@link JavaType}, "leaf" types are recursively determined as described below.
* Leaf types are taken from argument, return, and (declared) exception types.
*
* <ul>
* <li>If the type is an array type, check its base component type.
* <li>If the type is a generic type, check the type itself and all of its type arguments.
* <li>Otherwise, check just the type itself.
* </ul>
*/
public static ArchCondition<JavaMethod> haveLeafTypes(DescribedPredicate<JavaClass> typePredicate) {
return haveLeafReturnTypes(typePredicate).and(haveLeafArgumentTypes(typePredicate)).and(haveLeafExceptionTypes(typePredicate));
}
| 3.26 |
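A hedged sketch of wiring these conditions into an ArchUnit rule. It assumes the Conditions class from the snippets above is on the classpath (its package is not shown here, so its import is omitted) and uses ArchUnit's methods() syntax together with JavaClass.Predicates; the package names in the rule are illustrative.

import static com.tngtech.archunit.core.domain.JavaClass.Predicates.resideInAnyPackage;
import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.methods;

import com.tngtech.archunit.lang.ArchRule;

public class ApiLeafTypeRule {
    // every public method in the (illustrative) API package may only expose types from
    // Flink itself or from the JDK in its arguments, return type, and declared exceptions
    public static final ArchRule API_METHODS_USE_ONLY_ALLOWED_TYPES =
            methods()
                    .that().arePublic()
                    .and().areDeclaredInClassesThat().resideInAPackage("org.apache.flink.api..")
                    .should(Conditions.haveLeafTypes(
                            resideInAnyPackage("org.apache.flink..", "java..")));
}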
flink_PurgingTrigger_of_rdh
|
/**
* Creates a new purging trigger from the given {@code Trigger}.
*
* @param nestedTrigger
* The trigger that is wrapped by this purging trigger
*/
public static <T, W extends Window> PurgingTrigger<T, W> of(Trigger<T, W> nestedTrigger) {
return new PurgingTrigger<>(nestedTrigger);
}
| 3.26 |
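A hedged usage sketch: wrapping a count trigger so that the window contents are purged after every firing instead of being kept until the event-time window closes. Stream and element types are made up; the trigger composition follows the factory method above.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger;
import org.apache.flink.streaming.api.windowing.triggers.PurgingTrigger;

public class PurgingTriggerExample {
    // fire every 100 elements and drop the window contents afterwards
    public static DataStream<Tuple2<String, Long>> countsPerKey(DataStream<Tuple2<String, Long>> input) {
        return input
                .keyBy(value -> value.f0)
                .window(TumblingEventTimeWindows.of(Time.minutes(1)))
                .trigger(PurgingTrigger.of(CountTrigger.of(100)))
                .sum(1);
    }
}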