name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68)
---|---|---|
flink_CatalogSchemaTable_getContextResolvedTable_rdh
|
// ~ Methods ----------------------------------------------------------------
public ContextResolvedTable getContextResolvedTable() {
return contextResolvedTable;
}
| 3.26 |
flink_DelegationTokenProvider_serviceConfigPrefix_rdh
|
/**
* Config prefix of the service.
*/
default String serviceConfigPrefix() {
return String.format("%s.%s", CONFIG_PREFIX, serviceName());
}
| 3.26 |
flink_RuntimeRestAPIVersion_isStableVersion_rdh
|
/**
* Returns whether this version is considered stable.
*
* @return whether this version is stable
*/
@Override
public boolean isStableVersion() {
return isStable;
}
| 3.26 |
flink_SinkModifyOperation_getTargetColumns_rdh
|
/**
* Returns null when no column list is specified.
*/
@Nullable
public int[][] getTargetColumns() {
return targetColumns;
}
| 3.26 |
flink_DefaultLookupCache_maximumSize_rdh
|
/**
* Specifies the maximum number of entries of the cache.
*/
public Builder maximumSize(long maximumSize) {
this.maximumSize = maximumSize;
return this;
}
| 3.26 |
flink_DefaultLookupCache_newBuilder_rdh
|
/**
* Creates a builder for the cache.
*/
public static Builder newBuilder() {
return new Builder();
}
| 3.26 |
flink_DefaultLookupCache_expireAfterAccess_rdh
|
/**
* Specifies the duration after an entry is last accessed that it should be automatically
* removed.
*/
public Builder expireAfterAccess(Duration duration) {
expireAfterAccessDuration = duration;
return this;
}
| 3.26 |
flink_DefaultLookupCache_cacheMissingKey_rdh
|
/**
* Specifies whether to cache empty value into the cache.
*
* <p>Please note that "empty" means a collection without any rows in it instead of null.
* The cache will not accept any null key or value.
*/
public Builder cacheMissingKey(boolean cacheMissingKey) {
this.cacheMissingKey = cacheMissingKey;
return this;
}
| 3.26 |
flink_DefaultLookupCache_expireAfterWrite_rdh
|
/**
* Specifies the duration after an entry is created that it should be automatically removed.
*/
public Builder expireAfterWrite(Duration duration) {
expireAfterWriteDuration = duration;
return this;
}
| 3.26 |
flink_DefaultLookupCache_build_rdh
|
/**
* Creates the cache.
*/
public DefaultLookupCache build() {
return new DefaultLookupCache(expireAfterAccessDuration, expireAfterWriteDuration, maximumSize, cacheMissingKey);
}
| 3.26 |
flink_BooleanColumnSummary_getNonNullCount_rdh
|
/**
* The number of non-null values in this column.
*/
@Override
public long getNonNullCount() {
return trueCount + falseCount;
}
| 3.26 |
flink_RichAndCondition_getLeft_rdh
|
/**
*
* @return One of the {@link IterativeCondition conditions} combined in this condition.
*/
public IterativeCondition<T> getLeft() {
return getNestedConditions()[0];
}
| 3.26 |
flink_RichAndCondition_getRight_rdh
|
/**
*
* @return One of the {@link IterativeCondition conditions} combined in this condition.
*/
public IterativeCondition<T> getRight() {
return getNestedConditions()[1];
}
| 3.26 |
flink_SchemaValidator_deriveProctimeAttribute_rdh
|
/**
* Finds the proctime attribute if defined.
*/
public static Optional<String> deriveProctimeAttribute(DescriptorProperties properties) {
Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME);
for (int i = 0; i < names.size(); i++) {
Optional<Boolean> isProctime = properties.getOptionalBoolean((((SCHEMA + ".") + i) + ".") + SCHEMA_PROCTIME);
if (isProctime.isPresent() && isProctime.get()) {
return Optional.of(names.get((((SCHEMA + ".") + i) + ".") + SCHEMA_NAME));
}
}
return Optional.empty();
}
| 3.26 |
flink_SchemaValidator_deriveFieldMapping_rdh
|
/**
* Finds a table source field mapping.
*
* @param properties
* The properties describing a schema.
* @param inputType
* The input type that a connector and/or format produces. This parameter can
* be used to resolve a rowtime field against an input field.
*/
public static Map<String, String> deriveFieldMapping(DescriptorProperties properties, Optional<TypeInformation<?>> inputType) {
Map<String, String> mapping = new HashMap<>();
TableSchema schema = properties.getTableSchema(SCHEMA);
List<String> columnNames = new ArrayList<>();
inputType.ifPresent(t -> columnNames.addAll(Arrays.asList(((CompositeType) (t)).getFieldNames())));
// add all source fields first because rowtime might reference one of them
columnNames.forEach(name -> mapping.put(name, name));
// add all schema fields first for implicit mappings
Arrays.stream(schema.getFieldNames()).forEach(name -> mapping.put(name, name));
Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME);
for (int i = 0; i < names.size(); i++) {
String name = properties.getString((((SCHEMA + ".") + i) + ".") + SCHEMA_NAME);
Optional<String> source = properties.getOptionalString((((SCHEMA + ".") + i) + ".") + SCHEMA_FROM);
if (source.isPresent()) {
// add explicit mapping
mapping.put(name, source.get());
} else {
// implicit mapping or time
boolean isProctime = properties.getOptionalBoolean((((SCHEMA + ".") + i) + ".") + SCHEMA_PROCTIME).orElse(false);
boolean isRowtime = properties.containsKey((((SCHEMA + ".") + i) + ".") + ROWTIME_TIMESTAMPS_TYPE);
boolean isGeneratedColumn = properties.containsKey((((SCHEMA + ".") + i) + ".") + EXPR);
// remove proctime/rowtime from mapping
if ((isProctime || isRowtime) || isGeneratedColumn) {
mapping.remove(name);
} else if (!columnNames.contains(name)) {
throw new ValidationException(format("Could not map the schema field '%s' to a field " + "from source. Please specify the source field from which it can be derived.", name));
}
}
}
return mapping;
}
| 3.26 |
flink_SchemaValidator_deriveRowtimeAttributes_rdh
|
/**
* Finds the rowtime attributes if defined.
*/
public static List<RowtimeAttributeDescriptor> deriveRowtimeAttributes(DescriptorProperties properties) {
Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME);
List<RowtimeAttributeDescriptor> attributes = new ArrayList<>();
// check for rowtime in every field
for (int i = 0; i < names.size(); i++) {
Optional<Tuple2<TimestampExtractor, WatermarkStrategy>> rowtimeComponents = RowtimeValidator.getRowtimeComponents(properties, ((SCHEMA + ".") + i) + ".");
int index = i;
// create descriptor
rowtimeComponents.ifPresent(tuple2 -> attributes.add(new RowtimeAttributeDescriptor(properties.getString((((SCHEMA + ".") + index) + ".") + SCHEMA_NAME), tuple2.f0, tuple2.f1)));
}
return attributes;
}
| 3.26 |
flink_CharValue_setValue_rdh
|
/**
* Sets the encapsulated char to the specified value.
*
* @param value
* the new value of the encapsulated char.
*/
public void setValue(char value) {
this.value = value;
}
| 3.26 |
flink_CharValue_compareTo_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int compareTo(CharValue o) {
final int other = o.value;
return this.value < other ? -1 : this.value > other ? 1 : 0;
}
| 3.26 |
flink_CharValue_read_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
this.value = in.readChar();
}
| 3.26 |
flink_CharValue_getMaxNormalizedKeyLen_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int getMaxNormalizedKeyLen() {
return 2;
}
| 3.26 |
flink_SlideWithSize_every_rdh
|
/**
* Specifies the window's slide as time or row-count interval.
*
* <p>The slide determines the interval in which windows are started. Hence, sliding windows can
* overlap if the slide is smaller than the size of the window.
*
* <p>For example, you could have windows of size 15 minutes that slide by 3 minutes. With this
* 15 minutes worth of elements are grouped every 3 minutes and each row contributes to 5
* windows.
*
* @param slide
* the slide of the window either as time or row-count interval.
* @return a sliding window
*/
public SlideWithSizeAndSlide every(Expression slide) {
return new SlideWithSizeAndSlide(size, slide);
}
| 3.26 |
flink_ContinuousFileReaderOperator_prepareToProcessRecord_rdh
|
// the split was added and message to itself was enqueued to process it
public <T extends TimestampedInputSplit> boolean prepareToProcessRecord(ContinuousFileReaderOperator<?, T> op) throws IOException {
if (op.splits.isEmpty()) {
op.switchState(ReaderState.IDLE);
return false;
} else {
op.loadSplit(op.splits.poll());
op.switchState(ReaderState.READING);
return true;
}
}
| 3.26 |
flink_KvStateClientProxyHandler_getKvStateLookupInfo_rdh
|
/**
* Lookup the {@link KvStateLocation} for the given job and queryable state name.
*
* <p>The job manager will be queried for the location only if forced or no cached location can
* be found. There are no guarantees about
*
* @param jobId
* JobID the state instance belongs to.
* @param queryableStateName
* Name under which the state instance has been published.
* @param forceUpdate
* Flag to indicate whether to force a update via the lookup service.
* @return Future holding the KvStateLocation
*/
private CompletableFuture<KvStateLocation> getKvStateLookupInfo(final JobID jobId, final String queryableStateName, final boolean forceUpdate) {
final Tuple2<JobID, String> v7 = new Tuple2<>(jobId, queryableStateName);
final CompletableFuture<KvStateLocation> cachedFuture = lookupCache.get(v7);
if (((!forceUpdate) && (cachedFuture != null)) && (!cachedFuture.isCompletedExceptionally())) {
LOG.debug("Retrieving location for state={} of job={} from the cache.", queryableStateName, jobId);
return cachedFuture;
}
final KvStateLocationOracle kvStateLocationOracle = proxy.getKvStateLocationOracle(jobId);
if (kvStateLocationOracle != null) {
LOG.debug("Retrieving location for state={} of job={} from the key-value state location oracle.", queryableStateName, jobId);
final CompletableFuture<KvStateLocation> location = new CompletableFuture<>();
lookupCache.put(v7, location);
kvStateLocationOracle.requestKvStateLocation(jobId, queryableStateName).whenComplete((KvStateLocation kvStateLocation,Throwable throwable) -> {
if (throwable != null) {
if (ExceptionUtils.stripCompletionException(throwable) instanceof FlinkJobNotFoundException) {
// if the jobId was wrong, remove the entry from the cache.
lookupCache.remove(v7);
}
location.completeExceptionally(throwable);
} else {
location.complete(kvStateLocation);
}});
return location;
} else {
return FutureUtils.completedExceptionally(new UnknownLocationException(((("Could not retrieve location of state=" + queryableStateName) + " of job=") + jobId) + ". Potential reasons are: i) the state is not ready, or ii) the job does not exist."));
}
}
| 3.26 |
flink_AbstractKeyedStateBackend_getNumberOfKeyGroups_rdh
|
/**
*
* @see KeyedStateBackend
*/
public int getNumberOfKeyGroups() {
return numberOfKeyGroups;
}
| 3.26 |
flink_AbstractKeyedStateBackend_getKeyGroupRange_rdh
|
/**
*
* @see KeyedStateBackend
*/
@Override
public KeyGroupRange getKeyGroupRange() {
return keyGroupRange;
}
| 3.26 |
flink_AbstractKeyedStateBackend_getCurrentKey_rdh
|
/**
*
* @see KeyedStateBackend
*/
@Override
public K getCurrentKey() {
return this.keyContext.getCurrentKey();
}
| 3.26 |
flink_AbstractKeyedStateBackend_dispose_rdh
|
/**
* Closes the state backend, releasing all internal resources, but does not delete any
* persistent checkpoint data.
*/
@Override
public void dispose() {
IOUtils.closeQuietly(cancelStreamRegistry);
if (kvStateRegistry != null) {
kvStateRegistry.unregisterAll();
}
lastName = null;
lastState = null;
keyValueStatesByName.clear();
}
| 3.26 |
flink_AbstractKeyedStateBackend_getOrCreateKeyedState_rdh
|
/**
*
* @see KeyedStateBackend
*/
@Override
@SuppressWarnings("unchecked")
public <N, S extends State, V> S getOrCreateKeyedState(final TypeSerializer<N> namespaceSerializer, StateDescriptor<S, V> stateDescriptor) throws Exception {
checkNotNull(namespaceSerializer, "Namespace serializer");
checkNotNull(keySerializer, "State key serializer has not been configured in the config. " + "This operation cannot use partitioned state.");
InternalKvState<K, ?, ?> kvState = keyValueStatesByName.get(stateDescriptor.getName());
if (kvState == null) {
if (!stateDescriptor.isSerializerInitialized()) {
stateDescriptor.initializeSerializerUnlessSet(executionConfig);
}
kvState = LatencyTrackingStateFactory.createStateAndWrapWithLatencyTrackingIfEnabled(TtlStateFactory.createStateAndWrapWithTtlIfEnabled(namespaceSerializer, stateDescriptor, this, ttlTimeProvider), stateDescriptor, f0);
keyValueStatesByName.put(stateDescriptor.getName(), kvState);
publishQueryableStateIfEnabled(stateDescriptor, kvState);
}
return ((S) (kvState));
}
| 3.26 |
flink_AbstractKeyedStateBackend_getPartitionedState_rdh
|
/**
* TODO: NOTE: This method does a lot of work caching / retrieving states just to update the
* namespace. This method should be removed for the sake of namespaces being lazily fetched from
* the keyed state backend, or being set on the state directly.
*
* @see KeyedStateBackend
*/
@SuppressWarnings("unchecked")
@Override
public <N, S extends State> S getPartitionedState(final N namespace, final TypeSerializer<N> namespaceSerializer, final StateDescriptor<S, ?> stateDescriptor) throws Exception {
checkNotNull(namespace, "Namespace");
if ((lastName != null) && lastName.equals(stateDescriptor.getName())) {
lastState.setCurrentNamespace(namespace);
return ((S) (lastState));
}
InternalKvState<K, ?, ?> previous = keyValueStatesByName.get(stateDescriptor.getName());
if (previous != null) {
lastState = previous;
lastState.setCurrentNamespace(namespace);
lastName = stateDescriptor.getName();
return ((S) (previous));
}
final S state = getOrCreateKeyedState(namespaceSerializer, stateDescriptor);
final InternalKvState<K, N, ?> kvState = ((InternalKvState<K, N, ?>) (state));
lastName = stateDescriptor.getName();
lastState = kvState;
kvState.setCurrentNamespace(namespace);
return state;
}
| 3.26 |
flink_AbstractKeyedStateBackend_getCurrentKeyGroupIndex_rdh
|
/**
*
* @see KeyedStateBackend
*/
public int getCurrentKeyGroupIndex() {
return this.keyContext.getCurrentKeyGroupIndex();
}
| 3.26 |
flink_AbstractKeyedStateBackend_applyToAllKeys_rdh
|
/**
*
* @see KeyedStateBackend
*/
@Override
public <N, S extends State, T> void applyToAllKeys(final N namespace, final TypeSerializer<N> namespaceSerializer, final StateDescriptor<S, T> stateDescriptor, final KeyedStateFunction<K, S> function) throws Exception {
applyToAllKeys(namespace, namespaceSerializer, stateDescriptor, function, this::getPartitionedState);
}
| 3.26 |
flink_AbstractKeyedStateBackend_setCurrentKey_rdh
|
/**
*
* @see KeyedStateBackend
*/
@Override
public void setCurrentKey(K newKey) {
notifyKeySelected(newKey);
this.keyContext.setCurrentKey(newKey);
this.keyContext.setCurrentKeyGroupIndex(KeyGroupRangeAssignment.assignToKeyGroup(newKey, numberOfKeyGroups));
}
| 3.26 |
flink_AbstractKeyedStateBackend_getKeySerializer_rdh
|
/**
*
* @see KeyedStateBackend
*/
@Override
public TypeSerializer<K> getKeySerializer() {
return keySerializer;
}
| 3.26 |
flink_RestAPIVersion_getLatestVersion_rdh
|
/**
* Accepts a collection of versions and returns the latest one.
*
* @return the latest version that implements the RestAPIVersion interface
*/
static <E extends RestAPIVersion<E>> E getLatestVersion(Collection<E> versions) {
return Collections.max(versions);
}
| 3.26 |
flink_ChainedStateHandle_isEmpty_rdh
|
/**
* Checks if there are any state handles present. Notice that this can be true even if {@link #getLength()} is greater than zero, because state handles can be null.
*
* @return true if there are no state handles for any operator.
*/
public boolean isEmpty() {
for (T state : operatorStateHandles) {
if (state != null) {
return false;
}
}
return true;
}
| 3.26 |
flink_ChainedStateHandle_getLength_rdh
|
/**
* Returns the length of the operator chain. This can be different from the number of operator
state handles, because some operators in the chain can have no state and thus their state
* handle can be null.
*
* @return length of the operator chain
*/
public int getLength() {
return operatorStateHandles.size();
}
| 3.26 |
flink_FlinkRelUtil_isMergeable_rdh
|
/**
* Returns whether two neighbouring {@link Calc}s can be merged into one {@link Calc}. For the two
* {@link Calc}s to be mergeable, each non-deterministic {@link RexNode} of the bottom {@link Calc} must appear at most once in the project list of the top {@link Calc}.
*/
public static boolean isMergeable(Calc topCalc, Calc bottomCalc) {
final RexProgram topProgram = topCalc.getProgram();
final RexProgram bottomProgram = bottomCalc.getProgram();
final int[] topInputRefCounter = initializeArray(topCalc.getInput().getRowType().getFieldCount(), 0);
List<RexNode> topInputRefs = topProgram.getProjectList().stream().map(topProgram::expandLocalRef).collect(Collectors.toList());
List<RexNode> bottomProjects = bottomProgram.getProjectList().stream().map(bottomProgram::expandLocalRef).collect(Collectors.toList());
if (null != topProgram.getCondition()) {
topInputRefs.add(topProgram.expandLocalRef(topProgram.getCondition()));
}
return mergeable(topInputRefCounter, topInputRefs, bottomProjects);
}
| 3.26 |
flink_FlinkRelUtil_merge_rdh
|
/**
* Merges the programs of two {@link Calc} instances and returns a new {@link Calc} instance
* with the merged program.
*/
public static Calc merge(Calc topCalc, Calc bottomCalc) {
RexProgram topProgram = topCalc.getProgram();
RexBuilder rexBuilder = topCalc.getCluster().getRexBuilder();
// Merge the programs together.
RexProgram mergedProgram = RexProgramBuilder.mergePrograms(topProgram, bottomCalc.getProgram(), rexBuilder);
if (!mergedProgram.getOutputRowType().equals(topProgram.getOutputRowType())) {
throw new IllegalArgumentException("Output row type of merged program is not the same top program.");
}
RexProgram newMergedProgram;
if (mergedProgram.getCondition() != null) {
RexNode condition = mergedProgram.expandLocalRef(mergedProgram.getCondition());
RexNode simplifiedCondition = FlinkRexUtil.simplify(rexBuilder, condition, topCalc.getCluster().getPlanner().getExecutor());
if (simplifiedCondition.equals(condition)) {
newMergedProgram = mergedProgram;
} else {
RexProgramBuilder programBuilder = RexProgramBuilder.forProgram(mergedProgram, rexBuilder, true);
programBuilder.clearCondition();
programBuilder.addCondition(simplifiedCondition);
newMergedProgram = programBuilder.getProgram(true);
}
} else {
newMergedProgram = mergedProgram;
}
return topCalc.copy(topCalc.getTraitSet(), bottomCalc.getInput(), newMergedProgram);
}
| 3.26 |
flink_FlinkRelUtil_mergeable_rdh
|
/**
* The internal reusable method for filter, project and calc.
*/
private static boolean mergeable(int[] topInputRefCounter, List<RexNode> topProjects, List<RexNode> bottomProjects) {
RexUtil.apply(new InputRefCounter(true, topInputRefCounter), topProjects, null);
boolean mergeable = true;
for (int idx = 0; idx < bottomProjects.size(); idx++) {
RexNode node = bottomProjects.get(idx);
if (!RexUtil.isDeterministic(node)) {
assert idx < topInputRefCounter.length;
if (topInputRefCounter[idx] > 1) {
mergeable = false;
break;
}
}
}
return mergeable;
}
| 3.26 |
flink_FlinkRelUtil_initializeArray_rdh
|
/**
* Returns an int array with given length and initial value.
*
* @param length
* array length
* @param initVal
* initial value
* @return initialized int array
*/
public static int[] initializeArray(int length, int initVal) {
final int[] array = new int[length];
Arrays.fill(array, initVal);
return array;
}
| 3.26 |
flink_InnerJoinOperatorBase_executeOnCollections_rdh
|
// --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
protected List<OUT> executeOnCollections(List<IN1> inputData1, List<IN2> inputData2, RuntimeContext runtimeContext, ExecutionConfig executionConfig) throws Exception {
FlatJoinFunction<IN1, IN2, OUT> function = userFunction.getUserCodeObject();
FunctionUtils.setFunctionRuntimeContext(function, runtimeContext);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
TypeInformation<IN1> leftInformation = getOperatorInfo().getFirstInputType();
TypeInformation<IN2> rightInformation = getOperatorInfo().getSecondInputType();
TypeInformation<OUT> outInformation = getOperatorInfo().getOutputType();
TypeSerializer<IN1> leftSerializer = leftInformation.createSerializer(executionConfig);
TypeSerializer<IN2> rightSerializer = rightInformation.createSerializer(executionConfig);
TypeComparator<IN1> leftComparator;
TypeComparator<IN2> rightComparator;
if (leftInformation instanceof AtomicType) {
leftComparator = ((AtomicType<IN1>) (leftInformation)).createComparator(true, executionConfig);
} else if (leftInformation instanceof CompositeType) {
int[] keyPositions = getKeyColumns(0);
boolean[] orders = new boolean[keyPositions.length];
Arrays.fill(orders, true);
leftComparator = ((CompositeType<IN1>) (leftInformation)).createComparator(keyPositions, orders, 0, executionConfig);
} else {
throw new RuntimeException(("Type information for left input of type " + leftInformation.getClass().getCanonicalName()) + " is not supported. Could not generate a comparator.");
}
if (rightInformation instanceof AtomicType) {
rightComparator = ((AtomicType<IN2>) (rightInformation)).createComparator(true, executionConfig);
} else if (rightInformation instanceof CompositeType) {
int[] keyPositions = getKeyColumns(1);
boolean[] orders = new boolean[keyPositions.length];
Arrays.fill(orders, true);
rightComparator = ((CompositeType<IN2>) (rightInformation)).createComparator(keyPositions, orders, 0, executionConfig);
} else {
throw new RuntimeException(("Type information for right input of type " + rightInformation.getClass().getCanonicalName()) + " is not supported. Could not generate a comparator.");
}
TypePairComparator<IN1, IN2> pairComparator = new GenericPairComparator<IN1, IN2>(leftComparator, rightComparator);
List<OUT> result = new ArrayList<OUT>();
Collector<OUT> collector = new CopyingListCollector<OUT>(result, outInformation.createSerializer(executionConfig));
Map<Integer, List<IN2>> probeTable = new HashMap<Integer, List<IN2>>();
// Build hash table
for (IN2 element : inputData2) {
List<IN2> list = probeTable.get(rightComparator.hash(element));
if (list == null) {
list = new ArrayList<IN2>();
probeTable.put(rightComparator.hash(element), list);
}
list.add(element);
}
// Probing
for (IN1 left : inputData1) {
List<IN2> matchingHashes = probeTable.get(leftComparator.hash(left));
if (matchingHashes != null) {
pairComparator.setReference(left);
for (IN2 right : matchingHashes) {
if (pairComparator.equalToReference(right)) {
function.join(leftSerializer.copy(left), rightSerializer.copy(right), collector);
}
}
}
}
FunctionUtils.closeFunction(function);
return result;
}
| 3.26 |
flink_SourcePredicates_isJavaClass_rdh
|
/**
* Checks whether the given {@link JavaClass} is actually a Java class, and not a Scala class.
*
* <p>ArchUnit does not yet fully support Scala. Rules should ensure that they restrict
* themselves to only Java classes for correct results.
*/
static boolean isJavaClass(JavaClass clazz) {
if (!clazz.getSource().isPresent()) {
return false;
}
final Source source = clazz.getSource().get();
if (!source.getFileName().isPresent()) {
return false;
}
return source.getFileName().get().contains(".java");
}
| 3.26 |
flink_SourcePredicates_areJavaClasses_rdh
|
/**
* Tests that a given class is a Java class.
*
* <p>ArchUnit does not yet fully support Scala. Rules should ensure that they restrict
* themselves to only Java classes for correct results.
*/
public static DescribedPredicate<JavaClass> areJavaClasses() {
return new DescribedPredicate<JavaClass>("are Java classes") {
@Override
public boolean test(JavaClass clazz) {
return isJavaClass(clazz);
}
};
}
| 3.26 |
flink_FileWriterBucket_assembleNewPartPath_rdh
|
/**
* Constructs a new PartPath and increments the partCounter.
*/
private Path assembleNewPartPath() {
long v6 = partCounter++;
return new Path(bucketPath, ((((outputFileConfig.getPartPrefix() + '-') + uniqueId) + '-') + v6) + outputFileConfig.getPartSuffix());
}
| 3.26 |
flink_FileWriterBucket_getUniqueId_rdh
|
// --------------------------- Testing Methods -----------------------------
@VisibleForTesting
public String getUniqueId() {
return uniqueId;
}
| 3.26 |
flink_FileWriterBucket_getNew_rdh
|
// --------------------------- Static Factory Methods -----------------------------
/**
* Creates a new empty {@code Bucket}.
*
* @param bucketId
* the identifier of the bucket, as returned by the {@link BucketAssigner}.
* @param bucketPath
* the path to where the part files for the bucket will be written to.
* @param bucketWriter
* the {@link BucketWriter} used to write part files in the bucket.
* @param <IN>
* the type of input elements to the sink.
* @param outputFileConfig
* the part file configuration.
* @return The new Bucket.
*/
static <IN> FileWriterBucket<IN> getNew(final String bucketId, final Path bucketPath, final BucketWriter<IN, String> bucketWriter, final RollingPolicy<IN, String> rollingPolicy, final OutputFileConfig outputFileConfig) {
return new FileWriterBucket<>(bucketId, bucketPath, bucketWriter, rollingPolicy, outputFileConfig);
}
| 3.26 |
flink_FileWriterBucket_restore_rdh
|
/**
* Restores a {@code Bucket} from the state included in the provided {@link FileWriterBucketState}.
*
* @param bucketWriter
* the {@link BucketWriter} used to write part files in the bucket.
* @param bucketState
* the initial state of the restored bucket.
* @param <IN>
* the type of input elements to the sink.
* @param outputFileConfig
* the part file configuration.
* @return The restored Bucket.
*/
static <IN> FileWriterBucket<IN> restore(final BucketWriter<IN, String> bucketWriter, final RollingPolicy<IN, String> rollingPolicy, final FileWriterBucketState bucketState, final OutputFileConfig outputFileConfig) throws IOException {
return new FileWriterBucket<>(bucketWriter, rollingPolicy, bucketState, outputFileConfig);
}
| 3.26 |
flink_RemoteCacheManager_startSegment_rdh
|
// ------------------------------------------------------------------------
// Called by RemoteTierProducerAgent
// ------------------------------------------------------------------------
void startSegment(int subpartitionId, int segmentId) {
subpartitionCacheDataManagers[subpartitionId].startSegment(segmentId);
subpartitionSegmentIds[subpartitionId] = segmentId;
}
| 3.26 |
flink_SqlGatewayRestAPIVersion_getStableVersions_rdh
|
/**
* Returns the supported stable versions.
*
* @return the list of the stable versions.
*/
public static List<SqlGatewayRestAPIVersion> getStableVersions() {
return Arrays.stream(SqlGatewayRestAPIVersion.values()).filter(SqlGatewayRestAPIVersion::isStableVersion).collect(Collectors.toList());
}
| 3.26 |
flink_SqlGatewayRestAPIVersion_getURLVersionPrefix_rdh
|
/**
* Returns the URL version prefix (e.g. "v1") for this version.
*
* @return URL version prefix
*/
@Override
public String getURLVersionPrefix() {
return name().toLowerCase();
}
| 3.26 |
flink_SqlGatewayRestAPIVersion_fromURIToVersion_rdh
|
/**
* Converts a URI to a SqlGatewayRestAPIVersion. If the conversion fails, returns the default version.
*
* @return SqlGatewayRestAPIVersion
*/
public static SqlGatewayRestAPIVersion fromURIToVersion(String uri) {
int slashIndex = uri.indexOf('/', 1);
if (slashIndex < 0) {
slashIndex = uri.length();
}
try {
return valueOf(uri.substring(1, slashIndex).toUpperCase());
} catch (Exception e) {
return getDefaultVersion();
}
}
| 3.26 |
flink_KvStateLocation_getJobVertexId_rdh
|
/**
* Returns the JobVertexID the KvState instances belong to.
*
* @return JobVertexID the KvState instances belong to
*/
public JobVertexID getJobVertexId() {
return jobVertexId;
}
| 3.26 |
flink_KvStateLocation_getKvStateID_rdh
|
/**
* Returns the registered KvStateID for the key group index or <code>null</code> if none is
* registered yet.
*
* @param keyGroupIndex
* Key group index to get ID for.
* @return KvStateID for the key group index or <code>null</code> if none is registered yet
* @throws IndexOutOfBoundsException
* If key group index < 0 or >= Number of key groups
*/
public KvStateID getKvStateID(int keyGroupIndex) {
if ((keyGroupIndex < 0) || (keyGroupIndex >= numKeyGroups)) {
throw new IndexOutOfBoundsException("Key group index");
}
return kvStateIds[keyGroupIndex];
}
| 3.26 |
flink_KvStateLocation_unregisterKvState_rdh
|
/**
* Unregisters the KvState instances for the given key group range.
*
* @param keyGroupRange
* Key group range to unregister.
* @throws IndexOutOfBoundsException
* If key group range start < 0 or key group range end >=
* Number of key groups
* @throws IllegalArgumentException
* If no location information registered for a key group index
* in the range.
*/
void unregisterKvState(KeyGroupRange keyGroupRange) {
if ((keyGroupRange.getStartKeyGroup() < 0) || (keyGroupRange.getEndKeyGroup() >= numKeyGroups)) {
throw new IndexOutOfBoundsException("Key group index");
}
for (int kgIdx = keyGroupRange.getStartKeyGroup(); kgIdx <= keyGroupRange.getEndKeyGroup(); ++kgIdx) {
if ((kvStateIds[kgIdx] == null) || (f1[kgIdx] == null)) {
throw new IllegalArgumentException("Not registered. Probably registration/unregistration race.");
}
numRegisteredKeyGroups--;
kvStateIds[kgIdx] = null;
f1[kgIdx] = null;
}
}
| 3.26 |
flink_KvStateLocation_registerKvState_rdh
|
/**
* Registers a KvState instance for the given key group index.
*
* @param keyGroupRange
* Key group range to register
* @param kvStateId
* ID of the KvState instance at the key group index.
* @param kvStateAddress
* Server address of the KvState instance at the key group index.
* @throws IndexOutOfBoundsException
* If key group range start < 0 or key group range end >=
* Number of key groups
*/
public void registerKvState(KeyGroupRange keyGroupRange, KvStateID kvStateId, InetSocketAddress kvStateAddress) {
if ((keyGroupRange.getStartKeyGroup() < 0) || (keyGroupRange.getEndKeyGroup() >= numKeyGroups)) {
throw new IndexOutOfBoundsException("Key group index");
}
for (int kgIdx = keyGroupRange.getStartKeyGroup(); kgIdx <= keyGroupRange.getEndKeyGroup(); ++kgIdx) {
if ((kvStateIds[kgIdx] == null) && (f1[kgIdx] == null)) {
numRegisteredKeyGroups++;
}
kvStateIds[kgIdx] = kvStateId;
f1[kgIdx] = kvStateAddress;
}
}
| 3.26 |
flink_KvStateLocation_getRegistrationName_rdh
|
/**
* Returns the name under which the KvState instances have been registered.
*
* @return Name under which the KvState instances have been registered.
*/
public String getRegistrationName() {
return f0;
}
| 3.26 |
flink_BlobOutputStream_receiveAndCheckPutResponse_rdh
|
/**
* Reads the response from the input stream and throws in case of errors.
*
* @param is
* stream to read from
* @param md
* message digest to check the response against
* @param blobType
* whether the BLOB should be permanent or transient
* @throws IOException
* if the response is an error, the message digest does not match or reading
* the response failed
*/
private static BlobKey receiveAndCheckPutResponse(InputStream is, MessageDigest md, BlobKey.BlobType blobType) throws IOException {
int response = is.read();
if (response < 0) {
throw new EOFException("Premature end of response");
} else if (response == RETURN_OKAY) {
BlobKey remoteKey = BlobKey.readFromInputStream(is);
byte[] localHash = md.digest();
if (blobType != remoteKey.getType()) {
throw new IOException("Detected data corruption during transfer");
}
if (!Arrays.equals(localHash, remoteKey.getHash())) {
throw new IOException("Detected data corruption during transfer");
}
return remoteKey;
} else if (response == RETURN_ERROR) {
Throwable cause = BlobUtils.readExceptionFromStream(is);
throw new IOException("Server side error: " + cause.getMessage(), cause);
} else {
throw new IOException(("Unrecognized response: " + response) + '.');
}
}
| 3.26 |
flink_BlobOutputStream_sendPutHeader_rdh
|
/**
* Constructs and writes the header data for a PUT request to the given output stream.
*
* @param outputStream
* the output stream to write the PUT header data to
* @param jobId
* the ID of job the BLOB belongs to (or <tt>null</tt> if job-unrelated)
* @param blobType
* whether the BLOB should become permanent or transient
* @throws IOException
* thrown if an I/O error occurs while writing the header data to the output
* stream
*/
private static void sendPutHeader(OutputStream outputStream, @Nullable JobID jobId, BlobKey.BlobType blobType) throws IOException {
// Signal type of operation
outputStream.write(PUT_OPERATION);
if (jobId == null) {
outputStream.write(JOB_UNRELATED_CONTENT);
} else {
outputStream.write(JOB_RELATED_CONTENT);
outputStream.write(jobId.getBytes());
}
outputStream.write(blobType.ordinal());
}
| 3.26 |
flink_JoinedStreams_evictor_rdh
|
/**
* Sets the {@code Evictor} that should be used to evict elements from a window before
* emission.
*
* <p>Note: When using an evictor window performance will degrade significantly, since
* pre-aggregation of window results cannot be used.
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> evictor(Evictor<? super TaggedUnion<T1, T2>, ? super W> newEvictor) {
return new WithWindow<>(input1, input2, keySelector1, keySelector2, keyType, windowAssigner, trigger, newEvictor, f0);
}
| 3.26 |
flink_JoinedStreams_window_rdh
|
/**
* Specifies the window on which the join operation works.
*/
@PublicEvolving
public <W extends Window> WithWindow<T1, T2, KEY, W> window(WindowAssigner<? super TaggedUnion<T1, T2>, W> assigner) {
return new WithWindow<>(input1, input2, keySelector1, keySelector2, keyType, assigner, null, null, null);
}
| 3.26 |
flink_JoinedStreams_where_rdh
|
/**
* Specifies a {@link KeySelector} for elements from the first input with explicit type
* information for the key type.
*
* @param keySelector
* The KeySelector to be used for extracting the first input's key for
* partitioning.
* @param keyType
* The type information describing the key type.
*/
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector, TypeInformation<KEY> keyType) {
requireNonNull(keySelector);
requireNonNull(keyType);
return new Where<>(input1.clean(keySelector), keyType);
}
| 3.26 |
flink_JoinedStreams_allowedLateness_rdh
|
/**
* Sets the time by which elements are allowed to be late.
*
* @see WindowedStream#allowedLateness(Time)
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> allowedLateness(Time newLateness) {
return new WithWindow<>(input1, input2, keySelector1, keySelector2, keyType, windowAssigner, trigger, evictor, newLateness);
}
| 3.26 |
flink_JoinedStreams_equalTo_rdh
|
/**
* Specifies a {@link KeySelector} for elements from the second input with explicit type
* information for the key type.
*
* @param keySelector
* The KeySelector to be used for extracting the second input's key for
* partitioning.
* @param keyType
* The type information describing the key type.
*/
public EqualTo equalTo(KeySelector<T2, KEY> keySelector, TypeInformation<KEY> keyType) {
requireNonNull(keySelector);
requireNonNull(keyType);
if (!keyType.equals(this.keyType)) {
throw new IllegalArgumentException(((("The keys for the two inputs are not equal: " + "first key = ") + this.keyType) + " , second key = ") + keyType);
}
return new EqualTo(input2.clean(keySelector));
}
| 3.26 |
flink_JoinedStreams_trigger_rdh
|
/**
* Sets the {@code Trigger} that should be used to trigger window emission.
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> trigger(Trigger<? super TaggedUnion<T1, T2>, ? super W> newTrigger) {
return new WithWindow<>(input1, input2, keySelector1, keySelector2, keyType, windowAssigner, newTrigger, evictor, f0);
}
| 3.26 |
flink_JoinedStreams_apply_rdh
|
/**
* Completes the join operation with the user function that is executed for each combination
* of elements with the same key in a window.
*
* <p>Note: This method's return type does not support setting an operator-specific
* parallelism. Due to binary backwards compatibility, this cannot be altered. Use the
* {@link #with(JoinFunction, TypeInformation)}, method to set an operator-specific
* parallelism.
*/
public <T> DataStream<T> apply(JoinFunction<T1, T2, T> function, TypeInformation<T> resultType) {
// clean the closure
function = input1.getExecutionEnvironment().clean(function);
coGroupedWindowedStream = input1.coGroup(input2).where(keySelector1).equalTo(keySelector2).window(windowAssigner).trigger(trigger).evictor(evictor).allowedLateness(f0);
return coGroupedWindowedStream.apply(new JoinCoGroupFunction<>(function), resultType);
}
/**
* Completes the join operation with the user function that is executed for each combination
* of elements with the same key in a window.
*
* <p><b>Note:</b> This is a temporary workaround while the {@link #apply(FlatJoinFunction,
* TypeInformation)} method has the wrong return type and hence does not allow one to set an
* operator-specific parallelism
*
* @deprecated This method will be removed once the {@link #apply(JoinFunction,
TypeInformation)}
| 3.26 |
flink_MapValue_isEmpty_rdh
|
/* (non-Javadoc)
@see java.util.Map#isEmpty()
*/
@Override
public boolean isEmpty() {
return this.map.isEmpty();
}
| 3.26 |
flink_MapValue_get_rdh
|
/* (non-Javadoc)
@see java.util.Map#get(java.lang.Object)
*/
@Override
public V get(final Object key) {
return this.map.get(key);
}
| 3.26 |
flink_MapValue_containsValue_rdh
|
/* (non-Javadoc)
@see java.util.Map#containsValue(java.lang.Object)
*/
@Override
public boolean containsValue(final Object value) {
return this.map.containsValue(value);
}
| 3.26 |
flink_MapValue_keySet_rdh
|
/* (non-Javadoc)
@see java.util.Map#keySet()
*/
@Override
public Set<K> keySet() {
return this.map.keySet();
}
| 3.26 |
flink_MapValue_put_rdh
|
/* (non-Javadoc)
@see java.util.Map#put(java.lang.Object, java.lang.Object)
*/
@Override
public V put(final K key, final V value) {
return this.map.put(key, value);
}
| 3.26 |
flink_MapValue_clear_rdh
|
/* (non-Javadoc)
@see java.util.Map#clear()
*/
@Override
public void clear() {
this.map.clear();
}
| 3.26 |
flink_MapValue_m0_rdh
|
/* (non-Javadoc)
@see java.util.Map#putAll(java.util.Map)
*/
@Override
public void m0(final Map<? extends K, ? extends V> m) {
this.map.putAll(m);
}
| 3.26 |
flink_MapValue_containsKey_rdh
|
/* (non-Javadoc)
@see java.util.Map#containsKey(java.lang.Object)
*/
@Override
public boolean containsKey(final Object key) {
return this.map.containsKey(key);
}
| 3.26 |
flink_MapValue_toString_rdh
|
/* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
return this.map.toString();
}
| 3.26 |
flink_MapValue_equals_rdh
|
/* (non-Javadoc)
@see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
final MapValue<?, ?> other = ((MapValue<?, ?>) (obj));
if (this.map == null) {
if (other.map != null) {
return false;
}
} else if (!this.map.equals(other.map)) {
return false;
}
return true;
}
| 3.26 |
flink_MapValue_entrySet_rdh
|
/* (non-Javadoc)
@see java.util.Map#entrySet()
*/
@Override
public Set<Entry<K, V>> entrySet() {
return this.map.entrySet();
}
| 3.26 |
flink_MapValue_remove_rdh
|
/* (non-Javadoc)
@see java.util.Map#remove(java.lang.Object)
*/
@Override
public V remove(final Object key) {
return this.map.remove(key);
}
| 3.26 |
flink_MapValue_size_rdh
|
/* (non-Javadoc)
@see java.util.Map#size()
*/
@Override
public int size() {
return this.map.size();
}
| 3.26 |
flink_MapValue_hashCode_rdh
|
/* (non-Javadoc)
@see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 47;
int result = 1;
result = (prime * result) + this.map.hashCode();
return result;
}
| 3.26 |
flink_Tuple9_setFields_rdh
|
/**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
}
| 3.26 |
flink_Tuple9_toString_rdh
|
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7,
* f8), where the individual fields are the value returned by calling {@link Object#toString} on
* that field.
*
* @return The string representation of the tuple.
*/@Override
public String toString() {
return
((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") +
StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6))
+ ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ")";
}
| 3.26 |
flink_Tuple9_copy_rdh
|
/**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8> copy() {
return new Tuple9<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8);
}
| 3.26 |
flink_Tuple9_m0_rdh
|
/**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean m0(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple9)) {
return false;
} @SuppressWarnings("rawtypes")
Tuple9 tuple = ((Tuple9) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {return false;
}if (f6 != null ? !f6.equals(tuple.f6)
: tuple.f6 != null) {
return false; }
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
return true;
}
| 3.26 |
flink_Tuple9_of_rdh
|
/**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8> Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8) {
return new Tuple9<>(f0, f1, f2, f3, f4, f5, f6, f7, f8);
}
| 3.26 |
flink_DoubleSumAggregator_aggregate_rdh
|
/**
* Adds the given value to the current aggregate.
*
* @param value
* The value to add to the aggregate.
*/
public void aggregate(double value) {
sum += value;
}
| 3.26 |
flink_TpcdsResultComparator_compareLine_rdh
|
// ------------------------------------------------------------------------
private static boolean compareLine(String expectedLine, String actualLine) {
return compareLineInternal(expectedLine, actualLine, -1, false);
}
| 3.26 |
flink_TpcdsResultComparator_isEqualCol_rdh
|
// ------------------------------------------------------------------------
private static boolean isEqualCol(String expected, String actual) {
return (isEqualNull(expected, actual) || isEqualNumber(expected, actual)) || expected.equals(actual);
}
| 3.26 |
flink_TpcdsResultComparator_compareQuery34_rdh
|
// ------------------------------------------------------------------------
private static boolean compareQuery34(String[] expectedLines, String[] actualLines) {
// take the first two lines and move them back to lines 7 and 8
final String expected1 = expectedLines[0];
final String expected2 = expectedLines[1];
System.arraycopy(expectedLines, 2, expectedLines, 0, 6);
expectedLines[6] = expected1;
expectedLines[7] = expected2;
return compareLinesPrintingErrors(expectedLines, actualLines, 0);
}
| 3.26 |
flink_OptionalFailure_get_rdh
|
/**
*
* @return stored value or throw a {@link FlinkException} with {@code failureCause}.
*/
public T get() throws FlinkException {
if (value != null) {
return value;
}
checkNotNull(failureCause);
throw new FlinkException(failureCause);
}
/**
*
* @return same as {@link #get()} but throws a {@link FlinkRuntimeException}
| 3.26 |
flink_PushFilterIntoSourceScanRuleBase_resolveFiltersAndCreateTableSourceTable_rdh
|
/**
* Resolves filters using the underlying sources {@link SupportsFilterPushDown} and creates a
* new {@link TableSourceTable} with the supplied predicates.
*
* @param convertiblePredicates
* Predicates to resolve
* @param oldTableSourceTable
* TableSourceTable to copy
* @param scan
* Underlying table scan to push to
* @param relBuilder
* Builder to push the scan to
* @return A tuple, constituting of the resolved filters and the newly created {@link TableSourceTable}
*/
protected Tuple2<SupportsFilterPushDown.Result, TableSourceTable> resolveFiltersAndCreateTableSourceTable(RexNode[] convertiblePredicates, TableSourceTable oldTableSourceTable, TableScan scan, RelBuilder relBuilder) {
// record size before applyFilters for update statistics
int originPredicatesSize = convertiblePredicates.length;
// update DynamicTableSource
DynamicTableSource newTableSource = oldTableSourceTable.tableSource().copy();
SupportsFilterPushDown.Result result = FilterPushDownSpec.apply(Arrays.asList(convertiblePredicates), newTableSource, SourceAbilityContext.from(scan));
relBuilder.push(scan);
List<RexNode> acceptedPredicates = convertExpressionToRexNode(result.getAcceptedFilters(), relBuilder);
FilterPushDownSpec filterPushDownSpec = new FilterPushDownSpec(acceptedPredicates);
TableSourceTable newTableSourceTable = oldTableSourceTable.copy(newTableSource, oldTableSourceTable.getStatistic(), new SourceAbilitySpec[]{ filterPushDownSpec });
return new Tuple2<>(result, newTableSourceTable);
}
| 3.26 |
flink_EdgeManagerBuildUtil_computeMaxEdgesToTargetExecutionVertex_rdh
|
/**
* Given parallelisms of two job vertices, compute the max number of edges connected to a target
* execution vertex from the source execution vertices. Note that edge is considered undirected
* here. It can be an edge connected from an upstream job vertex to a downstream job vertex, or
* in a reversed way.
*
* @param targetParallelism
* parallelism of the target job vertex.
* @param sourceParallelism
* parallelism of the source job vertex.
* @param distributionPattern
* the {@link DistributionPattern} of the connecting edge.
*/
public static int computeMaxEdgesToTargetExecutionVertex(int targetParallelism, int sourceParallelism, DistributionPattern distributionPattern) {
switch (distributionPattern) {
case POINTWISE :
return ((sourceParallelism + targetParallelism) - 1) / targetParallelism;
case ALL_TO_ALL :
return sourceParallelism;
default :
throw new IllegalArgumentException("Unrecognized distribution pattern.");
}
}
| 3.26 |
flink_EdgeManagerBuildUtil_connectInternal_rdh
|
/**
* Connect all execution vertices to all partitions.
*/
private static void connectInternal(List<ExecutionVertex> taskVertices, List<IntermediateResultPartition> partitions, ResultPartitionType resultPartitionType, EdgeManager edgeManager) {
checkState(!taskVertices.isEmpty());
checkState(!partitions.isEmpty());
ConsumedPartitionGroup consumedPartitionGroup = createAndRegisterConsumedPartitionGroupToEdgeManager(taskVertices.size(), partitions, resultPartitionType, edgeManager);
for (ExecutionVertex ev : taskVertices) {
ev.addConsumedPartitionGroup(consumedPartitionGroup);
}
List<ExecutionVertexID> consumerVertices = taskVertices.stream().map(ExecutionVertex::getID).collect(Collectors.toList());
ConsumerVertexGroup consumerVertexGroup = ConsumerVertexGroup.fromMultipleVertices(consumerVertices, resultPartitionType);
for (IntermediateResultPartition partition : partitions) {
partition.addConsumers(consumerVertexGroup);
}
consumedPartitionGroup.setConsumerVertexGroup(consumerVertexGroup);
consumerVertexGroup.setConsumedPartitionGroup(consumedPartitionGroup);
}
| 3.26 |
flink_EdgeManagerBuildUtil_connectVertexToResult_rdh
|
/**
* Calculates the connections between {@link ExecutionJobVertex} and {@link IntermediateResult}
* based on the {@link DistributionPattern}.
*
* @param vertex
* the downstream consumer {@link ExecutionJobVertex}
* @param intermediateResult
* the upstream consumed {@link IntermediateResult}
*/
static void connectVertexToResult(ExecutionJobVertex vertex, IntermediateResult intermediateResult) {
final DistributionPattern distributionPattern = intermediateResult.getConsumingDistributionPattern();
final JobVertexInputInfo jobVertexInputInfo = vertex.getGraph().getJobVertexInputInfo(vertex.getJobVertexId(), intermediateResult.getId());
switch (distributionPattern) {
case POINTWISE :
connectPointwise(vertex, intermediateResult, jobVertexInputInfo);
break;
case ALL_TO_ALL :
connectAllToAll(vertex, intermediateResult, jobVertexInputInfo);
break;
default :
throw new IllegalArgumentException("Unrecognized distribution pattern.");
}
}
| 3.26 |
flink_TumblingWindowAssigner_of_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Creates a new {@code TumblingWindowAssigner} {@link WindowAssigner} that assigns elements to
* time windows based on the element timestamp.
*
* @param size
* The size of the generated windows.
* @return The time policy.
*/
public static TumblingWindowAssigner of(Duration size) {
return new TumblingWindowAssigner(size.toMillis(), 0, true);
}
| 3.26 |
flink_TumblingWindowAssigner_withOffset_rdh
|
/**
* Creates a new {@code TumblingWindowAssigner} {@link WindowAssigner} that assigns elements to
* time windows based on the element timestamp and offset.
*
* <p>For example, if you want to window a stream by hour, but the window begins at the 15th minute of
* each hour, you can use {@code of(Time.hours(1), Time.minutes(15))}; then you will get time
* windows that start at 0:15:00, 1:15:00, 2:15:00, etc.
*
* <p>Rather than that, if you live somewhere that does not use UTC±00:00 time, such as
* China which uses GMT+08:00, and you want a time window with a size of one day that
* begins at every 00:00:00 of local time, you may use {@code of(Time.days(1), Time.hours(-8))}.
* The offset parameter is {@code Time.hours(-8)} since UTC+08:00 is 8 hours earlier than
* UTC time.
*
* @param offset
* The offset which window start would be shifted by.
* @return The time policy.
*/
public TumblingWindowAssigner withOffset(Duration offset) {
return new TumblingWindowAssigner(size, offset.toMillis(), isEventTime);
}
| 3.26 |
flink_CliRowView_init_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected void init() {
// nothing to do
}
| 3.26 |
flink_Tuple16_equals_rdh
|
/**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple16)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple16 tuple = ((Tuple16) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
return true;
}
| 3.26 |
flink_Tuple16_of_rdh
|
/**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15) {
return new Tuple16<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15);
}
| 3.26 |
flink_Tuple16_copy_rdh
|
/**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> copy() {
return new Tuple16<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15);
}
| 3.26 |
flink_Tuple16_toString_rdh
|
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15), where the individual fields are the value returned by
* calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",")
+ StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") +
StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ")";
}
| 3.26 |