name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_CrossOperator_projectTuple12_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> ProjectCross<I1, I2, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
T10, T11>> projectTuple12() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> tType = new TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(fTypes);
return new ProjectCross<I1, I2, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple23_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> ProjectCross<I1, I2, Tuple23<T0, T1, T2, T3,
T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> projectTuple23() {
TypeInformation<?>[] v66 = m3(fieldIndexes);
TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> tType = new TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(v66);
return new ProjectCross<I1, I2, Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple4_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3> ProjectCross<I1, I2, Tuple4<T0, T1, T2, T3>> projectTuple4() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType = new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes);
return new ProjectCross<I1, I2, Tuple4<T0, T1, T2, T3>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
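The `projectTupleN` methods above are resolved internally when a cross projection is built. A minimal usage sketch against the legacy DataSet API is shown below; the sample data, element types, and the inferred `Tuple4` result type are illustrative assumptions, and the exact projection API may differ between Flink versions.

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple4;

public class CrossProjectionSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> left = env.fromElements(Tuple2.of(1, "a"), Tuple2.of(2, "b"));
        DataSet<Tuple2<Long, Double>> right = env.fromElements(Tuple2.of(10L, 0.5));
        // projectFirst/projectSecond pick fields from either input; the projectTupleN(...)
        // methods shown in the snippet resolve the concrete result tuple type.
        DataSet<Tuple4<Integer, String, Long, Double>> projected =
                left.cross(right).projectFirst(0, 1).projectSecond(0, 1);
        projected.print();
    }
}
```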
flink_CrossOperator_projectTuple16_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> ProjectCross<I1, I2, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> projectTuple16() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> tType = new TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(fTypes);
return new ProjectCross<I1, I2, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_projectTuple8_rdh | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7> ProjectCross<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() {
TypeInformation<?>[] fTypes = m3(fieldIndexes);
TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType = new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes);
return new ProjectCross<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.26 |
flink_CrossOperator_with_rdh | /**
* Finalizes a Cross transformation by applying a {@link CrossFunction} to each pair of
* crossed elements.
*
* <p>Each CrossFunction call returns exactly one element.
*
* @param function
* The CrossFunction that is called for each pair of crossed elements.
* @return A CrossOperator that represents the crossed result DataSet
* @see CrossFunction
* @see DataSet
*/
public <R> CrossOperator<I1, I2, R> with(CrossFunction<I1, I2, R> function) {
if (function == null) {
throw new NullPointerException("Cross function must not be null.");
}
TypeInformation<R> returnType = TypeExtractor.getCrossReturnTypes(function, getInput1().getType(), getInput2().getType(), super.getDefaultName(), true);
return new CrossOperator<I1, I2, R>(getInput1(), getInput2(), clean(function), returnType, getCrossHint(), Utils.getCallLocationName());
} | 3.26 |
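For context, `with(...)` is what users call after `DataSet#cross` to finish the transformation. A short, hedged usage sketch (the sample data and element types are illustrative):

```java
import org.apache.flink.api.common.functions.CrossFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class CrossWithSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Integer> numbers = env.fromElements(1, 2, 3);
        DataSet<String> labels = env.fromElements("a", "b");
        // Each pair of crossed elements is passed to the CrossFunction exactly once
        // and produces exactly one output element.
        DataSet<String> pairs = numbers.cross(labels)
                .with(new CrossFunction<Integer, String, String>() {
                    @Override
                    public String cross(Integer n, String label) {
                        return label + n;
                    }
                });
        pairs.print();
    }
}
```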
flink_CrossOperator_m3_rdh | // END_OF_TUPLE_DEPENDENT_CODE
// -----------------------------------------------------------------------------------------
private TypeInformation<?>[] m3(int[] fields) {
TypeInformation<?>[] fieldTypes = new TypeInformation[fields.length];
for (int i = 0; i < fields.length; i++) {
TypeInformation<?> typeInfo;
if (isFieldInFirst[i]) {
if (fields[i] >= 0) {
typeInfo = ((TupleTypeInfo<?>) (ds1.getType())).getTypeAt(fields[i]);
} else {
typeInfo = ds1.getType();
}
} else if (fields[i] >= 0) {
typeInfo = ((TupleTypeInfo<?>) (ds2.getType())).getTypeAt(fields[i]);
} else {
typeInfo = ds2.getType();
}
fieldTypes[i] = typeInfo;
}
return fieldTypes;
} | 3.26 |
flink_MapTypeInfo_isBasicType_rdh | // ------------------------------------------------------------------------
// TypeInformation implementation
// ------------------------------------------------------------------------
@Override
public boolean isBasicType() {
return false;
} | 3.26 |
flink_MapTypeInfo_getKeyTypeInfo_rdh | // ------------------------------------------------------------------------
// MapTypeInfo specific properties
// ------------------------------------------------------------------------
/**
* Gets the type information for the keys in the map
*/
public TypeInformation<K> getKeyTypeInfo() {
return keyTypeInfo;
} | 3.26 |
flink_ChainedFlatMapDriver_getStub_rdh | // --------------------------------------------------------------------------------------------
public Function getStub() {
return this.mapper;
} | 3.26 |
flink_ChainedFlatMapDriver_collect_rdh | // --------------------------------------------------------------------------------------------
@Override
public void collect(IT record) {
try {
this.numRecordsIn.inc();
this.mapper.flatMap(record, this.outputCollector);
} catch (Exception ex) {
throw new ExceptionInChainedStubException(this.taskName, ex);
}
} | 3.26 |
flink_ChainedFlatMapDriver_setup_rdh | // --------------------------------------------------------------------------------------------
@Override
public void setup(AbstractInvokable parent) {
@SuppressWarnings("unchecked")
final FlatMapFunction<IT, OT> mapper = BatchTask.instantiateUserCode(this.config, userCodeClassLoader, FlatMapFunction.class);
this.mapper = mapper;
FunctionUtils.setFunctionRuntimeContext(mapper, getUdfRuntimeContext());
} | 3.26 |
flink_TableDescriptor_toCatalogTable_rdh | // ---------------------------------------------------------------------------------------------
/**
* Converts this descriptor into a {@link CatalogTable}.
*/
public CatalogTable toCatalogTable() {
final Schema schema = getSchema().orElseThrow(() -> new ValidationException("Missing schema in TableDescriptor. A schema is typically required. It can only be omitted at certain documented locations."));
return CatalogTable.of(schema, getComment().orElse(null), getPartitionKeys(), getOptions());
} | 3.26 |
flink_TableDescriptor_getSchema_rdh | // ---------------------------------------------------------------------------------------------
public Optional<Schema> getSchema() {
return Optional.ofNullable(schema);
} | 3.26 |
flink_TableDescriptor_option_rdh | /**
* Sets the given option on the table.
*
* <p>Option keys must be fully specified. When defining options for a {@link Format
* format}, use {@link #format(FormatDescriptor)} instead.
*
* <p>Example:
*
* <pre>{@code TableDescriptor.forConnector("kafka")
* .option("scan.startup.mode", "latest-offset")
* .build();}</pre>
*/
public Builder option(String key, String value) {
Preconditions.checkNotNull(key, "Key must not be null.");
Preconditions.checkNotNull(value, "Value must not be null.");
options.put(key, value);
return this;
} | 3.26 |
flink_TableDescriptor_build_rdh | /**
* Returns an immutable instance of {@link TableDescriptor}.
*/
public TableDescriptor build() {
return new TableDescriptor(schema, options, partitionKeys, comment);
} | 3.26 |
flink_TableDescriptor_toString_rdh | // ---------------------------------------------------------------------------------------------
@Override
public String toString() {
final String escapedPartitionKeys = partitionKeys.stream().map(EncodingUtils::escapeIdentifier).collect(Collectors.joining(", "));
final String partitionedBy = (!partitionKeys.isEmpty()) ? String.format("PARTITIONED BY (%s)", escapedPartitionKeys) : "";
final String serializedOptions = options.entrySet().stream().map(entry -> String.format(" '%s' = '%s'",
EncodingUtils.escapeSingleQuotes(entry.getKey()), EncodingUtils.escapeSingleQuotes(entry.getValue()))).collect(Collectors.joining(String.format(",%n")));
return String.format("%s%nCOMMENT '%s'%n%s%nWITH (%n%s%n)", schema != null ? schema : "", comment != null ? comment : "", partitionedBy, serializedOptions);
} | 3.26 |
flink_TableDescriptor_format_rdh | /**
* Defines the format to be used for this table.
*
* <p>Note that not every connector requires a format to be specified, while others may use
* multiple formats.
*
* <p>Options of the provided {@param formatDescriptor} are automatically prefixed. For
* example,
*
* <pre>{@code descriptorBuilder.format(KafkaOptions.KEY_FORMAT, FormatDescriptor.forFormat("json")
* .option(JsonOptions.IGNORE_PARSE_ERRORS, true)
* .build()}</pre>
*
* <p>will result in the options
*
* <pre>{@code 'key.format' = 'json'
* 'key.json.ignore-parse-errors' = 'true'}</pre>
*/
public Builder format(ConfigOption<String> formatOption, FormatDescriptor formatDescriptor) {
Preconditions.checkNotNull(formatOption, "Format option must not be null.");
Preconditions.checkNotNull(formatDescriptor, "Format descriptor must not be null.");
option(formatOption, formatDescriptor.getFormat());
final String optionPrefix = FactoryUtil.getFormatPrefix(formatOption, formatDescriptor.getFormat());
formatDescriptor.getOptions().forEach((key, value) -> {
if (key.startsWith(optionPrefix)) {
throw new ValidationException(String.format("Format options set using #format(FormatDescriptor) should not contain the prefix '%s', but found '%s'.", optionPrefix, key));
}
final String prefixedKey = optionPrefix + key;
option(prefixedKey, value);
});
return this;
} | 3.26 |
flink_TableDescriptor_comment_rdh | /**
* Define the comment for this table.
*/
public Builder comment(@Nullable String comment) {
this.comment = comment;
return this;
} | 3.26 |
flink_TableDescriptor_schema_rdh | /**
* Define the schema of the {@link TableDescriptor}.
*
* <p>The schema is typically required. It is optional only in cases where the schema can be
* inferred, e.g. {@link Table#insertInto(TableDescriptor)}.
*/
public Builder schema(@Nullable Schema schema) {
this.schema = schema;
return this;
} | 3.26 |
flink_TableDescriptor_m0_rdh | /**
* Creates a new {@link Builder} for a managed table.
*
* @deprecated This method will be removed soon. Please see FLIP-346 for more details.
*/
@Deprecated
public static Builder m0() {
return new Builder();
} | 3.26 |
flink_TableDescriptor_toBuilder_rdh | /**
* Converts this immutable instance into a mutable {@link Builder}.
*/
public Builder toBuilder() {
return new Builder(this);
} | 3.26 |
flink_TableDescriptor_forConnector_rdh | /**
* Creates a new {@link Builder} for a table using the given connector.
*
* @param connector
* The factory identifier for the connector.
*/
public static Builder forConnector(String connector) {
Preconditions.checkNotNull(connector, "Table descriptors require a connector identifier.");
final Builder descriptorBuilder = new Builder();
descriptorBuilder.option(FactoryUtil.CONNECTOR, connector);
return descriptorBuilder;
} | 3.26 |
flink_TableDescriptor_partitionedBy_rdh | /**
* Define which columns this table is partitioned by.
*/
public Builder partitionedBy(String... partitionKeys) {
this.partitionKeys.addAll(Arrays.asList(partitionKeys));
return this;
} | 3.26 |
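Putting the builder methods from the preceding rows together, a hedged end-to-end sketch of constructing a `TableDescriptor` might look as follows; the connector name, option key, and column names are illustrative and not taken from the snippets above:

```java
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.TableDescriptor;

public class TableDescriptorSketch {
    public static void main(String[] args) {
        TableDescriptor descriptor = TableDescriptor.forConnector("filesystem") // factory identifier
                .schema(Schema.newBuilder()
                        .column("user_id", DataTypes.BIGINT())
                        .column("dt", DataTypes.STRING())
                        .build())
                .option("path", "/tmp/orders") // hypothetical connector option
                .format("json")
                .partitionedBy("dt")
                .comment("example table")
                .build();
        // toString() renders the schema, comment, PARTITIONED BY clause and WITH options.
        System.out.println(descriptor);
    }
}
```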
flink_FloatParser_parseField_rdh | /**
* Static utility to parse a field of type float from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException
* Thrown when the value cannot be parsed because the text
* does not represent a correct number.
*/
public static final float parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if ((limitedLen > 0) && (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[(startPos + limitedLen) - 1]))) {
throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Float.parseFloat(str);
} | 3.26 |
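A small, hedged example of calling the parser on a delimited byte buffer (the package location `org.apache.flink.types.parser` is an assumption):

```java
import java.nio.charset.StandardCharsets;

import org.apache.flink.types.parser.FloatParser;

public class FloatParserSketch {
    public static void main(String[] args) {
        byte[] bytes = "3.14|42.0|".getBytes(StandardCharsets.US_ASCII);
        // First field: start at offset 0, the remaining length is the whole array,
        // and '|' terminates the field.
        float first = FloatParser.parseField(bytes, 0, bytes.length, '|');
        // Second field: start right after the first delimiter.
        float second = FloatParser.parseField(bytes, 5, bytes.length - 5, '|');
        System.out.println(first + " " + second); // 3.14 42.0
    }
}
```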
flink_UploadThrottle_hasCapacity_rdh | /**
* Test whether some capacity is available.
*/
public boolean hasCapacity() {
return inFlightBytesCounter < maxBytesInFlight;
} | 3.26 |
flink_UploadThrottle_seizeCapacity_rdh | /**
* Seize <b>bytes</b> capacity. It is the caller's responsibility to ensure that at least some capacity
* {@link #hasCapacity() is available}. <strong>After</strong> this call, the caller is allowed
* to actually use the seized capacity. When the capacity is not needed anymore, the caller is
* required to {@link #releaseCapacity(long) release} it. Called by the Task thread.
*
* @throws IllegalStateException
* if capacity is unavailable.
*/
public void seizeCapacity(long bytes) throws IllegalStateException {
checkState(hasCapacity());
inFlightBytesCounter += bytes;
} | 3.26 |
flink_UploadThrottle_releaseCapacity_rdh | /**
* Release previously {@link #seizeCapacity(long) seized} capacity. Called by {@link BatchingStateChangeUploadScheduler} (IO thread).
*/
public void releaseCapacity(long bytes) {
inFlightBytesCounter -= bytes;
} | 3.26 |
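The three methods above form a check-seize-release protocol. A minimal sketch of the intended call pattern follows; `UploadThrottle` is an internal class, its constructor argument (the max bytes in flight) is an assumption, and the polling loop stands in for the lock-based waiting the real scheduler uses.

```java
public class UploadThrottleSketch {
    public static void main(String[] args) throws InterruptedException {
        UploadThrottle throttle = new UploadThrottle(16 * 1024 * 1024); // assumed constructor
        long uploadSize = 1024 * 1024;

        // Task thread: seizeCapacity requires a prior hasCapacity() check.
        while (!throttle.hasCapacity()) {
            Thread.sleep(10); // stand-in for waiting on a lock/condition
        }
        throttle.seizeCapacity(uploadSize);
        try {
            // ... perform the upload of `uploadSize` bytes ...
        } finally {
            // IO thread in the real code: return the capacity once the upload completes.
            throttle.releaseCapacity(uploadSize);
        }
    }
}
```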
flink_TypeInferenceExtractor_forAsyncTableFunction_rdh | /**
* Extracts a type inference from an {@link AsyncTableFunction}.
*/
public static TypeInference forAsyncTableFunction(DataTypeFactory typeFactory, Class<? extends AsyncTableFunction<?>> function) {
final FunctionMappingExtractor mappingExtractor = new FunctionMappingExtractor(typeFactory, function, UserDefinedFunctionHelper.ASYNC_TABLE_EVAL, createParameterSignatureExtraction(1), null, createGenericResultExtraction(AsyncTableFunction.class, 0, true), createParameterWithArgumentVerification(CompletableFuture.class));
return extractTypeInference(mappingExtractor);
} | 3.26 |
flink_TypeInferenceExtractor_forTableFunction_rdh | /**
* Extracts a type inference from a {@link TableFunction}.
*/
public static TypeInference forTableFunction(DataTypeFactory typeFactory, Class<? extends TableFunction<?>> function) {
final FunctionMappingExtractor mappingExtractor = new FunctionMappingExtractor(typeFactory, function, UserDefinedFunctionHelper.TABLE_EVAL, createParameterSignatureExtraction(0), null, createGenericResultExtraction(TableFunction.class, 0, true), createParameterVerification());
return extractTypeInference(mappingExtractor);
} | 3.26 |
flink_TypeInferenceExtractor_forProcedure_rdh | /**
* Extracts a type inference from a {@link Procedure}.
*/
public static TypeInference forProcedure(DataTypeFactory typeFactory, Class<? extends Procedure> procedure) {
final ProcedureMappingExtractor mappingExtractor = new ProcedureMappingExtractor(typeFactory, procedure, ProcedureDefinition.PROCEDURE_CALL, ProcedureMappingExtractor.createParameterSignatureExtraction(1), ProcedureMappingExtractor.createReturnTypeResultExtraction(), ProcedureMappingExtractor.createParameterAndReturnTypeVerification());
return extractTypeInference(mappingExtractor);
} | 3.26 |
flink_TypeInferenceExtractor_forTableAggregateFunction_rdh | /**
* Extracts a type inference from a {@link TableAggregateFunction}.
*/
public static TypeInference forTableAggregateFunction(DataTypeFactory typeFactory, Class<? extends TableAggregateFunction<?, ?>> function) {
final FunctionMappingExtractor mappingExtractor = new FunctionMappingExtractor(typeFactory, function, UserDefinedFunctionHelper.TABLE_AGGREGATE_ACCUMULATE, createParameterSignatureExtraction(1), createGenericResultExtraction(TableAggregateFunction.class, 1, false), createGenericResultExtraction(TableAggregateFunction.class, 0, true), createParameterWithAccumulatorVerification());
return extractTypeInference(mappingExtractor);
} | 3.26 |
flink_TypeInferenceExtractor_forAggregateFunction_rdh | /**
* Extracts a type inference from an {@link AggregateFunction}.
*/
public static TypeInference forAggregateFunction(DataTypeFactory typeFactory, Class<? extends AggregateFunction<?, ?>> function) {
final FunctionMappingExtractor mappingExtractor = new FunctionMappingExtractor(typeFactory, function, UserDefinedFunctionHelper.AGGREGATE_ACCUMULATE, createParameterSignatureExtraction(1), createGenericResultExtraction(AggregateFunction.class, 1, false), createGenericResultExtraction(AggregateFunction.class, 0, true), createParameterWithAccumulatorVerification());
return extractTypeInference(mappingExtractor);
} | 3.26 |
flink_TypeInferenceExtractor_m0_rdh | /**
* Extracts a type inference from a {@link ScalarFunction}.
*/
public static TypeInference m0(DataTypeFactory typeFactory, Class<? extends ScalarFunction> function) {
final FunctionMappingExtractor mappingExtractor = new FunctionMappingExtractor(typeFactory, function, UserDefinedFunctionHelper.SCALAR_EVAL, createParameterSignatureExtraction(0), null, createReturnTypeResultExtraction(), createParameterAndReturnTypeVerification());
return extractTypeInference(mappingExtractor);
} | 3.26 |
flink_CheckpointStatsCounts_incrementCompletedCheckpoints_rdh | /**
* Increments the number of successfully completed checkpoints.
*
* <p>It is expected that this follows a previous call to {@link #incrementInProgressCheckpoints()}.
*/
void incrementCompletedCheckpoints() {
if (canDecrementOfInProgressCheckpointsNumber()) {
numInProgressCheckpoints--;
}
numCompletedCheckpoints++;
} | 3.26 |
flink_CheckpointStatsCounts_getNumberOfRestoredCheckpoints_rdh | /**
* Returns the number of restored checkpoints.
*
* @return Number of restored checkpoints.
*/
public long getNumberOfRestoredCheckpoints() {
return numRestoredCheckpoints;
} | 3.26 |
flink_CheckpointStatsCounts_getNumberOfFailedCheckpoints_rdh | /**
* Returns the number of failed checkpoints.
*
* @return Number of failed checkpoints.
*/
public long getNumberOfFailedCheckpoints() {
return numFailedCheckpoints;
} | 3.26 |
flink_CheckpointStatsCounts_incrementFailedCheckpoints_rdh | /**
* Increments the number of failed checkpoints.
*
* <p>It is expected that this follows a previous call to {@link #incrementInProgressCheckpoints()}.
*/
void incrementFailedCheckpoints() {
if (canDecrementOfInProgressCheckpointsNumber()) {
numInProgressCheckpoints--;
}
numFailedCheckpoints++;
} | 3.26 |
flink_CheckpointStatsCounts_incrementRestoredCheckpoints_rdh | /**
* Increments the number of restored checkpoints.
*/
void incrementRestoredCheckpoints() {
numRestoredCheckpoints++;
} | 3.26 |
flink_CheckpointStatsCounts_createSnapshot_rdh | /**
* Creates a snapshot of the current state.
*
* @return Snapshot of the current state.
*/
CheckpointStatsCounts createSnapshot() {
return new CheckpointStatsCounts(numRestoredCheckpoints, numTotalCheckpoints, numInProgressCheckpoints, numCompletedCheckpoints, numFailedCheckpoints);
} | 3.26 |
flink_CheckpointStatsCounts_getTotalNumberOfCheckpoints_rdh | /**
* Returns the total number of checkpoints (in progress, completed, failed).
*
* @return Total number of checkpoints.
*/
public long getTotalNumberOfCheckpoints() {
return numTotalCheckpoints;
} | 3.26 |
flink_CheckpointStatsCounts_incrementInProgressCheckpoints_rdh | /**
* Increments the number of total and in progress checkpoints.
*/
void incrementInProgressCheckpoints() {
numInProgressCheckpoints++;
numTotalCheckpoints++;
} | 3.26 |
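The increment methods above are package-private, so a usage sketch like the following would only live next to the class (for instance in a test); the no-argument constructor is an assumption. It illustrates the counter lifecycle: every checkpoint is first counted as in progress, then moved to completed or failed.

```java
class CheckpointStatsCountsSketch {
    static void lifecycle() {
        CheckpointStatsCounts counts = new CheckpointStatsCounts(); // assumed no-arg constructor

        counts.incrementInProgressCheckpoints(); // total = 1, in progress = 1
        counts.incrementCompletedCheckpoints();  // in progress = 0, completed = 1

        counts.incrementInProgressCheckpoints(); // total = 2, in progress = 1
        counts.incrementFailedCheckpoints();     // in progress = 0, failed = 1

        CheckpointStatsCounts snapshot = counts.createSnapshot();
        System.out.println(snapshot.getTotalNumberOfCheckpoints());  // 2
        System.out.println(snapshot.getNumberOfFailedCheckpoints()); // 1
    }
}
```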
flink_StreamingSink_sink_rdh | /**
* Creates a sink from a file writer. Decides whether to add a node that commits partitions
* according to the options.
*/
public static DataStreamSink<?> sink(ProviderContext providerContext, DataStream<PartitionCommitInfo> writer, Path locationPath, ObjectIdentifier identifier, List<String> partitionKeys, TableMetaStoreFactory msFactory, FileSystemFactory fsFactory, Configuration options) {
DataStream<?> stream = writer;
if ((partitionKeys.size() > 0) && options.contains(SINK_PARTITION_COMMIT_POLICY_KIND)) {
PartitionCommitter committer = new PartitionCommitter(locationPath, identifier, partitionKeys, msFactory, fsFactory, options);
SingleOutputStreamOperator<Void> v12 = writer.transform(PartitionCommitter.class.getSimpleName(), Types.VOID, committer).setParallelism(1).setMaxParallelism(1);
providerContext.generateUid("partition-committer").ifPresent(v12::uid);
stream = v12;
}
DataStreamSink<?> discardingSink = stream.sinkTo(new DiscardingSink<>()).name("end").setParallelism(1);
providerContext.generateUid("discarding-sink").ifPresent(discardingSink::uid);
return discardingSink;
} | 3.26 |
flink_StreamingSink_compactionWriter_rdh | /**
* Creates a file writer with compaction operators from the input stream. In addition, it can emit
* {@link PartitionCommitInfo} downstream.
*/
public static <T> DataStream<PartitionCommitInfo> compactionWriter(ProviderContext providerContext, DataStream<T> inputStream, long bucketCheckInterval, BucketsBuilder<T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>> bucketsBuilder, FileSystemFactory fsFactory, Path path, CompactReader.Factory<T> readFactory, long targetFileSize, int parallelism, boolean parallelismConfigured) {
CompactFileWriter<T> writer = new CompactFileWriter<>(bucketCheckInterval, bucketsBuilder);
SupplierWithException<FileSystem, IOException> fsSupplier = ((SupplierWithException) (() -> fsFactory.create(path.toUri())));
CompactCoordinator coordinator = new CompactCoordinator(fsSupplier, targetFileSize);
SingleOutputStreamOperator<CoordinatorInput> v5 = inputStream.transform("streaming-writer", TypeInformation.of(CoordinatorInput.class), writer);
v5.getTransformation().setParallelism(parallelism, parallelismConfigured);
providerContext.generateUid("streaming-writer").ifPresent(v5::uid);
SingleOutputStreamOperator<CoordinatorOutput> coordinatorStream = v5.transform("compact-coordinator", TypeInformation.of(CoordinatorOutput.class), coordinator).setParallelism(1).setMaxParallelism(1);
providerContext.generateUid("compact-coordinator").ifPresent(coordinatorStream::uid);
CompactWriter.Factory<T> writerFactory = CompactBucketWriter.factory(((SupplierWithException) (bucketsBuilder::createBucketWriter)));
CompactOperator<T> compacter = new CompactOperator<>(fsSupplier, readFactory, writerFactory);
SingleOutputStreamOperator<PartitionCommitInfo> operatorStream = coordinatorStream.broadcast().transform("compact-operator", TypeInformation.of(PartitionCommitInfo.class), compacter);
operatorStream.getTransformation().setParallelism(parallelism, parallelismConfigured);
providerContext.generateUid("compact-operator").ifPresent(operatorStream::uid);
return operatorStream;
} | 3.26 |
flink_StreamingSink_m0_rdh | /**
* Creates a file writer from the input stream. This is similar to {@link StreamingFileSink}; in
* addition, it can emit {@link PartitionCommitInfo} downstream.
*/
public static <T> DataStream<PartitionCommitInfo> m0(ProviderContext providerContext, DataStream<T> inputStream, long bucketCheckInterval, BucketsBuilder<T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>> bucketsBuilder, int parallelism, List<String> partitionKeys, Configuration conf, boolean parallelismConfigured) {
StreamingFileWriter<T> fileWriter = new StreamingFileWriter<>(bucketCheckInterval, bucketsBuilder, partitionKeys, conf);
SingleOutputStreamOperator<PartitionCommitInfo> writerStream = inputStream.transform(StreamingFileWriter.class.getSimpleName(), TypeInformation.of(PartitionCommitInfo.class), fileWriter);
writerStream.getTransformation().setParallelism(parallelism, parallelismConfigured);
providerContext.generateUid("streaming-writer").ifPresent(writerStream::uid);
return writerStream;
} | 3.26 |
flink_FlatMapIterator_flatMap_rdh | // --------------------------------------------------------------------------------------------
/**
* Delegates calls to the {@link #flatMap(Object)} method.
*/
@Override
public final void flatMap(IN value, Collector<OUT> out) throws Exception {
for (Iterator<OUT> iter = flatMap(value); iter.hasNext();) {
out.collect(iter.next());
}
} | 3.26 |
flink_KubernetesSessionCli_repStep_rdh | /**
* Checks whether to continue reading input or to kill the cluster.
*
* @param in
* input buffer reader
* @return f0: whether to continue reading from input; f1: whether to kill the cluster.
*/
private Tuple2<Boolean, Boolean> repStep(BufferedReader in) throws IOException, InterruptedException {
final long startTime = System.currentTimeMillis();
while (((System.currentTimeMillis() - startTime) < CLIENT_POLLING_INTERVAL_MS) && (!in.ready())) {
Thread.sleep(200L);
}
// ------------- handle interactive command by user. ----------------------
if (in.ready()) {
final String command = in.readLine();
switch (command) {
case "quit" :
return new Tuple2<>(false, false);
case "stop" :
return new Tuple2<>(false, true);
case "help" :
System.err.println(KUBERNETES_CLUSTER_HELP);
break;
default :
System.err.println(("Unknown command '" + command) + "'. Showing help:");
System.err.println(KUBERNETES_CLUSTER_HELP);
break;
}
}
return new Tuple2<>(true, false);
} | 3.26 |
flink_BufferManager_releaseAll_rdh | /**
* The floating buffer is recycled to local buffer pool directly, and the exclusive buffer
* will be gathered to return to global buffer pool later.
*
* @param exclusiveSegments
* The list that we will add exclusive segments into.
*/
void releaseAll(List<MemorySegment> exclusiveSegments) {
Buffer buffer;
while ((buffer = floatingBuffers.poll()) != null) {
buffer.recycleBuffer();
}
while ((buffer = exclusiveBuffers.poll()) != null) {
exclusiveSegments.add(buffer.getMemorySegment());
}
} | 3.26 |
flink_BufferManager_unsynchronizedGetNumberOfRequiredBuffers_rdh | // ------------------------------------------------------------------------
// Getter properties
// ------------------------------------------------------------------------
@VisibleForTesting
int unsynchronizedGetNumberOfRequiredBuffers() {
return numRequiredBuffers;
} | 3.26 |
flink_BufferManager_requestFloatingBuffers_rdh | /**
* Requests floating buffers from the buffer pool based on the given required amount, and
* returns the actual requested amount. If the required amount is not fully satisfied, it will
* register as a listener.
*/
int requestFloatingBuffers(int numRequired) {
int numRequestedBuffers = 0;
synchronized(bufferQueue) {
// Similar to notifyBufferAvailable(), make sure that we never add a buffer after
// channel
// released all buffers via releaseAllResources().
if (inputChannel.isReleased()) {
return numRequestedBuffers;
}
numRequiredBuffers = numRequired;
numRequestedBuffers = tryRequestBuffers();
}
return numRequestedBuffers;
} | 3.26 |
flink_BufferManager_addExclusiveBuffer_rdh | /**
* Adds an exclusive buffer (back) into the queue and releases one floating buffer if the
* number of available buffers in the queue exceeds the required amount. If a floating buffer
* is released, the total number of available buffers after adding this exclusive buffer is
* unchanged, and no new buffers are available. The caller is responsible for recycling the
* released/returned floating buffer.
*
* @param buffer
* The exclusive buffer to add
* @param numRequiredBuffers
* The number of required buffers
* @return A released floating buffer, which may be null if numRequiredBuffers is not met.
*/
@Nullable
Buffer addExclusiveBuffer(Buffer buffer, int numRequiredBuffers) {
exclusiveBuffers.add(buffer);
if (getAvailableBufferSize() > numRequiredBuffers) {
return floatingBuffers.poll();
}
return null;
} | 3.26 |
flink_BufferManager_releaseAllBuffers_rdh | /**
* Recycles all the exclusive and floating buffers from the given buffer queue.
*/
void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException {
// Gather all exclusive buffers and recycle them to global pool in batch, because
// we do not want to trigger redistribution of buffers after each recycle.
final List<MemorySegment> exclusiveRecyclingSegments = new ArrayList<>();
Exception err = null;
Buffer buffer;
while ((buffer = buffers.poll()) != null) {
try {
if (buffer.getRecycler() == this) {
exclusiveRecyclingSegments.add(buffer.getMemorySegment());
} else {
buffer.recycleBuffer();
}
} catch (Exception e) {
err = firstOrSuppressed(e, err);
}
}
try {
synchronized(bufferQueue) {
bufferQueue.releaseAll(exclusiveRecyclingSegments);
bufferQueue.notifyAll();
}
} catch (Exception e) {
err = firstOrSuppressed(e, err);
}
try {
if (exclusiveRecyclingSegments.size() > 0) {
globalPool.recycleUnpooledMemorySegments(exclusiveRecyclingSegments);
}
} catch (Exception e) {
err = firstOrSuppressed(e, err);
}
if (err != null) {
throw err instanceof IOException ? ((IOException) (err)) : new IOException(err);
}
} | 3.26 |
flink_BufferManager_requestBuffer_rdh | // ------------------------------------------------------------------------
// Buffer request
// ------------------------------------------------------------------------
@Nullable
Buffer requestBuffer() {
synchronized(bufferQueue) {
// decrease the number of buffers require to avoid the possibility of
// allocating more than required buffers after the buffer is taken
--numRequiredBuffers;
return bufferQueue.takeBuffer();
}
} | 3.26 |
flink_BufferManager_requestExclusiveBuffers_rdh | /**
* Requests exclusive buffers from the provider.
*/
void requestExclusiveBuffers(int numExclusiveBuffers) throws IOException {
checkArgument(numExclusiveBuffers >= 0, "Num exclusive buffers must be non-negative.");
if (numExclusiveBuffers == 0) {
return;
}
Collection<MemorySegment> segments = globalPool.requestUnpooledMemorySegments(numExclusiveBuffers);
synchronized(bufferQueue) {
// AvailableBufferQueue::addExclusiveBuffer may release the previously allocated
// floating buffer, which requires the caller to recycle these released floating
// buffers. There should be no floating buffers that have been allocated before the
// exclusive buffers are initialized, so here only a simple assertion is required
checkState(unsynchronizedGetFloatingBuffersAvailable() == 0, "Bug in buffer allocation logic: floating buffer is allocated before exclusive buffers are initialized.");
for (MemorySegment segment : segments) {
bufferQueue.addExclusiveBuffer(new NetworkBuffer(segment, this), numRequiredBuffers);
}
}
} | 3.26 |
flink_BufferManager_recycle_rdh | // ------------------------------------------------------------------------
// Buffer recycle
// ------------------------------------------------------------------------
/**
* The exclusive buffer is recycled to this channel manager directly, and it may trigger returning
* an extra floating buffer based on <tt>numRequiredBuffers</tt>.
*
* @param segment
* The exclusive segment of this channel.
*/
@Override
public void recycle(MemorySegment segment) {
@Nullable
Buffer releasedFloatingBuffer = null;
synchronized(bufferQueue) {
try {
// Similar to notifyBufferAvailable(), make sure that we never add a buffer
// after channel released all buffers via releaseAllResources().
if (inputChannel.isReleased()) {
globalPool.recycleUnpooledMemorySegments(Collections.singletonList(segment));
return;
} else {
releasedFloatingBuffer = bufferQueue.addExclusiveBuffer(new NetworkBuffer(segment, this), numRequiredBuffers);
}
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
} finally {
bufferQueue.notifyAll();
}
}
if (releasedFloatingBuffer != null) {
releasedFloatingBuffer.recycleBuffer();
} else {
try {
inputChannel.notifyBufferAvailable(1);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
}
} | 3.26 |
flink_BufferManager_notifyBufferAvailable_rdh | // ------------------------------------------------------------------------
// Buffer listener notification
// ------------------------------------------------------------------------
/**
* The buffer pool notifies this listener of an available floating buffer. If the listener is
* released or currently does not need extra buffers, the buffer should be returned to the
* buffer pool. Otherwise, the buffer will be added into the <tt>bufferQueue</tt>.
*
* @param buffer
* Buffer that becomes available in buffer pool.
* @return true if the buffer is accepted by this listener.
*/
@Override
public boolean notifyBufferAvailable(Buffer buffer) {
// Assuming two remote channels with respective buffer managers as listeners inside
// LocalBufferPool.
// While the canceler thread is calling ch1#releaseAllResources, it might trigger
// bm2#notifyBufferAvailable.
// Concurrently if task thread is recycling exclusive buffer, it might trigger
// bm1#notifyBufferAvailable.
// Then these two threads will both occupy the respective bufferQueue lock and wait for
// other side's
// bufferQueue lock to cause deadlock. So we check the isReleased state out of synchronized
// to resolve it.
if (inputChannel.isReleased()) {
return false;
}
int numBuffers = 0;
boolean isBufferUsed = false;
try {
synchronized(bufferQueue) {
checkState(isWaitingForFloatingBuffers, "This channel should be waiting for floating buffers.");
isWaitingForFloatingBuffers = false;
// Important: make sure that we never add a buffer after releaseAllResources()
// released all buffers. Following scenarios exist:
// 1) releaseAllBuffers() already released buffers inside bufferQueue
// -> while isReleased is set correctly in InputChannel
// 2) releaseAllBuffers() did not yet release buffers from bufferQueue
// -> we may or may not have set isReleased yet but will always wait for the
// lock on bufferQueue to release buffers
if (inputChannel.isReleased() || (bufferQueue.getAvailableBufferSize() >= numRequiredBuffers)) {
return false;
}
bufferQueue.addFloatingBuffer(buffer);
isBufferUsed = true;
numBuffers += 1 + tryRequestBuffers();
bufferQueue.notifyAll();
}
inputChannel.notifyBufferAvailable(numBuffers);
} catch (Throwable t) {
inputChannel.setError(t);
}
return isBufferUsed;
} | 3.26 |
flink_TaskManagerExceptionUtils_tryEnrichTaskManagerError_rdh | /**
* Tries to enrich the passed exception or its causes with additional information.
*
* <p>This method improves error messages for direct and metaspace {@link OutOfMemoryError}. It
* adds descriptions about possible causes and ways of resolution.
*
* @param root
* The Throwable of which the cause tree shall be traversed.
*/
public static void tryEnrichTaskManagerError(@Nullable Throwable root) {
tryEnrichOutOfMemoryError(root, TM_METASPACE_OOM_ERROR_MESSAGE, TM_DIRECT_OOM_ERROR_MESSAGE, null);
} | 3.26 |
flink_MultipleIdsMessageAcknowledgingSourceBase_snapshotState_rdh | // ------------------------------------------------------------------------
// Checkpointing the data
// ------------------------------------------------------------------------
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
f0.add(new Tuple2<>(context.getCheckpointId(), sessionIds));
sessionIds = new ArrayList<>(64);
super.snapshotState(context);
} | 3.26 |
flink_MultipleIdsMessageAcknowledgingSourceBase_acknowledgeIDs_rdh | // ------------------------------------------------------------------------
// ID Checkpointing
// ------------------------------------------------------------------------
/**
* Acknowledges the session ids.
*
* @param checkpointId
* The id of the current checkout to acknowledge ids for.
* @param uniqueIds
* The checkpointed unique ids which are ignored here. They only serve as a
* means of de-duplicating messages when the acknowledgment after a checkpoint fails.
*/
@Override
protected final void acknowledgeIDs(long checkpointId, Set<UId> uniqueIds) {
LOG.debug("Acknowledging ids for checkpoint {}", checkpointId);
Iterator<Tuple2<Long, List<SessionId>>> iterator = f0.iterator();
while (iterator.hasNext()) {
final Tuple2<Long, List<SessionId>> next = iterator.next();
long id = next.f0;
if (id <= checkpointId) {
acknowledgeSessionIDs(next.f1);
// remove ids for this session
iterator.remove();
}
}
} | 3.26 |
flink_TextOutputFormat_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return (("TextOutputFormat (" + getOutputFilePath()) + ") - ") + this.charsetName;
} | 3.26 |
flink_TextOutputFormat_open_rdh | // --------------------------------------------------------------------------------------------
@Override
public void open(int taskNumber, int numTasks) throws IOException {
super.open(taskNumber, numTasks);
try {
this.charset = Charset.forName(charsetName);
} catch (IllegalCharsetNameException e) {
throw new IOException(("The charset " + charsetName) + " is not valid.", e);
} catch (UnsupportedCharsetException e) {
throw new IOException(("The charset " + charsetName) + " is not supported.", e);
}
} | 3.26 |
flink_CustomizedConverter_checkArgumentNumber_rdh | // ---------------------------------------------------------------------------------------------
protected static void checkArgumentNumber(CallExpression call, int... validArgumentCounts) {
boolean hasValidArgumentCount = false;
for (int argumentCount : validArgumentCounts) {
if (call.getChildren().size() == argumentCount) {
hasValidArgumentCount = true;
break;
}
}
checkArgument(call, hasValidArgumentCount);
} | 3.26 |
flink_HiveTableMetaStoreFactory_listDataFileRecursively_rdh | /**
* List data files recursively.
*/
private List<FileStatus> listDataFileRecursively(FileSystem fileSystem, Path f) throws IOException {
List<FileStatus> fileStatusList = new ArrayList<>();
for (FileStatus fileStatus : fileSystem.listStatus(f)) {
if (fileStatus.isDir() && (!isStagingDir(fileStatus.getPath()))) {
fileStatusList.addAll(listDataFileRecursively(fileSystem, fileStatus.getPath()));
} else if (isDataFile(fileStatus)) {
fileStatusList.add(fileStatus);
}
}
return fileStatusList;
} | 3.26 |
flink_CollectionInputFormat_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('[');
int num = 0;
for (T e : dataSet) {
sb.append(e);
if (num != (dataSet.size() - 1)) {
sb.append(", ");
if (sb.length() > MAX_TO_STRING_LEN) {
sb.append("...");
break;
}
}
num++;
}
sb.append(']');
return sb.toString();
} | 3.26 |
flink_CollectionInputFormat_writeObject_rdh | // --------------------------------------------------------------------------------------------
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
final int size = dataSet.size();
out.writeInt(size);
if (size > 0) {
DataOutputViewStreamWrapper wrapper = new DataOutputViewStreamWrapper(out);
for (T element : dataSet) {
serializer.serialize(element, wrapper);
}
}
} | 3.26 |
flink_CollectionInputFormat_checkCollection_rdh | // --------------------------------------------------------------------------------------------
public static <X> void checkCollection(Collection<X> elements, Class<X> viewedAs) {
if ((elements == null) || (viewedAs == null)) {
throw new NullPointerException();
}
for (X elem : elements) {
if (elem == null) {
throw new IllegalArgumentException("The collection must not contain null elements.");
}
// The second part of the condition is a workaround for the situation that can arise
// from eg.
// "env.fromElements((),(),())"
// In this situation, UnitTypeInfo.getTypeClass returns void.class (when we are in the
// Java world), but
// the actual objects that we will be working with, will be BoxedUnits.
// Note: TypeInformationGenTest.testUnit tests this condition.
if ((!viewedAs.isAssignableFrom(elem.getClass()))
&& (!(elem.getClass().toString().equals("class scala.runtime.BoxedUnit") && viewedAs.equals(void.class)))) {
throw new IllegalArgumentException("The elements in the collection are not all subclasses of " + viewedAs.getCanonicalName());
}
}
} | 3.26 |
flink_CallBindingCallContext_m0_rdh | // --------------------------------------------------------------------------------------------
@Nullable
private static DataType m0(SqlCallBinding binding, @Nullable RelDataType returnType) {
if (((returnType == null) || returnType.equals(binding.getValidator().getUnknownType())) || (returnType.getSqlTypeName() == SqlTypeName.ANY)) {
return null;
} else {
final LogicalType logicalType = FlinkTypeFactory.toLogicalType(returnType);
return TypeConversions.fromLogicalToDataType(logicalType);
}
} | 3.26 |
flink_TimestampUtil_createVectorFromConstant_rdh | // creates a Hive ColumnVector of constant timestamp value
public static ColumnVector createVectorFromConstant(int batchSize, Object value) {
if (hiveTSColVectorClz != null) {
return OrcTimestampColumnVector.createFromConstant(batchSize, value);
} else {
return OrcLegacyTimestampColumnVector.createFromConstant(batchSize, value);
}
} | 3.26 |
flink_TimestampUtil_isHiveTimestampColumnVector_rdh | // whether a ColumnVector is the new TimestampColumnVector
public static boolean isHiveTimestampColumnVector(ColumnVector vector) {
return (hiveTSColVectorClz != null) && hiveTSColVectorClz.isAssignableFrom(vector.getClass());
} | 3.26 |
flink_EmptyMutableObjectIterator_get_rdh | /**
* Gets a singleton instance of the empty iterator.
*
* @param <E>
* The type of the objects (not) returned by the iterator.
* @return An instance of the iterator.
*/
public static <E> MutableObjectIterator<E> get() {
@SuppressWarnings("unchecked")
MutableObjectIterator<E> iter = ((MutableObjectIterator<E>) (INSTANCE));
return iter;
} | 3.26 |
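A short usage sketch of the singleton; the package locations of `MutableObjectIterator` and `EmptyMutableObjectIterator` are assumptions.

```java
import org.apache.flink.runtime.util.EmptyMutableObjectIterator;
import org.apache.flink.util.MutableObjectIterator;

public class EmptyIteratorSketch {
    public static void main(String[] args) throws Exception {
        // The same singleton can be handed out for any element type.
        MutableObjectIterator<String> empty = EmptyMutableObjectIterator.get();
        // Both next() variants signal "no more records" by returning null.
        System.out.println(empty.next());        // null
        System.out.println(empty.next("reuse")); // null
    }
}
```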
flink_Tuple0_getArity_rdh | // ------------------------------------------------------------------------
@Override
public int getArity() {
return 0;
} | 3.26 |
flink_Tuple0_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form "()".
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "()";
} | 3.26 |
flink_Tuple0_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
return (this == o) || (o instanceof Tuple0);
} | 3.26 |
flink_Tuple0_readResolve_rdh | // singleton deserialization
private Object readResolve() throws ObjectStreamException {
return INSTANCE;
} | 3.26 |
flink_JobSchedulingPlan_empty_rdh | /**
* Create an empty {@link JobSchedulingPlan} with no information about vertices or allocations.
*/
public static JobSchedulingPlan empty() {
return new JobSchedulingPlan(VertexParallelism.empty(), Collections.emptyList());
} | 3.26 |
flink_AbstractHeapVector_getDictionaryIds_rdh | /**
* Returns the underlying integer column for ids of dictionary.
*/
@Override
public HeapIntVector getDictionaryIds() {
return dictionaryIds;
} | 3.26 |
flink_AbstractHeapVector_reset_rdh | /**
* Resets the column to default state. - fills the isNull array with false. - sets noNulls to
* true.
*/
@Override
public void reset() {
if (!noNulls) {
Arrays.fill(isNull, false);
}
noNulls = true;
} | 3.26 |
flink_SqlColumnPosSpec_symbol_rdh | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.26 |
flink_MasterState_name_rdh | // ------------------------------------------------------------------------
public String name() {
return name;
} | 3.26 |
flink_MasterState_m0_rdh | // ------------------------------------------------------------------------
@Override
public String m0() {
return (((("name: " + name) + " ; version: ") + version) + " ; bytes: ") + Arrays.toString(bytes);
} | 3.26 |
flink_LegacySourceTransformationTranslator_translateForBatchInternal_rdh | /**
* A {@link TransformationTranslator} for the {@link LegacySourceTransformation}.
*
* @param <OUT>
* The type of the elements that the {@link LegacySourceTransformation} we are
* translating is producing.
*/
@Internal
public class LegacySourceTransformationTranslator<OUT> extends SimpleTransformationTranslator<OUT, LegacySourceTransformation<OUT>> {
@Override
protected Collection<Integer> translateForBatchInternal(final LegacySourceTransformation<OUT> transformation, final Context context) {
return translateInternal(transformation, context);
} | 3.26 |
flink_FlinkPreparingTableBase_getStatistic_rdh | // ~ Methods ----------------------------------------------------------------
/**
* Returns the statistic of this table.
*/
public FlinkStatistic getStatistic() {
return this.statistic;
} | 3.26 |
flink_FlinkPreparingTableBase_getNames_rdh | /**
* Returns the table path in the {@link RelOptSchema}. Different from {@link #getQualifiedName()}, which is mainly used for the table digest.
*/
public List<String> getNames() {
return names;
} | 3.26 |
flink_FlinkPreparingTableBase_getRowType_rdh | /**
* Returns the type of rows returned by this table.
*/
public RelDataType getRowType() {
return f0;
} | 3.26 |
flink_FlinkPreparingTableBase_getDistribution_rdh | /**
* Returns a description of the physical distribution of the rows in this table.
*
* @see org.apache.calcite.rel.metadata.RelMetadataQuery#distribution
*/
public RelDistribution getDistribution() {
return null;
}
/**
* Returns whether the given columns are a key or a superset of a unique key of this table.
*
* <p>Note: Return true means TRUE. However return false means FALSE or NOT KNOWN. It's better
* to use {@link org.apache.calcite.rel.metadata.RelMetadataQuery#areRowsUnique} | 3.26 |
flink_FlinkPreparingTableBase_m0_rdh | /**
* Obtains whether the ordinal column has a default value, which is not supported now.
*
* @param rowType
* Row type of field
* @param ordinal
* Index of the given column
* @param initializerContext
* Context for {@link org.apache.calcite.sql2rel.InitializerExpressionFactory}
* @return true if the column has a default value
*/
public boolean m0(RelDataType rowType, int ordinal, InitializerContext initializerContext) {
return false;
} | 3.26 |
flink_FlinkPreparingTableBase_getExpression_rdh | /**
* Generates code for this table, which is not supported now.
*
* @param clazz
* The desired collection class, for example {@link org.apache.calcite.linq4j.Queryable}
*/
public Expression getExpression(Class clazz) {
throw new UnsupportedOperationException();
} | 3.26 |
flink_FlinkPreparingTableBase_getRowCount_rdh | /**
* Returns an estimate of the number of rows in the table.
*/
public double getRowCount() {
Double rowCnt = getStatistic().getRowCount();
return rowCnt == null ? DEFAULT_ROWCOUNT : rowCnt;
} | 3.26 |
flink_FlinkPreparingTableBase_getMonotonicity_rdh | /**
* Obtains whether a given column is monotonic.
*
* @param columnName
* Column name
* @return True if the given column is monotonic
*/
public SqlMonotonicity getMonotonicity(String columnName) {
return SqlMonotonicity.NOT_MONOTONIC;
} | 3.26 |
flink_FlinkPreparingTableBase_explainSourceAsString_rdh | // ~ Tools ------------------------------------------------------------------
/**
* Returns the digest of the {@link TableSource} instance.
*/
protected List<String> explainSourceAsString(TableSource<?> ts) {
String tsDigest = ts.explainSource();
if (!Strings.isNullOrEmpty(tsDigest)) {
return ImmutableList.<String>builder().addAll(Util.skipLast(names)).add(String.format("%s, source: [%s]", Util.last(names), tsDigest)).build();
} else {
return names;
}
} | 3.26 |
flink_FlinkPreparingTableBase_getCollationList_rdh | /**
* Returns a description of the physical ordering (or orderings) of the rows returned from this
* table.
*
* @see org.apache.calcite.rel.metadata.RelMetadataQuery#collations(RelNode)
*/
public List<RelCollation> getCollationList() {
return ImmutableList.of();
} | 3.26 |
flink_FlinkPreparingTableBase_isTemporal_rdh | /**
* We recognize all tables in Flink as temporal since they are changeable.
*/
public boolean isTemporal() {
return true;
} | 3.26 |
flink_FlinkPreparingTableBase_m1_rdh | /**
* Returns the unique key sets of the current table.
*/
public Optional<Set<ImmutableBitSet>> m1() {
Set<? extends Set<String>> uniqueKeys = statistic.getUniqueKeys();
if (uniqueKeys == null) {
return Optional.empty();
} else if (uniqueKeys.size() == 0) {
return Optional.of(ImmutableSet.of());
} else {
ImmutableSet.Builder<ImmutableBitSet> uniqueKeysSetBuilder = ImmutableSet.builder();
for (Set<String> keys : uniqueKeys) {
// some columns in original uniqueKeys may not exist in RowType after project push
// down.
boolean v5 = keys.stream().allMatch(f -> f0.getField(f, false, false) != null);
// if not all columns in original uniqueKey, skip this uniqueKey
if (v5) {
Set<Integer> keysPosition = keys.stream().map(f -> f0.getField(f, false, false).getIndex()).collect(Collectors.toSet());
uniqueKeysSetBuilder.add(ImmutableBitSet.of(keysPosition));
}
}
return Optional.of(uniqueKeysSetBuilder.build());
}
} | 3.26 |
flink_FlinkPreparingTableBase_getAllowedAccess_rdh | /**
* Obtains the access type of the table.
*
* @return all access types including SELECT/UPDATE/INSERT/DELETE
*/
public SqlAccessType getAllowedAccess() {
return SqlAccessType.ALL;
} | 3.26 |
flink_LatencyMarker_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
LatencyMarker that = ((LatencyMarker) (o));
if (markedTime != that.markedTime) {
return false;
}
if (!operatorId.equals(that.operatorId)) {
return false;
}
return subtaskIndex == that.subtaskIndex;
} | 3.26 |
flink_LatencyMarker_getMarkedTime_rdh | /**
* Returns the timestamp marked by the LatencyMarker.
*/
public long getMarkedTime() {
return markedTime;
} | 3.26 |
flink_BiFunctionWithException_unchecked_rdh | /**
* Convert at {@link BiFunctionWithException} into a {@link BiFunction}.
*
* @param biFunctionWithException
* function with exception to convert into a function
* @param <A>
* input type
* @param <B>
* output type
* @return {@link BiFunction} which throws all checked exception as an unchecked exception.
*/
static <A, B, C> BiFunction<A, B, C> unchecked(BiFunctionWithException<A, B, C, ?> biFunctionWithException) {
return (A a, B b) -> {
try {
return biFunctionWithException.apply(a, b);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
// we need this to appease the compiler :-(
return null;
}
};
} | 3.26 |
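A hedged example of adapting a function that throws a checked exception with `unchecked(...)`; the `readRange` helper is hypothetical.

```java
import java.io.IOException;
import java.util.function.BiFunction;

import org.apache.flink.util.function.BiFunctionWithException;

public class UncheckedSketch {
    // Hypothetical helper that declares a checked exception.
    static Integer readRange(String path, Integer offset) throws IOException {
        if (offset < 0) {
            throw new IOException("negative offset for " + path);
        }
        return offset; // stand-in for real I/O
    }

    public static void main(String[] args) {
        // The checked IOException would be rethrown as an unchecked exception inside unchecked(...).
        BiFunction<String, Integer, Integer> fn =
                BiFunctionWithException.unchecked(UncheckedSketch::readRange);
        System.out.println(fn.apply("/tmp/file", 42)); // 42
    }
}
```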
flink_OperationExecutor_runClusterAction_rdh | /**
* Retrieves the {@link ClusterClient} from the session and runs the given {@link ClusterAction}
* against it.
*
* @param configuration
* the combined configuration of {@code sessionConf} and {@code executionConfig}.
* @param handle
* the specified operation handle
* @param clusterAction
* the cluster action to run against the retrieved {@link ClusterClient}.
* @param <ClusterID>
* type of the cluster id
* @param <Result>
* type of the result
* @throws SqlExecutionException
* if something goes wrong
*/
private <ClusterID, Result> Result runClusterAction(Configuration configuration, OperationHandle handle, ClusterAction<ClusterID, Result> clusterAction)
throws SqlExecutionException {
final ClusterClientFactory<ClusterID> clusterClientFactory = clusterClientServiceLoader.getClusterClientFactory(configuration);
final ClusterID clusterId = clusterClientFactory.getClusterId(configuration);
Preconditions.checkNotNull(clusterId, "No cluster ID found for operation " + handle);
try (final ClusterDescriptor<ClusterID> clusterDescriptor = clusterClientFactory.createClusterDescriptor(configuration);final ClusterClient<ClusterID> clusterClient = clusterDescriptor.retrieve(clusterId).getClusterClient()) {
return clusterAction.runAction(clusterClient);
} catch (FlinkException e) {
throw new SqlExecutionException("Failed to run cluster action.", e);
}
} | 3.26 |
flink_FlinkCalciteCatalogReader_toPreparingTable_rdh | /**
* Translate this {@link CatalogSchemaTable} into Flink source table.
*/
private static FlinkPreparingTableBase toPreparingTable(RelOptSchema relOptSchema, List<String> names, RelDataType rowType, CatalogSchemaTable schemaTable) {
final ResolvedCatalogBaseTable<?> resolvedBaseTable = schemaTable.getContextResolvedTable().getResolvedTable();
final CatalogBaseTable originTable = resolvedBaseTable.getOrigin();
if (originTable instanceof QueryOperationCatalogView) {
return convertQueryOperationView(relOptSchema, names, rowType, ((QueryOperationCatalogView) (originTable)));
} else if (originTable instanceof ConnectorCatalogTable) {
ConnectorCatalogTable<?, ?> connectorTable = ((ConnectorCatalogTable<?, ?>) (originTable));
if (connectorTable.getTableSource().isPresent()) {
return convertLegacyTableSource(relOptSchema, rowType, schemaTable.getContextResolvedTable().getIdentifier(), connectorTable, schemaTable.getStatistic(), schemaTable.isStreamingMode());
} else {
throw new ValidationException("Cannot convert a connector table " + "without source.");
}
} else if (originTable instanceof CatalogView) {
return convertCatalogView(relOptSchema, names, rowType, schemaTable.getStatistic(), ((CatalogView) (originTable)));
} else if (originTable instanceof CatalogTable) {
return convertCatalogTable(relOptSchema, names, rowType, schemaTable);
} else {
throw new ValidationException("Unsupported table type: " + originTable);
}
} | 3.26 |
flink_FlinkCalciteCatalogReader_isLegacySourceOptions_rdh | /**
* Checks whether the {@link CatalogTable} uses legacy connector source options.
*/
private static boolean isLegacySourceOptions(CatalogSchemaTable schemaTable) {
// normalize option keys
DescriptorProperties properties = new DescriptorProperties(true);
properties.putProperties(schemaTable.getContextResolvedTable().getResolvedTable().getOptions());
if (properties.containsKey(ConnectorDescriptorValidator.CONNECTOR_TYPE)) {
return true;
} else {
// try to create legacy table source using the options,
// some legacy factories uses the new 'connector' key
try {
// The input table is ResolvedCatalogTable that the
// rowtime/proctime contains {@link TimestampKind}. However, rowtime
// is the concept defined by the WatermarkGenerator and the
// WatermarkGenerator is responsible to convert the rowtime column
// to Long. For source, it only treats the rowtime column as regular
// timestamp. So, we erase the rowtime indicator here. Please take a
// look at the usage of the {@link
// DataTypeUtils#removeTimeAttribute}
ResolvedCatalogTable originTable = schemaTable.getContextResolvedTable().getResolvedTable();
ResolvedSchema v8 = TableSchemaUtils.removeTimeAttributeFromResolvedSchema(originTable.getResolvedSchema());
TableFactoryUtil.findAndCreateTableSource(schemaTable.getContextResolvedTable().getCatalog().orElse(null), schemaTable.getContextResolvedTable().getIdentifier(),
new ResolvedCatalogTable(CatalogTable.of(Schema.newBuilder().fromResolvedSchema(v8).build(), originTable.getComment(), originTable.getPartitionKeys(), originTable.getOptions()), v8), new Configuration(), schemaTable.isTemporary());
// success, then we will use the legacy factories
return true;
} catch (Throwable e) {
// fail, then we will use new factories
return false;
}
}
} | 3.26 |