name | code_snippet | score
---|---|---|
flink_BinaryHashTable_endBuild_rdh | /**
* End build phase.
*/
public void endBuild() throws IOException {
// finalize the partitions
int buildWriteBuffers = 0;
for (BinaryHashPartition p : this.partitionsBeingBuilt) {
buildWriteBuffers += p.finalizeBuildPhase(this.ioManager, this.currentEnumerator);
}
buildSpillRetBufferNumbers += buildWriteBuffers;
// the first prober is the probe-side input, but the input is null at beginning
this.probeIterator = new ProbeIterator(this.binaryProbeSideSerializer.createInstance());
// the bucket iterator can remain constant over the time
this.bucketIterator = new LookupBucketIterator(this);
} | 3.26 |
flink_BinaryHashTable_putBuildRow_rdh | // ========================== build phase public method ======================================
/**
* Put a build side row to hash table.
*/
public void putBuildRow(RowData row) throws IOException {
final int hashCode = hash(this.buildSideProjection.apply(row).hashCode(), 0);
// TODO: combine key projection and build side conversion to code gen.
insertIntoTable(originBuildSideSerializer.toBinaryRow(row), hashCode);
} | 3.26 |
flink_BinaryHashTable_clearPartitions_rdh | /**
* This method clears all partitions currently residing (partially) in memory. It releases all
* memory and deletes all spilled partitions.
*
* <p>This method is intended for a hard cleanup in the case that the join is aborted.
*/
@Override
public void clearPartitions() {
// clear the iterators, so the next call to next() will notice
this.bucketIterator = null;
this.probeIterator = null;
for (int i = this.partitionsBeingBuilt.size() - 1; i >= 0; --i) {
final BinaryHashPartition p = this.partitionsBeingBuilt.get(i);
try {
p.clearAllMemory(this.internalPool);
} catch (Exception e) {
LOG.error("Error during partition cleanup.", e);
}
}
this.partitionsBeingBuilt.clear();
// clear the partitions that are still to be done (that have files on disk)
for (final BinaryHashPartition p : this.partitionsPending) {
p.clearAllMemory(this.internalPool);
}
// clear the partitions that were processed by the sort-merge join operator
for (final BinaryHashPartition p : this.partitionsPendingForSMJ) {
try {
p.clearAllMemory(this.internalPool);
} catch (Exception e) {
LOG.error("Error during partition cleanup.", e);
}
}
this.partitionsPendingForSMJ.clear();
} | 3.26 |
flink_BinaryHashTable_spillPartition_rdh | /**
* Selects a partition and spills it. The number of the spilled partition is returned.
*
* @return The number of the spilled partition.
*/
@Override
protected int spillPartition() throws IOException {
// find the largest partition
int largestNumBlocks = 0;
int largestPartNum = -1;
for (int i = 0; i < partitionsBeingBuilt.size(); i++) {
BinaryHashPartition p = partitionsBeingBuilt.get(i);
if (p.isInMemory() && (p.getNumOccupiedMemorySegments() > largestNumBlocks)) {
largestNumBlocks = p.getNumOccupiedMemorySegments();
largestPartNum = i;
}
}
final BinaryHashPartition p = partitionsBeingBuilt.get(largestPartNum);
// spill the partition
int numBuffersFreed = p.spillPartition(this.ioManager,
this.currentEnumerator.next(), this.buildSpillReturnBuffers);
this.buildSpillRetBufferNumbers += numBuffersFreed;
LOG.info(String.format("Grace hash join: Ran out memory, choosing partition "
+ "[%d] to spill, %d memory segments being freed", largestPartNum, numBuffersFreed));
// grab as many buffers as are available directly
MemorySegment currBuff;
while ((this.buildSpillRetBufferNumbers > 0) && ((currBuff = this.buildSpillReturnBuffers.poll()) != null)) {
returnPage(currBuff);
this.buildSpillRetBufferNumbers--;
}
numSpillFiles++;
spillInBytes += numBuffersFreed * segmentSize;
// The bloomFilter is built by bucket area after the data is spilled, so that we can use
// enough memory.
p.buildBloomFilterAndFreeBucket();
return largestPartNum;
} | 3.26 |
flink_BinaryHashTable_nextMatching_rdh | // ========================== rebuild phase public method ======================================
/**
* Next record from rebuilt spilled partition or build side outer partition.
*/
public boolean nextMatching() throws IOException {
if (type.needSetProbed()) {
return (processProbeIter() || m1()) || prepareNextPartition();
} else {
return processProbeIter() || prepareNextPartition();
}
} | 3.26 |
flink_NullValue_getBinaryLength_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getBinaryLength() {
return 1;
} | 3.26 |
flink_NullValue_compareTo_rdh | // --------------------------------------------------------------------------------------------
@Override
public int compareTo(NullValue o) {
return 0;
} | 3.26 |
flink_NullValue_getMaxNormalizedKeyLen_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getMaxNormalizedKeyLen() {
return 0;
} | 3.26 |
flink_NullValue_read_rdh | // --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
in.readBoolean();
} | 3.26 |
flink_NullValue_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return "(null)";
} | 3.26 |
flink_NonReusingBuildSecondReOpenableHashJoinIterator_reopenProbe_rdh | /**
* Set new input for probe side
*
* @throws java.io.IOException
*/
public void reopenProbe(MutableObjectIterator<V1> probeInput) throws IOException {
reopenHashTable.reopenProbe(probeInput);
} | 3.26 |
flink_RestartAllFailoverStrategy_getTasksNeedingRestart_rdh | /**
* Returns all vertices on any task failure.
*
* @param executionVertexId
* ID of the failed task
* @param cause
* cause of the failure
* @return set of IDs of vertices to restart
*/
@Override
public Set<ExecutionVertexID> getTasksNeedingRestart(ExecutionVertexID executionVertexId, Throwable cause) {
return IterableUtils.toStream(topology.getVertices()).map(SchedulingExecutionVertex::getId).collect(Collectors.toSet());
} | 3.26 |
flink_SourcePlanNode_setSerializer_rdh | /**
* Sets the serializer for this PlanNode.
*
* @param serializer
* The serializer to set.
*/
public void setSerializer(TypeSerializerFactory<?> serializer) {
this.serializer = serializer;
} | 3.26 |
flink_SourcePlanNode_getDataSourceNode_rdh | // --------------------------------------------------------------------------------------------
public DataSourceNode getDataSourceNode() {
return ((DataSourceNode) (this.template));
} | 3.26 |
flink_SourcePlanNode_accept_rdh | // --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<PlanNode> visitor) {
if (visitor.preVisit(this)) {
visitor.postVisit(this);
}
} | 3.26 |
flink_PojoCsvInputFormat_findAllFields_rdh | /**
* Finds all declared fields in a class and all its super classes.
*
* @param clazz
* Class for which all declared fields are found
* @param allFields
* Map containing all found fields so far
*/
private void findAllFields(Class<?> clazz, Map<String, Field> allFields) {
for (Field field : clazz.getDeclaredFields()) {
allFields.put(field.getName(), field);
}
if (clazz.getSuperclass() != null) {
findAllFields(clazz.getSuperclass(), allFields);
}
} | 3.26 |
flink_AbstractCollectResultBuffer_revert_rdh | /**
* Revert the buffer back to the result whose offset is `checkpointedOffset`.
*/
protected void revert(long checkpointedOffset) {
while (offset > checkpointedOffset) {
buffer.removeLast();
offset--;
}
} | 3.26 |
flink_AbstractCollectResultBuffer_next_rdh | /**
* Get next user visible result, returns null if currently there is no more.
*/
public T next() {
if (userVisibleHead == userVisibleTail) {
return null;
}
T ret = buffer.removeFirst();
userVisibleHead++;
sanityCheck();
return ret;
} | 3.26 |
flink_AbstractCollectResultBuffer_m0_rdh | /**
* Clear the whole buffer and discard all results.
*/
protected void m0() {
buffer.clear();
userVisibleHead = 0;
userVisibleTail = 0;
offset = 0;
} | 3.26 |
flink_StandaloneResourceManagerFactory_getConfigurationWithoutResourceLimitationIfSet_rdh | /**
* Get the configuration for standalone ResourceManager, overwrite invalid configs.
*
* @param configuration
* configuration object
* @return the configuration for standalone ResourceManager
*/
@VisibleForTesting
public static Configuration getConfigurationWithoutResourceLimitationIfSet(Configuration configuration) {
final Configuration copiedConfig = new Configuration(configuration);
removeResourceLimitationConfig(copiedConfig);
return copiedConfig;
} | 3.26 |
flink_TimeIntervalJoin_removeExpiredRows_rdh | /**
* Remove the expired rows. Register a new timer if the cache still holds valid rows after the
* cleaning up.
*
* @param collector
* the collector to emit results
* @param expirationTime
* the expiration time for this cache
* @param rowCache
* the row cache
* @param timerState
* timer state for the opposite stream
* @param ctx
* the context to register the cleanup timer
* @param removeLeft
* whether to remove the left rows
*/
private void removeExpiredRows(Collector<RowData> collector, long expirationTime, MapState<Long, List<Tuple2<RowData, Boolean>>> rowCache, ValueState<Long> timerState, OnTimerContext ctx, boolean removeLeft) throws Exception {
Iterator<Map.Entry<Long, List<Tuple2<RowData, Boolean>>>> iterator = rowCache.iterator();
long earliestTimestamp = -1L;
// We remove all expired keys and do not leave the loop early.
// Hence, we do a full pass over the state.
while (iterator.hasNext()) {
Map.Entry<Long, List<Tuple2<RowData, Boolean>>> entry = iterator.next();
Long rowTime = entry.getKey();
if (rowTime <= expirationTime) {
if (removeLeft && joinType.isLeftOuter()) {
List<Tuple2<RowData, Boolean>> rows = entry.getValue();
rows.forEach((Tuple2<RowData, Boolean> tuple) -> {
if (!tuple.f1) {
// Emit a null padding result if the row has never been
// successfully joined.
collector.collect(paddingUtil.padLeft(tuple.f0));
}
});
} else if ((!removeLeft) && joinType.isRightOuter()) {
List<Tuple2<RowData, Boolean>> rows = entry.getValue();
rows.forEach((Tuple2<RowData, Boolean> tuple) -> {
if (!tuple.f1) {
// Emit a null padding result if the row has never been
// successfully joined.
collector.collect(paddingUtil.padRight(tuple.f0));
}
});
}
iterator.remove();
} else if ((rowTime < earliestTimestamp) || (earliestTimestamp < 0)) {
// We find the earliest timestamp that is still valid.
earliestTimestamp = rowTime;
}
}
if (earliestTimestamp > 0) {
// There are rows left in the cache. Register a timer to expire them later.
registerCleanUpTimer(ctx, earliestTimestamp, removeLeft);
} else {
// No rows left in the cache. Clear the states and the timerState will be 0.
timerState.clear();
rowCache.clear();
}
} | 3.26 |
flink_TimeIntervalJoin_registerCleanUpTimer_rdh | /**
* Register a timer for cleaning up rows in a specified time.
*
* @param ctx
* the context to register timer
* @param rowTime
* time for the input row
* @param leftRow
* whether this row comes from the left stream
*/
private void registerCleanUpTimer(Context ctx, long rowTime, boolean leftRow) throws IOException {
if (leftRow) {
long cleanUpTime = (((rowTime + leftRelativeSize) + minCleanUpInterval) + allowedLateness) + 1;
registerTimer(ctx, cleanUpTime);
rightTimerState.update(cleanUpTime);
} else {
long v33 = (((rowTime + f0) + minCleanUpInterval) + allowedLateness) + 1;
registerTimer(ctx, v33);
leftTimerState.update(v33);
}
} | 3.26 |
flink_HadoopFileStatus_fromHadoopStatus_rdh | // ------------------------------------------------------------------------
/**
* Creates a new {@code HadoopFileStatus} from Hadoop's {@link org.apache.hadoop.fs.FileStatus}.
* If Hadoop's file status is <i>located</i>, i.e., it contains block information, then this
* method returns an implementation of Flink's {@link org.apache.flink.core.fs.LocatedFileStatus}.
*/
public static HadoopFileStatus fromHadoopStatus(final FileStatus fileStatus) {
return fileStatus instanceof LocatedFileStatus ? new LocatedHadoopFileStatus(((LocatedFileStatus) (fileStatus))) : new HadoopFileStatus(fileStatus);
} | 3.26 |
flink_PythonStreamGroupTableAggregateOperator_getUserDefinedFunctionsProto_rdh | /**
* Gets the proto representation of the Python user-defined table aggregate function to be
* executed.
*/
@Override
public UserDefinedAggregateFunctions getUserDefinedFunctionsProto() {
FlinkFnApi.UserDefinedAggregateFunctions.Builder builder = super.getUserDefinedFunctionsProto().toBuilder();
return builder.build();
} | 3.26 |
flink_OSSTestCredentials_getOSSAccessKey_rdh | /**
* Get OSS access key.
*
* @return OSS access key
*/
public static String getOSSAccessKey() {
if (ACCESS_KEY != null) {
return ACCESS_KEY;
} else {
throw new IllegalStateException("OSS access key is not available");
}
} | 3.26 |
flink_OSSTestCredentials_credentialsAvailable_rdh | // ------------------------------------------------------------------------
public static boolean credentialsAvailable() {
return (((ENDPOINT != null) && (BUCKET != null)) && (ACCESS_KEY != null)) && (SECRET_KEY != null);
} | 3.26 |
flink_OSSTestCredentials_getOSSSecretKey_rdh | /**
* Get OSS secret key.
*
* @return OSS secret key
*/
public static String getOSSSecretKey() {
if (SECRET_KEY != null) {
return SECRET_KEY;
} else {
throw new IllegalStateException("OSS secret key is not available");
}
} | 3.26 |
flink_BulkPartialSolutionNode_setCandidateProperties_rdh | // --------------------------------------------------------------------------------------------
public void setCandidateProperties(GlobalProperties gProps, LocalProperties lProps, Channel initialInput) {
if (this.cachedPlans != null) {
throw new IllegalStateException();
} else {
this.cachedPlans = Collections.<PlanNode>singletonList(new BulkPartialSolutionPlanNode(this, ("PartialSolution (" +
this.getOperator().getName()) + ")", gProps, lProps, initialInput));
}
} | 3.26 |
flink_BulkPartialSolutionNode_getOperator_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the operator (here the {@link PartialSolutionPlaceHolder}) that is represented by this
* optimizer node.
*
* @return The operator represented by this optimizer node.
*/
@Override
public PartialSolutionPlaceHolder<?> getOperator() {
return ((PartialSolutionPlaceHolder<?>) (super.getOperator()));
} | 3.26 |
flink_AvroSchemaConverter_m0_rdh | /**
* Converts Flink SQL {@link LogicalType} (can be nested) into an Avro schema.
*
* <p>The "{rowName}_" is used as the nested row type name prefix in order to generate the right
* schema. Nested record type that only differs with type name is still compatible.
*
* @param logicalType
* logical type
* @param rowName
* the record name
* @return Avro's {@link Schema} matching this logical type.
*/
public static Schema m0(LogicalType logicalType, String rowName) {
int precision;
boolean nullable = logicalType.isNullable();
switch (logicalType.getTypeRoot()) {
case NULL :
return SchemaBuilder.builder().nullType();
case BOOLEAN :
Schema bool = SchemaBuilder.builder().booleanType();
return nullable ? nullableSchema(bool) : bool;
case TINYINT :
case SMALLINT :
case INTEGER :
Schema integer = SchemaBuilder.builder().intType();
return nullable ? nullableSchema(integer) : integer;
case BIGINT :
Schema bigint = SchemaBuilder.builder().longType();
return nullable ? nullableSchema(bigint) : bigint;
case FLOAT :
Schema f = SchemaBuilder.builder().floatType();
return nullable ? nullableSchema(f) : f;
case DOUBLE :
Schema d = SchemaBuilder.builder().doubleType();
return nullable ? nullableSchema(d) : d;
case CHAR :
case VARCHAR :
Schema str = SchemaBuilder.builder().stringType();
return nullable ? nullableSchema(str) : str;
case BINARY :
case VARBINARY :
Schema v28 = SchemaBuilder.builder().bytesType();
return nullable ? nullableSchema(v28) : v28;
case TIMESTAMP_WITHOUT_TIME_ZONE :
// use long to represents Timestamp
final TimestampType timestampType = ((TimestampType) (logicalType));
precision = timestampType.getPrecision();
LogicalType avroLogicalType;
if (precision <= 3) {
avroLogicalType = LogicalTypes.timestampMillis();
} else {
throw new IllegalArgumentException((("Avro does not support TIMESTAMP type " + "with precision: ") + precision) + ", it only supports precision less than 3.");
}
Schema timestamp = avroLogicalType.addToSchema(SchemaBuilder.builder().longType());
return nullable ? nullableSchema(timestamp) : timestamp;
case DATE :
// use int to represents Date
Schema date = LogicalTypes.date().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(date) : date;
case TIME_WITHOUT_TIME_ZONE :
precision = ((TimeType) (logicalType)).getPrecision();
if (precision > 3) {
throw new IllegalArgumentException(("Avro does not support TIME type with precision: " + precision)
+ ", it only supports precision less than 3.");
}
// use int to represents Time, we only support millisecond when deserialization
Schema time = LogicalTypes.timeMillis().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(time) : time;
case DECIMAL :
DecimalType decimalType = ((DecimalType) (logicalType));
// store BigDecimal as byte[]
Schema decimal = LogicalTypes.decimal(decimalType.getPrecision(), decimalType.getScale()).addToSchema(SchemaBuilder.builder().bytesType());
return nullable ? nullableSchema(decimal) : decimal;
case ROW :
RowType rowType = ((RowType) (logicalType));
List<String> fieldNames = rowType.getFieldNames();
// we have to make sure the record name is different in a Schema
SchemaBuilder.FieldAssembler<Schema> builder = SchemaBuilder.builder().record(rowName).fields();
for (int i = 0; i < rowType.getFieldCount(); i++) {
String fieldName = fieldNames.get(i);
LogicalType fieldType = rowType.getTypeAt(i);
SchemaBuilder.GenericDefault<Schema> fieldBuilder = builder.name(fieldName).type(m0(fieldType, (rowName + "_") + fieldName));
if (fieldType.isNullable()) {
builder = fieldBuilder.withDefault(null);
} else {
builder = fieldBuilder.noDefault();
}
}
Schema record = builder.endRecord();
return nullable ? nullableSchema(record) : record;
case MULTISET :
case MAP :
Schema map = SchemaBuilder.builder().map().values(m0(extractValueTypeToAvroMap(logicalType), rowName));
return nullable ? nullableSchema(map) : map;
case ARRAY :
ArrayType arrayType = ((ArrayType) (logicalType));
Schema array = SchemaBuilder.builder().array().items(m0(arrayType.getElementType(), rowName));
return nullable ? nullableSchema(array) : array;
case RAW :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
default :
throw new UnsupportedOperationException("Unsupported to derive Schema for type: " + logicalType);
}
} | 3.26 |
flink_AvroSchemaConverter_convertToDataType_rdh | /**
* Converts an Avro schema string into a nested row structure with deterministic field order and
* data types that are compatible with Flink's Table & SQL API.
*
* @param avroSchemaString
* Avro schema definition string
* @return data type matching the schema
*/
public static DataType convertToDataType(String avroSchemaString) {
Preconditions.checkNotNull(avroSchemaString, "Avro schema must not be null.");
final Schema schema;
try {
schema = new Schema.Parser().parse(avroSchemaString);
} catch (SchemaParseException e) {
throw new IllegalArgumentException("Could not parse Avro schema string.", e);
}
return convertToDataType(schema);
} | 3.26 |
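A minimal usage sketch for convertToDataType above, assuming the converter lives in org.apache.flink.formats.avro.typeutils; the Avro schema string and the expected ROW type in the comment are illustrative, not verified output.

import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter; // assumed package
import org.apache.flink.table.types.DataType;

public class AvroToDataTypeExample {
    public static void main(String[] args) {
        // A simple Avro record: a required long "id" and a nullable string "name".
        String avroSchema =
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                        + "{\"name\":\"id\",\"type\":\"long\"},"
                        + "{\"name\":\"name\",\"type\":[\"null\",\"string\"],\"default\":null}]}";
        DataType dataType = AvroSchemaConverter.convertToDataType(avroSchema);
        // Expected to resemble ROW<`id` BIGINT NOT NULL, `name` STRING>; the nullable union
        // becomes a nullable field.
        System.out.println(dataType);
    }
}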
flink_AvroSchemaConverter_convertToTypeInfo_rdh | /**
* Converts an Avro schema string into a nested row structure with deterministic field order and
* data types that are compatible with Flink's Table & SQL API.
*
* @param avroSchemaString
* Avro schema definition string
* @return type information matching the schema
*/
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> convertToTypeInfo(String avroSchemaString) {
Preconditions.checkNotNull(avroSchemaString, "Avro schema must not be null.");
final Schema schema;
try {
schema = new Schema.Parser().parse(avroSchemaString);
} catch (SchemaParseException e) {
throw new IllegalArgumentException("Could not parse Avro schema string.", e);
}
return ((TypeInformation<T>) (convertToTypeInfo(schema)));
} | 3.26 |
flink_StreamExchangeModeUtils_getGlobalStreamExchangeMode_rdh | /**
* The {@link GlobalStreamExchangeMode} should be determined by the {@link StreamGraphGenerator}
* in the future.
*/
@Deprecated
static Optional<GlobalStreamExchangeMode> getGlobalStreamExchangeMode(ReadableConfig config) {
return config.getOptional(ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE).map(value -> {
try {
return GlobalStreamExchangeMode.valueOf(convertLegacyShuffleMode(value).toUpperCase());
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(String.format("Unsupported value %s for config %s.", value, ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE.key()));
}
});
} | 3.26 |
flink_LocationPreferenceSlotSelectionStrategy_createDefault_rdh | // -------------------------------------------------------------------------------------------
// Factory methods
// -------------------------------------------------------------------------------------------
public static LocationPreferenceSlotSelectionStrategy createDefault() {
return new DefaultLocationPreferenceSlotSelectionStrategy();
} | 3.26 |
flink_Time_hours_rdh | /**
* Creates a new {@link Time} that represents the given number of hours.
*/
public static Time hours(long hours) {
return of(hours, TimeUnit.HOURS);
} | 3.26 |
flink_Time_seconds_rdh | /**
* Creates a new {@link Time} that represents the given number of seconds.
*/
public static Time seconds(long seconds) {
return of(seconds, TimeUnit.SECONDS);
} | 3.26 |
flink_Time_days_rdh | /**
* Creates a new {@link Time} that represents the given number of days.
*/
public static Time days(long days) {
return of(days, TimeUnit.DAYS);
} | 3.26 |
flink_Time_milliseconds_rdh | /**
* Creates a new {@link Time} that represents the given number of milliseconds.
*/
public static Time milliseconds(long milliseconds) {
return of(milliseconds, TimeUnit.MILLISECONDS);
} | 3.26 |
flink_Time_of_rdh | // ------------------------------------------------------------------------
// Factory
// ------------------------------------------------------------------------
/**
* Creates a new {@link Time} of the given duration and {@link TimeUnit}.
*
* <p>The {@code Time} refers to the time characteristic that is set on the dataflow via {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}.
*
* @param size
* The duration of time.
* @param unit
* The unit of time of the duration, for example {@code TimeUnit.SECONDS}.
* @return The time policy.
*/
public static Time of(long size, TimeUnit unit) {
return new Time(size, unit);
} | 3.26 |
flink_Time_minutes_rdh | /**
* Creates a new {@link Time} that represents the given number of minutes.
*/
public static Time minutes(long minutes) {
return of(minutes, TimeUnit.MINUTES);
} | 3.26 |
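The factory methods above compose as in this brief sketch; the import path is assumed to be the windowing Time class (org.apache.flink.streaming.api.windowing.time.Time).

import java.util.concurrent.TimeUnit;
import org.apache.flink.streaming.api.windowing.time.Time; // assumed import

public class TimeFactoryExample {
    public static void main(String[] args) {
        Time windowSize = Time.minutes(10);
        Time slide = Time.seconds(30);
        Time retention = Time.days(1);
        Time custom = Time.of(250, TimeUnit.MILLISECONDS); // equivalent to Time.milliseconds(250)
        System.out.println(windowSize + " / " + slide + " / " + retention + " / " + custom);
    }
}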
flink_TableResultImpl_m0_rdh | /**
* Specifies print style. Default is {@link TableauStyle} with max integer column width.
*/
public Builder m0(PrintStyle printStyle) {
Preconditions.checkNotNull(printStyle, "printStyle should not be null");
this.printStyle = printStyle;
return this;
} | 3.26 |
flink_TableResultImpl_resultKind_rdh | /**
* Specifies result kind of the execution result.
*
* @param resultKind
* a {@link ResultKind} for the execution result.
*/
public Builder resultKind(ResultKind resultKind) {
Preconditions.checkNotNull(resultKind, "resultKind should not be null");
this.resultKind = resultKind;
return this;
} | 3.26 |
flink_TableResultImpl_jobClient_rdh | /**
* Specifies job client which associates the submitted Flink job.
*
* @param jobClient
* a {@link JobClient} for the submitted Flink job.
*/
public Builder jobClient(JobClient jobClient) {
this.jobClient = jobClient;
return this;
} | 3.26 |
flink_TableResultImpl_data_rdh | /**
* Specifies a row list as the execution result.
*
* @param rowList
* a row list as the execution result.
*/
public Builder data(List<Row> rowList) {
Preconditions.checkNotNull(rowList, "listRows should not be null");
this.resultProvider = new StaticResultProvider(rowList);
return this;
} | 3.26 |
flink_TableResultImpl_build_rdh | /**
* Returns a {@link TableResult} instance.
*/
public TableResultInternal build() {
if (printStyle == null) {
printStyle = PrintStyle.rawContent(resultProvider.getRowDataStringConverter());
}
return new TableResultImpl(jobClient, resolvedSchema, resultKind, resultProvider, printStyle);
} | 3.26 |
flink_TableResultImpl_schema_rdh | /**
* Specifies schema of the execution result.
*
* @param resolvedSchema
* a {@link ResolvedSchema} for the execution result.
*/
public Builder schema(ResolvedSchema resolvedSchema) {
Preconditions.checkNotNull(resolvedSchema, "resolvedSchema should not be null");
this.resolvedSchema = resolvedSchema;
return this;
} | 3.26 |
flink_TwoInputUdfOperator_setSemanticProperties_rdh | /**
* Sets the semantic properties for the user-defined function (UDF). The semantic properties
* define how fields of tuples and other objects are modified or preserved through this UDF. The
* configured properties can be retrieved via {@link UdfOperator#getSemanticProperties()}.
*
* @param properties
* The semantic properties for the UDF.
* @see UdfOperator#getSemanticProperties()
*/
@Internal
public void setSemanticProperties(DualInputSemanticProperties properties) {
this.udfSemantics = properties;
this.analyzedUdfSemantics = false;
} | 3.26 |
flink_TwoInputUdfOperator_withParameters_rdh | // --------------------------------------------------------------------------------------------
// Fluent API methods
// --------------------------------------------------------------------------------------------
@Override
public O withParameters(Configuration parameters) {
this.parameters = parameters;
@SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_TwoInputUdfOperator_returns_rdh | /**
* Adds a type information hint about the return type of this operator. This method can be used
* in cases where Flink cannot determine automatically what the produced type of a function is.
* That can be the case if the function uses generic type variables in the return type that
* cannot be inferred from the input type.
*
* <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are
* preferable.
*
* @param typeInfo
* The type information for the returned data type.
* @return This operator using the given type information for the return type.
*/
public O returns(TypeInformation<OUT> typeInfo) {
requireNonNull(typeInfo, "TypeInformation must not be null");
fillInType(typeInfo);
@SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_TwoInputUdfOperator_withForwardedFieldsFirst_rdh | /**
* Adds semantic information about forwarded fields of the first input of the user-defined
* function. The forwarded fields information declares fields which are never modified by the
* function and which are forwarded at the same position to the output or unchanged copied to
* another position in the output.
*
* <p>Fields that are forwarded at the same position are specified by their position. The
* specified position must be valid for the input and output data type and have the same type.
* For example <code>withForwardedFieldsFirst("f2")</code> declares that the third field of a
* Java input tuple from the first input is copied to the third field of an output tuple.
*
* <p>Fields which are unchanged copied from the first input to another position in the output
* are declared by specifying the source field reference in the first input and the target field
* reference in the output. {@code withForwardedFieldsFirst("f0->f2")} denotes that the first
* field of the first input Java tuple is unchanged copied to the third field of the Java output
* tuple. When using a wildcard ("*") ensure that the number of declared fields and their types
* in first input and output type match.
*
* <p>Multiple forwarded fields can be annotated in one ({@code withForwardedFieldsFirst("f2;
* f3->f0; f4")}) or separate Strings ({@code withForwardedFieldsFirst("f2", "f3->f0", "f4")}).
* Please refer to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or
* Flink's documentation for details on field references such as nested fields and wildcard.
*
* <p>It is not possible to override existing semantic information about forwarded fields of the
* first input which was for example added by a {@link org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsFirst} class
* annotation.
*
* <p><b>NOTE: Adding semantic information for functions is optional! If used correctly,
* semantic information can help the Flink optimizer to generate more efficient execution plans.
* However, incorrect semantic information can cause the optimizer to generate incorrect
* execution plans which compute wrong results! So be careful when adding semantic information.
* </b>
*
* @param forwardedFieldsFirst
* A list of forwarded field expressions for the first input of the
* function.
* @return This operator with annotated forwarded field information.
* @see org.apache.flink.api.java.functions.FunctionAnnotation
* @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsFirst
*/
@SuppressWarnings("unchecked")
public O withForwardedFieldsFirst(String... forwardedFieldsFirst) {
if ((this.udfSemantics == null) || this.analyzedUdfSemantics) {
// extract semantic properties from function annotations
setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass()));
}
if ((this.udfSemantics == null) || this.analyzedUdfSemantics) {
setSemanticProperties(new DualInputSemanticProperties());
SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
} else if (this.udfWithForwardedFieldsFirstAnnotation(getFunction().getClass())) {
// refuse semantic information as it would override the function annotation
throw new SemanticProperties.InvalidSemanticAnnotationException(("Forwarded field information " + "has already been added by a function annotation for the first input of this operator. ") + "Cannot overwrite function annotations.");
} else {
SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
}
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_TwoInputUdfOperator_getBroadcastSets_rdh | // --------------------------------------------------------------------------------------------
// Accessors
// --------------------------------------------------------------------------------------------
@Override
@Internal
public Map<String, DataSet<?>> getBroadcastSets() {
return this.broadcastVariables == null ? Collections.<String, DataSet<?>>emptyMap() : Collections.unmodifiableMap(this.broadcastVariables);
} | 3.26 |
flink_TwoInputUdfOperator_withForwardedFieldsSecond_rdh | /**
* Adds semantic information about forwarded fields of the second input of the user-defined
* function. The forwarded fields information declares fields which are never modified by the
* function and which are forwarded at the same position to the output or unchanged copied to
* another position in the output.
*
* <p>Fields that are forwarded at the same position are specified by their position. The
* specified position must be valid for the input and output data type and have the same type.
* For example <code>withForwardedFieldsSecond("f2")</code> declares that the third field of a
* Java input tuple from the second input is copied to the third field of an output tuple.
*
* <p>Fields which are unchanged copied from the second input to another position in the output
* are declared by specifying the source field reference in the second input and the target
* field reference in the output. {@code withForwardedFieldsSecond("f0->f2")} denotes that the
* first field of the second input Java tuple is unchanged copied to the third field of the Java
* output tuple. When using a wildcard ("*") ensure that the number of declared fields and their
* types in second input and output type match.
*
* <p>Multiple forwarded fields can be annotated in one ({@code withForwardedFieldsSecond("f2;
* f3->f0; f4")}) or separate Strings ({@code withForwardedFieldsSecond("f2", "f3->f0", "f4")}).
* Please refer to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or
* Flink's documentation for details on field references such as nested fields and wildcard.
*
* <p>It is not possible to override existing semantic information about forwarded fields of the
* second input which was for example added by a {@link org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsSecond} class
* annotation.
*
* <p><b>NOTE: Adding semantic information for functions is optional! If used correctly,
* semantic information can help the Flink optimizer to generate more efficient execution plans.
* However, incorrect semantic information can cause the optimizer to generate incorrect
* execution plans which compute wrong results! So be careful when adding semantic information.
* </b>
*
* @param forwardedFieldsSecond
* A list of forwarded field expressions for the second input of
* the function.
* @return This operator with annotated forwarded field information.
* @see org.apache.flink.api.java.functions.FunctionAnnotation
* @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsSecond
*/
@SuppressWarnings("unchecked")
public O withForwardedFieldsSecond(String... forwardedFieldsSecond) {
if ((this.udfSemantics == null) || this.analyzedUdfSemantics) {
// extract semantic properties from function annotations
setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass()));
}
if ((this.udfSemantics == null) || this.analyzedUdfSemantics) {
setSemanticProperties(new DualInputSemanticProperties());
SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, null, forwardedFieldsSecond, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
} else if (m0(getFunction().getClass())) {
// refuse semantic information as it would override the function annotation
throw new SemanticProperties.InvalidSemanticAnnotationException(("Forwarded field information " + "has already been added by a function annotation for the second input of this operator. ") + "Cannot overwrite function annotations.");
} else {
SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, null, forwardedFieldsSecond, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
}
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_PojoSerializerSnapshot_getCompatibilityOfPreExistingFields_rdh | /**
* Finds which Pojo fields exist both in the new {@link PojoSerializer} as well as in the
* previous one (represented by this snapshot), and returns an {@link IntermediateCompatibilityResult} of the serializers of those preexisting fields.
*/
private static <T> IntermediateCompatibilityResult<T> getCompatibilityOfPreExistingFields(PojoSerializer<T> newPojoSerializer, LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots) {
// the present entries dictates the preexisting fields, because removed fields would be
// represented as absent keys in the optional map.
final Set<LinkedOptionalMap.KeyValue<Field, TypeSerializerSnapshot<?>>> presentFieldSnapshots = fieldSerializerSnapshots.getPresentEntries();
final ArrayList<TypeSerializerSnapshot<?>> associatedFieldSerializerSnapshots = new ArrayList<>(presentFieldSnapshots.size());
final ArrayList<TypeSerializer<?>> associatedNewFieldSerializers = new ArrayList<>(presentFieldSnapshots.size());
final Map<Field, TypeSerializer<?>> newFieldSerializersIndex = buildNewFieldSerializersIndex(newPojoSerializer);
for (LinkedOptionalMap.KeyValue<Field, TypeSerializerSnapshot<?>> presentFieldEntry : presentFieldSnapshots) {
TypeSerializer<?> associatedNewFieldSerializer = newFieldSerializersIndex.get(presentFieldEntry.getKey());
checkState(associatedNewFieldSerializer != null, "a present field should have its associated new field serializer available.");
associatedFieldSerializerSnapshots.add(presentFieldEntry.getValue());
associatedNewFieldSerializers.add(associatedNewFieldSerializer);
}
return CompositeTypeSerializerUtil.constructIntermediateCompatibilityResult(associatedNewFieldSerializers.toArray(new TypeSerializer<?>[associatedNewFieldSerializers.size()]), associatedFieldSerializerSnapshots.toArray(new TypeSerializerSnapshot<?>[associatedFieldSerializerSnapshots.size()]));
} | 3.26 |
flink_PojoSerializerSnapshot_decomposeSubclassSerializerRegistry_rdh | /**
* Transforms the subclass serializer registry structure, {@code LinkedHashMap<Class<?>,
* TypeSerializer<?>>} to 2 separate structures: a map containing with registered classes as key
* and their corresponding ids (order in the original map) as value, as well as a separate array
* of the corresponding subclass serializers.
*/
@SuppressWarnings("unchecked")
private static Tuple2<LinkedHashMap<Class<?>, Integer>, TypeSerializer<Object>[]> decomposeSubclassSerializerRegistry(LinkedHashMap<Class<?>, TypeSerializer<?>> subclassSerializerRegistry) {
final LinkedHashMap<Class<?>, Integer> subclassIds =
CollectionUtil.newLinkedHashMapWithExpectedSize(subclassSerializerRegistry.size());
final TypeSerializer[] subclassSerializers = new TypeSerializer[subclassSerializerRegistry.size()];
subclassSerializerRegistry.forEach((registeredSubclassClass, serializer) -> {
int id = subclassIds.size();
subclassIds.put(registeredSubclassClass, id);
subclassSerializers[id] = serializer;
});
return Tuple2.of(subclassIds, subclassSerializers);
} | 3.26 |
flink_PojoSerializerSnapshot_previousSerializerHasNonRegisteredSubclasses_rdh | /**
* Checks whether the previous serializer, represented by this snapshot, has non-registered
* subclasses.
*/
private static boolean previousSerializerHasNonRegisteredSubclasses(LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> nonRegisteredSubclassSerializerSnapshots) {
return nonRegisteredSubclassSerializerSnapshots.size() > 0;
}
/**
* Creates a reconfigured version of the {@link PojoSerializer}.
*
* @param originalNewPojoSerializer
* the original new {@link PojoSerializer} to create a
* reconfigured version of.
* @param fieldSerializerCompatibility
* compatibility of preexisting fields' serializers.
* @param registeredSerializerSnapshots
* snapshot of previous registered subclasses' serializers.
* @param preExistingRegistrationsCompatibility
* compatibility of preexisting subclasses'
* serializers.
* @param nonRegisteredSubclassSerializerSnapshots
* snapshot of previous non-registered
* subclasses' serializers.
* @return a reconfigured version of the original new {@link PojoSerializer} | 3.26 |
flink_PojoSerializerSnapshot_m2_rdh | /**
* Checks whether the new {@link PojoSerializer} has a different subclass registration order
* compared to the previous one.
*/
private static boolean m2(LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> registeredSubclassSerializerSnapshots, PojoSerializer<?> newPojoSerializer) {
Set<Class<?>> previousRegistrationOrder = registeredSubclassSerializerSnapshots.unwrapOptionals().keySet();
Set<Class<?>> newRegistrationOrder = newPojoSerializer.getRegisteredClasses().keySet();
return !isPreviousRegistrationPrefixOfNewRegistration(previousRegistrationOrder,
newRegistrationOrder);
} | 3.26 |
flink_PojoSerializerSnapshot_getCompatibilityOfPreExistingRegisteredSubclasses_rdh | /**
* Finds which registered subclasses exist both in the new {@link PojoSerializer} as well as in
* the previous one (represented by this snapshot), and returns an {@link IntermediateCompatibilityResult} of the serializers of this preexisting registered
* subclasses.
*/
private static <T> IntermediateCompatibilityResult<T> getCompatibilityOfPreExistingRegisteredSubclasses(PojoSerializer<T> newPojoSerializer, LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> registeredSubclassSerializerSnapshots) {
final LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>> unwrappedSerializerSnapshots = registeredSubclassSerializerSnapshots.unwrapOptionals();
final ArrayList<TypeSerializerSnapshot<?>> associatedSubclassSerializerSnapshots = new ArrayList<>();
final ArrayList<TypeSerializer<?>> associatedNewSubclassSerializers = new ArrayList<>();
final LinkedHashMap<Class<?>, TypeSerializer<?>> v31 = newPojoSerializer.getBundledSubclassSerializerRegistry();
for (Map.Entry<Class<?>, TypeSerializerSnapshot<?>> entry : unwrappedSerializerSnapshots.entrySet()) {
TypeSerializer<?> v33 = v31.get(entry.getKey());
if (v33 != null) {
associatedSubclassSerializerSnapshots.add(entry.getValue());
associatedNewSubclassSerializers.add(v33);
}
}
return CompositeTypeSerializerUtil.constructIntermediateCompatibilityResult(associatedNewSubclassSerializers.toArray(new TypeSerializer<?>[associatedNewSubclassSerializers.size()]), associatedSubclassSerializerSnapshots.toArray(new TypeSerializerSnapshot<?>[associatedSubclassSerializerSnapshots.size()]));
} | 3.26 |
flink_PojoSerializerSnapshot_newPojoSerializerIsCompatibleWithReconfiguredSerializer_rdh | /**
* Checks if the new {@link PojoSerializer} is compatible with a reconfigured instance.
*/
private static <T> boolean newPojoSerializerIsCompatibleWithReconfiguredSerializer(PojoSerializer<T> newPojoSerializer, IntermediateCompatibilityResult<T> fieldSerializerCompatibility, IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility, LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> registeredSubclassSerializerSnapshots, LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>> nonRegisteredSubclassSerializerSnapshots) {
return ((m2(registeredSubclassSerializerSnapshots, newPojoSerializer) || previousSerializerHasNonRegisteredSubclasses(nonRegisteredSubclassSerializerSnapshots)) || fieldSerializerCompatibility.isCompatibleWithReconfiguredSerializer()) || preExistingRegistrationsCompatibility.isCompatibleWithReconfiguredSerializer();
} | 3.26 |
flink_PojoSerializerSnapshot_newPojoHasNewOrRemovedFields_rdh | /**
* Checks whether the new {@link PojoSerializer} has new or removed fields compared to the
* previous one.
*/
private static boolean newPojoHasNewOrRemovedFields(LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots, PojoSerializer<?> newPojoSerializer) {
int numRemovedFields = fieldSerializerSnapshots.absentKeysOrValues().size();
int numPreexistingFields = fieldSerializerSnapshots.size() - numRemovedFields;
boolean hasRemovedFields = numRemovedFields > 0;
boolean hasNewFields = (newPojoSerializer.getFields().length - numPreexistingFields) > 0;
return hasRemovedFields || hasNewFields;
} | 3.26 |
flink_PojoSerializerSnapshot_m1_rdh | // ---------------------------------------------------------------------------------------------
// Utility methods
// ---------------------------------------------------------------------------------------------
/**
* Transforms a {@link LinkedHashMap} with {@link TypeSerializerSnapshot}s as the value to
* {@link TypeSerializer} as the value by restoring the snapshot.
*/
private static <K> LinkedHashMap<K, TypeSerializer<?>> m1(LinkedHashMap<K, TypeSerializerSnapshot<?>> snapshotsMap) {
final LinkedHashMap<K, TypeSerializer<?>> restoredSerializersMap = CollectionUtil.newLinkedHashMapWithExpectedSize(snapshotsMap.size());
snapshotsMap.forEach((key, snapshot) -> restoredSerializersMap.put(key, snapshot.restoreSerializer()));
return restoredSerializersMap;
} | 3.26 |
flink_PojoSerializerSnapshot_newPojoSerializerIsCompatibleAfterMigration_rdh | /**
* Checks if the new {@link PojoSerializer} is compatible after migration.
*/
private static <T> boolean newPojoSerializerIsCompatibleAfterMigration(PojoSerializer<T> newPojoSerializer, IntermediateCompatibilityResult<T> fieldSerializerCompatibility, IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility, LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots) {
return (newPojoHasNewOrRemovedFields(fieldSerializerSnapshots, newPojoSerializer) || fieldSerializerCompatibility.isCompatibleAfterMigration()) || preExistingRegistrationsCompatibility.isCompatibleAfterMigration();
} | 3.26 |
flink_PojoSerializerSnapshot_buildNewFieldSerializersIndex_rdh | /**
* Builds an index of fields to their corresponding serializers for the new {@link PojoSerializer} for faster field serializer lookups.
*/
private static <T> Map<Field, TypeSerializer<?>> buildNewFieldSerializersIndex(PojoSerializer<T> newPojoSerializer) {
final Field[] newFields = newPojoSerializer.getFields();
final TypeSerializer<?>[] newFieldSerializers = newPojoSerializer.getFieldSerializers();
checkState(newFields.length == newFieldSerializers.length);
int numFields = newFields.length;
final Map<Field, TypeSerializer<?>> index = CollectionUtil.newHashMapWithExpectedSize(numFields);
for (int i = 0; i < numFields; i++) {
index.put(newFields[i], newFieldSerializers[i]);
}
return index;
} | 3.26 |
flink_TemporalRowTimeJoinOperator_cleanupExpiredVersionInState_rdh | /**
* Removes all expired version in the versioned table's state according to current watermark.
*/
private void cleanupExpiredVersionInState(long currentWatermark, List<RowData> rightRowsSorted) throws Exception {
int v14 = 0;
int indexToKeep = firstIndexToKeep(currentWatermark, rightRowsSorted);
// clean old version data that behind current watermark
while (v14 < indexToKeep) {
long v16 = getRightTime(rightRowsSorted.get(v14));
rightState.remove(v16);
v14 += 1;
}
} | 3.26 |
flink_TemporalRowTimeJoinOperator_cleanupState_rdh | /**
* The method to be called when a cleanup timer fires.
*
* @param time
* The timestamp of the fired timer.
*/
@Override
public void cleanupState(long time) {
leftState.clear();
rightState.clear();
nextLeftIndex.clear();
registeredTimer.clear();
} | 3.26 |
flink_TemporalRowTimeJoinOperator_latestRightRowToJoin_rdh | /**
* Binary search {@code rightRowsSorted} to find the latest right row to join with {@code leftTime}. Latest means a right row with largest time that is still smaller or equal to
* {@code leftTime}. For example with: rightState = [1(+I), 4(+U), 7(+U), 9(-D), 12(I)],
*
* <p>If left time is 6, the valid period should be [4, 7), data 4(+U) should be joined.
*
* <p>If left time is 10, the valid period should be [9, 12), but data 9(-D) is a DELETE message
* which means the corresponding version has no data in period [9, 12), data 9(-D) should not be
* correlated.
*
* @return found element or {@code Optional.empty} if such a row was not found (either {@code rightRowsSorted} is empty or all {@code rightRowsSorted} are newer).
*/
private Optional<RowData> latestRightRowToJoin(List<RowData> rightRowsSorted, long leftTime) {
return latestRightRowToJoin(rightRowsSorted, 0, rightRowsSorted.size() - 1, leftTime);
} | 3.26 |
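The Javadoc above describes a "largest timestamp still <= leftTime" lookup; the standalone sketch below (not the operator's actual code) demonstrates that search over a sorted array of version timestamps, using the [1, 4, 7, 9, 12] example from the comment.

public class LatestVersionLookup {
    /** Returns the index of the largest element <= leftTime, or -1 if all elements are newer. */
    static int latestIndexAtOrBefore(long[] sortedTimes, long leftTime) {
        int lo = 0;
        int hi = sortedTimes.length - 1;
        int result = -1;
        while (lo <= hi) {
            int mid = (lo + hi) >>> 1;
            if (sortedTimes[mid] <= leftTime) {
                result = mid; // candidate found; keep searching to the right for a later one
                lo = mid + 1;
            } else {
                hi = mid - 1;
            }
        }
        return result;
    }

    public static void main(String[] args) {
        long[] versions = {1, 4, 7, 9, 12};
        System.out.println(latestIndexAtOrBefore(versions, 6));  // 1 -> version 4 joins
        System.out.println(latestIndexAtOrBefore(versions, 10)); // 3 -> version 9, skipped if it is a DELETE
        System.out.println(latestIndexAtOrBefore(versions, 0));  // -1 -> nothing to join
    }
}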
flink_Rowtime_timestampsFromExtractor_rdh | /**
* Sets a custom timestamp extractor to be used for the rowtime attribute.
*
* @param extractor
* The {@link TimestampExtractor} to extract the rowtime attribute from the
* physical type.
*/
public Rowtime timestampsFromExtractor(TimestampExtractor extractor) {
internalProperties.putProperties(extractor.toProperties());
return this;
} | 3.26 |
flink_Rowtime_m0_rdh | /**
* Sets a custom watermark strategy to be used for the rowtime attribute.
*/
public Rowtime m0(WatermarkStrategy strategy) {
internalProperties.putProperties(strategy.toProperties());
return this;
} | 3.26 |
flink_Rowtime_timestampsFromSource_rdh | /**
* Sets a built-in timestamp extractor that converts the assigned timestamps from a DataStream
* API record into the rowtime attribute and thus preserves the assigned timestamps from the
* source.
*
* <p>Note: This extractor only works in streaming environments.
*/
public Rowtime timestampsFromSource() {
internalProperties.putString(ROWTIME_TIMESTAMPS_TYPE, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_SOURCE);
return this;
} | 3.26 |
flink_Rowtime_timestampsFromField_rdh | /**
* Sets a built-in timestamp extractor that converts an existing {@link Long} or {@link Types#SQL_TIMESTAMP} field into the rowtime attribute.
*
* @param fieldName
* The field to convert into a rowtime attribute.
*/
public Rowtime timestampsFromField(String fieldName) {
internalProperties.putString(ROWTIME_TIMESTAMPS_TYPE, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD);
internalProperties.putString(ROWTIME_TIMESTAMPS_FROM, fieldName);
return this;
} | 3.26 |
flink_Rowtime_watermarksPeriodicBounded_rdh | /**
* Sets a built-in watermark strategy for rowtime attributes which are out-of-order by a bounded
* time interval.
*
* <p>Emits watermarks which are the maximum observed timestamp minus the specified delay.
*
* @param delay
* delay in milliseconds
*/
public Rowtime watermarksPeriodicBounded(long delay) {
internalProperties.putString(ROWTIME_WATERMARKS_TYPE, ROWTIME_WATERMARKS_TYPE_VALUE_PERIODIC_BOUNDED);
internalProperties.putLong(ROWTIME_WATERMARKS_DELAY, delay);
return this;
} | 3.26 |
flink_Rowtime_watermarksPeriodicAscending_rdh | /**
* Sets a built-in watermark strategy for ascending rowtime attributes.
*
* <p>Emits a watermark of the maximum observed timestamp so far minus 1. Rows that have a
* timestamp equal to the max timestamp are not late.
*/
public Rowtime watermarksPeriodicAscending() {
internalProperties.putString(ROWTIME_WATERMARKS_TYPE, ROWTIME_WATERMARKS_TYPE_VALUE_PERIODIC_ASCENDING);
return this;
} | 3.26 |
flink_Rowtime_toProperties_rdh | /**
* Converts this descriptor into a set of properties.
*/
@Override
public Map<String, String> toProperties() {
final DescriptorProperties properties = new DescriptorProperties();
properties.putProperties(internalProperties);
return properties.asMap();
} | 3.26 |
flink_Rowtime_watermarksFromSource_rdh | /**
* Sets a built-in watermark strategy which indicates the watermarks should be preserved from
* the underlying DataStream API and thus preserves the assigned watermarks from the source.
*/
public Rowtime watermarksFromSource() {
internalProperties.putString(ROWTIME_WATERMARKS_TYPE, ROWTIME_WATERMARKS_TYPE_VALUE_FROM_SOURCE);
return this;
} | 3.26 |
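A short sketch chaining the descriptor calls shown above into a property map; the public no-argument Rowtime constructor, the import path, and the "ts" field name are assumptions for illustration.

import java.util.Map;
import org.apache.flink.table.descriptors.Rowtime; // assumed package of the legacy descriptor

public class RowtimeDescriptorExample {
    public static void main(String[] args) {
        Rowtime rowtime = new Rowtime()
                .timestampsFromField("ts")           // rowtime comes from an existing "ts" field
                .watermarksPeriodicBounded(60_000L); // watermarks trail the max timestamp by 60s
        Map<String, String> properties = rowtime.toProperties();
        properties.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}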
flink_FileExecutionGraphInfoStore_calculateSize_rdh | // --------------------------------------------------------------
// Internal methods
// --------------------------------------------------------------
private int calculateSize(JobID jobId, ExecutionGraphInfo serializableExecutionGraphInfo) {
final File executionGraphInfoFile = getExecutionGraphFile(jobId);
if (executionGraphInfoFile.exists()) {
return Math.toIntExact(executionGraphInfoFile.length());
} else {
LOG.debug("Could not find execution graph information file for {}. Estimating the size instead.", jobId);
final ArchivedExecutionGraph serializableExecutionGraph = serializableExecutionGraphInfo.getArchivedExecutionGraph();
return (serializableExecutionGraph.getAllVertices().size() * 1000) + (serializableExecutionGraph.getAccumulatorsSerialized().size() * 1000);
}
} | 3.26 |
flink_FileExecutionGraphInfoStore_getStorageDir_rdh | // Testing methods
// --------------------------------------------------------------
@VisibleForTesting
File getStorageDir() {
return storageDir;
} | 3.26 |
flink_ChangelogMode_insertOnly_rdh | /**
* Shortcut for a simple {@link RowKind#INSERT}-only changelog.
*/
public static ChangelogMode insertOnly() {
return INSERT_ONLY;
} | 3.26 |
flink_ChangelogMode_newBuilder_rdh | /**
* Builder for configuring and creating instances of {@link ChangelogMode}.
*/
public static Builder newBuilder() {
return new Builder();
} | 3.26 |
flink_ChangelogMode_all_rdh | /**
* Shortcut for a changelog that can contain all {@link RowKind}s.
*/
public static ChangelogMode all() {
return ALL;
} | 3.26 |
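Usage sketch of the three entry points above; the Builder#addContainedKind call and the import paths are assumed for the custom mode.

import org.apache.flink.table.connector.ChangelogMode; // assumed package
import org.apache.flink.types.RowKind;

public class ChangelogModeExample {
    public static void main(String[] args) {
        ChangelogMode appendOnly = ChangelogMode.insertOnly(); // +I only
        ChangelogMode full = ChangelogMode.all();              // all RowKinds
        ChangelogMode upsert = ChangelogMode.newBuilder()      // custom subset of kinds
                .addContainedKind(RowKind.INSERT)              // assumed Builder method
                .addContainedKind(RowKind.UPDATE_AFTER)
                .addContainedKind(RowKind.DELETE)
                .build();
        System.out.println(appendOnly + " / " + full + " / " + upsert);
    }
}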
flink_AccumulatorSnapshot_deserializeUserAccumulators_rdh | /**
* Gets the user-defined accumulators values.
*
* @return the serialized map
*/
public Map<String, Accumulator<?, ?>> deserializeUserAccumulators(ClassLoader classLoader) throws IOException, ClassNotFoundException {
return userAccumulators.deserializeValue(classLoader);
} | 3.26 |
flink_KeyedBroadcastProcessFunction_onTimer_rdh | /**
* Called when a timer set using {@link TimerService} fires.
*
* @param timestamp
* The timestamp of the firing timer.
* @param ctx
* An {@link OnTimerContext} that allows querying the timestamp of the firing timer,
* querying the current processing/event time, iterating the broadcast state with
* <b>read-only</b> access, querying the {@link TimeDomain} of the firing timer and getting
* a {@link TimerService} for registering timers and querying the time. The context is only
* valid during the invocation of this method, do not store it.
* @param out
* The collector for returning result values.
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
public void onTimer(final long timestamp, final OnTimerContext ctx, final Collector<OUT> out) throws Exception {
// the default implementation does nothing.
} | 3.26 |
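A minimal sketch of overriding onTimer together with the required process methods of KeyedBroadcastProcessFunction; the String type parameters and the one-minute processing-time timer are illustrative assumptions.

import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction;
import org.apache.flink.util.Collector;

public class TimedBroadcastFunction
        extends KeyedBroadcastProcessFunction<String, String, String, String> {

    @Override
    public void processElement(String value, ReadOnlyContext ctx, Collector<String> out) throws Exception {
        // Register a processing-time timer one minute from now (illustrative choice).
        long fireAt = ctx.timerService().currentProcessingTime() + 60_000L;
        ctx.timerService().registerProcessingTimeTimer(fireAt);
    }

    @Override
    public void processBroadcastElement(String value, Context ctx, Collector<String> out) {
        // Broadcast side is not used in this sketch.
    }

    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) throws Exception {
        out.collect("timer fired at " + timestamp);
    }
}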
flink_ResolvedCatalogTable_getOptions_rdh | // --------------------------------------------------------------------------------------------
// Delegations to original CatalogTable
// --------------------------------------------------------------------------------------------
@Override
public Map<String, String> getOptions() {
return origin.getOptions();
} | 3.26 |
flink_PipelinedApproximateSubpartition_createReadView_rdh | /**
* To simplify the view releasing threading model, {@link PipelinedApproximateSubpartition#releaseView()} is called only before creating a new view.
*
* <p>There is still one corner case when a downstream task fails continuously in a short period
* of time then multiple netty worker threads can createReadView at the same time. TODO: This
* problem will be solved in FLINK-19774
*/
@Override
public PipelinedSubpartitionView createReadView(BufferAvailabilityListener availabilityListener) {
synchronized(buffers) {
checkState(!isReleased);
releaseView();
LOG.debug("{}: Creating read view for subpartition {} of partition {}.", parent.getOwningTaskName(), getSubPartitionIndex(), parent.getPartitionId());
readView = new PipelinedApproximateSubpartitionView(this, availabilityListener);
}
return readView;
} | 3.26 |
flink_PipelinedApproximateSubpartition_setIsPartialBufferCleanupRequired_rdh | /**
* for testing only.
*/
@VisibleForTesting
void setIsPartialBufferCleanupRequired() {
isPartialBufferCleanupRequired = true;
} | 3.26 |
flink_ManagedInitializationContext_isRestored_rdh | /**
* Returns true, if state was restored from the snapshot of a previous execution.
*/
default boolean isRestored() {
return getRestoredCheckpointId().isPresent();
} | 3.26 |
flink_HsFileDataManager_run_rdh | // Note, this method is synchronized on `this`, not `lock`. The purpose here is to prevent
// concurrent `run()` executions. Concurrent calls to other methods are allowed.
@Override
public synchronized void run() {
int numBuffersRead = tryRead();
endCurrentRoundOfReading(numBuffersRead);
} | 3.26 |
flink_HsFileDataManager_release_rdh | /**
* Releases this file data manager and deletes shuffle data after all readers are removed.
*/
public void release() {
synchronized(lock) {
if (isReleased) {
return;
}
isReleased = true;
List<HsSubpartitionFileReader> pendingReaders = new ArrayList<>(allReaders);
mayNotifyReleased();
failSubpartitionReaders(pendingReaders, new IllegalStateException("Result partition has been already released."));
// close data index and delete shuffle file only when no reader is reading now.
releaseFuture.thenRun(this::closeDataIndexAndDeleteShuffleFile);
}
} | 3.26 |
flink_HsFileDataManager_tryRead_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
/**
*
* @return number of buffers read.
*/
private int tryRead() {
Queue<HsSubpartitionFileReader> availableReaders = prepareAndGetAvailableReaders();
if (availableReaders.isEmpty()) {
return 0;
}
Queue<MemorySegment> buffers;
try {
buffers = allocateBuffers();
} catch (Exception exception) {
// fail all pending subpartition readers immediately if any exception occurs
failSubpartitionReaders(availableReaders, exception);
LOG.error("Failed to request buffers for data reading.", exception);
return 0;
}
int numBuffersAllocated = buffers.size();
if (numBuffersAllocated <= 0) {
return 0;
}
readData(availableReaders, buffers);
int numBuffersRead = numBuffersAllocated - buffers.size();
releaseBuffers(buffers);
return numBuffersRead;
} | 3.26 |
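tryRead() measures progress as "buffers allocated minus buffers left over" and returns whatever the readers did not use. A Flink-independent sketch of that accounting; the Reader interface and byte[] buffers below are stand-ins for HsSubpartitionFileReader and MemorySegment, not the real types:

import java.util.ArrayDeque;
import java.util.Queue;

// Stand-in sketch of the accounting in tryRead(): readers poll buffers from a freshly
// allocated batch, the caller counts what was consumed and recycles the rest.
public class ReadRound {

    interface Reader {
        // polls as many buffers from the queue as it can fill
        void readInto(Queue<byte[]> buffers);
    }

    static int runOneRound(Iterable<Reader> readers, int batchSize) {
        Queue<byte[]> buffers = new ArrayDeque<>();
        for (int i = 0; i < batchSize; i++) {
            buffers.add(new byte[32 * 1024]); // stand-in for a MemorySegment
        }
        int allocated = buffers.size();
        for (Reader reader : readers) {
            reader.readInto(buffers);
        }
        int consumed = allocated - buffers.size(); // same accounting as numBuffersRead above
        buffers.clear();                           // stand-in for returning unused buffers to the pool
        return consumed;
    }
}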
flink_HsFileDataManager_recycle_rdh | // ------------------------------------------------------------------------
// Implementation Methods of BufferRecycler
// ------------------------------------------------------------------------
@Override
public void recycle(MemorySegment segment) {
synchronized(lock) {
bufferPool.recycle(segment);
--numRequestedBuffers;
mayTriggerReading();
}
} | 3.26 |
flink_HsFileDataManager_setup_rdh | /**
 * Sets up the read buffer pool.
*/
public void setup() {
bufferPool.initialize();
} | 3.26 |
flink_HsFileDataManager_releaseSubpartitionReader_rdh | /**
 * Releases a specific {@link HsSubpartitionFileReader} from this {@link HsFileDataManager}.
 *
 * @param subpartitionFileReader the reader to release.
*/
public void releaseSubpartitionReader(HsSubpartitionFileReader subpartitionFileReader) {
synchronized(lock) {
removeSubpartitionReaders(Collections.singleton(subpartitionFileReader));
}
} | 3.26 |
flink_HsFileDataManager_m0_rdh | /**
 * This method is only called by the result partition to create a subpartition file reader.
*/
public HsDataView m0(int subpartitionId, HsConsumerId consumerId, HsSubpartitionConsumerInternalOperations operation) throws IOException {
synchronized(lock) {
checkState(!isReleased, "HsFileDataManager is already released.");
lazyInitialize();
HsSubpartitionFileReader subpartitionReader = fileReaderFactory.createFileReader(subpartitionId, consumerId, dataFileChannel, operation, dataIndex, hybridShuffleConfiguration.getMaxBuffersReadAhead(), this::releaseSubpartitionReader,
headerBuf);
allReaders.add(subpartitionReader);
mayTriggerReading();
return subpartitionReader;
}
} | 3.26 |
flink_ReduceOperatorBase_setCustomPartitioner_rdh | // --------------------------------------------------------------------------------------------
public void setCustomPartitioner(Partitioner<?> customPartitioner) {
if (customPartitioner != null) {
int[] keys = getKeyColumns(0);
if ((keys == null) || (keys.length == 0)) {
throw new IllegalArgumentException("Cannot use custom partitioner for a non-grouped GroupReduce (AllGroupReduce)");
}
if (keys.length > 1) {
throw new IllegalArgumentException("Cannot use the key partitioner for composite keys (more than one key field)");
}
}
this.customPartitioner = customPartitioner;
} | 3.26 |
flink_ReduceOperatorBase_executeOnCollections_rdh | // --------------------------------------------------------------------------------------------
@Override
protected List<T> executeOnCollections(List<T> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
// make sure we can handle empty inputs
if (inputData.isEmpty()) {
return Collections.emptyList();
}
ReduceFunction<T> function = this.userFunction.getUserCodeObject();
UnaryOperatorInformation<T, T> operatorInfo = getOperatorInfo();
TypeInformation<T> inputType = operatorInfo.getInputType();
int[] inputColumns = getKeyColumns(0);
if ((!(inputType instanceof CompositeType)) && (inputColumns.length > 1)) {
throw new InvalidProgramException("Grouping is only possible on composite types.");
}
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
TypeSerializer<T> serializer = getOperatorInfo().getInputType().createSerializer(executionConfig);
if (inputColumns.length > 0) {
boolean[] inputOrderings = new boolean[inputColumns.length];
TypeComparator<T> inputComparator = (inputType instanceof AtomicType) ? ((AtomicType<T>) (inputType)).createComparator(false, executionConfig) : ((CompositeType<T>) (inputType)).createComparator(inputColumns, inputOrderings, 0, executionConfig);
Map<TypeComparable<T>, T> aggregateMap = new HashMap<TypeComparable<T>, T>(inputData.size() / 10);
for (T next : inputData) {
TypeComparable<T> wrapper = new TypeComparable<T>(next, inputComparator);
T existing = aggregateMap.get(wrapper);
T v12;
if (existing != null) {
v12 = function.reduce(existing, serializer.copy(next));
} else {
v12 = next;
}
v12 = serializer.copy(v12);
aggregateMap.put(wrapper, v12);
}
FunctionUtils.closeFunction(function);
return new ArrayList<T>(aggregateMap.values());
} else {
T aggregate = inputData.get(0);
aggregate = serializer.copy(aggregate);
for (int i = 1; i < inputData.size(); i++) {
T next = function.reduce(aggregate, serializer.copy(inputData.get(i)));
aggregate = serializer.copy(next);
}
FunctionUtils.setFunctionRuntimeContext(function, ctx);
return Collections.singletonList(aggregate);
}
} | 3.26 |
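The grouped branch above folds each record into aggregateMap, merging it with the existing value for its key. A self-contained sketch of the same group-and-reduce idea on a plain Java list, with a simple key extractor and reducer in place of Flink's comparators and serializers (all names below are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BinaryOperator;
import java.util.function.Function;

// Illustrative only: group records by key and fold each group with a reduce function,
// mirroring the aggregateMap loop in executeOnCollections.
public class CollectionReduce {

    static <T, K> List<T> reduceByKey(List<T> input, Function<T, K> keyExtractor, BinaryOperator<T> reducer) {
        Map<K, T> aggregates = new HashMap<>();
        for (T record : input) {
            // merge the new record with the existing aggregate for its key, if any
            aggregates.merge(keyExtractor.apply(record), record, reducer);
        }
        return new ArrayList<>(aggregates.values());
    }

    public static void main(String[] args) {
        List<int[]> data = List.of(new int[]{1, 10}, new int[]{2, 5}, new int[]{1, 7});
        List<int[]> result = reduceByKey(
                data,
                record -> record[0],                      // key field
                (a, b) -> new int[]{a[0], a[1] + b[1]});  // sum the value field
        result.forEach(r -> System.out.println(r[0] + " -> " + r[1])); // 1 -> 17, 2 -> 5
    }
}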
flink_BoundedBlockingSubpartition_createWithMemoryMappedFile_rdh | /**
 * Creates a BoundedBlockingSubpartition that stores the partition data in a memory-mapped file.
* Data is written to and read from the mapped memory region. Disk spilling happens lazily, when
* the OS swaps out the pages from the memory mapped file.
*/
public static BoundedBlockingSubpartition createWithMemoryMappedFile(int index, ResultPartition parent, File tempFile) throws IOException {
final MemoryMappedBoundedData bd = MemoryMappedBoundedData.create(tempFile.toPath());
return new BoundedBlockingSubpartition(index, parent, bd, false);
} | 3.26 |
flink_BoundedBlockingSubpartition_unsynchronizedGetNumberOfQueuedBuffers_rdh | // ---------------------------- statistics --------------------------------
@Override
public int unsynchronizedGetNumberOfQueuedBuffers() {
return 0;
} | 3.26 |
flink_BoundedBlockingSubpartition_isFinished_rdh | // ------------------------------------------------------------------------
/**
* Checks if writing is finished. Readers cannot be created until writing is finished, and no
* further writes can happen after that.
*/
public boolean isFinished() {
return isFinished;
} | 3.26 |
flink_BoundedBlockingSubpartition_createWithFileChannel_rdh | // ---------------------------- factories --------------------------------
/**
* Creates a BoundedBlockingSubpartition that simply stores the partition data in a file. Data
* is eagerly spilled (written to disk) and readers directly read from the file.
*/
public static BoundedBlockingSubpartition createWithFileChannel(int index, ResultPartition parent, File tempFile, int readBufferSize, boolean sslEnabled) throws IOException {
final FileChannelBoundedData bd = FileChannelBoundedData.create(tempFile.toPath(), readBufferSize);
return new BoundedBlockingSubpartition(index, parent, bd, !sslEnabled);
} | 3.26 |
flink_BoundedBlockingSubpartition_createWithFileAndMemoryMappedReader_rdh | /**
* Creates a BoundedBlockingSubpartition that stores the partition data in a file and memory
* maps that file for reading. Data is eagerly spilled (written to disk) and then mapped into
* memory. The main difference to the {@link #createWithMemoryMappedFile(int, ResultPartition,
* File)} variant is that no I/O is necessary when pages from the memory mapped file are
* evicted.
*/
public static BoundedBlockingSubpartition createWithFileAndMemoryMappedReader(int index, ResultPartition parent, File tempFile) throws IOException {
final FileChannelMemoryMappedBoundedData bd = FileChannelMemoryMappedBoundedData.create(tempFile.toPath());
return new BoundedBlockingSubpartition(index, parent, bd, false);
} | 3.26 |
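The three factory methods above differ in when data is spilled to disk and how readers access it. A hedged sketch of how a caller might choose between them; the chooseSubpartition helper and its flags are hypothetical, the flag-to-variant mapping is illustrative rather than Flink's actual selection logic, and the class is assumed to live in the same package as BoundedBlockingSubpartition so the Flink types need no imports:

import java.io.File;
import java.io.IOException;

// Hypothetical selection helper, sketching how the factory variants above differ.
final class BoundedBlockingVariants {

    static BoundedBlockingSubpartition chooseSubpartition(
            int index,
            ResultPartition parent,
            File tempFile,
            boolean sslEnabled,
            boolean preferMemoryMapping,
            int readBufferSize) throws IOException {
        if (!preferMemoryMapping) {
            // eager spilling, readers go through a file channel (works together with SSL)
            return BoundedBlockingSubpartition.createWithFileChannel(
                    index, parent, tempFile, readBufferSize, sslEnabled);
        }
        if (sslEnabled) {
            // eager spilling, but reads come from a memory-mapped view of the spilled file
            return BoundedBlockingSubpartition.createWithFileAndMemoryMappedReader(index, parent, tempFile);
        }
        // fully memory-mapped: writes and reads both hit the mapped region, the OS spills lazily
        return BoundedBlockingSubpartition.createWithMemoryMappedFile(index, parent, tempFile);
    }
}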
flink_ExecEdge_translateToFusionCodegenSpec_rdh | /**
 * Translates this edge into an operator fusion codegen spec generator.
 *
 * @param planner the {@link Planner} of the translated Table.
*/
public OpFusionCodegenSpecGenerator translateToFusionCodegenSpec(Planner planner) {
return source.translateToFusionCodegenSpec(planner);
} | 3.26 |
flink_ExecEdge_getOutputType_rdh | /**
 * Returns the output {@link LogicalType} of the data passing through this edge.
*/
public LogicalType getOutputType() {
return source.getOutputType();
} | 3.26 |