name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
flink_PythonOperatorChainingOptimizer_buildOutputMap_rdh
|
/**
* Construct the key-value pairs where the value is the output transformations of the key
* transformation.
*/
private static Map<Transformation<?>, Set<Transformation<?>>> buildOutputMap(List<Transformation<?>> transformations) {
final Map<Transformation<?>, Set<Transformation<?>>> outputMap = new HashMap<>();
final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations);
final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet();
while (!toTransformQueue.isEmpty()) {
Transformation<?> transformation = toTransformQueue.poll();
if (!alreadyTransformed.contains(transformation)) {
alreadyTransformed.add(transformation);
for (Transformation<?> v21 : transformation.getInputs()) {
Set<Transformation<?>> outputs = outputMap.computeIfAbsent(v21, i -> Sets.newHashSet());
outputs.add(transformation);
}
toTransformQueue.addAll(transformation.getInputs());
}
}
return outputMap;
}
| 3.26 |
flink_PythonOperatorChainingOptimizer_replaceInput_rdh
|
// ----------------------- Utility Methods -----------------------
private static void replaceInput(Transformation<?> transformation, Transformation<?> oldInput, Transformation<?> newInput) {
try {
if (((((((transformation instanceof OneInputTransformation) || (transformation instanceof FeedbackTransformation)) || (transformation instanceof SideOutputTransformation)) || (transformation instanceof ReduceTransformation)) || (transformation instanceof LegacySinkTransformation)) ||
(transformation instanceof TimestampsAndWatermarksTransformation)) || (transformation instanceof PartitionTransformation)) {
final Field v44 = transformation.getClass().getDeclaredField("input");
v44.setAccessible(true);
v44.set(transformation, newInput);
} else if (transformation instanceof SinkTransformation) {
final Field inputField = transformation.getClass().getDeclaredField("input");
inputField.setAccessible(true);
inputField.set(transformation, newInput);
final Field transformationField = DataStream.class.getDeclaredField("transformation");
transformationField.setAccessible(true);
transformationField.set(((SinkTransformation<?, ?>) (transformation)).getInputStream(),
newInput);
} else if (transformation instanceof TwoInputTransformation) {
final Field inputField;
if (((TwoInputTransformation<?, ?, ?>) (transformation)).getInput1() == oldInput) {
inputField = transformation.getClass().getDeclaredField("input1");
} else {
inputField = transformation.getClass().getDeclaredField("input2");
}
inputField.setAccessible(true);
inputField.set(transformation, newInput);
} else if ((transformation instanceof UnionTransformation) || (transformation instanceof AbstractMultipleInputTransformation)) {
final Field inputsField = transformation.getClass().getDeclaredField("inputs");
inputsField.setAccessible(true);
List<Transformation<?>> newInputs = Lists.newArrayList();newInputs.addAll(transformation.getInputs());
newInputs.remove(oldInput); newInputs.add(newInput);
inputsField.set(transformation, newInputs);
} else if (transformation instanceof AbstractBroadcastStateTransformation)
{
final Field inputField;
if (((AbstractBroadcastStateTransformation<?, ?, ?>) (transformation)).getRegularInput() == oldInput) {
inputField = transformation.getClass().getDeclaredField("regularInput");
} else {
inputField = transformation.getClass().getDeclaredField("broadcastInput");
}
inputField.setAccessible(true);
inputField.set(transformation, newInput);
} else {
throw new RuntimeException("Unsupported transformation: "
+ transformation);
}
} catch (NoSuchFieldException | IllegalAccessException e) {
// This should never happen
throw new RuntimeException(e);
}
}
| 3.26 |
flink_PythonOperatorChainingOptimizer_optimize_rdh
|
/**
 * Perform chaining optimization. It returns the chained transformations and the
 * transformation that corresponds to the given target transformation after chaining optimization.
*/
public static Tuple2<List<Transformation<?>>, Transformation<?>> optimize(List<Transformation<?>> transformations, Transformation<?> targetTransformation) {
final Map<Transformation<?>, Set<Transformation<?>>> outputMap =
buildOutputMap(transformations);
final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>();
final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet();
final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque();
toTransformQueue.add(targetTransformation);
while (!toTransformQueue.isEmpty()) {
final Transformation<?> toTransform = toTransformQueue.poll();
if (!alreadyTransformed.contains(toTransform)) {
alreadyTransformed.add(toTransform);
final ChainInfo chainInfo = chainWithInputIfPossible(toTransform, outputMap);
chainedTransformations.add(chainInfo.newTransformation);
chainedTransformations.removeAll(chainInfo.oldTransformations);
alreadyTransformed.addAll(chainInfo.oldTransformations);
// Add the chained transformation and its inputs to the to-optimize list
toTransformQueue.add(chainInfo.newTransformation);
toTransformQueue.addAll(chainInfo.newTransformation.getInputs());
if (toTransform == targetTransformation) {
targetTransformation = chainInfo.newTransformation;
}
}
}
return Tuple2.of(new ArrayList<>(chainedTransformations), targetTransformation);
}
| 3.26 |
flink_HybridShuffleConfiguration_getFullStrategyReleaseBufferRatio_rdh
|
/**
* The proportion of buffers to be released. Used by {@link HsFullSpillingStrategy}.
*/
public float getFullStrategyReleaseBufferRatio() {
return fullStrategyReleaseBufferRatio;
}
| 3.26 |
flink_HybridShuffleConfiguration_getSpillingStrategyType_rdh
|
/**
* Get {@link SpillingStrategyType} for hybrid shuffle mode.
*/
public SpillingStrategyType getSpillingStrategyType() {
return spillingStrategyType;
}
| 3.26 |
flink_HybridShuffleConfiguration_getSelectiveStrategySpillBufferRatio_rdh
|
/**
* The proportion of buffers to be spilled. Used by {@link HsSelectiveSpillingStrategy}.
*/
public float getSelectiveStrategySpillBufferRatio() {
return selectiveStrategySpillBufferRatio;
}
| 3.26 |
flink_HybridShuffleConfiguration_getMaxBuffersReadAhead_rdh
|
/**
* Determine how many buffers to read ahead at most for each subpartition to prevent other
* consumers from starving.
*/
public int getMaxBuffersReadAhead() {
return maxBuffersReadAhead;
}
| 3.26 |
flink_HybridShuffleConfiguration_getRegionGroupSizeInBytes_rdh
|
/**
* Segment size of hybrid spilled file data index.
*/
public int getRegionGroupSizeInBytes() {
return regionGroupSizeInBytes;
}
| 3.26 |
flink_HybridShuffleConfiguration_getBufferRequestTimeout_rdh
|
/**
* Maximum time to wait when requesting read buffers from the buffer pool before throwing an
* exception.
*/
public Duration getBufferRequestTimeout() {
return bufferRequestTimeout;
}
| 3.26 |
flink_HybridShuffleConfiguration_getFullStrategyReleaseThreshold_rdh
|
/**
* When the number of buffers that have been requested exceeds this threshold, trigger the
* release operation. Used by {@link HsFullSpillingStrategy}.
*/
public float getFullStrategyReleaseThreshold() {
return fullStrategyReleaseThreshold;
}
| 3.26 |
flink_HybridShuffleConfiguration_getBufferPoolSizeCheckIntervalMs_rdh
|
/**
* Check interval of buffer pool's size.
*/
public long getBufferPoolSizeCheckIntervalMs() {
return bufferPoolSizeCheckIntervalMs;
}
| 3.26 |
flink_HybridShuffleConfiguration_getNumRetainedInMemoryRegionsMax_rdh
|
/**
* Max number of hybrid retained regions in memory.
*/
public long getNumRetainedInMemoryRegionsMax() {
return numRetainedInMemoryRegionsMax;
}
| 3.26 |
flink_FileSource_forRecordStreamFormat_rdh
|
// ------------------------------------------------------------------------
// Entry-point Factory Methods
// ------------------------------------------------------------------------
/**
* Builds a new {@code FileSource} using a {@link StreamFormat} to read record-by-record from a
* file stream.
*
 * <p>When possible, stream-based formats are generally easier to use than (and preferable to)
 * file-based formats, because they support better default behavior around I/O batching or
 * progress tracking (checkpoints).
*
* <p>Stream formats also automatically de-compress files based on the file extension. This
* supports files ending in ".deflate" (Deflate), ".xz" (XZ), ".bz2" (BZip2), ".gz", ".gzip"
* (GZip).
 */
public static <T> FileSourceBuilder<T> forRecordStreamFormat(final StreamFormat<T> streamFormat, final Path... paths) {
return forBulkFileFormat(new StreamFormatAdapter<>(streamFormat), paths);
}
| 3.26 |
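A minimal usage sketch of the entry point above, assuming the standard `flink-connector-files` dependency, a hypothetical input directory `/tmp/input`, and `TextLineInputFormat` as the `StreamFormat` (class and package names are as in recent Flink releases and may differ in older ones):

```java
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class FileSourceExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Read text files line by line; files ending in ".gz" etc. would be decompressed
        // automatically, as described in the javadoc above.
        FileSource<String> source =
                FileSource.forRecordStreamFormat(
                                new TextLineInputFormat(), new Path("/tmp/input"))
                        .build();

        DataStream<String> lines =
                env.fromSource(source, WatermarkStrategy.noWatermarks(), "file-source");
        lines.print();
        env.execute("file-source-example");
    }
}
```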
flink_FileSource_forBulkFileFormat_rdh
|
/**
* Builds a new {@code FileSource} using a {@link BulkFormat} to read batches of records from
* files.
*
* <p>Examples for bulk readers are compressed and vectorized formats such as ORC or Parquet.
*/
public static <T> FileSourceBuilder<T> forBulkFileFormat(final BulkFormat<T, FileSourceSplit> bulkFormat, final Path... paths) {
checkNotNull(bulkFormat, "reader");
checkNotNull(paths, "paths");
checkArgument(paths.length > 0, "paths must not be empty");
return new FileSourceBuilder<>(paths, bulkFormat);
}
| 3.26 |
flink_FileSource_forRecordFileFormat_rdh
|
/**
* Builds a new {@code FileSource} using a {@link FileRecordFormat} to read record-by-record
 * from a file path.
*
 * <p>A {@code FileRecordFormat} is more general than the {@link StreamFormat}, but it often
 * requires more careful parametrization.
*
* @deprecated Please use {@link #forRecordStreamFormat(StreamFormat, Path...)} instead.
*/
@Deprecated
public static <T> FileSourceBuilder<T> forRecordFileFormat(final FileRecordFormat<T> recordFormat, final Path... paths) {
return forBulkFileFormat(new FileRecordFormatAdapter<>(recordFormat), paths);
}
| 3.26 |
flink_SqlJsonValueFunctionWrapper_explicitTypeSpec_rdh
|
/**
* Copied and modified from the original {@link SqlJsonValueFunction}.
*
* <p>Changes: Instead of returning {@link Optional} this method returns null directly.
*/
private static RelDataType explicitTypeSpec(SqlOperatorBinding opBinding) {
if (((opBinding.getOperandCount() > 2) && opBinding.isOperandLiteral(2, false)) && (opBinding.getOperandLiteralValue(2, Object.class) instanceof SqlJsonValueReturning)) {
return opBinding.getOperandType(3);
}
return null;
}
| 3.26 |
flink_StateWithExecutionGraph_updateTaskExecutionState_rdh
|
/**
* Updates the execution graph with the given task execution state transition.
*
* @param taskExecutionStateTransition
* taskExecutionStateTransition to update the ExecutionGraph
* with
* @param failureLabels
* the failure labels to attach to the task failure cause
* @return {@code true} if the update was successful; otherwise {@code false}
*/
boolean updateTaskExecutionState(TaskExecutionStateTransition taskExecutionStateTransition, CompletableFuture<Map<String, String>> failureLabels) {
// collect before updateState, as updateState may deregister the execution
final Optional<AccessExecution> maybeExecution = executionGraph.findExecution(taskExecutionStateTransition.getID());
final Optional<String> maybeTaskName = executionGraph.findVertexWithAttempt(taskExecutionStateTransition.getID());
final ExecutionState desiredState = taskExecutionStateTransition.getExecutionState();
boolean successfulUpdate = getExecutionGraph().updateState(taskExecutionStateTransition);
if (successfulUpdate && (desiredState == ExecutionState.FAILED)) {
final AccessExecution execution = maybeExecution.orElseThrow(NoSuchElementException::new);
final String taskName = maybeTaskName.orElseThrow(NoSuchElementException::new);
final ExecutionState currentState = execution.getState();
if (currentState == desiredState) {
failureCollection.add(ExceptionHistoryEntry.create(execution, taskName, failureLabels));
onFailure(ErrorInfo.handleMissingThrowable(taskExecutionStateTransition.getError(f0)));
}
}
return successfulUpdate;
}
| 3.26 |
flink_AllGroupReduceDriver_setup_rdh
|
// ------------------------------------------------------------------------
@Override
public void setup(TaskContext<GroupReduceFunction<IT, OT>, OT> context) {
this.taskContext = context;
}
| 3.26 |
flink_AllGroupReduceDriver_prepare_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void prepare() throws Exception {
final TaskConfig config = this.taskContext.getTaskConfig();
this.strategy = config.getDriverStrategy();
switch (this.strategy) {
case ALL_GROUP_REDUCE_COMBINE :
if (!(this.taskContext.getStub() instanceof GroupCombineFunction)) {
throw new Exception("Using combiner on a UDF that does not implement the combiner interface " + GroupCombineFunction.class.getName());
}
case ALL_GROUP_REDUCE :
case ALL_GROUP_COMBINE :
break;
default :
throw new Exception("Unrecognized driver strategy for AllGroupReduce driver: " + this.strategy.name());
}
this.serializer = this.taskContext.<IT>getInputSerializer(0).getSerializer();
this.input = this.taskContext.getInput(0);
ExecutionConfig executionConfig = taskContext.getExecutionConfig();
this.objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (LOG.isDebugEnabled()) {
LOG.debug(("AllGroupReduceDriver object reuse: " + (this.objectReuseEnabled ? "ENABLED" : "DISABLED")) + ".");
}
}
| 3.26 |
flink_S3Recoverable_m0_rdh
|
// ------------------------------------------------------------------------
public String m0() {
return uploadId;
}
| 3.26 |
flink_LeaderRetrievalUtils_retrieveLeaderInformation_rdh
|
/**
* Retrieves the leader pekko url and the current leader session ID. The values are stored in a
* {@link LeaderInformation} instance.
*
* @param leaderRetrievalService
* Leader retrieval service to retrieve the leader connection
* information
* @param timeout
* Timeout when to give up looking for the leader
* @return LeaderInformation containing the leader's rpc URL and the current leader session ID
* @throws LeaderRetrievalException
*/
public static LeaderInformation retrieveLeaderInformation(LeaderRetrievalService leaderRetrievalService, Duration timeout) throws LeaderRetrievalException {
LeaderInformationListener listener = new LeaderInformationListener();
try {
leaderRetrievalService.start(listener);
return listener.getLeaderInformationFuture().get(timeout.toMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new LeaderRetrievalException("Could not retrieve the leader address and leader " + "session ID.", e);
} finally {
try {
leaderRetrievalService.stop();
} catch (Exception fe) {
LOG.warn("Could not stop the leader retrieval service.", fe);
}
}
}
| 3.26 |
flink_RecordAndPosition_getRecord_rdh
|
// ------------------------------------------------------------------------
public E getRecord() {
return record;
}
| 3.26 |
flink_RecordAndPosition_toString_rdh
|
// ------------------------------------------------------------------------
@Override
public String toString() {
return String.format("%s @ %d + %d", record, offset, recordSkipCount);
}
| 3.26 |
flink_HadoopOutputFormatBase_writeObject_rdh
|
// --------------------------------------------------------------------------------------------
// Custom serialization methods
// --------------------------------------------------------------------------------------------
private void writeObject(ObjectOutputStream out) throws IOException {
super.write(out);
out.writeUTF(mapredOutputFormat.getClass().getName());
jobConf.write(out);
}
| 3.26 |
flink_HadoopOutputFormatBase_open_rdh
|
/**
* create the temporary output file for hadoop RecordWriter.
*
* @param taskNumber
* The number of the parallel instance.
* @param numTasks
* The number of parallel tasks.
* @throws java.io.IOException
*/
@Override
public void open(int taskNumber, int numTasks) throws IOException {
// enforce sequential open() calls
synchronized(OPEN_MUTEX) {
if (Integer.toString(taskNumber + 1).length() > 6) {
throw new IOException("Task id too large.");
}
TaskAttemptID taskAttemptID = TaskAttemptID.forName((("attempt__0000_r_" + String.format(("%" + (6 - Integer.toString(taskNumber + 1).length())) + "s", " ").replace(" ", "0")) + Integer.toString(taskNumber + 1)) + "_0");
this.jobConf.set("mapred.task.id", taskAttemptID.toString());
this.jobConf.setInt("mapred.task.partition", taskNumber + 1);
// for hadoop 2.2
this.jobConf.set("mapreduce.task.attempt.id", taskAttemptID.toString());
this.jobConf.setInt("mapreduce.task.partition", taskNumber + 1);
this.context = new TaskAttemptContextImpl(this.jobConf, taskAttemptID);
this.outputCommitter = this.jobConf.getOutputCommitter();
JobContext jobContext = new JobContextImpl(this.jobConf, new JobID());
this.outputCommitter.setupJob(jobContext);
this.recordWriter = this.mapredOutputFormat.getRecordWriter(null, this.jobConf, Integer.toString(taskNumber + 1), new HadoopDummyProgressable());
}
}
| 3.26 |
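The attempt-id string built in `open()` above left-pads the one-based task number to six digits before embedding it in a Hadoop task attempt id; a dependency-free sketch of that padding rule (not Flink code, values are illustrative):

```java
public class TaskAttemptIdPadding {
    // Same padding idea as in open() above: (taskNumber + 1) is left-padded to six digits.
    static String buildAttemptId(int taskNumber) {
        String padded = String.format("%06d", taskNumber + 1); // e.g. 3 -> "000004"
        return "attempt__0000_r_" + padded + "_0";
    }

    public static void main(String[] args) {
        System.out.println(buildAttemptId(3)); // attempt__0000_r_000004_0
    }
}
```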
flink_HadoopOutputFormatBase_configure_rdh
|
// --------------------------------------------------------------------------------------------
// OutputFormat
// --------------------------------------------------------------------------------------------
@Override
public void configure(Configuration parameters) {
// enforce sequential configure() calls
synchronized(f0) {
// configure MR OutputFormat if necessary
if (this.mapredOutputFormat instanceof Configurable) {
((Configurable) (this.mapredOutputFormat)).setConf(this.jobConf);
} else if (this.mapredOutputFormat instanceof JobConfigurable) {
((JobConfigurable) (this.mapredOutputFormat)).configure(this.jobConf);
}
}
}
| 3.26 |
flink_HadoopOutputFormatBase_close_rdh
|
/**
* commit the task by moving the output file out from the temporary directory.
*
* @throws java.io.IOException
*/
@Override
public void close() throws IOException {
// enforce sequential close() calls
synchronized(CLOSE_MUTEX) {
this.recordWriter.close(new HadoopDummyReporter());
if (this.outputCommitter.needsTaskCommit(this.context)) {
this.outputCommitter.commitTask(this.context);
}
}
}
| 3.26 |
flink_SqlJsonUtils_createArrayNode_rdh
|
/**
* Returns a new {@link ArrayNode}.
*/
public static ArrayNode createArrayNode() {
return MAPPER.createArrayNode();
}
| 3.26 |
flink_SqlJsonUtils_getNodeFactory_rdh
|
/**
* Returns the {@link JsonNodeFactory} for creating nodes.
*/
public static JsonNodeFactory getNodeFactory() {
return MAPPER.getNodeFactory();
}
/**
* Returns a new {@link ObjectNode}
| 3.26 |
flink_SqlJsonUtils_serializeJson_rdh
|
/**
* Serializes the given {@link JsonNode} to a JSON string.
*/
public static String serializeJson(JsonNode node) {
try {
// For JSON functions to have deterministic output, we need to sort the keys. However,
// Jackson's built-in features don't work on the tree representation, so we need to
// convert the tree first.
final Object convertedNode = MAPPER.treeToValue(node, Object.class);
return MAPPER.writeValueAsString(convertedNode);
} catch (JsonProcessingException e) {
throw new TableException("JSON object could not be serialized: " + node.asText(), e);
}
}
| 3.26 |
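The deterministic key ordering depends on how the shared `MAPPER` is configured, which is not shown in the snippet; a self-contained sketch of the same tree-to-Object-then-serialize idea, assuming a locally created Jackson `ObjectMapper` with `ORDER_MAP_ENTRIES_BY_KEYS` enabled:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;

public class SortedJsonExample {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper =
                new ObjectMapper().enable(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);

        JsonNode node = mapper.readTree("{\"b\":1,\"a\":2}");
        // Convert the tree to a plain Object first; the key-sorting feature applies to
        // Map serialization, not to the ObjectNode tree representation directly.
        Object converted = mapper.treeToValue(node, Object.class);
        System.out.println(mapper.writeValueAsString(converted)); // {"a":2,"b":1}
    }
}
```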
flink_StateMap_releaseSnapshot_rdh
|
/**
* Releases a snapshot for this {@link StateMap}. This method should be called once a snapshot
 * is no longer needed.
*
* @param snapshotToRelease
* the snapshot to release, which was previously created by this state
* map.
*/
public void releaseSnapshot(StateMapSnapshot<K, N, S, ? extends StateMap<K, N, S>> snapshotToRelease) {
}
| 3.26 |
flink_SerializableHadoopConfigWrapper_writeObject_rdh
|
// ------------------------------------------------------------------------
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
// we write the Hadoop config through a separate serializer to avoid cryptic exceptions when
// it corrupts the serialization stream
final DataOutputSerializer ser = new DataOutputSerializer(256);
hadoopConfig.write(ser);
out.writeInt(ser.length());
out.write(ser.getSharedBuffer(), 0, ser.length());
}
| 3.26 |
flink_IterationTailTask_initialize_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected void initialize() throws Exception {
super.initialize();
// sanity check: the tail has to update either the workset or the solution set
if ((!isWorksetUpdate) && (!isSolutionSetUpdate)) {
throw new RuntimeException("The iteration tail doesn't update workset or the solution set.");}
// set the last output collector of this task to reflect the iteration tail state update:
// a) workset update,
// b) solution set update, or
// c) merged workset and solution set update
Collector<OT> outputCollector = null;
if (isWorksetUpdate) {
outputCollector = createWorksetUpdateOutputCollector();
// we need the WorksetUpdateOutputCollector separately to count the collected elements
if (isWorksetIteration) {
worksetUpdateOutputCollector = ((WorksetUpdateOutputCollector<OT>) (outputCollector));
}
}
if (isSolutionSetUpdate) {
if (isWorksetIteration) {
outputCollector = createSolutionSetUpdateOutputCollector(outputCollector);
} else {
// Bulk iteration with termination criterion
outputCollector = new Collector<OT>() {
@Override
public void collect(OT record) {
}
@Override
public void close() {
}
};
}
if (!isWorksetUpdate) {
solutionSetUpdateBarrier = SolutionSetUpdateBarrierBroker.instance().get(brokerKey());
}
}
setLastOutputCollector(outputCollector);
}
| 3.26 |
flink_ColumnStats_copy_rdh
|
/**
* Create a deep copy of "this" instance.
*
* @return a deep copy
*/
public ColumnStats copy() {
if ((maxValue != null) || (f0 != null)) {
return new ColumnStats(this.ndv, this.nullCount, this.avgLen, this.maxLen, this.maxValue, this.f0);
} else {
return new ColumnStats(this.ndv, this.nullCount, this.avgLen, this.maxLen, this.max, this.min);
}
}
| 3.26 |
flink_ColumnStats_merge_rdh
|
/**
 * Merges two column stats. If either of the two stats is unknown, the merged result is unknown,
 * regardless of the other. Unknown column stats are represented by null.
*
* @param other
* The other column stats to merge.
* @return The merged column stats.
*/
public ColumnStats merge(ColumnStats other, boolean isPartitionKey) {
if ((this == UNKNOWN) || (other == UNKNOWN)) {
return UNKNOWN;
}
Long ndv;
if (isPartitionKey) {
ndv = combineIfNonNull(Long::sum, this.ndv, other.ndv);
} else {
ndv = combineIfNonNull(Long::max, this.ndv, other.ndv);
}
Long nullCount = combineIfNonNull(Long::sum, this.nullCount, other.nullCount);
Double avgLen = combineIfNonNull((a1, a2) -> (a1 + a2) / 2, this.avgLen, other.avgLen);
Integer maxLen = combineIfNonNull(Math::max, this.maxLen, other.maxLen);
Number maxValue = combineIfNonNull((n1, n2) -> n1.doubleValue() > n2.doubleValue() ? n1 : n2, this.maxValue, other.maxValue);
Number minValue = combineIfNonNull((n1, n2) -> n1.doubleValue() < n2.doubleValue() ? n1 : n2, this.f0, other.f0);
@SuppressWarnings("unchecked")
Comparable max = combineIfNonNull((c1, c2) -> ((Comparable) (c1)).compareTo(c2) > 0 ? c1 : c2, this.max, other.max);
@SuppressWarnings("unchecked")
Comparable min = combineIfNonNull((c1, c2) -> ((Comparable) (c1)).compareTo(c2) < 0 ? c1 : c2, this.min, other.min);
if ((max != null) || (min != null)) {
return new ColumnStats(ndv, nullCount, avgLen, maxLen, max, min);
} else {
return new ColumnStats(ndv, nullCount, avgLen, maxLen, maxValue, minValue);
}
}
| 3.26 |
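The helper `combineIfNonNull` is not part of the snippet; a plausible shape, assumed here purely for illustration, is a null-propagating combine of two values:

```java
import java.util.function.BinaryOperator;

public class CombineIfNonNull {
    // Assumed shape of the helper used by ColumnStats.merge above (not the actual Flink code):
    // returns null if either input is null, otherwise applies the combiner.
    static <T> T combineIfNonNull(BinaryOperator<T> combiner, T left, T right) {
        return (left == null || right == null) ? null : combiner.apply(left, right);
    }

    public static void main(String[] args) {
        System.out.println(combineIfNonNull(Long::sum, 3L, 4L));   // 7
        System.out.println(combineIfNonNull(Long::sum, null, 4L)); // null
    }
}
```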
flink_ColumnStats_getMaxValue_rdh
|
/**
* Deprecated because Number type max/min is not well supported comparable type, e.g. {@link java.util.Date}, {@link java.sql.Timestamp}.
*
* <p>Returns null if this instance is constructed by {@link ColumnStats.Builder}.
*/
@Deprecated
public Number getMaxValue() {
return maxValue;
}
| 3.26 |
flink_ColumnStats_getMax_rdh
|
/**
* Returns null if this instance is constructed by {@link ColumnStats#ColumnStats(Long, Long,
* Double, Integer, Number, Number)}.
*/
public Comparable<?> getMax() {
return max;
}
/**
* Deprecated because Number type max/min is not well supported comparable type, e.g. {@link java.util.Date}, {@link java.sql.Timestamp}.
*
* <p>Returns null if this instance is constructed by {@link ColumnStats.Builder}
| 3.26 |
flink_ColumnStats_getMin_rdh
|
/**
* Returns null if this instance is constructed by {@link ColumnStats#ColumnStats(Long, Long,
* Double, Integer, Number, Number)}.
*/
public Comparable<?> getMin() {
return min;
}
| 3.26 |
flink_HttpHeader_m0_rdh
|
/**
* Returns the name of this HTTP header.
*
* @return the name of this HTTP header
*/
public String m0() {
return name;
}
| 3.26 |
flink_HttpHeader_getValue_rdh
|
/**
* Returns the value of this HTTP header.
*
* @return the value of this HTTP header
 */
public String getValue() {
return value;
}
| 3.26 |
flink_ConnectedStreams_process_rdh
|
/**
* Applies the given {@link KeyedCoProcessFunction} on the connected input streams, thereby
* creating a transformed output stream.
*
* <p>The function will be called for every element in the input streams and can produce zero or
* more output elements. Contrary to the {@link #flatMap(CoFlatMapFunction)} function, this
* function can also query the time and set timers. When reacting to the firing of set timers
* the function can directly emit elements and/or register yet more timers.
*
* @param keyedCoProcessFunction
* The {@link KeyedCoProcessFunction} that is called for each
* element in the stream.
* @param <R>
* The type of elements emitted by the {@code CoProcessFunction}.
* @return The transformed {@link DataStream}.
*/
@Internal
public <K, R> SingleOutputStreamOperator<R> process(KeyedCoProcessFunction<K, IN1, IN2, R> keyedCoProcessFunction, TypeInformation<R> outputType) {
TwoInputStreamOperator<IN1, IN2, R> operator;
if ((inputStream1 instanceof KeyedStream) && (inputStream2 instanceof KeyedStream)) {
operator = new KeyedCoProcessOperator<>(inputStream1.clean(keyedCoProcessFunction));
} else {
throw new UnsupportedOperationException("KeyedCoProcessFunction can only be used " + "when both input streams are of type KeyedStream.");
}
return transform("Co-Keyed-Process", outputType, operator);
}
| 3.26 |
flink_ConnectedStreams_flatMap_rdh
|
/**
* Applies a CoFlatMap transformation on a {@link ConnectedStreams} and maps the output to a
* common type. The transformation calls a {@link CoFlatMapFunction#flatMap1} for each element
* of the first input and {@link CoFlatMapFunction#flatMap2} for each element of the second
* input. Each CoFlatMapFunction call returns any number of elements including none.
*
* @param coFlatMapper
* The CoFlatMapFunction used to jointly transform the two input DataStreams
* @param outputType
* {@link TypeInformation} for the result type of the function.
* @return The transformed {@link DataStream}
*/
public <R> SingleOutputStreamOperator<R> flatMap(CoFlatMapFunction<IN1, IN2, R> coFlatMapper, TypeInformation<R> outputType) {
return transform("Co-Flat Map", outputType, new CoStreamFlatMap<>(inputStream1.clean(coFlatMapper)));
}
| 3.26 |
flink_ConnectedStreams_getType2_rdh
|
/**
* Gets the type of the second input.
*
* @return The type of the second input
*/
public TypeInformation<IN2> getType2() {
return inputStream2.getType();
}
| 3.26 |
flink_ConnectedStreams_map_rdh
|
/**
* Applies a CoMap transformation on a {@link ConnectedStreams} and maps the output to a common
* type. The transformation calls a {@link CoMapFunction#map1} for each element of the first
* input and {@link CoMapFunction#map2} for each element of the second input. Each CoMapFunction
* call returns exactly one element.
*
* @param coMapper
* The CoMapFunction used to jointly transform the two input DataStreams
* @param outputType
* {@link TypeInformation} for the result type of the function.
* @return The transformed {@link DataStream}
*/
public <R> SingleOutputStreamOperator<R> map(CoMapFunction<IN1, IN2, R> coMapper, TypeInformation<R> outputType) {
return transform("Co-Map", outputType, new CoStreamMap<>(inputStream1.clean(coMapper)));
}
| 3.26 |
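A short usage sketch of the co-map API above, using the overload without an explicit `TypeInformation` and illustrative stream contents:

```java
import org.apache.flink.streaming.api.datastream.ConnectedStreams;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoMapFunction;

public class CoMapExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Integer> numbers = env.fromElements(1, 2, 3);
        DataStream<String> words = env.fromElements("a", "b");

        ConnectedStreams<Integer, String> connected = numbers.connect(words);

        // map1 handles elements of the first input, map2 of the second; both emit a common type.
        DataStream<String> result =
                connected.map(
                        new CoMapFunction<Integer, String, String>() {
                            @Override
                            public String map1(Integer value) {
                                return "int:" + value;
                            }

                            @Override
                            public String map2(String value) {
                                return "str:" + value;
                            }
                        });

        result.print();
        env.execute("co-map-example");
    }
}
```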
flink_ConnectedStreams_keyBy_rdh
|
/**
* KeyBy operation for connected data stream. Assigns keys to the elements of input1 and input2
* using keySelector1 and keySelector2 with explicit type information for the common key type.
*
* @param keySelector1
* The {@link KeySelector} used for grouping the first input
* @param keySelector2
* The {@link KeySelector} used for grouping the second input
* @param keyType
* The type information of the common key type.
* @return The partitioned {@link ConnectedStreams}
*/
public <KEY> ConnectedStreams<IN1, IN2> keyBy(KeySelector<IN1, KEY> keySelector1, KeySelector<IN2, KEY> keySelector2, TypeInformation<KEY> keyType) {
return new ConnectedStreams<>(environment, inputStream1.keyBy(keySelector1, keyType), inputStream2.keyBy(keySelector2, keyType));
}
| 3.26 |
flink_ConnectedStreams_getFirstInput_rdh
|
/**
* Returns the first {@link DataStream}.
*
* @return The first DataStream.
*/
public DataStream<IN1> getFirstInput() {
return inputStream1;
}
| 3.26 |
flink_ConnectedStreams_getType1_rdh
|
/**
* Gets the type of the first input.
*
* @return The type of the first input
*/
public TypeInformation<IN1> getType1() {
return inputStream1.getType();
}
| 3.26 |
flink_SqlGatewayOptionsParser_getSqlGatewayOptions_rdh
|
// --------------------------------------------------------------------------------------------
private static Options getSqlGatewayOptions() {
Options options = new Options();
options.addOption(OPTION_HELP);
options.addOption(DYNAMIC_PROPERTY_OPTION);
return options;
}
| 3.26 |
flink_SqlGatewayOptionsParser_parseSqlGatewayOptions_rdh
|
// --------------------------------------------------------------------------------------------
// Line Parsing
// --------------------------------------------------------------------------------------------
public static SqlGatewayOptions parseSqlGatewayOptions(String[] args) {
try {
DefaultParser parser = new DefaultParser();
CommandLine line = parser.parse(getSqlGatewayOptions(), args, true);
return new SqlGatewayOptions(line.hasOption(SqlGatewayOptionsParser.OPTION_HELP.getOpt()), line.getOptionProperties(DYNAMIC_PROPERTY_OPTION.getOpt()));
} catch (ParseException e) {
throw new SqlGatewayException(e.getMessage());
}
}
| 3.26 |
flink_SqlGatewayOptionsParser_printHelpSqlGateway_rdh
|
// --------------------------------------------------------------------------------------------
// Help
// --------------------------------------------------------------------------------------------
/**
* Prints the help for the client.
*/
public static void printHelpSqlGateway(PrintStream writer) {
writer.println();
printHelpForStart(writer);
}
| 3.26 |
flink_OperationTreeBuilder_addAliasToTheCallInAggregate_rdh
|
/**
* Add a default name to the call in the grouping expressions, e.g., groupBy(a % 5) to groupBy(a
* % 5 as TMP_0) or make aggregate a named aggregate.
*/
private List<Expression> addAliasToTheCallInAggregate(List<String> inputFieldNames, List<Expression> expressions) {
int attrNameCntr = 0;
Set<String> usedFieldNames = new HashSet<>(inputFieldNames);
List<Expression> result = new ArrayList<>();
for (Expression groupingExpression : expressions) {
if ((groupingExpression instanceof UnresolvedCallExpression) && (!ApiExpressionUtils.isFunction(groupingExpression, BuiltInFunctionDefinitions.AS))) {
String tempName = getUniqueName("TMP_" + attrNameCntr, usedFieldNames);
attrNameCntr += 1;
usedFieldNames.add(tempName);
result.add(unresolvedCall(BuiltInFunctionDefinitions.AS, groupingExpression, valueLiteral(tempName)));
} else {
result.add(groupingExpression);
}
}
return result;
}
| 3.26 |
flink_OperationTreeBuilder_addColumns_rdh
|
/**
* Adds additional columns. Existing fields will be replaced if replaceIfExist is true.
*/
public QueryOperation addColumns(boolean replaceIfExist, List<Expression> fieldLists, QueryOperation child) {
final List<Expression> newColumns;
if (replaceIfExist) {
final List<String> fieldNames = child.getResolvedSchema().getColumnNames();
newColumns = ColumnOperationUtils.addOrReplaceColumns(fieldNames, fieldLists);
} else {
newColumns = new ArrayList<>(fieldLists);
newColumns.add(0, unresolvedRef("*"));
}
return project(newColumns, child, false);
}
| 3.26 |
flink_OperationTreeBuilder_aliasBackwardFields_rdh
|
/**
* Rename fields in the input {@link QueryOperation}.
*/
private QueryOperation aliasBackwardFields(QueryOperation inputOperation, List<String> alias, int aliasStartIndex) {
if (!alias.isEmpty()) {
List<String> namesBeforeAlias = inputOperation.getResolvedSchema().getColumnNames();
List<String> namesAfterAlias = new ArrayList<>(namesBeforeAlias);
for (int i = 0; i < alias.size(); i++) {
int withOffset = aliasStartIndex + i;
namesAfterAlias.remove(withOffset);
namesAfterAlias.add(withOffset, alias.get(i));
}
return this.alias(namesAfterAlias.stream().map(ApiExpressionUtils::unresolvedRef).collect(Collectors.toList()), inputOperation);
} else {
return inputOperation;
}
}
| 3.26 |
flink_OperationTreeBuilder_getUniqueName_rdh
|
/**
* Return a unique name that does not exist in usedFieldNames according to the input name.
*/
private String getUniqueName(String inputName, Collection<String> usedFieldNames) {
int i = 0;
String resultName = inputName;
while (usedFieldNames.contains(resultName)) {
resultName = (resultName + "_") + i;
i += 1;
}
return resultName;
}
| 3.26 |
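A standalone rendering of the loop above (plain Java, not the Flink class), showing that the `_<i>` suffix is appended to the previous candidate rather than to the original name:

```java
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;

public class UniqueNameDemo {
    // Same loop as getUniqueName above: keep appending "_<i>" until the name is free.
    static String getUniqueName(String inputName, Collection<String> usedFieldNames) {
        int i = 0;
        String resultName = inputName;
        while (usedFieldNames.contains(resultName)) {
            resultName = resultName + "_" + i;
            i += 1;
        }
        return resultName;
    }

    public static void main(String[] args) {
        Collection<String> used = new HashSet<>(Arrays.asList("TMP_0", "TMP_0_0"));
        // The suffix accumulates on the previous candidate, not on the original name:
        System.out.println(getUniqueName("TMP_0", used)); // TMP_0_0_1
    }
}
```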
flink_FlinkSemiAntiJoinJoinTransposeRule_setJoinAdjustments_rdh
|
/**
* Sets an array to reflect how much each index corresponding to a field needs to be adjusted.
* The array corresponds to fields in a 3-way join between (X, Y, and Z). X remains unchanged,
* but Y and Z need to be adjusted by some fixed amount as determined by the input.
*
* @param adjustments
* array to be filled out
* @param nFieldsX
* number of fields in X
* @param nFieldsY
* number of fields in Y
* @param nFieldsZ
* number of fields in Z
* @param adjustY
* the amount to adjust Y by
* @param adjustZ
* the amount to adjust Z by
 */
private void setJoinAdjustments(int[] adjustments, int nFieldsX, int nFieldsY, int nFieldsZ, int adjustY, int adjustZ) {
for (int i = 0; i < nFieldsX; i++) {
adjustments[i] = 0;
}
for (int i = nFieldsX; i < (nFieldsX + nFieldsY); i++) {
adjustments[i] = adjustY;
}
for (int i = nFieldsX + nFieldsY; i < ((nFieldsX + nFieldsY) + nFieldsZ); i++) {
adjustments[i] = adjustZ;
}
}
| 3.26 |
flink_ZookeeperModuleFactory_createModule_rdh
|
/**
* A {@link SecurityModuleFactory} for {@link ZooKeeperModule}.
 */
public class ZookeeperModuleFactory implements SecurityModuleFactory {
@Override
public SecurityModule createModule(SecurityConfiguration securityConfig) {
return new ZooKeeperModule(securityConfig);
}
| 3.26 |
flink_VertexFlameGraphFactory_createFullFlameGraphFrom_rdh
|
/**
* Converts {@link VertexThreadInfoStats} into a FlameGraph.
*
* @param sample
* Thread details sample containing stack traces.
* @return FlameGraph data structure
*/
public static VertexFlameGraph createFullFlameGraphFrom(VertexThreadInfoStats sample) {
EnumSet<Thread.State> included = EnumSet.allOf(Thread.State.class);
return createFlameGraphFromSample(sample, included);
}
| 3.26 |
flink_VertexFlameGraphFactory_createOnCpuFlameGraph_rdh
|
/**
* Converts {@link VertexThreadInfoStats} into a FlameGraph representing actively running
* (On-CPU) threads.
*
* <p>Includes threads in states Thread.State.[RUNNABLE, NEW].
*
* @param sample
* Thread details sample containing stack traces.
* @return FlameGraph data structure
*/
public static VertexFlameGraph createOnCpuFlameGraph(VertexThreadInfoStats sample) {
EnumSet<Thread.State> v2 = EnumSet.of(Thread.State.RUNNABLE, Thread.State.NEW);
return createFlameGraphFromSample(sample, v2);
}
| 3.26 |
flink_VertexFlameGraphFactory_createOffCpuFlameGraph_rdh
|
/**
* Converts {@link VertexThreadInfoStats} into a FlameGraph representing blocked (Off-CPU)
* threads.
*
* <p>Includes threads in states Thread.State.[TIMED_WAITING, BLOCKED, WAITING].
*
* @param sample
* Thread details sample containing stack traces.
* @return FlameGraph data structure.
*/
public static VertexFlameGraph createOffCpuFlameGraph(VertexThreadInfoStats sample) {
EnumSet<Thread.State> included = EnumSet.of(Thread.State.TIMED_WAITING, Thread.State.BLOCKED, Thread.State.WAITING);
return createFlameGraphFromSample(sample, included);
}
| 3.26 |
flink_VertexFlameGraphFactory_m0_rdh
|
// Note that Thread.getStackTrace() performs a similar logic - the stack trace returned
// by this method will not contain lambda references with it. But ThreadMXBean does collect
// lambdas, so we have to clean them up explicitly.
private static StackTraceElement[] m0(StackTraceElement[] stackTrace) {
StackTraceElement[] result = new StackTraceElement[stackTrace.length];
final String v11 = System.getProperty("java.version");
final Pattern lambdaClassName = (v11.compareTo("21") >= 0) ? JDK21_LAMBDA_CLASS_NAME : LAMBDA_CLASS_NAME;
for (int i = 0; i < stackTrace.length; i++) {
StackTraceElement element = stackTrace[i];
Matcher matcher = lambdaClassName.matcher(element.getClassName());
if (matcher.find()) {
// org.apache.flink.streaming.runtime.io.RecordProcessorUtils$$Lambda$773/0x00000001007f84a0
// -->
// org.apache.flink.streaming.runtime.io.RecordProcessorUtils$$Lambda$0/0x0
// This ensures that the name is stable across JVMs, but at the same time
// keeps the stack frame in the call since it has the method name, which
// may be useful for analysis.
String newClassName = matcher.replaceFirst("$10/$20");
result[i] = new StackTraceElement(newClassName, element.getMethodName(), element.getFileName(), element.getLineNumber());
} else {
result[i] = element;
}
}
return result;
}
| 3.26 |
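The patterns `LAMBDA_CLASS_NAME` and `JDK21_LAMBDA_CLASS_NAME` are not shown in the snippet; a dependency-free sketch of the same normalization idea, with an assumed regex for pre-JDK-21 lambda class names:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LambdaNameNormalizer {
    // Assumed pattern; the real LAMBDA_CLASS_NAME / JDK21_LAMBDA_CLASS_NAME constants are not
    // shown above. Matches the synthetic "$$Lambda$<counter>/<hex>" suffix of lambda classes.
    private static final Pattern LAMBDA_SUFFIX =
            Pattern.compile("\\$\\$Lambda\\$\\d+/(0x)?[0-9a-fA-F]+");

    static String normalize(String className) {
        Matcher matcher = LAMBDA_SUFFIX.matcher(className);
        // Replace the JVM-specific counter and address with stable placeholders so the same
        // lambda aggregates to the same flame-graph frame across samples and JVMs.
        return matcher.find() ? matcher.replaceFirst("\\$\\$Lambda\\$0/0x0") : className;
    }

    public static void main(String[] args) {
        System.out.println(
                normalize("org.apache.flink.RecordProcessorUtils$$Lambda$773/0x00000001007f84a0"));
        // -> org.apache.flink.RecordProcessorUtils$$Lambda$0/0x0
    }
}
```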
flink_PartitionLoader_loadPartition_rdh
|
/**
* Load a single partition.
*
* @param partSpec
* the specification for the single partition
* @param srcPaths
* the paths for the files used to load to the single partition
* @param srcPathIsDir
 * whether every path in {@param srcPaths} is a directory. If true, the files under each
 * directory in {@param srcPaths} will be loaded. If false, every path in {@param srcPaths}
 * is treated as a single file and loaded as such.
*/
public void loadPartition(LinkedHashMap<String, String> partSpec, List<Path> srcPaths, boolean srcPathIsDir) throws Exception {
Optional<Path> pathFromMeta = metaStore.getPartition(partSpec);
Path path = pathFromMeta.orElseGet(() -> new Path(metaStore.getLocationPath(), generatePartitionPath(partSpec)));
overwriteAndMoveFiles(srcPaths, path, srcPathIsDir);
commitPartition(partSpec, path);
}
| 3.26 |
flink_PartitionLoader_loadNonPartition_rdh
|
/**
* Load a non-partition files to output path.
*
* @param srcPaths
* the paths for the files used to load to the single partition
* @param srcPathIsDir
 * whether every path in {@param srcPaths} is a directory. If true, the files under each
 * directory in {@param srcPaths} will be loaded. If false, every path in {@param srcPaths}
 * is treated as a single file and loaded as such.
*/
public void loadNonPartition(List<Path> srcPaths, boolean srcPathIsDir) throws Exception {
Path tableLocation = metaStore.getLocationPath();
overwriteAndMoveFiles(srcPaths, tableLocation, srcPathIsDir);
commitPartition(new LinkedHashMap<>(), tableLocation);
metaStore.finishWritingTable(tableLocation);
}
| 3.26 |
flink_PartitionLoader_moveFiles_rdh
|
/**
* Moves files from srcDir to destDir.
*/
private void moveFiles(List<Path> srcPaths, Path destDir, boolean srcPathIsDir) throws Exception {
if (srcPathIsDir) {
// if the src path is still a directory, list the directory to get the files that needed
// to be moved.
for (Path srcDir : srcPaths) {
if (!srcDir.equals(destDir)) {
FileStatus[] srcFiles = listStatusWithoutHidden(fs, srcDir);
if (srcFiles != null) {
for (FileStatus srcFile : srcFiles) {
moveFile(srcFile.getPath(), destDir);
}
}
}
}
} else {
for (Path v13 : srcPaths) {
moveFile(v13, destDir);
}
}
}
| 3.26 |
flink_PartitionLoader_loadEmptyPartition_rdh
|
/**
* The flink job does not write data to the partition, but the corresponding partition needs to
* be created or updated.
*
* <p>The partition does not exist, create it.
*
* <p>The partition exists:
*
* <pre>
* if overwrite is true, delete the path, then create it;
* if overwrite is false, do nothing;
* </pre>
*/
public void loadEmptyPartition(LinkedHashMap<String, String> partSpec) throws Exception {
Optional<Path> pathFromMeta = metaStore.getPartition(partSpec);
if (pathFromMeta.isPresent() && (!overwrite)) {
commitPartition(partSpec, pathFromMeta.get());
return;
}
Path path = new Path(metaStore.getLocationPath(), generatePartitionPath(partSpec));
if (pathFromMeta.isPresent()) {
fs.delete(pathFromMeta.get(), true);
fs.mkdirs(path);
}
commitPartition(partSpec, path);
}
| 3.26 |
flink_PartitionLoader_commitPartition_rdh
|
/**
* Reuse of PartitionCommitPolicy mechanisms. The default in Batch mode is metastore and
* success-file.
*/
private void commitPartition(LinkedHashMap<String, String> partitionSpec, Path path) throws Exception {
PartitionCommitPolicy.Context context = new CommitPolicyContextImpl(partitionSpec, path);
for (PartitionCommitPolicy policy : policies) {
if (policy instanceof MetastoreCommitPolicy) {
if (partitionSpec.isEmpty()) {
// Non partition table skip commit meta data.
continue;
}
((MetastoreCommitPolicy) (policy)).setMetastore(metaStore);
}
policy.commit(context);
}
}
| 3.26 |
flink_SourceCoordinator_aggregate_rdh
|
/**
 * Update the {@link Watermark} for the given {@code key}.
*
* @return the new updated combined {@link Watermark} if the value has changed. {@code Optional.empty()} otherwise.
*/
public Optional<Watermark> aggregate(T key, Watermark watermark) {
Watermark oldAggregatedWatermark = getAggregatedWatermark();
WatermarkElement watermarkElement = new WatermarkElement(watermark);
WatermarkElement oldWatermarkElement = watermarks.put(key, watermarkElement);
if (oldWatermarkElement != null) {
orderedWatermarks.remove(oldWatermarkElement);
}
orderedWatermarks.add(watermarkElement);
Watermark newAggregatedWatermark = getAggregatedWatermark();
if (newAggregatedWatermark.equals(oldAggregatedWatermark)) {
return Optional.empty();
}
return Optional.of(newAggregatedWatermark);
}
| 3.26 |
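A dependency-free sketch of the aggregation idea above, assuming the combined watermark is the minimum over all per-key watermarks and that a new combined value is reported only when it changes:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class MinWatermarkAggregator<K> {
    // Sketch of the min-based aggregation idea (not the Flink WatermarkAggregator itself).
    private final Map<K, Long> watermarks = new HashMap<>();

    // Returns the new combined (minimum) watermark only if it changed.
    public Optional<Long> aggregate(K key, long watermark) {
        long oldMin = currentMin();
        watermarks.put(key, watermark);
        long newMin = currentMin();
        return newMin == oldMin ? Optional.empty() : Optional.of(newMin);
    }

    private long currentMin() {
        return watermarks.values().stream().mapToLong(Long::longValue).min().orElse(Long.MIN_VALUE);
    }

    public static void main(String[] args) {
        MinWatermarkAggregator<Integer> agg = new MinWatermarkAggregator<>();
        System.out.println(agg.aggregate(0, 10L)); // Optional[10]
        System.out.println(agg.aggregate(1, 5L));  // Optional[5]
        System.out.println(agg.aggregate(1, 7L));  // Optional[7]
        System.out.println(agg.aggregate(0, 12L)); // Optional.empty (minimum is still 7)
    }
}
```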
flink_SourceCoordinator_deserializeCheckpoint_rdh
|
/**
* Restore the state of this source coordinator from the state bytes.
*
* @param bytes
* The checkpoint bytes that was returned from {@link #toBytes(long)}
* @throws Exception
* When the deserialization failed.
*/
private EnumChkT deserializeCheckpoint(byte[] bytes) throws Exception {
try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
DataInputStream in = new DataInputViewStreamWrapper(bais)) {
final int coordinatorSerdeVersion = readAndVerifyCoordinatorSerdeVersion(in);
int enumSerializerVersion = in.readInt();
int serializedEnumChkptSize = in.readInt();
byte[] serializedEnumChkpt = readBytes(in, serializedEnumChkptSize);
if ((coordinatorSerdeVersion != SourceCoordinatorSerdeUtils.VERSION_0) && (bais.available() > 0)) {
throw new IOException("Unexpected trailing bytes in enumerator checkpoint data");
}
return enumCheckpointSerializer.deserialize(enumSerializerVersion, serializedEnumChkpt);
}
}
| 3.26 |
flink_SourceCoordinator_handleRequestSplitEvent_rdh
|
// --------------------- private methods -------------
private void handleRequestSplitEvent(int subtask, int attemptNumber, RequestSplitEvent event) {
LOG.info("Source {} received split request from parallel task {} (#{})", operatorName, subtask, attemptNumber);
// request splits from the enumerator only if the enumerator has un-assigned splits
// this helps to reduce unnecessary split requests to the enumerator
if (!context.hasNoMoreSplits(subtask)) {
enumerator.handleSplitRequest(subtask, event.hostName());
}
}
| 3.26 |
flink_SourceCoordinator_toBytes_rdh
|
// --------------------- Serde -----------------------
/**
* Serialize the coordinator state. The current implementation may not be super efficient, but
* it should not matter that much because most of the state should be rather small. Large states
* themselves may already be a problem regardless of how the serialization is implemented.
*
* @return A byte array containing the serialized state of the source coordinator.
* @throws Exception
* When something goes wrong in serialization.
*/
private byte[] toBytes(long checkpointId) throws Exception {
return writeCheckpointBytes(enumerator.snapshotState(checkpointId), enumCheckpointSerializer);
}
| 3.26 |
flink_SortingThread_go_rdh
|
/**
* Entry point of the thread.
*/
@Override
public void go() throws InterruptedException {
boolean alive = true;
// loop as long as the thread is marked alive
while (isRunning() && alive) {
final CircularElement<E> element = this.dispatcher.take(SortStage.SORT);
if ((element != EOF_MARKER) && (element != SPILLING_MARKER)) {
if (element.getBuffer().size() == 0) {
element.getBuffer().reset();
this.dispatcher.send(SortStage.READ, element);
continue;
}
LOG.debug("Sorting buffer {}.", element.getId());
this.sorter.sort(element.getBuffer());
LOG.debug("Sorted buffer {}.", element.getId());
} else if (element == EOF_MARKER) {
LOG.debug("Sorting thread done.");
alive = false;
}
this.dispatcher.send(SortStage.SPILL, element);
}
}
| 3.26 |
flink_FlinkConvertletTable_convertTryCast_rdh
|
// Slightly modified version of StandardConvertletTable::convertCast
private RexNode convertTryCast(SqlRexContext cx, final SqlCall call) {
RelDataTypeFactory typeFactory = cx.getTypeFactory();
final SqlNode leftNode = call.operand(0);
final SqlNode rightNode = call.operand(1);
final RexNode valueRex = cx.convertExpression(leftNode);
RelDataType type;
if (rightNode instanceof SqlIntervalQualifier) {
type = typeFactory.createSqlIntervalType(((SqlIntervalQualifier) (rightNode)));
} else if (rightNode instanceof SqlDataTypeSpec) {
SqlDataTypeSpec dataType = ((SqlDataTypeSpec) (rightNode));
type = dataType.deriveType(cx.getValidator());
if (type == null) {
type = cx.getValidator().getValidatedNodeType(dataType.getTypeName());
}
} else {
throw new IllegalStateException("Invalid right argument type for TRY_CAST: " + rightNode);
}
type = typeFactory.createTypeWithNullability(type, true);
if (SqlUtil.isNullLiteral(leftNode, false)) {
final SqlValidatorImpl validator = ((SqlValidatorImpl) (cx.getValidator()));
validator.setValidatedNodeType(leftNode, type);
return cx.convertExpression(leftNode);
}
return cx.getRexBuilder().makeCall(type, FlinkSqlOperatorTable.TRY_CAST, Collections.singletonList(valueRex));
}
| 3.26 |
flink_TemporalTableJoinUtil_isRowTimeTemporalTableJoinCondition_rdh
|
/**
* Check if the given rexCall is a rewrote join condition on event time.
*/
public static boolean isRowTimeTemporalTableJoinCondition(RexCall call) {
// (LEFT_TIME_ATTRIBUTE, RIGHT_TIME_ATTRIBUTE, LEFT_KEY, RIGHT_KEY, PRIMARY_KEY)
return (call.getOperator() == TemporalJoinUtil.TEMPORAL_JOIN_CONDITION()) && (call.operands.size() == 5);
}
| 3.26 |
flink_TemporalTableJoinUtil_isEventTimeTemporalJoin_rdh
|
/**
* Check if the given join condition is an initial temporal join condition or a rewrote join
* condition on event time.
*/
public static boolean isEventTimeTemporalJoin(@Nonnull RexNode joinCondition) {
RexVisitor<Void> temporalConditionFinder = new RexVisitorImpl<Void>(true) {
@Override
public Void visitCall(RexCall call) {
if (((call.getOperator() == TemporalJoinUtil.INITIAL_TEMPORAL_JOIN_CONDITION()) && TemporalJoinUtil.isInitialRowTimeTemporalTableJoin(call)) || isRowTimeTemporalTableJoinCondition(call)) {
// has an initial row-time temporal join condition or a rewritten row-time temporal join condition
throw new Util.FoundOne(call);
}
return super.visitCall(call);
}
};
try {
joinCondition.accept(temporalConditionFinder);
} catch (Util.FoundOne found) {
return true;
}
return false;
}
| 3.26 |
flink_ViewUpdater_notifyOfAddedView_rdh
|
/**
* Notifies this ViewUpdater of a new metric that should be regularly updated.
*
* @param view
* metric that should be regularly updated
*/
public void notifyOfAddedView(View view) {
synchronized(lock) {
toAdd.add(view);
}
}
| 3.26 |
flink_ViewUpdater_notifyOfRemovedView_rdh
|
/**
* Notifies this ViewUpdater of a metric that should no longer be regularly updated.
*
* @param view
* metric that should no longer be regularly updated
*/
public void notifyOfRemovedView(View view) {
synchronized(lock) {
toRemove.add(view);
}
}
| 3.26 |
flink_FeedbackTransformation_getFeedbackEdges_rdh
|
/**
* Returns the list of feedback {@code Transformations}.
*/
public List<Transformation<T>> getFeedbackEdges() {
return feedbackEdges;
}
| 3.26 |
flink_FeedbackTransformation_addFeedbackEdge_rdh
|
/**
* Adds a feedback edge. The parallelism of the {@code Transformation} must match the
* parallelism of the input {@code Transformation} of this {@code FeedbackTransformation}
*
* @param transform
* The new feedback {@code Transformation}.
*/
public void addFeedbackEdge(Transformation<T> transform) {
if (transform.getParallelism() != this.getParallelism()) {
throw new UnsupportedOperationException((((("Parallelism of the feedback stream must match the parallelism of the original" + " stream. Parallelism of original stream: ") + this.getParallelism()) + "; parallelism of feedback stream: ") + transform.getParallelism()) + ". Parallelism can be modified using DataStream#setParallelism() method");
}
feedbackEdges.add(transform);
}
| 3.26 |
flink_FeedbackTransformation_getWaitTime_rdh
|
/**
* Returns the wait time. This is the amount of time that the feedback operator keeps listening
* for feedback elements. Once the time expires the operation will close and will not receive
* further elements.
*/
public Long getWaitTime() {
return waitTime;
}
| 3.26 |
flink_SerializedThrowable_printStackTrace_rdh
|
// ------------------------------------------------------------------------
// Override the behavior of Throwable
// ------------------------------------------------------------------------
@Override
public void printStackTrace(PrintStream s) {
s.print(fullStringifiedStackTrace);
s.flush();
}
| 3.26 |
flink_SerializedThrowable_get_rdh
|
// ------------------------------------------------------------------------
// Static utilities
// ------------------------------------------------------------------------
public static Throwable get(Throwable serThrowable, ClassLoader loader) {
if (serThrowable instanceof SerializedThrowable) {
return ((SerializedThrowable) (serThrowable)).deserializeError(loader);
} else {
return serThrowable;
}
}
| 3.26 |
flink_RegisteredOperatorStateBackendMetaInfo_deepCopy_rdh
|
/**
* Creates a deep copy of the itself.
*/
@Nonnull
public RegisteredOperatorStateBackendMetaInfo<S> deepCopy() {
return new RegisteredOperatorStateBackendMetaInfo<>(this);
}
| 3.26 |
flink_SpillChannelManager_m0_rdh
|
/**
* Removes a channel reader/writer from the list of channels that are to be removed at shutdown.
*
* @param channel
* The channel reader/writer.
*/
synchronized void m0(FileIOChannel channel) {
openChannels.remove(channel);
}
| 3.26 |
flink_SpillChannelManager_registerChannelToBeRemovedAtShutdown_rdh
|
/**
* Adds a channel to the list of channels that are to be removed at shutdown.
*
* @param channel
* The channel id.
*/
synchronized void registerChannelToBeRemovedAtShutdown(FileIOChannel.ID channel) {
channelsToDeleteAtShutdown.add(channel);
}
| 3.26 |
flink_SpillChannelManager_registerOpenChannelToBeRemovedAtShutdown_rdh
|
/**
* Adds a channel reader/writer to the list of channels that are to be removed at shutdown.
*
* @param channel
* The channel reader/writer.
*/
synchronized void registerOpenChannelToBeRemovedAtShutdown(FileIOChannel channel) {
openChannels.add(channel);
}
| 3.26 |
flink_SpillChannelManager_unregisterChannelToBeRemovedAtShutdown_rdh
|
/**
* Removes a channel from the list of channels that are to be removed at shutdown.
*
* @param channel
* The channel id.
*/
synchronized void unregisterChannelToBeRemovedAtShutdown(FileIOChannel.ID channel) {
channelsToDeleteAtShutdown.remove(channel);
}
| 3.26 |
flink_TieredStorageIdMappingUtils_convertId_rdh
|
/**
* Utils to convert the Ids to Tiered Storage Ids, or vice versa.
*/
| 3.26 |
flink_CepRuntimeContext_addAccumulator_rdh
|
// -----------------------------------------------------------------------------------
// Unsupported operations
// -----------------------------------------------------------------------------------
@Override
public <V, A extends Serializable> void addAccumulator(final String name, final Accumulator<V, A> accumulator) {
throw new UnsupportedOperationException("Accumulators are not supported.");
}
| 3.26 |
flink_JoinSpec_getJoinKeySize_rdh
|
/**
* Gets number of keys in join key.
*/
@JsonIgnore
public int getJoinKeySize() {
return leftKeys.length;
}
| 3.26 |
flink_HivePartitionUtils_getAllPartitions_rdh
|
/**
* Returns all HiveTablePartitions of a hive table, returns single HiveTablePartition if the
* hive table is not partitioned.
*/
public static List<HiveTablePartition> getAllPartitions(JobConf jobConf, String hiveVersion, ObjectPath tablePath, List<String> partitionColNames, List<Map<String, String>> remainingPartitions) {
List<HiveTablePartition> allHivePartitions = new ArrayList<>();
try (HiveMetastoreClientWrapper client = HiveMetastoreClientFactory.create(HiveConfUtils.create(jobConf), hiveVersion)) {
String dbName = tablePath.getDatabaseName();
String tableName = tablePath.getObjectName();
Table hiveTable = client.getTable(dbName, tableName);
Properties tableProps = HiveReflectionUtils.getTableMetadata(HiveShimLoader.loadHiveShim(hiveVersion), hiveTable);
if ((partitionColNames != null) && (partitionColNames.size() > 0)) {
List<Partition> partitions = new ArrayList<>();
if (remainingPartitions != null) {
List<String> partitionNames = getPartitionNames(remainingPartitions, partitionColNames, JobConfUtils.getDefaultPartitionName(jobConf));
partitions.addAll(client.getPartitionsByNames(dbName, tableName, partitionNames));
} else {
partitions.addAll(client.listPartitions(dbName, tableName, ((short) (-1))));
}
for (Partition partition : partitions) {
HiveTablePartition hiveTablePartition = toHiveTablePartition(partitionColNames, tableProps, partition);
allHivePartitions.add(hiveTablePartition);
}
} else {
allHivePartitions.add(new HiveTablePartition(hiveTable.getSd(), tableProps));
}
} catch (TException e) {
throw new FlinkHiveException("Failed to collect all partitions from hive metaStore", e);
}
return allHivePartitions;
}
| 3.26 |
flink_HivePartitionUtils_createPartitionSpec_rdh
|
/**
* Creates a {@link CatalogPartitionSpec} from a Hive partition name string. Example of Hive
* partition name string - "name=bob/year=2019". If the partition name for the given partition
 * column is equal to {@param defaultPartitionName}, the partition value in the returned {@link CatalogPartitionSpec} will be null.
*/
public static CatalogPartitionSpec createPartitionSpec(String hivePartitionName, String defaultPartitionName) {
String[] partKeyVals = hivePartitionName.split("/");
Map<String, String> spec = CollectionUtil.newHashMapWithExpectedSize(partKeyVals.length);
for (String keyVal : partKeyVals) {
String[] kv = keyVal.split("=");
String partitionValue = unescapePathName(kv[1]);
spec.put(unescapePathName(kv[0]), partitionValue.equals(defaultPartitionName) ? null : partitionValue);
}
return new CatalogPartitionSpec(spec);
}
| 3.26 |
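A dependency-free sketch of the same parsing rule (path unescaping omitted; the default-partition sentinel maps to null):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class PartitionSpecParser {
    // Parses "name=bob/year=2019" style Hive partition names into an ordered map,
    // mirroring the rule described in the javadoc above (not the Flink utility itself).
    static Map<String, String> parse(String hivePartitionName, String defaultPartitionName) {
        Map<String, String> spec = new LinkedHashMap<>();
        for (String keyVal : hivePartitionName.split("/")) {
            String[] kv = keyVal.split("=");
            spec.put(kv[0], kv[1].equals(defaultPartitionName) ? null : kv[1]);
        }
        return spec;
    }

    public static void main(String[] args) {
        System.out.println(parse("name=bob/year=2019", "__HIVE_DEFAULT_PARTITION__"));
        // {name=bob, year=2019}
    }
}
```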
flink_HivePartitionUtils_parsePartitionValues_rdh
|
/**
* Parse partition string specs into object values.
*/
public static Map<String, Object> parsePartitionValues(Map<String, String> partitionSpecs, String[] fieldNames, DataType[] fieldTypes, String defaultPartitionName, HiveShim shim) {
checkArgument(fieldNames.length == fieldTypes.length);
List<String> fieldNameList = Arrays.asList(fieldNames);
Map<String, Object> partitionColValues = new HashMap<>();
for (Map.Entry<String, String> spec : partitionSpecs.entrySet()) {
String partitionKey = spec.getKey();
String valueString = spec.getValue();
int index = fieldNameList.indexOf(partitionKey);
if (index < 0) {
throw new IllegalStateException(String.format("Partition spec %s and column names %s doesn't match", partitionSpecs, fieldNameList));
}
LogicalType partitionType = fieldTypes[index].getLogicalType();
final Object value = restorePartitionValueFromType(shim, valueString, partitionType, defaultPartitionName);
partitionColValues.put(partitionKey, value);
}
return partitionColValues;
}
| 3.26 |
flink_Tuple1_toString_rdh
|
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0), where the individual fields
* are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ("(" + StringUtils.arrayAwareToString(this.f0)) + ")";
}
| 3.26 |
flink_Tuple1_setFields_rdh
|
/**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
 */
public void setFields(T0 f0) {
this.f0 = f0;
}
| 3.26 |
flink_Tuple1_equals_rdh
|
/**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple1)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple1 tuple = ((Tuple1) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
return true;
}
| 3.26 |
flink_HiveTableSink_toStagingDir_rdh
|
// get a staging dir
private String toStagingDir(String stagingParentDir, Configuration conf) throws IOException {
if (!stagingParentDir.endsWith(Path.SEPARATOR)) {
stagingParentDir += Path.SEPARATOR;
}
// TODO: may append something more meaningful than a timestamp, like query ID
stagingParentDir += ".staging_" + System.currentTimeMillis();
Path path = new Path(stagingParentDir);
FileSystem fs = path.getFileSystem(conf);
Preconditions.checkState(fs.exists(path) || fs.mkdirs(path), "Failed to create staging dir " + path);
fs.deleteOnExit(path);
return stagingParentDir;
}
| 3.26 |
flink_JsonRowSchemaConverter_convert_rdh
|
/**
* Converts a JSON schema into Flink's type information. Throws an exception if the schema
 * cannot be converted because of loss of precision or a too flexible schema.
*
* <p>The converter can resolve simple schema references to solve those cases where entities are
* defined at the beginning and then used throughout a document.
*/
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> convert(String jsonSchema) {
Preconditions.checkNotNull(jsonSchema, "JSON schema");
final ObjectMapper mapper = JacksonMapperFactory.createObjectMapper();
mapper.getFactory().enable(Feature.ALLOW_COMMENTS).enable(Feature.ALLOW_UNQUOTED_FIELD_NAMES).enable(Feature.ALLOW_SINGLE_QUOTES);
final JsonNode v1;
try {
v1 = mapper.readTree(jsonSchema);
} catch (IOException e) {
throw new IllegalArgumentException("Invalid JSON schema.", e);
}
return ((TypeInformation<T>) (convertType("<root>", v1, v1)));
}
| 3.26 |
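A short usage sketch, assuming this is the `JsonRowSchemaConverter` shipped with the legacy `flink-json` format (package name is an assumption) and that the schema resolves to a `Row` type:

```java
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.formats.json.JsonRowSchemaConverter;
import org.apache.flink.types.Row;

public class SchemaConversionExample {
    public static void main(String[] args) {
        String jsonSchema =
                "{ \"type\": \"object\", \"properties\": {"
                        + " \"id\": { \"type\": \"integer\" },"
                        + " \"name\": { \"type\": \"string\" } } }";

        // Resolves the JSON schema into Flink type information (a Row type with fields id and name).
        TypeInformation<Row> rowType = JsonRowSchemaConverter.convert(jsonSchema);
        System.out.println(rowType);
    }
}
```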
flink_MaterializedCollectStreamResult_processRecord_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected void processRecord(RowData row) {
synchronized(resultLock) {
boolean isInsertOp = (row.getRowKind() == RowKind.INSERT) || (row.getRowKind() == RowKind.UPDATE_AFTER);
// Always set the RowKind to INSERT, so that we can compare rows correctly (RowKind will
// be ignored),
row.setRowKind(RowKind.INSERT);
// insert
if (isInsertOp) {
processInsert(row);
} else // delete
{
processDelete(row);
}
}
}
| 3.26 |
flink_MaterializedCollectStreamResult_processInsert_rdh
|
// --------------------------------------------------------------------------------------------
private void processInsert(RowData row) {
// limit the materialized table
if ((materializedTable.size() - validRowPosition) >= maxRowCount) {
cleanUp();
}
materializedTable.add(row);
f0.put(row, materializedTable.size() - 1);
}
| 3.26 |
flink_CustomHeadersDecorator_getCustomHeaders_rdh
|
/**
* Returns the custom headers added to the message.
*
* @return The custom headers as a collection of {@link HttpHeader}.
*/
@Override
public Collection<HttpHeader> getCustomHeaders() {
return customHeaders;
}
| 3.26 |
flink_CustomHeadersDecorator_addCustomHeader_rdh
|
/**
* Adds a custom header to the message. Initializes the custom headers collection if it hasn't
* been initialized yet.
*
* @param httpHeader
* The header to add.
*/
public void addCustomHeader(HttpHeader httpHeader) {
if (customHeaders == null) {
customHeaders = new ArrayList<>();
}
customHeaders.add(httpHeader);
}
| 3.26 |