name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_SynchronousChainedCombineDriver_close_rdh | // --------------------------------------------------------------------------------------------
@Override
public void close() {
try {
sortAndCombine();
} catch (Exception e) {
throw new ExceptionInChainedStubException(this.taskName, e);
}
this.outputCollector.close();
dispose(false);
} | 3.26 |
flink_SynchronousChainedCombineDriver_getStub_rdh | // --------------------------------------------------------------------------------------------
public Function getStub() {
return this.combiner;
} | 3.26 |
flink_AbstractExternalTwoInputPythonFunctionOperator_getLeftInputType_rdh | // ----------------------------------------------------------------------
// Getters
// ----------------------------------------------------------------------
protected TypeInformation<IN1> getLeftInputType() {
return inputTypeInfo1;
} | 3.26 |
flink_AdaptiveBatchScheduler_tryGetConsumedResultsInfo_rdh | /**
* Get information of consumable results.
*/
private Optional<List<BlockingResultInfo>> tryGetConsumedResultsInfo(final ExecutionJobVertex jobVertex) {
List<BlockingResultInfo> consumableResultInfo = new ArrayList<>();
DefaultLogicalVertex logicalVertex = logicalTopology.getVertex(jobVertex.getJobVertexId());
Iterable<DefaultLogicalResult> consumedResults = logicalVertex.getConsumedResults();
for (DefaultLogicalResult consumedResult : consumedResults) {
final ExecutionJobVertex producerVertex =
getExecutionJobVertex(consumedResult.getProducer().getId());
if (producerVertex.isFinished()) {
BlockingResultInfo resultInfo = checkNotNull(blockingResultInfos.get(consumedResult.getId()));
consumableResultInfo.add(resultInfo);
} else {
// not all inputs consumable, return Optional.empty()
return Optional.empty();
}
}
return Optional.of(consumableResultInfo);
} | 3.26 |
flink_AdaptiveBatchScheduler_computeVertexParallelismStoreForDynamicGraph_rdh | /**
* Compute the {@link VertexParallelismStore} for all given vertices in a dynamic graph, which
* will set defaults and ensure that the returned store contains valid parallelisms, with the
* configured default max parallelism.
*
* @param vertices
* the vertices to compute parallelism for
* @param defaultMaxParallelism
* the global default max parallelism
* @return the computed parallelism store
*/
@VisibleForTesting
public static VertexParallelismStore computeVertexParallelismStoreForDynamicGraph(Iterable<JobVertex> vertices, int defaultMaxParallelism) {
// for dynamic graph, there is no need to normalize vertex parallelism. if the max
// parallelism is not configured and the parallelism is a positive value, max
// parallelism can be computed against the parallelism, otherwise it needs to use the
// global default max parallelism.
return computeVertexParallelismStore(vertices, v -> {
if (v.getParallelism() > 0) {
return getDefaultMaxParallelism(v);
} else {
return defaultMaxParallelism;
}
},
Function.identity());
} | 3.26 |
flink_ExistingSavepoint_readKeyedState_rdh | /**
* Read keyed state from an operator in a {@code Savepoint}.
*
* @param uid
* The uid of the operator.
* @param function
* The {@link KeyedStateReaderFunction} that is called for each key in state.
* @param keyTypeInfo
* The type information of the key in state.
* @param outTypeInfo
* The type information of the output of the transform reader function.
* @param <K>
* The type of the key in state.
* @param <OUT>
* The output type of the transform function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException
* If the savepoint does not contain operator state with the given uid.
*/
public <K, OUT> DataSource<OUT> readKeyedState(String uid, KeyedStateReaderFunction<K, OUT> function, TypeInformation<K> keyTypeInfo, TypeInformation<OUT> outTypeInfo) throws IOException {
OperatorState operatorState = metadata.getOperatorState(uid);
KeyedStateInputFormat<K, VoidNamespace, OUT> inputFormat = new KeyedStateInputFormat<>(operatorState, stateBackend, env.getConfiguration(), new KeyedStateReaderOperator<>(function, keyTypeInfo));
return env.createInput(inputFormat, outTypeInfo);
} | 3.26 |
flink_ExistingSavepoint_readUnionState_rdh | /**
* Read operator {@code UnionState} from a {@code Savepoint}.
*
* @param uid
* The uid of the operator.
* @param name
* The (unique) name for the state.
* @param typeInfo
* The type of the elements in the state.
* @param <T>
* The type of the values that are in the union state.
* @return A {@code DataSet} representing the elements in state.
* @throws IOException
* If the savepoint path is invalid or the uid does not exist.
*/
public <T> DataSource<T> readUnionState(String uid, String name, TypeInformation<T> typeInfo) throws IOException {
OperatorState operatorState = metadata.getOperatorState(uid);
ListStateDescriptor<T> descriptor = new ListStateDescriptor<>(name, typeInfo);
UnionStateInputFormat<T> v8 = new UnionStateInputFormat<>(operatorState, env.getConfiguration(), stateBackend, descriptor);
return env.createInput(v8, typeInfo);
}
/**
* Read operator {@code UnionState} from a {@code Savepoint} when a custom serializer was used;
* e.g., a different serializer than the one returned by {@code TypeInformation#createSerializer}.
*
* @param uid
* The uid of the operator.
* @param name
* The (unique) name for the state.
* @param typeInfo
* The type of the elements in the state.
* @param serializer
* The serializer used to write the elements into state.
* @param <T>
* The type of the values that are in the union state.
* @return A {@code DataSet} | 3.26 |
flink_ExistingSavepoint_m0_rdh | /**
* Read window state from an operator in a {@code Savepoint}. This method supports reading from
* any type of window.
*
* @param assigner
* The {@link WindowAssigner} used to write out the operator.
* @return A {@link WindowReader}.
*/
public <W extends Window> WindowReader<W> m0(WindowAssigner<?, W> assigner) {
Preconditions.checkNotNull(assigner, "The window assigner must not be null");
TypeSerializer<W> windowSerializer = assigner.getWindowSerializer(env.getConfig());
return m0(windowSerializer);
} | 3.26 |
flink_ExistingSavepoint_window_rdh | /**
* Read window state from an operator in a {@code Savepoint}. This method supports reading from
* any type of window.
*
* @param windowSerializer
* The serializer used for the window type.
* @return A {@link WindowReader}.
*/
public <W extends Window> WindowReader<W> window(TypeSerializer<W> windowSerializer) {
Preconditions.checkNotNull(windowSerializer, "The window serializer must not be null");
return new WindowReader<>(env, metadata, stateBackend, windowSerializer);
} | 3.26 |
flink_ExistingSavepoint_readListState_rdh | /**
* Read operator {@code ListState} from a {@code Savepoint} when a custom serializer was used;
* e.g., a different serializer than the one returned by {@code TypeInformation#createSerializer}.
*
* @param uid
* The uid of the operator.
* @param name
* The (unique) name for the state.
* @param typeInfo
* The type of the elements in the state.
* @param serializer
* The serializer used to write the elements into state.
* @param <T>
* The type of the values that are in the list state.
* @return A {@code DataSet} representing the elements in state.
* @throws IOException
* If the savepoint path is invalid or the uid does not exist.
*/
public <T> DataSource<T> readListState(String uid, String name, TypeInformation<T> typeInfo, TypeSerializer<T> serializer) throws IOException {
OperatorState operatorState = metadata.getOperatorState(uid);
ListStateDescriptor<T> descriptor = new ListStateDescriptor<>(name, serializer);
ListStateInputFormat<T> inputFormat = new ListStateInputFormat<>(operatorState, env.getConfiguration(), stateBackend, descriptor);
return env.createInput(inputFormat, typeInfo);
} | 3.26 |
flink_ExistingSavepoint_readBroadcastState_rdh | /**
* Read operator {@code BroadcastState} from a {@code Savepoint} when a custom serializer was
* used; e.g., a different serializer than the one returned by {@code TypeInformation#createSerializer}.
*
* @param uid
* The uid of the operator.
* @param name
* The (unique) name for the state.
* @param keyTypeInfo
* The type information for the keys in the state.
* @param valueTypeInfo
* The type information for the values in the state.
* @param keySerializer
* The type serializer used to write keys into the state.
* @param valueSerializer
* The type serializer used to write values into the state.
* @param <K>
* The type of keys in state.
* @param <V>
* The type of values in state.
* @return A {@code DataSet} of key-value pairs from state.
* @throws IOException
* If the savepoint path is invalid or the uid does not exist.
*/
public <K, V> DataSource<Tuple2<K, V>> readBroadcastState(String uid, String name, TypeInformation<K> keyTypeInfo, TypeInformation<V> valueTypeInfo, TypeSerializer<K> keySerializer, TypeSerializer<V> valueSerializer) throws IOException {
OperatorState operatorState = metadata.getOperatorState(uid);
MapStateDescriptor<K, V> descriptor = new MapStateDescriptor<>(name, keySerializer, valueSerializer);
BroadcastStateInputFormat<K, V> inputFormat = new BroadcastStateInputFormat<>(operatorState, env.getConfiguration(), stateBackend, descriptor);
return env.createInput(inputFormat, new TupleTypeInfo<>(keyTypeInfo, valueTypeInfo));
} | 3.26 |
flink_SolutionSetPlanNode_getSolutionSetNode_rdh | // --------------------------------------------------------------------------------------------
public SolutionSetNode getSolutionSetNode() {
return ((SolutionSetNode) (this.template));
} | 3.26 |
flink_SolutionSetPlanNode_accept_rdh | // --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<PlanNode> visitor) {
if (visitor.preVisit(this)) {
visitor.postVisit(this);
}
} | 3.26 |
flink_DefaultJobLeaderService_containsJob_rdh | // -----------------------------------------------------------
// Testing methods
// -----------------------------------------------------------
/**
* Check whether the service monitors the given job.
*
* @param jobId
* identifying the job
* @return True if the given job is monitored; otherwise false
*/
@Override
@VisibleForTesting
public boolean containsJob(JobID jobId) {
Preconditions.checkState(DefaultJobLeaderService.State.STARTED == state, "The service is currently not running.");
return jobLeaderServices.containsKey(jobId);
} | 3.26 |
flink_DefaultJobLeaderService_start_rdh | // -------------------------------------------------------------------------------
// Methods
// -------------------------------------------------------------------------------
@Override
public void start(final String initialOwnerAddress, final RpcService initialRpcService, final HighAvailabilityServices initialHighAvailabilityServices, final JobLeaderListener initialJobLeaderListener) {
if (DefaultJobLeaderService.State.CREATED != state) {
throw new IllegalStateException("The service has already been started.");
} else {
f0.info("Start job leader service.");
this.ownerAddress = Preconditions.checkNotNull(initialOwnerAddress);
this.rpcService = Preconditions.checkNotNull(initialRpcService);
this.highAvailabilityServices = Preconditions.checkNotNull(initialHighAvailabilityServices);
this.jobLeaderListener = Preconditions.checkNotNull(initialJobLeaderListener);
state = DefaultJobLeaderService.State.STARTED;
}
} | 3.26 |
flink_BigDecSerializer_readBigDecimal_rdh | // --------------------------------------------------------------------------------------------
// Static Helpers for BigDecimal Serialization
// --------------------------------------------------------------------------------------------
public static BigDecimal readBigDecimal(DataInputView source) throws IOException {
final BigInteger unscaledValue = BigIntSerializer.readBigInteger(source);
if (unscaledValue == null) {
return null;
}
final int scale = source.readInt();
// fast-path for 0, 1, 10
if (scale == 0) {
if (unscaledValue == BigInteger.ZERO) {
return BigDecimal.ZERO;
} else if (unscaledValue == BigInteger.ONE) {
return BigDecimal.ONE;
} else if (unscaledValue == BigInteger.TEN) {
return BigDecimal.TEN;
}
}
// default
return new BigDecimal(unscaledValue, scale);
} | 3.26 |
flink_DataViewUtils_createStateId_rdh | // --------------------------------------------------------------------------------------------
private static String createStateId(int fieldIndex, String fieldName) {
return (("agg" + fieldIndex) + "$") + fieldName;
} | 3.26 |
flink_DataViewUtils_createDistinctViewDataType_rdh | /**
* Creates a special {@link DataType} for DISTINCT aggregates.
*/
public static DataType createDistinctViewDataType(DataType keyDataType, int filterArgs, int filterArgsLimit) {
final DataType valueDataType;
if (filterArgs <= filterArgsLimit) {
valueDataType = DataTypes.BIGINT().notNull();
} else {
valueDataType = DataTypes.ARRAY(DataTypes.BIGINT().notNull()).bridgedTo(long[].class);
}
return MapView.newMapViewDataType(keyDataType, valueDataType);
}
/**
* Creates a special {@link DistinctViewSpec} | 3.26 |
flink_DataViewUtils_extractDataViews_rdh | /**
* Searches for data views in the data type of an accumulator and extracts them.
*/
public static List<DataViewSpec> extractDataViews(int aggIndex, DataType accumulatorDataType) {
final LogicalType accumulatorType = accumulatorDataType.getLogicalType();
if ((!accumulatorType.is(ROW)) && (!accumulatorType.is(STRUCTURED_TYPE))) {
return Collections.emptyList();
}
final List<String> v1 = getFieldNames(accumulatorType);
final List<DataType> fieldDataTypes = accumulatorDataType.getChildren();
final List<DataViewSpec> specs = new ArrayList<>();
for (int fieldIndex = 0; fieldIndex < fieldDataTypes.size(); fieldIndex++) {
final DataType fieldDataType = fieldDataTypes.get(fieldIndex);
final LogicalType fieldType = fieldDataType.getLogicalType();
if (isDataView(fieldType, ListView.class)) {
specs.add(new ListViewSpec(createStateId(aggIndex, v1.get(fieldIndex)), fieldIndex, fieldDataType.getChildren().get(0)));
} else if (isDataView(fieldType, MapView.class)) {
specs.add(new MapViewSpec(createStateId(aggIndex, v1.get(fieldIndex)), fieldIndex, fieldDataType.getChildren().get(0),
false));
}
if (fieldType.getChildren().stream().anyMatch(c -> hasNested(c, t -> isDataView(t, DataView.class)))) {
throw new TableException("Data views are only supported in the first level of a composite accumulator type.");
}
}
return specs;
} | 3.26 |
flink_DataViewUtils_adjustDataViews_rdh | /**
* Modifies the data type of an accumulator regarding data views.
*
* <p>For performance reasons, each data view is wrapped into a RAW type which gives it {@link LazyBinaryFormat} semantics and avoids multiple deserialization steps during access.
* Furthermore, a data view will not be serialized if a state backend is used (the serializer of
* the RAW type will be a {@link NullSerializer} in this case).
*/
public static DataType adjustDataViews(DataType accumulatorDataType, boolean hasStateBackedDataViews) {
final Function<DataType, TypeSerializer<?>> serializer;
if (hasStateBackedDataViews) {
serializer = dataType -> NullSerializer.INSTANCE;
} else {
serializer = ExternalSerializer::of;
}
return DataTypeUtils.transform(accumulatorDataType, new DataViewsTransformation(serializer));
} | 3.26 |
flink_TaskDeploymentDescriptorFactory_shouldOffload_rdh | /**
* Determine whether shuffle descriptors should be offloaded to blob server.
*
* @param shuffleDescriptorsToSerialize
* shuffle descriptors to serialize
* @param numConsumers
* how many consumers this serialized shuffle descriptor should be sent
* @return whether shuffle descriptors should be offloaded to blob server
*/
private boolean shouldOffload(ShuffleDescriptorAndIndex[] shuffleDescriptorsToSerialize, int numConsumers) {
return (shuffleDescriptorsToSerialize.length * numConsumers) >= offloadShuffleDescriptorsThreshold;
} | 3.26 |
flink_LatencyTrackingStateFactory_m0_rdh | /**
* Create latency tracking state if enabled.
*/
public static <K, N, V, S extends State> InternalKvState<K, N, ?> m0(InternalKvState<K, N, ?> kvState, StateDescriptor<S, V> stateDescriptor, LatencyTrackingStateConfig latencyTrackingStateConfig)
throws Exception {
if (latencyTrackingStateConfig.isEnabled()) {
return new LatencyTrackingStateFactory<>(kvState, stateDescriptor, latencyTrackingStateConfig).createState();
}
return kvState;
} | 3.26 |
flink_JavaRecordBuilderFactory_setField_rdh | /**
* Set record field by index. If parameter index mapping is provided, the index is mapped,
* otherwise it is used as is.
*
* @param i
* index of field to be set
* @param value
* field value
*/
void setField(int i, Object value) {
if (paramIndexMapping != null) {
args[paramIndexMapping[i]] = value;
} else {
args[i] = value;
}
} | 3.26 |
flink_SortPartitionOperator_m0_rdh | /**
* Returns whether a key selector is used or not.
*/
public boolean m0() {
return useKeySelector;
} | 3.26 |
flink_SortPartitionOperator_translateToDataFlow_rdh | // --------------------------------------------------------------------------------------------
// Translation
// --------------------------------------------------------------------------------------------
protected SingleInputOperator<?, T, ?> translateToDataFlow(Operator<T> input) {
String name = "Sort at " + sortLocationName;
if (useKeySelector) {
return translateToDataFlowWithKeyExtractor(input, ((Keys.SelectorFunctionKeys<T, ?>) (keys.get(0))), orders.get(0), name);
}
// flatten sort key positions
List<Integer> allKeyPositions = new ArrayList<>();
List<Order> allOrders = new ArrayList<>();
for (int i = 0, length = keys.size(); i < length; i++) {
int[] sortKeyPositions = keys.get(i).computeLogicalKeyPositions();
Order order = orders.get(i);
for (int sortKeyPosition : sortKeyPositions) {
allKeyPositions.add(sortKeyPosition);
allOrders.add(order);
}
}
Ordering partitionOrdering = new Ordering();
for (int i = 0, length = allKeyPositions.size(); i < length; i++) {
partitionOrdering.appendOrdering(allKeyPositions.get(i), null, allOrders.get(i));
}
// distinguish between partition types
UnaryOperatorInformation<T, T> operatorInfo = new UnaryOperatorInformation<>(getType(), getType());
SortPartitionOperatorBase<T> noop = new SortPartitionOperatorBase<>(operatorInfo, partitionOrdering, name);
noop.setInput(input);
if (this.getParallelism() < 0) {
// use parallelism of input if not explicitly specified
noop.setParallelism(input.getParallelism());
} else {
// use explicitly specified parallelism
noop.setParallelism(this.getParallelism());
}
return noop;
} | 3.26 |
flink_SortPartitionOperator_sortPartition_rdh | /**
* Appends an additional sort order with the specified field in the specified order to the local
* partition sorting of the DataSet.
*
* @param field
* The field expression referring to the field of the additional sort order of the
* local partition sorting.
* @param order
* The order of the additional sort order of the local partition sorting.
* @return The DataSet with sorted local partitions.
*/
public SortPartitionOperator<T> sortPartition(String field, Order order) {
if (useKeySelector) {
throw new InvalidProgramException("Expression keys cannot be appended after a KeySelector");
}
ensureSortableKey(field);
keys.add(new Keys.ExpressionKeys<>(field, getType()));
orders.add(order);
return this;
} | 3.26 |
flink_BufferCompressor_compressToIntermediateBuffer_rdh | /**
* Compresses the given {@link Buffer} using {@link BlockCompressor}. The compressed data will
* be stored in the intermediate buffer of this {@link BufferCompressor} and returned to the
* caller. The caller must guarantee that the returned {@link Buffer} has been freed when
* calling the method next time.
*
* <p>Note that the compression will always start from offset 0 to the size of the input {@link Buffer}.
*/
public Buffer compressToIntermediateBuffer(Buffer buffer) {
int compressedLen;
if ((compressedLen = compress(buffer)) == 0) {
return buffer;
}
internalBuffer.setCompressed(true);
internalBuffer.setSize(compressedLen);
return internalBuffer.retainBuffer();
} | 3.26 |
flink_BufferCompressor_compress_rdh | /**
* Compresses the given {@link Buffer} into the intermediate buffer and returns the compressed
* data size.
*/
private int compress(Buffer buffer) {
checkArgument(buffer != null, "The input buffer must not be null.");
checkArgument(buffer.isBuffer(), "Event can not be compressed.");
checkArgument(!buffer.isCompressed(), "Buffer already compressed.");
checkArgument(buffer.getReaderIndex() == 0, "Reader index of the input buffer must be 0.");
checkArgument(buffer.readableBytes() > 0, "No data to be compressed.");
checkState(internalBuffer.refCnt() == 1, "Illegal reference count, buffer need to be released.");
try {
int compressedLen;
int length = buffer.getSize();
MemorySegment memorySegment = buffer.getMemorySegment();
// If buffer is on-heap, manipulate the underlying array directly. There are two main
// reasons why NIO buffer is not directly used here: One is that some compression
// libraries will use the underlying array for heap buffer, but our input buffer may be
// a read-only ByteBuffer, and it is illegal to access internal array. Another reason
// is that for the on-heap buffer, directly operating the underlying array can reduce
// additional overhead compared to generating a NIO buffer.
if (!memorySegment.isOffHeap()) {
compressedLen = blockCompressor.compress(memorySegment.getArray(), buffer.getMemorySegmentOffset(), length, internalBufferArray, 0);
} else {
// compress the given buffer into the internal heap buffer
compressedLen = blockCompressor.compress(buffer.getNioBuffer(0, length), 0, length, internalBuffer.getNioBuffer(0, internalBuffer.capacity()), 0);
}
return compressedLen < length ? compressedLen : 0;
} catch (Throwable throwable) {
// return the original buffer if failed to compress
return 0;
}
} | 3.26 |
flink_BufferCompressor_compressToOriginalBuffer_rdh | /**
* The difference between this method and {@link #compressToIntermediateBuffer(Buffer)} is that
* this method will copy the compressed data back to the input {@link Buffer} starting from
* offset 0.
*
* <p>The caller must guarantee that the input {@link Buffer} is writable.
*/
public Buffer compressToOriginalBuffer(Buffer buffer) {
int compressedLen;
if ((compressedLen = compress(buffer)) == 0) {
return buffer;
}
// copy the compressed data back
int memorySegmentOffset = buffer.getMemorySegmentOffset();
MemorySegment segment = buffer.getMemorySegment();
segment.put(memorySegmentOffset, internalBufferArray, 0, compressedLen);
return new ReadOnlySlicedNetworkBuffer(buffer.asByteBuf(), 0, compressedLen, memorySegmentOffset, true);
} | 3.26 |
flink_PhysicalFile_deleteIfNecessary_rdh | /**
* Delete this physical file if there is no reference count from logical files (all discarded),
* and this physical file is closed (no further writing on it).
*
* @throws IOException
* if anything goes wrong with file system.
*/
public void deleteIfNecessary() throws IOException {
synchronized(this) {
if (((!isOpen()) && (!deleted)) && (this.logicalFileRefCount.get() <= 0)) {
if (outputStream != null) {
try {
outputStream.close();
} catch (IOException e) {
LOG.warn("Fail to close output stream when deleting file: {}", filePath);
}
}
if (deleter != null) {
deleter.perform(filePath);
}
this.deleted = true;
}
}
} | 3.26 |
flink_PhysicalFile_isOpen_rdh | /**
*
* @return whether this physical file is still open for writing.
*/
public boolean isOpen() {
return (!closed) && (outputStream != null);
} | 3.26 |
flink_PhysicalFile_innerClose_rdh | /**
* Close the physical file, stop reusing.
*
* @throws IOException
* if anything goes wrong with file system.
*/
private void innerClose() throws IOException {
closed = true;
if (outputStream != null) {
outputStream.close();
outputStream = null;
}
} | 3.26 |
flink_DependencyTree_getKey_rdh | /**
* We don't use the {@link Dependency} as a key because we don't want lookups to be dependent on
* scope or the optional flag.
*
* @param dependency
* the dependency to derive the key from
* @return the lookup key for the given dependency
*/
@VisibleForTesting
static String getKey(Dependency dependency) {
return (((((dependency.getGroupId() + ":") + dependency.getArtifactId()) + ":") + dependency.getVersion()) + ":") + dependency.getClassifier().orElse("(no-classifier)");
} | 3.26 |
flink_StreamSource_isCanceledOrStopped_rdh | /**
* Checks whether the source has been canceled or stopped.
*
* @return True, if the source is canceled or stopped, false if not.
*/
protected boolean isCanceledOrStopped() {
return canceledOrStopped;
} | 3.26 |
flink_StreamSource_markCanceledOrStopped_rdh | /**
* Marks this source as canceled or stopped.
*
* <p>This indicates that any exit of the {@link #run(Object, Output, OperatorChain)} method
* cannot be interpreted as the result of a finite source.
*/
protected void markCanceledOrStopped() {
this.canceledOrStopped = true;
} | 3.26 |
flink_LocatableInputSplit_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return this.f0;
} | 3.26 |
flink_LocatableInputSplit_getHostnames_rdh | /**
* Returns the names of the hosts storing the data this input split refers to
*
* @return the names of the hosts storing the data this input split refers to
*/
public String[] getHostnames() {
return this.hostnames;
} | 3.26 |
flink_BinaryRawValueData_toObject_rdh | // ------------------------------------------------------------------------------------------
// Public Interfaces
// ------------------------------------------------------------------------------------------
@Override
public T toObject(TypeSerializer<T> serializer) {
if (javaObject == null) {
try {
javaObject = InstantiationUtil.deserializeFromByteArray(serializer, m0(serializer));
} catch (IOException e) {
throw new FlinkRuntimeException(e);
}
}
return javaObject;
} | 3.26 |
flink_BinaryRawValueData_materialize_rdh | // ------------------------------------------------------------------------------------
// Internal methods
// ------------------------------------------------------------------------------------
@Override
protected BinarySection materialize(TypeSerializer<T> serializer) {
try {
byte[] bytes = InstantiationUtil.serializeToByteArray(serializer, javaObject);
return new BinarySection(new MemorySegment[]{ MemorySegmentFactory.wrap(bytes) }, 0, bytes.length);
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
flink_BinaryRawValueData_fromBytes_rdh | /**
* Creates a {@link BinaryRawValueData} instance from the given bytes with offset and number of
* bytes.
*/
public static <T> BinaryRawValueData<T> fromBytes(byte[] bytes, int offset, int numBytes) {
return new BinaryRawValueData<>(new MemorySegment[]{ MemorySegmentFactory.wrap(bytes) }, offset, numBytes);
} | 3.26 |
flink_BinaryRawValueData_fromObject_rdh | // ------------------------------------------------------------------------------------------
// Construction Utilities
// ------------------------------------------------------------------------------------------
/**
* Creates a {@link BinaryRawValueData} instance from the given Java object.
*/
public static <T> BinaryRawValueData<T> fromObject(T javaObject) {
if (javaObject == null) {
return null;
}
return new BinaryRawValueData<>(javaObject);
} | 3.26 |
flink_OneInputStateTransformation_transform_rdh | /**
* Method for passing user defined operators along with the type information that will transform
* the OperatorTransformation.
*
* <p><b>IMPORTANT:</b> Any output from this operator will be discarded.
*
* @param factory
* A factory returning transformation logic type of the return stream
* @return An {@link StateBootstrapTransformation} that can be added to a {@link Savepoint}.
*/
public StateBootstrapTransformation<T> transform(SavepointWriterOperatorFactory factory) {
return new StateBootstrapTransformation<>(stream, operatorMaxParallelism, factory);
} | 3.26 |
flink_OneInputStateTransformation_keyBy_rdh | /**
* Partitions the operator state of a {@link OperatorTransformation} using field expressions. A
* field expression is either the name of a public field or a getter method with parentheses of
* the {@code OperatorTransformation}'s underlying type. A dot can be used to drill down into
* objects, as in {@code "field1.getInnerField2()"}.
*
* @param fields
* One or more field expressions on which the state of the {@link OperatorTransformation} operators will be partitioned.
* @return The {@code OperatorTransformation} with partitioned state (i.e. KeyedStream)
*/
| 3.26 |
flink_OneInputStateTransformation_m0_rdh | /**
* It creates a new {@link KeyedOperatorTransformation} that uses the provided key for
* partitioning its operator states.
*
* @param keySelector
* The KeySelector to be used for extracting the key for partitioning.
* @return The {@code BootstrapTransformation} with partitioned state.
*/
public <K> KeyedStateTransformation<K, T> m0(KeySelector<T, K> keySelector) {
TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keySelector, stream.getType());
return new KeyedStateTransformation<>(stream, operatorMaxParallelism, keySelector, keyType);
} | 3.26 |
flink_OneInputStateTransformation_setMaxParallelism_rdh | /**
* Sets the maximum parallelism of this operator.
*
* <p>The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
* number of key groups used for partitioned state.
*
* @param maxParallelism
* Maximum parallelism
* @return The operator with set maximum parallelism
*/
@PublicEvolving
public OneInputStateTransformation<T> setMaxParallelism(int maxParallelism) {
this.operatorMaxParallelism = OptionalInt.of(maxParallelism);
return this;
} | 3.26 |
flink_PythonTableUtils_createTableFromElement_rdh | /**
* Create a table from {@link PythonDynamicTableSource} that reads data from the input file with
* the specified {@link DataType}.
*
* @param tEnv
* The TableEnvironment to create table.
* @param filePath
* the file path of the input data.
* @param schema
* The python data type.
* @param batched
* Whether to read data in a batch
* @return Table with InputFormat.
*/
public static Table createTableFromElement(TableEnvironment tEnv, String filePath, DataType schema, boolean batched) {
TableDescriptor.Builder builder = TableDescriptor.forConnector(PythonDynamicTableFactory.IDENTIFIER).option(PythonDynamicTableOptions.INPUT_FILE_PATH, filePath).option(PythonDynamicTableOptions.BATCH_MODE, batched).schema(Schema.newBuilder().fromRowDataType(schema).build());
return tEnv.from(builder.build());
} | 3.26 |
flink_CastRule_canFail_rdh | /**
* Returns true if the {@link CastExecutor} can fail at runtime.
*/
default boolean canFail(LogicalType inputLogicalType, LogicalType targetLogicalType) {
return false;
} | 3.26 |
flink_CastRule_create_rdh | /**
* Create a casting context.
*/
static Context create(boolean isPrinting, boolean legacyBehaviour, ZoneId zoneId,
ClassLoader classLoader) {
return new Context() {
@Override
public boolean isPrinting() {
return isPrinting;
}
@Override
public boolean legacyBehaviour() {
return legacyBehaviour;
}
@Override
public ZoneId getSessionZoneId() {
return zoneId;
}
@Override
public ClassLoader getClassLoader() {
return classLoader;
}
};
} | 3.26 |
flink_ProcessPythonEnvironmentManager_getBootLog_rdh | /**
* Returns the boot log of the Python Environment.
*/
public String getBootLog() throws Exception {
File bootLogFile = new File((resource.baseDirectory + File.separator) + "flink-python-udf-boot.log");
String v5 = "Failed to create stage bundle factory!";
if (bootLogFile.exists()) {
byte[] output = Files.readAllBytes(bootLogFile.toPath());
v5 += String.format(" %s", new String(output, Charset.defaultCharset()));
}
return v5;
} | 3.26 |
flink_ProcessPythonEnvironmentManager_createRetrievalToken_rdh | /**
* Returns an empty RetrievalToken because no files will be transmitted via ArtifactService in
* process mode.
*
* @return The path of empty RetrievalToken.
*/
public String createRetrievalToken() throws IOException {
File retrievalToken = new File(resource.baseDirectory, ("retrieval_token_" + UUID.randomUUID().toString()) + ".json");
if (retrievalToken.createNewFile()) {
final DataOutputStream dos = new DataOutputStream(new FileOutputStream(retrievalToken));
dos.writeBytes("{\"manifest\": {}}");
dos.flush();
dos.close();
return retrievalToken.getAbsolutePath();
} else {
throw new IOException("Could not create the RetrievalToken file: " + retrievalToken.getAbsolutePath());
}
} | 3.26 |
flink_CoFeedbackTransformation_addFeedbackEdge_rdh | /**
* Adds a feedback edge. The parallelism of the {@code Transformation} must match the
* parallelism of the input {@code Transformation} of the upstream {@code Transformation}.
*
* @param transform
* The new feedback {@code Transformation}.
*/
public void addFeedbackEdge(Transformation<F> transform) {
if (transform.getParallelism() != this.getParallelism()) {
throw new UnsupportedOperationException(((("Parallelism of the feedback stream must match the parallelism of the original" + " stream. Parallelism of original stream: ") + this.getParallelism()) + "; parallelism of feedback stream: ") + transform.getParallelism());
}
f0.add(transform);
} | 3.26 |
flink_CoFeedbackTransformation_getWaitTime_rdh | /**
* Returns the wait time. This is the amount of time that the feedback operator keeps listening
* for feedback elements. Once the time expires the operation will close and will not receive
* further elements.
*/
public Long getWaitTime() {
return waitTime;
} | 3.26 |
flink_CoFeedbackTransformation_getFeedbackEdges_rdh | /**
* Returns the list of feedback {@code Transformations}.
*/
public List<Transformation<F>> getFeedbackEdges() {
return f0;
} | 3.26 |
flink_HiveInspectors_toInspectors_rdh | /**
* Get an array of ObjectInspector from the give array of args and their types.
*/
public static ObjectInspector[] toInspectors(HiveShim hiveShim, Object[] args, DataType[] argTypes) {
assert args.length == argTypes.length;
ObjectInspector[] argumentInspectors = new ObjectInspector[argTypes.length];
for (int i = 0; i < argTypes.length; i++) {
Object constant = args[i];
if (constant == null) {
argumentInspectors[i] = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(HiveTypeUtil.toHiveTypeInfo(argTypes[i], false));
} else {
PrimitiveTypeInfo primitiveTypeInfo = ((PrimitiveTypeInfo) (HiveTypeUtil.toHiveTypeInfo(argTypes[i], false)));
constant = getConversion(getObjectInspector(primitiveTypeInfo), argTypes[i].getLogicalType(), hiveShim).toHiveObject(constant);
argumentInspectors[i] = getObjectInspectorForPrimitiveConstant(primitiveTypeInfo, constant, hiveShim);
}
}
return argumentInspectors;
} | 3.26 |
flink_HiveInspectors_getConversion_rdh | /**
* Get conversion for converting Flink object to Hive object from an ObjectInspector and the
* corresponding Flink DataType.
*/
public static HiveObjectConversion getConversion(ObjectInspector inspector, LogicalType dataType, HiveShim hiveShim) {
if (inspector instanceof PrimitiveObjectInspector) {
HiveObjectConversion conversion;
if ((((((((((inspector instanceof BooleanObjectInspector) || (inspector instanceof StringObjectInspector)) || (inspector instanceof ByteObjectInspector)) || (inspector instanceof ShortObjectInspector)) || (inspector instanceof IntObjectInspector)) || (inspector instanceof LongObjectInspector)) || (inspector instanceof FloatObjectInspector)) || (inspector instanceof DoubleObjectInspector)) || (inspector instanceof BinaryObjectInspector)) || (inspector instanceof VoidObjectInspector)) {
conversion = IdentityConversion.INSTANCE;
} else if (inspector instanceof DateObjectInspector) {
conversion = hiveShim::toHiveDate;
} else if (inspector instanceof TimestampObjectInspector) {
conversion = hiveShim::toHiveTimestamp;
} else if (inspector instanceof HiveCharObjectInspector) {
conversion = o -> o == null ? null : new HiveChar(((String) (o)), ((CharType) (dataType)).getLength());
} else if (inspector instanceof HiveVarcharObjectInspector) {
conversion = o -> o == null ? null : new HiveVarchar(((String) (o)), ((VarCharType) (dataType)).getLength());
} else if (inspector instanceof HiveDecimalObjectInspector) {
conversion = o -> o == null ? null : HiveDecimal.create(((BigDecimal) (o)));
} else if (inspector instanceof HiveIntervalYearMonthObjectInspector) {
conversion = o -> {
if (o == null) {
return null;
} else {
Period period = ((Period) (o));
return new HiveIntervalYearMonth(period.getYears(), period.getMonths());
}
};
} else if (inspector instanceof HiveIntervalDayTimeObjectInspector) {
conversion = o -> {
if (o == null) {
return null;
} else {
Duration duration = ((Duration) (o));
return new HiveIntervalDayTime(duration.getSeconds(), duration.getNano());
}
};
} else {
throw new FlinkHiveUDFException("Unsupported primitive object inspector " + inspector.getClass().getName());
}
// if the object inspector prefers Writable objects, we should add an extra conversion
// for that
// currently this happens for constant arguments for UDFs
if (((PrimitiveObjectInspector) (inspector)).preferWritable()) {
conversion = new WritableHiveObjectConversion(conversion, hiveShim);
}
return conversion;
}
if (inspector instanceof ListObjectInspector) {
HiveObjectConversion eleConvert = getConversion(((ListObjectInspector) (inspector)).getListElementObjectInspector(), ((ArrayType) (dataType)).getElementType(), hiveShim);
return o -> {
if (o == null) {
return null;
}
Object[] array = ((Object[]) (o));
List<Object> result = new ArrayList<>();
for (Object v14 : array) {
result.add(eleConvert.toHiveObject(v14));
}
return result;
};
}
if (inspector instanceof MapObjectInspector) {
MapObjectInspector v15 = ((MapObjectInspector) (inspector));
MapType kvType = ((MapType) (dataType));
HiveObjectConversion keyConversion = getConversion(v15.getMapKeyObjectInspector(), kvType.getKeyType(), hiveShim);
HiveObjectConversion valueConversion = getConversion(v15.getMapValueObjectInspector(), kvType.getValueType(), hiveShim);
return o -> {
if (o == null) {
return null;
}
Map<Object, Object> map = ((Map) (o));
Map<Object, Object> result = CollectionUtil.newHashMapWithExpectedSize(map.size());
for (Map.Entry<Object, Object> entry : map.entrySet()) {
result.put(keyConversion.toHiveObject(entry.getKey()), valueConversion.toHiveObject(entry.getValue()));
}
return result;
};
}
if (inspector instanceof StructObjectInspector) {
StructObjectInspector structInspector = ((StructObjectInspector) (inspector));
List<? extends StructField> structFields = structInspector.getAllStructFieldRefs();
List<RowType.RowField> rowFields = ((RowType) (dataType)).getFields();
HiveObjectConversion[] conversions = new HiveObjectConversion[structFields.size()];
for (int i = 0; i < structFields.size(); i++) {
conversions[i] = getConversion(structFields.get(i).getFieldObjectInspector(), rowFields.get(i).getType(), hiveShim);
}
return o -> {
if (o == null) {
return null;
}
Row row = ((Row) (o));
List<Object> result = new ArrayList<>(row.getArity());
for (int i = 0; i < row.getArity(); i++) {
result.add(conversions[i].toHiveObject(row.getField(i)));
}
return result;
};
}
throw new FlinkHiveUDFException(String.format("Flink doesn't support convert object conversion for %s yet", inspector));
} | 3.26 |
flink_HiveInspectors_getArgInspectors_rdh | /**
* Get object inspector for each function argument.
*/
public static ObjectInspector[] getArgInspectors(HiveShim hiveShim, HiveFunctionArguments arguments) {
ObjectInspector[] inspectors = new ObjectInspector[arguments.size()];
for (int i = 0; i < inspectors.length; i++) {
if (arguments.isLiteral(i)) {
Object constant = arguments.getArg(i);
PrimitiveTypeInfo primitiveTypeInfo = ((PrimitiveTypeInfo) (HiveTypeUtil.toHiveTypeInfo(arguments.getDataType(i), false)));
constant = getConversion(getObjectInspector(primitiveTypeInfo), arguments.getDataType(i).getLogicalType(), hiveShim).toHiveObject(constant);
inspectors[i] = getObjectInspectorForPrimitiveConstant(primitiveTypeInfo, constant, hiveShim);
} else {
inspectors[i] = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(HiveTypeUtil.toHiveTypeInfo(arguments.getDataType(i), false));
}
}
return inspectors;
} | 3.26 |
flink_HiveInspectors_getObjectInspector_rdh | /**
* Get Hive {@link ObjectInspector} for a Flink {@link LogicalType}.
*/
public static ObjectInspector getObjectInspector(LogicalType flinkType) {
return getObjectInspector(HiveTypeUtil.toHiveTypeInfo(flinkType, true));
} | 3.26 |
flink_RetryRule_evaluate_rdh | /**
* Retry a test in case of a failure with a specific exception.
*
* @throws Throwable
*/
@Override
public void evaluate() throws Throwable {
for (int currentRun = 0; currentRun <= timesOnFailure; currentRun++) {
try {
statement.evaluate();
break; // success
} catch (Throwable t) {
if ((expectedException != null) && expectedException.isAssignableFrom(t.getClass())) {
throw t;
}
LOG.warn(String.format("Test run failed (%d/%d).", currentRun, timesOnFailure + 1), t);
if ((!exceptionClass.isAssignableFrom(t.getClass())) || (currentRun >= timesOnFailure)) {
// Throw the failure if retried too often, or if it is the wrong exception
throw t;
}
}
}
} | 3.26 |
flink_MapNode_computeOperatorSpecificDefaultEstimates_rdh | /**
* Computes the estimates for the Map operator. We assume that by default, Map takes one value
* and transforms it into another value. The cardinality consequently stays the same.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
this.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords();
} | 3.26 |
flink_SlidingEventTimeWindows_of_rdh | /**
* Creates a new {@code SlidingEventTimeWindows} {@link WindowAssigner} that assigns elements to
* time windows based on the element timestamp and offset.
*
* <p>For example, if you want to window a stream by hour, but the window begins at the 15th minute of
* each hour, you can use {@code of(Time.hours(1), Time.minutes(15))}; then you will get time
* windows that start at 0:15:00, 1:15:00, 2:15:00, etc.
*
* <p>Likewise, if you live in a time zone that does not use UTC±00:00, such as China which uses
* UTC+08:00, and you want a time window with a size of one day that begins at every 00:00:00 of
* local time, you may use {@code of(Time.days(1), Time.hours(-8))}. The offset parameter is
* {@code Time.hours(-8)} since UTC+08:00 is 8 hours ahead of UTC time.
*
* @param size
* The size of the generated windows.
* @param slide
* The slide interval of the generated windows.
* @param offset
* The offset which window start would be shifted by.
* @return The time policy.
*/
public static SlidingEventTimeWindows of(Time size, Time slide, Time offset) {
return new SlidingEventTimeWindows(size.toMilliseconds(), slide.toMilliseconds(),
offset.toMilliseconds());
} | 3.26 |
flink_ListStateDescriptor_getElementSerializer_rdh | /**
* Gets the serializer for the elements contained in the list.
*
* @return The serializer for the elements in the list.
*/
public TypeSerializer<T> getElementSerializer() {
// call getSerializer() here to get the initialization check and proper error message
final TypeSerializer<List<T>> rawSerializer = getSerializer();
if (!(rawSerializer instanceof ListSerializer)) {
throw new IllegalStateException();
}
return ((ListSerializer<T>) (rawSerializer)).getElementSerializer();
} | 3.26 |
flink_AbstractExternalDataStreamPythonFunctionOperator_getOutputTagById_rdh | // ----------------------------------------------------------------------
// Side outputs
// ----------------------------------------------------------------------
protected OutputTag<?> getOutputTagById(String id) {
Preconditions.checkArgument(sideOutputTags.containsKey(id));
return sideOutputTags.get(id);
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_trigger_rdh | /**
* Triggers the next queued runnable and executes it synchronously. This method throws an
* exception if no Runnable is currently queued.
*/
public void trigger() {
final Runnable next;
synchronized(queuedRunnables) {
next = queuedRunnables.removeFirst();
}
next.run();
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_numQueuedRunnables_rdh | /**
* Gets the number of Runnables currently queued.
*/
public int numQueuedRunnables() {
synchronized(queuedRunnables) {
return queuedRunnables.size();
}
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_triggerNonPeriodicScheduledTasksWithRecursion_rdh | /**
* Triggers all non-periodically scheduled tasks. In contrast to {@link #triggerNonPeriodicScheduledTasks()}, if such a task schedules another non-periodically
* scheduled task, then this new task will also be triggered.
*/
public void triggerNonPeriodicScheduledTasksWithRecursion() {
while (!nonPeriodicScheduledTasks.isEmpty()) {
final ScheduledTask<?> scheduledTask = nonPeriodicScheduledTasks.poll();
if (!scheduledTask.isCancelled()) {
scheduledTask.execute();
}
}
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_triggerNonPeriodicScheduledTask_rdh | /**
* Triggers a single non-periodically scheduled task.
*
* @throws NoSuchElementException
* If there is no such task.
*/
public void triggerNonPeriodicScheduledTask() {
final ScheduledTask<?> poll = nonPeriodicScheduledTasks.remove();
if (poll != null) {
poll.execute();
}
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_shutdown_rdh | // ------------------------------------------------------------------------
// service shutdown
// ------------------------------------------------------------------------
@Override
public void shutdown() {
shutdown = true;
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_execute_rdh | // ------------------------------------------------------------------------
// (scheduled) execution
// ------------------------------------------------------------------------
@Override
public void execute(@Nonnull Runnable command) {
synchronized(queuedRunnables) {
queuedRunnables.addLast(command);
}
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_triggerAllNonPeriodicTasks_rdh | // ------------------------------------------------------------------------
// Execution triggering and access to the queued tasks
// ------------------------------------------------------------------------
/**
* Executes all runnable and scheduled non-periodic tasks until none are left to run. This is
* essentially a combination of {@link #triggerAll()} and {@link #triggerNonPeriodicScheduledTasks()} that allows making a test agnostic of how exactly a
* runnable is passed to the executor.
*/
public void triggerAllNonPeriodicTasks() {
while ((numQueuedRunnables() > 0) || (!nonPeriodicScheduledTasks.isEmpty())) {
triggerAll();
triggerNonPeriodicScheduledTasks();
}
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_triggerScheduledTasks_rdh | /**
* Triggers all registered tasks.
*/
public void triggerScheduledTasks() {
triggerPeriodicScheduledTasks();
triggerNonPeriodicScheduledTasks();
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_submit_rdh | // ------------------------------------------------------------------------
// non-implemented future task methods
// ------------------------------------------------------------------------
@Override
public <T> Future<T> submit(Callable<T> task) {
throw new UnsupportedOperationException();
} | 3.26 |
flink_ManuallyTriggeredScheduledExecutorService_triggerAll_rdh | /**
* Triggers all {@code queuedRunnables}.
*/
public void triggerAll() {
while (numQueuedRunnables() > 0) {
trigger();
}
} | 3.26 |
flink_ClusterEntrypoint_runClusterEntrypoint_rdh | // Helper methods
// --------------------------------------------------
public static void runClusterEntrypoint(ClusterEntrypoint clusterEntrypoint) {
final String clusterEntrypointName = clusterEntrypoint.getClass().getSimpleName();
try {
clusterEntrypoint.startCluster();
} catch (ClusterEntrypointException e) {
LOG.error(String.format("Could not start cluster entrypoint %s.",
clusterEntrypointName), e);
System.exit(STARTUP_FAILURE_RETURN_CODE);
}
int returnCode;
Throwable throwable = null;
try {
returnCode = clusterEntrypoint.getTerminationFuture().get().processExitCode();
} catch (Throwable e) {
throwable = ExceptionUtils.stripExecutionException(e);
returnCode = RUNTIME_FAILURE_RETURN_CODE;
}
LOG.info("Terminating cluster entrypoint process {} with exit code {}.", clusterEntrypointName, returnCode, throwable);
System.exit(returnCode);
} | 3.26 |
flink_ClusterEntrypoint_closeClusterComponent_rdh | /**
* Close cluster components and deregister the Flink application from the resource management
* system by signalling the {@link ResourceManager}.
*
* @param applicationStatus
* to terminate the application with
* @param shutdownBehaviour
* shutdown behaviour
* @param diagnostics
* additional information about the shut down, can be {@code null}
* @return Future which is completed once the shut down of the cluster component is finished
*/
private CompletableFuture<Void> closeClusterComponent(ApplicationStatus applicationStatus, ShutdownBehaviour shutdownBehaviour, @Nullable
String diagnostics) {
synchronized(lock) {
if (clusterComponent != null) {
switch (shutdownBehaviour) {
case GRACEFUL_SHUTDOWN :
return clusterComponent.stopApplication(applicationStatus, diagnostics);
case PROCESS_FAILURE :
default :
return clusterComponent.stopProcess();
}
} else {
return CompletableFuture.completedFuture(null);
}
}
}
/**
* Clean up of temporary directories created by the {@link ClusterEntrypoint} | 3.26 |
flink_ClusterEntrypoint_m0_rdh | /**
* Returns the port range for the common {@link RpcService}.
*
* @param configuration
* to extract the port range from
* @return Port range for the common {@link RpcService}
*/
protected String m0(Configuration configuration) {
if (ZooKeeperUtils.isZooKeeperRecoveryMode(configuration)) {
return configuration.getString(HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE);
} else {
return String.valueOf(configuration.getInteger(JobManagerOptions.PORT));
}
} | 3.26 |
flink_ClusterEntrypoint_generateClusterConfiguration_rdh | // --------------------------------------------------
// Internal methods
// --------------------------------------------------
private Configuration generateClusterConfiguration(Configuration configuration) {
final Configuration resultConfiguration = new Configuration(Preconditions.checkNotNull(configuration));
final String webTmpDir = configuration.getString(WebOptions.TMP_DIR);
final File uniqueWebTmpDir = new File(webTmpDir, "flink-web-" + UUID.randomUUID());
resultConfiguration.setString(WebOptions.TMP_DIR, uniqueWebTmpDir.getAbsolutePath());
return resultConfiguration;
} | 3.26 |
flink_NettyShuffleEnvironment_close_rdh | /**
* Tries to shut down all network I/O components.
*/
@Override
public void close() {
synchronized(lock) {
if (isClosed) {
return;
}
LOG.info("Shutting down the network environment and its components.");
// terminate all network connections
try {
LOG.debug("Shutting down network connection manager");
connectionManager.shutdown();
} catch (Throwable t) {
LOG.warn("Cannot shut down the network connection manager.", t);
}
// shutdown all intermediate results
try {
LOG.debug("Shutting down intermediate result partition manager");
resultPartitionManager.shutdown();
} catch (Throwable t) {
LOG.warn("Cannot shut down the result partition manager.", t);
}
// make sure that the global buffer pool re-acquires all buffers
try {
networkBufferPool.destroyAllBufferPools();
} catch (Throwable t) {
LOG.warn("Could not destroy all buffer pools.", t);
}
// destroy the buffer pool
try {
networkBufferPool.destroy();
} catch (Throwable t) {
LOG.warn("Network buffer pool did not shut down properly.", t);
}
// delete all the temp directories
try {
fileChannelManager.close();
} catch (Throwable t) {
LOG.warn("Cannot close the file channel manager properly.", t);
}
try {
gracefulShutdown(10, TimeUnit.SECONDS, batchShuffleReadIOExecutor);
} catch (Throwable t) {
LOG.warn("Cannot shut down batch shuffle read IO executor properly.", t);
}
try {
batchShuffleReadBufferPool.destroy();
} catch (Throwable t) {
LOG.warn("Cannot shut down batch shuffle read buffer pool properly.", t);
}
isClosed = true;
}
} | 3.26 |
flink_NettyShuffleEnvironment_getResultPartitionManager_rdh | // Properties
// --------------------------------------------------------------------------------------------
@VisibleForTesting
public ResultPartitionManager getResultPartitionManager() {
return resultPartitionManager;
} | 3.26 |
flink_NettyShuffleEnvironment_registerLegacyNetworkMetrics_rdh | /**
* Registers legacy network metric groups before shuffle service refactoring.
*
* <p>Registers legacy metric groups if shuffle service implementation is original default one.
*
* @deprecated should be removed in future
*/
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public void registerLegacyNetworkMetrics(MetricGroup metricGroup, ResultPartitionWriter[] producedPartitions, InputGate[] inputGates) {
NettyShuffleMetricFactory.registerLegacyNetworkMetrics(config.isNetworkDetailedMetrics(), metricGroup, producedPartitions, inputGates);
} | 3.26 |
flink_DynamicPartitionPruningUtils_isNewSource_rdh | /**
* Returns true if the source is FLIP-27 source, else false.
*/
private static boolean isNewSource(ScanTableSource scanTableSource) {
ScanTableSource.ScanRuntimeProvider provider = scanTableSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
if (provider instanceof SourceProvider) {
return true;
} else if (provider instanceof TransformationScanProvider) {
Transformation<?> transformation = ((TransformationScanProvider) (provider)).createTransformation(name -> Optional.empty());
return transformation instanceof SourceTransformation;
} else if (provider instanceof DataStreamScanProvider) {
// Suppose DataStreamScanProvider of sources that support dynamic filtering will use
// new Source. It's not reliable and should be checked.
// TODO FLINK-28864 check if the source used by the DataStreamScanProvider is
// actually a new source. This situation will not generate wrong result because it's
// handled when translating BatchTableSourceScan. The only effect is the physical
// plan and the exec node plan have DPP nodes, but they do not work in runtime.
return true;
}
// TODO supports more
return false;
} | 3.26 |
flink_DynamicPartitionPruningUtils_isSuitableJoin_rdh | /**
* Judge whether the join node is a suitable one for the dpp pattern.
*/
public static boolean isSuitableJoin(Join join) {
// Now dynamic partition pruning supports left/right join, inner and semi
// join, but semi join currently cannot participate in join reordering.
if ((((join.getJoinType() != JoinRelType.INNER) && (join.getJoinType() != JoinRelType.SEMI)) && (join.getJoinType() != JoinRelType.LEFT)) && (join.getJoinType() != JoinRelType.RIGHT)) {
return false;
}
JoinInfo v2 = join.analyzeCondition();
return !v2.leftKeys.isEmpty();
} | 3.26 |
flink_DynamicPartitionPruningUtils_canConvertAndConvertDppFactSide_rdh | /**
* Judge whether the input RelNode can be converted to the dpp fact side. If the input RelNode
* can be converted, this method will return the converted fact side whose partitioned table
* source will be converted to {@link BatchPhysicalDynamicFilteringTableSourceScan}. If not,
* this method will return the original RelNode.
*/
public static Tuple2<Boolean, RelNode> canConvertAndConvertDppFactSide(RelNode rel, ImmutableIntList joinKeys, RelNode dimSide, ImmutableIntList dimSideJoinKey) {
DppFactSideChecker dppFactSideChecker = new DppFactSideChecker(rel, joinKeys, dimSide, dimSideJoinKey);
return dppFactSideChecker.canConvertAndConvertDppFactSide();
} | 3.26 |
flink_DynamicPartitionPruningUtils_visitDimSide_rdh | /**
* Visits the dim side to judge whether it has a filter condition and whether its
* source table scan is a non-partitioned scan.
*/
private void visitDimSide(RelNode rel) {
// TODO Let visitDimSide more efficient and more accurate. Like a filter on dim table or
// a filter for the partition field on fact table.
if (rel instanceof TableScan) {
TableScan scan = ((TableScan) (rel));
TableSourceTable table = scan.getTable().unwrap(TableSourceTable.class);
if (table == null) {
return;
}
if (((!hasFilter) && (table.abilitySpecs() != null)) && (table.abilitySpecs().length != 0)) {
for (SourceAbilitySpec spec : table.abilitySpecs()) {
if (spec instanceof FilterPushDownSpec) {
List<RexNode> predicates = ((FilterPushDownSpec) (spec)).getPredicates();
for (RexNode predicate : predicates) {
if (isSuitableFilter(predicate)) {
hasFilter = true;
}
}
}
}
}
CatalogTable catalogTable = table.contextResolvedTable().getResolvedTable();
if (catalogTable.isPartitioned()) {
hasPartitionedScan = true;
return;
}
// To ensure there is only one source on the dim side.
setTables(table.contextResolvedTable());
} else if (rel instanceof HepRelVertex) {
visitDimSide(((HepRelVertex) (rel)).getCurrentRel());
} else if ((rel instanceof Exchange) || (rel instanceof Project)) {
visitDimSide(rel.getInput(0));} else if (rel instanceof Calc) {
RexProgram origProgram = ((Calc) (rel)).getProgram();
if ((origProgram.getCondition() != null) && isSuitableFilter(origProgram.expandLocalRef(origProgram.getCondition()))) {
hasFilter = true;}
visitDimSide(rel.getInput(0));
} else if (rel instanceof Filter) {
if (isSuitableFilter(((Filter) (rel)).getCondition())) {
hasFilter = true;
}
visitDimSide(rel.getInput(0));
} else if (rel instanceof Join) {
Join join = ((Join) (rel));
visitDimSide(join.getLeft());
visitDimSide(join.getRight());
} else if (rel instanceof BatchPhysicalGroupAggregateBase) {visitDimSide(((BatchPhysicalGroupAggregateBase) (rel)).getInput());
} else if
(rel instanceof
Union) {
Union union = ((Union) (rel));
for (RelNode input : union.getInputs()) {
visitDimSide(input);
}
}
} | 3.26 |
flink_DynamicPartitionPruningUtils_isSuitableFilter_rdh | /**
 * Not all filter conditions are suitable for filtering partitions via the dynamic partition
 * pruning rules. For example, IS NOT NULL can only filter out the single default partition,
 * which has little impact on the amount of filtered data.
 */
private static boolean isSuitableFilter(RexNode filterCondition) {
    switch (filterCondition.getKind()) {
        case AND:
            List<RexNode> conjunctions = RelOptUtil.conjunctions(filterCondition);
            return isSuitableFilter(conjunctions.get(0))
                    || isSuitableFilter(conjunctions.get(1));
        case OR:
            List<RexNode> disjunctions = RelOptUtil.disjunctions(filterCondition);
            return isSuitableFilter(disjunctions.get(0))
                    && isSuitableFilter(disjunctions.get(1));
        case NOT:
            return isSuitableFilter(((RexCall) filterCondition).operands.get(0));
        case EQUALS:
        case GREATER_THAN:
        case GREATER_THAN_OR_EQUAL:
        case LESS_THAN:
        case LESS_THAN_OR_EQUAL:
        case NOT_EQUALS:
        case IN:
        case LIKE:
        case CONTAINS:
        case SEARCH:
        case IS_FALSE:
        case IS_NOT_FALSE:
        case IS_NOT_TRUE:
        case IS_TRUE:
            // TODO add more filters that are suitable, i.e. that can prune enough
            // partitions when used for dynamic partition pruning.
            return true;
        default:
            return false;
    }
} | 3.26 |
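
The AND/OR branches above encode an asymmetry worth spelling out: a conjunction can prune partitions as soon as one conjunct is selective, while a disjunction only helps if both disjuncts are. The standalone sketch below mirrors that logic without any Calcite types; the class name, predicate strings, and the string-based leaf check are made up purely for illustration.

// Minimal standalone sketch (not Flink/Calcite code): leaves are SQL-like predicate strings,
// and the leaf check simply marks IS NOT NULL as too weak to prune partitions.
public class SuitableFilterSketch {

    static boolean isSuitableLeaf(String predicate) {
        // In the real rule this is a check on RexNode#getKind(); recognizing the weak
        // predicate by its text is only good enough for this demo.
        return !predicate.contains("IS NOT NULL");
    }

    public static void main(String[] args) {
        String strong = "dt = '2024-01-01'";
        String weak = "dt IS NOT NULL";

        // AND: one selective conjunct is enough, because every surviving row satisfies it.
        boolean andSuitable = isSuitableLeaf(strong) || isSuitableLeaf(weak);

        // OR: both disjuncts must be selective; otherwise rows matching the weak disjunct
        // may live in partitions that the strong disjunct alone would have pruned.
        boolean orSuitable = isSuitableLeaf(strong) && isSuitableLeaf(weak);

        System.out.println("AND suitable: " + andSuitable); // true
        System.out.println("OR suitable:  " + orSuitable);  // false
    }
}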
flink_CompilerHints_addUniqueFields_rdh | /**
 * Adds multiple FieldSets that are unique.
 *
 * @param uniqueFieldSets
 *     A set of unique FieldSets.
 */
public void addUniqueFields(Set<FieldSet> uniqueFieldSets) {
    if (this.uniqueFields == null) {
        this.uniqueFields = new HashSet<FieldSet>();
    }
    this.uniqueFields.addAll(uniqueFieldSets);
} | 3.26 |
flink_CompilerHints_getUniqueFields_rdh | // --------------------------------------------------------------------------------------------
// Uniqueness
// --------------------------------------------------------------------------------------------
/**
 * Gets the FieldSets that are unique.
 *
 * @return The set of FieldSets that are unique.
*/
public Set<FieldSet> getUniqueFields() {
return this.uniqueFields;
} | 3.26 |
flink_CompilerHints_copyFrom_rdh | // --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
protected void copyFrom(CompilerHints source) {
    this.outputSize = source.outputSize;
    this.outputCardinality = source.outputCardinality;
    this.avgOutputRecordSize = source.avgOutputRecordSize;
    this.filterFactor = source.filterFactor;
    if (source.uniqueFields != null && source.uniqueFields.size() > 0) {
        if (this.uniqueFields == null) {
            this.uniqueFields = new HashSet<FieldSet>();
        } else {
            this.uniqueFields.clear();
        }
        this.uniqueFields.addAll(source.uniqueFields);
    }
} | 3.26 |
flink_CompilerHints_getOutputSize_rdh | // --------------------------------------------------------------------------------------------
// Basic Record Statistics
// --------------------------------------------------------------------------------------------
public long getOutputSize() {
        return outputSize;
} | 3.26 |
flink_CompilerHints_addUniqueField_rdh | /**
* Adds a field as having only unique values.
*
* @param field
* The field with unique values.
*/
public void addUniqueField(int field) {
if (this.uniqueFields == null) {
this.uniqueFields = new HashSet<FieldSet>();
}
this.uniqueFields.add(new FieldSet(field));
} | 3.26 |
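
As a usage sketch, these unique-field hints are normally attached to an operator of the legacy DataSet optimizer. The class name and field indices below are made up, and reaching the hints through Operator#getCompilerHints() is an assumption about the call site rather than something shown in these snippets.

// Hedged usage sketch: field indices and names are illustrative only.
import java.util.Collections;
import org.apache.flink.api.common.operators.CompilerHints;
import org.apache.flink.api.common.operators.Operator;
import org.apache.flink.api.common.operators.util.FieldSet;

public class CompilerHintsExample {

    public static void annotate(Operator<?> operator) {
        CompilerHints hints = operator.getCompilerHints();
        // Declare that field 0 of the operator's output holds only unique values.
        hints.addUniqueField(0);
        // Declare another unique field via the bulk method that takes FieldSets.
        hints.addUniqueFields(Collections.singleton(new FieldSet(3)));
        System.out.println("Unique field sets: " + hints.getUniqueFields());
    }
}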
flink_InputPriorityGraphGenerator_calculatePipelinedAncestors_rdh | /**
 * Find the ancestors by going through PIPELINED edges.
 */
@VisibleForTesting
List<ExecNode<?>> calculatePipelinedAncestors(ExecNode<?> node) {
    List<ExecNode<?>> ret = new ArrayList<>();
    AbstractExecNodeExactlyOnceVisitor ancestorVisitor =
            new AbstractExecNodeExactlyOnceVisitor() {
                @Override
                protected void visitNode(ExecNode<?> node) {
                    boolean hasAncestor = false;
                    if (!boundaries.contains(node)) {
                        List<InputProperty> inputProperties = node.getInputProperties();
                        for (int i = 0; i < inputProperties.size(); i++) {
                            // we only go through PIPELINED edges
                            if (inputProperties.get(i).getDamBehavior().stricterOrEqual(f0)) {
                                continue;
                            }
                            hasAncestor = true;
                            node.getInputEdges().get(i).getSource().accept(this);
                        }
                    }
                    if (!hasAncestor) {
                        ret.add(node);
                    }
                }
            };
    node.accept(ancestorVisitor);
    return ret;
} | 3.26 |
flink_TaskExecutionState_getID_rdh | /**
* Returns the ID of the task this result belongs to
*
* @return the ID of the task this result belongs to
 */
public ExecutionAttemptID getID() {
return this.executionId;
} | 3.26 |
flink_TaskExecutionState_getAccumulators_rdh | /**
* Gets flink and user-defined accumulators in serialized form.
*/
public AccumulatorSnapshot getAccumulators() {
return accumulators;
} | 3.26 |
flink_TaskExecutionState_getExecutionState_rdh | /**
* Returns the new execution state of the task.
*
* @return the new execution state of the task
*/
public ExecutionState getExecutionState() {
return this.executionState;
} | 3.26 |
flink_TaskExecutionState_equals_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (obj instanceof TaskExecutionState) {
        TaskExecutionState other = (TaskExecutionState) obj;
        return other.executionId.equals(this.executionId)
                && other.executionState == this.executionState
                && (other.throwable == null) == (this.throwable == null);
} else {
return false;
}
} | 3.26 |
flink_TaskExecutionState_getError_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the attached exception, which is in serialized form. Returns null, if the status update
* is no failure with an associated exception.
*
* @param userCodeClassloader
* The classloader that can resolve user-defined exceptions.
* @return The attached exception, or null, if none.
*/
public Throwable getError(ClassLoader userCodeClassloader) {
        if (this.throwable == null) {
return null;
} else {
return this.throwable.deserializeError(userCodeClassloader);
}
} | 3.26 |
flink_FlinkStatement_clearWarnings_rdh | // TODO We currently do not support this, but we can't throw a SQLException here because we want
// to support jdbc tools such as beeline and sqlline.
@Override
public void clearWarnings() throws SQLException {
} | 3.26 |
flink_FlinkStatement_execute_rdh | /**
 * Execute a SQL statement. Notice that an <code>INSERT</code> statement in Flink returns
 * the job id as its result set.
*
* @param sql
* any SQL statement
* @return True if there is result set for the statement.
* @throws SQLException
* the thrown exception.
*/
@Override
public boolean execute(String sql) throws SQLException {
    StatementResult result = executeInternal(sql);
    if (result.isQueryResult() || result.getResultKind() == ResultKind.SUCCESS_WITH_CONTENT) {
        currentResults = new FlinkResultSet(this, result);
        hasResults = true;
        return true;
    }
hasResults = false;
return false;
} | 3.26 |
flink_FlinkStatement_executeQuery_rdh | /**
* Execute a SELECT query.
*
* @param sql
* an SQL statement to be sent to the database, typically a static SQL <code>SELECT
* </code> statement
* @return the select query result set.
* @throws SQLException
* the thrown exception
*/
@Override
public ResultSet executeQuery(String sql) throws SQLException {
StatementResult result = executeInternal(sql);
if (!result.isQueryResult()) {
result.close();
throw new SQLException(String.format("Statement[%s] is not a query.", sql));
}
    currentResults = new FlinkResultSet(this, result);
hasResults = true;
return currentResults;
} | 3.26 |
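
For orientation, here is how execute() and executeQuery() are typically reached through plain JDBC. The connection URL, table names, and driver setup are assumptions about the Flink JDBC driver deployment, not something these snippets establish.

// Hypothetical JDBC client sketch; the URL and table names are placeholders, and the
// Flink JDBC driver must be on the classpath for this to run.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class FlinkJdbcExample {
    public static void main(String[] args) throws Exception {
        // Assumed URL format of the Flink SQL Gateway JDBC driver; adjust host/port as needed.
        String url = "jdbc:flink://localhost:8083";
        try (Connection conn = DriverManager.getConnection(url);
                Statement stmt = conn.createStatement()) {
            // execute(): INSERT statements report the submitted job id as their result set.
            stmt.execute("INSERT INTO sink_table SELECT * FROM source_table");
            // executeQuery(): only query statements are accepted here.
            try (ResultSet rs = stmt.executeQuery("SELECT id, name FROM source_table")) {
                while (rs.next()) {
                    System.out.println(rs.getLong(1) + ", " + rs.getString(2));
                }
            }
        }
    }
}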
flink_IntCounter_toString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "IntCounter " + this.localValue;
} | 3.26 |
flink_IntCounter_add_rdh | // ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(int value) {
localValue += value;
} | 3.26 |
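
A short usage sketch for the counter: it is normally registered as an accumulator inside a rich function so that its value is merged and reported with the job result. The accumulator name and the mapping logic are illustrative, and the open(Configuration) signature is the long-standing one, which may be deprecated in newer Flink releases.

// Hedged usage sketch: registering an IntCounter accumulator in a rich function.
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;

public class LineCountingMapper extends RichMapFunction<String, String> {

    private final IntCounter numLines = new IntCounter();

    @Override
    public void open(Configuration parameters) {
        // Register the counter so its value is reported back with the job result.
        getRuntimeContext().addAccumulator("num-lines", numLines);
    }

    @Override
    public String map(String value) {
        numLines.add(1); // uses the primitive specialization shown above
        return value;
    }
}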