name | code_snippet | score
---|---|---|
flink_FileInputFormat_getFilePaths_rdh
|
/**
* Returns the paths of all files to be read by the FileInputFormat.
*
* @return The list of all paths to read.
 */
public Path[] getFilePaths() {
if (supportsMultiPaths()) {
if (this.filePaths == null) {
return new Path[0];
}
return this.filePaths;
} else {
if (this.filePath == null) {
return new Path[0];
}
return new Path[]{ filePath };
}
}
| 3.26 |
flink_FileInputFormat_open_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Opens an input stream to the file defined in the input format. The stream is positioned at
* the beginning of the given split.
*
* <p>The stream is actually opened in an asynchronous thread to make sure any interruptions to
* the thread working on the input format do not reach the file system.
*/
@Override public void open(FileInputSplit fileSplit) throws IOException {
this.currentSplit = fileSplit;
this.splitStart = fileSplit.getStart();
final Path path = fileSplit.getPath();
this.splitLength = (testForUnsplittable(path.getFileSystem().getFileStatus(path))) ? READ_WHOLE_SPLIT_FLAG : fileSplit.getLength();
 if (LOG.isDebugEnabled()) {
     LOG.debug("Opening input split " + fileSplit.getPath() + " [" + this.splitStart + "," + this.splitLength + "]");
 }
 // open the split in an asynchronous thread
 final InputSplitOpenThread isot = new InputSplitOpenThread(fileSplit, this.openTimeout);
 isot.start();
 try {
     this.stream = isot.waitForCompletion();
     this.stream = decorateInputStream(this.stream, fileSplit);
 } catch (Throwable t) {
     throw new IOException("Error opening the Input Split " + fileSplit.getPath() + " [" + splitStart + "," + splitLength + "]: " + t.getMessage(), t);
 }
// get FSDataInputStream
if (this.splitStart != 0) {
this.stream.seek(this.splitStart);
}
}
| 3.26 |
flink_FileInputFormat_extractFileExtension_rdh
|
/**
* Returns the extension of a file name (!= a path).
*
* @return the extension of the file name or {@code null} if there is no extension.
*/
protected static String extractFileExtension(String fileName) {
    checkNotNull(fileName);
    int lastPeriodIndex = fileName.lastIndexOf('.');
    if (lastPeriodIndex < 0) {
        return null;
    } else {
        return fileName.substring(lastPeriodIndex + 1);
    }
}
| 3.26 |
flink_FileInputFormat_getFilePath_rdh
|
// --------------------------------------------------------------------------------------------
// Getters/setters for the configurable parameters
// --------------------------------------------------------------------------------------------
/**
*
* @return The path of the file to read.
* @deprecated Please use getFilePaths() instead.
*/
@Deprecated
public Path getFilePath() {
 if (supportsMultiPaths()) {
     if (this.filePaths == null || this.filePaths.length == 0) {
         return null;
     } else if (this.filePaths.length == 1) {
         return this.filePaths[0];
     } else {
         throw new UnsupportedOperationException("FileInputFormat is configured with multiple paths. Use getFilePaths() instead.");
     }
 } else {
     return filePath;
 }
}
| 3.26 |
flink_FileInputFormat_decorateInputStream_rdh
|
/**
 * This method allows wrapping/decorating the raw {@link FSDataInputStream} for a certain file
* split, e.g., for decoding. When overriding this method, also consider adapting {@link FileInputFormat#testForUnsplittable} if your stream decoration renders the input file
* unsplittable. Also consider calling existing superclass implementations.
*
* @param inputStream
 *            is the input stream to be decorated
* @param fileSplit
* is the file split for which the input stream shall be decorated
* @return the decorated input stream
* @throws Throwable
* if the decoration fails
* @see org.apache.flink.api.common.io.InputStreamFSInputWrapper
*/
protected FSDataInputStream decorateInputStream(FSDataInputStream inputStream, FileInputSplit fileSplit) throws Throwable {
 // Wrap the stream in an extracting (decompressing) stream if the file ends with a known compression
// file extension.
InflaterInputStreamFactory<?> inflaterInputStreamFactory = getInflaterInputStreamFactory(fileSplit.getPath());
if (inflaterInputStreamFactory != null) {
return new InputStreamFSInputWrapper(inflaterInputStreamFactory.create(stream));
}
return inputStream;
}
| 3.26 |
flink_FileInputFormat_getStatistics_rdh
|
/**
* Obtains basic file statistics containing only file size. If the input is a directory, then
* the size is the sum of all contained files.
*
* @see org.apache.flink.api.common.io.InputFormat#getStatistics(org.apache.flink.api.common.io.statistics.BaseStatistics)
*/
@Override
public FileBaseStatistics getStatistics(BaseStatistics cachedStats) throws IOException {
 final FileBaseStatistics cachedFileStats =
         (cachedStats instanceof FileInputFormat.FileBaseStatistics)
                 ? (FileBaseStatistics) cachedStats
                 : null;
try {
return m0(cachedFileStats, getFilePaths(), new ArrayList<>(getFilePaths().length));
} catch (IOException ioex) {
if (LOG.isWarnEnabled()) {
LOG.warn((("Could not determine statistics for paths '" + Arrays.toString(getFilePaths())) + "' due to an io error: ") + ioex.getMessage());
}
} catch (Throwable t) {
if (LOG.isErrorEnabled()) {
LOG.error((("Unexpected problem while getting the file statistics for paths '" + Arrays.toString(getFilePaths())) + "': ") + t.getMessage(), t);
}
}
// no statistics available
return null;
}
| 3.26 |
flink_FileInputFormat_getBlockIndexForPosition_rdh
|
/**
* Retrieves the index of the <tt>BlockLocation</tt> that contains the part of the file
* described by the given offset.
*
* @param blocks
* The different blocks of the file. Must be ordered by their offset.
* @param offset
* The offset of the position in the file.
 * @param halfSplitSize
 *            Half of the split size; if less than this amount of the matched block remains
 *            after the offset, the position is attributed to the next block.
 * @param startIndex
 *            The earliest index to look at.
* @return The index of the block containing the given position.
*/
private int getBlockIndexForPosition(BlockLocation[] blocks, long offset, long halfSplitSize, int startIndex) {
// go over all indexes after the startIndex
for (int i = startIndex; i < blocks.length; i++) {
long blockStart = blocks[i].getOffset();
long blockEnd = blockStart + blocks[i].getLength();
if ((offset >= blockStart) && (offset < blockEnd)) {
// got the block where the split starts
// check if the next block contains more than this one does
 if ((i < (blocks.length - 1)) && ((blockEnd - offset) < halfSplitSize)) {
     return i + 1;
} else {
return i;
}
}
}
 throw new IllegalArgumentException("The given offset is not contained in any block.");
}
| 3.26 |
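To make the half-split-size heuristic above concrete, here is a self-contained simplified version over plain (offset, length) arrays. It mirrors the logic of the method in the row above and is for illustration only; the class and method names are not from Flink.

```java
class BlockIndexSketch {
    // Same lookup as above, over plain arrays: if less than halfSplitSize of the matched
    // block remains after the offset, the position is attributed to the next block.
    static int blockIndexForPosition(long[] blockStarts, long[] blockLengths, long offset, long halfSplitSize) {
        for (int i = 0; i < blockStarts.length; i++) {
            long blockStart = blockStarts[i];
            long blockEnd = blockStart + blockLengths[i];
            if (offset >= blockStart && offset < blockEnd) {
                return (i < blockStarts.length - 1 && blockEnd - offset < halfSplitSize) ? i + 1 : i;
            }
        }
        throw new IllegalArgumentException("The given offset is not contained in any block.");
    }

    public static void main(String[] args) {
        long[] starts = {0, 100, 200};
        long[] lengths = {100, 100, 100};
        // Offset 190 leaves only 10 bytes in block 1; with halfSplitSize = 25 it maps to block 2.
        System.out.println(blockIndexForPosition(starts, lengths, 190, 25)); // 2
        System.out.println(blockIndexForPosition(starts, lengths, 150, 25)); // 1
    }
}
```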
flink_FileInputFormat_close_rdh
|
/**
* Closes the file input stream of the input format.
*/
@Override
public void close() throws IOException {
if (this.stream != null) {
// close input stream
this.stream.close();
stream = null;
}
}
| 3.26 |
flink_OperatorIDGenerator_fromUid_rdh
|
/**
 * Generates {@link OperatorID}s from {@code uid}s.
*
* <p>{@link org.apache.flink.streaming.api.graph.StreamGraphHasherV2#traverseStreamGraphAndGenerateHashes(StreamGraph)}
*
* @param uid
* {@code DataStream} operator uid.
* @return corresponding {@link OperatorID}
*/
public static OperatorID fromUid(String uid) {
byte[] hash = Hashing.murmur3_128(0).newHasher().putString(uid, UTF_8).hash().asBytes();
return new OperatorID(hash);
}
| 3.26 |
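Because the mapping is a pure function of the uid string, the same 16-byte hash can be recomputed outside Flink. A minimal sketch, assuming Guava's Hashing is on the classpath (Flink itself uses a shaded copy); the helper class and method names are illustrative:

```java
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

class OperatorIdHashSketch {
    // Recomputes the 16-byte murmur3_128 hash that fromUid wraps into an OperatorID.
    static byte[] hashForUid(String uid) {
        return Hashing.murmur3_128(0)
                .newHasher()
                .putString(uid, StandardCharsets.UTF_8)
                .hash()
                .asBytes();
    }

    public static void main(String[] args) {
        // The same uid always yields the same bytes, which is what makes uid-based
        // operator state assignment stable across job submissions.
        byte[] first = hashForUid("my-stateful-map");
        byte[] second = hashForUid("my-stateful-map");
        System.out.println(java.util.Arrays.equals(first, second)); // true
    }
}
```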
flink_CheckpointProperties_equals_rdh
|
// ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    CheckpointProperties that = (CheckpointProperties) o;
    return forced == that.forced
            && checkpointType.equals(that.checkpointType)
            && discardSubsumed == that.discardSubsumed
            && discardFinished == that.discardFinished
            && discardCancelled == that.discardCancelled
            && f0 == that.f0
            && discardSuspended == that.discardSuspended;
}
| 3.26 |
flink_CheckpointProperties_isUnclaimed_rdh
|
/**
* Returns whether the checkpoint should be restored in a {@link RestoreMode#NO_CLAIM} mode.
*/
public boolean isUnclaimed() {
return unclaimed;
}
| 3.26 |
flink_CheckpointProperties_forUnclaimedSnapshot_rdh
|
/**
* Creates the checkpoint properties for a snapshot restored in {@link RestoreMode#NO_CLAIM}.
* Those properties should not be used when triggering a checkpoint/savepoint. They're useful
* when restoring a {@link CompletedCheckpointStore} after a JM failover.
*
* @return Checkpoint properties for a snapshot restored in {@link RestoreMode#NO_CLAIM}.
*/
public static CheckpointProperties forUnclaimedSnapshot() {
 // an unclaimed snapshot is similar to a savepoint;
 // we do not care about the format when restoring, the format is only
 // necessary when triggering a savepoint
 return new CheckpointProperties(false, SavepointType.savepoint(SavepointFormatType.CANONICAL), false, false, false, false, false, true);
}
| 3.26 |
flink_CheckpointProperties_forceCheckpoint_rdh
|
// ------------------------------------------------------------------------
/**
* Returns whether the checkpoint should be forced.
*
* <p>Forced checkpoints ignore the configured maximum number of concurrent checkpoints and
* minimum time between checkpoints. Furthermore, they are not subsumed by more recent
* checkpoints as long as they are pending.
*
* @return <code>true</code> if the checkpoint should be forced; <code>false</code> otherwise.
* @see CheckpointCoordinator
* @see PendingCheckpoint
*/
boolean forceCheckpoint() {
return forced;
}
| 3.26 |
flink_CheckpointProperties_forCheckpoint_rdh
|
/**
* Creates the checkpoint properties for a checkpoint.
*
* <p>Checkpoints may be queued in case too many other checkpoints are currently happening. They
* are garbage collected automatically, except when the owning job terminates in state {@link JobStatus#FAILED}. The user is required to configure the clean up behaviour on job
* cancellation.
*
* @return Checkpoint properties for an external checkpoint.
*/
public static CheckpointProperties forCheckpoint(CheckpointRetentionPolicy policy) {
 switch (policy) {
     case NEVER_RETAIN_AFTER_TERMINATION :
         return CHECKPOINT_NEVER_RETAINED;
     case RETAIN_ON_FAILURE :
         return CHECKPOINT_RETAINED_ON_FAILURE;
     case RETAIN_ON_CANCELLATION :
         return CHECKPOINT_RETAINED_ON_CANCELLATION;
     default :
         throw new IllegalArgumentException("unknown policy: " + policy);
 }
}
| 3.26 |
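A hedged usage sketch of the factory in the row above. The enum constants come from the switch shown there; the wrapper class and the package imports are assumptions for illustration:

```java
import org.apache.flink.runtime.checkpoint.CheckpointProperties;
import org.apache.flink.runtime.checkpoint.CheckpointRetentionPolicy;

// Illustrative helper, not part of Flink: maps a configured retention policy to properties.
class RetentionPolicyExample {
    static CheckpointProperties propertiesFor(CheckpointRetentionPolicy policy) {
        // Unknown policies make forCheckpoint throw an IllegalArgumentException,
        // so callers do not need their own default branch.
        return CheckpointProperties.forCheckpoint(policy);
    }
}
```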
flink_CheckpointProperties_getCheckpointType_rdh
|
/**
* Gets the type of the checkpoint (checkpoint / savepoint).
 */
public SnapshotType getCheckpointType() {
return checkpointType;
}
| 3.26 |
flink_CheckpointProperties_forSavepoint_rdh
|
/**
* Creates the checkpoint properties for a (manually triggered) savepoint.
*
* <p>Savepoints are not queued due to time trigger limits. They have to be garbage collected
* manually.
*
* @return Checkpoint properties for a (manually triggered) savepoint.
*/
public static CheckpointProperties forSavepoint(boolean forced, SavepointFormatType formatType) {
return new CheckpointProperties(forced, SavepointType.savepoint(formatType), false, false, false, false, false, false);
}
| 3.26 |
flink_SqlNodeConvertUtils_validateAlterView_rdh
|
/**
 * Validates that the view to alter exists and returns the {@link CatalogView} to alter.
*/
static CatalogView validateAlterView(SqlAlterView alterView, ConvertContext context) {
UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(alterView.fullViewName());
 ObjectIdentifier viewIdentifier = context.getCatalogManager().qualifyIdentifier(unresolvedIdentifier);
 Optional<ContextResolvedTable> optionalCatalogTable = context.getCatalogManager().getTable(viewIdentifier);
 // check that the view exists and is not a temporary view
if ((!optionalCatalogTable.isPresent()) || optionalCatalogTable.get().isTemporary()) {
throw new ValidationException(String.format("View %s doesn't exist or is a temporary view.", viewIdentifier));
}
 // check that the base table is exactly a view
CatalogBaseTable baseTable = optionalCatalogTable.get().getResolvedTable();
if (baseTable instanceof CatalogTable) {
throw new ValidationException("ALTER VIEW for a table is not allowed");
}
return ((CatalogView) (baseTable));
}
| 3.26 |
flink_SqlNodeConvertUtils_toCatalogView_rdh
|
/**
 * Converts the query part of a VIEW statement into a {@link CatalogView}.
*/
static CatalogView toCatalogView(SqlNode query, List<SqlNode> viewFields, Map<String, String> viewOptions, String viewComment, ConvertContext context) {
// Put the sql string unparse (getQuotedSqlString()) in front of
// the node conversion (toQueryOperation()),
// because before Calcite 1.22.0, during sql-to-rel conversion, the SqlWindow
// bounds state would be mutated as default when they are null (not specified).
// This bug is fixed in CALCITE-3877 of Calcite 1.23.0.
String originalQuery = context.toQuotedSqlString(query);
SqlNode validateQuery = context.getSqlValidator().validate(query);
// The LATERAL operator was eliminated during sql validation, thus the unparsed SQL
// does not contain LATERAL which is problematic,
// the issue was resolved in CALCITE-4077
// (always treat the table function as implicitly LATERAL).
String expandedQuery = context.expandSqlIdentifiers(originalQuery);
 PlannerQueryOperation operation = toQueryOperation(validateQuery, context);
 ResolvedSchema schema = operation.getResolvedSchema();
// the view column list in CREATE VIEW is optional, if it's not empty, we should update
// the column name with the names in view column list.
if (!viewFields.isEmpty()) {
// alias column names:
List<String> inputFieldNames = schema.getColumnNames();
List<String> aliasFieldNames = viewFields.stream().map(SqlNode::toString).collect(Collectors.toList());
if (inputFieldNames.size() != aliasFieldNames.size()) {
throw new ValidationException(String.format("VIEW definition and input fields not match:\n\tDef fields: %s.\n\tInput fields: %s.", aliasFieldNames, inputFieldNames));
}
schema = ResolvedSchema.physical(aliasFieldNames, schema.getColumnDataTypes());
}
return CatalogView.of(Schema.newBuilder().fromResolvedSchema(schema).build(), viewComment, originalQuery, expandedQuery, viewOptions);
}
| 3.26 |
flink_MetricRegistryImpl_startQueryService_rdh
|
/**
* Initializes the MetricQueryService.
*
* @param rpcService
* RpcService to create the MetricQueryService on
* @param resourceID
* resource ID used to disambiguate the actor name
*/
public void startQueryService(RpcService rpcService, ResourceID resourceID) {
synchronized(lock) {
Preconditions.checkState(!isShutdown(), "The metric registry has already been shut down.");
try {
metricQueryServiceRpcService = rpcService;
queryService = MetricQueryService.createMetricQueryService(rpcService, resourceID, maximumFramesize);
queryService.start();
} catch (Exception e) {
LOG.warn("Could not start MetricDumpActor. No metrics will be submitted to the WebInterface.", e);
}
}
}
| 3.26 |
flink_MetricRegistryImpl_closeAsync_rdh
|
/**
* Shuts down this registry and the associated {@link MetricReporter}.
*
* <p>NOTE: This operation is asynchronous and returns a future which is completed once the
* shutdown operation has been completed.
*
* @return Future which is completed once the {@link MetricRegistryImpl} is shut down.
 */
@Override
public CompletableFuture<Void> closeAsync() {
    synchronized (lock) {
        if (isShutdown) {
            return terminationFuture;
} else {
isShutdown = true;
final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3);
final Time gracePeriod = Time.seconds(1L);
if (metricQueryServiceRpcService != null) {
final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture = metricQueryServiceRpcService.closeAsync();
terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture);
}
Throwable throwable = null;
for (ReporterAndSettings reporterAndSettings : reporters) {
try {
reporterAndSettings.getReporter().close();
} catch (Throwable t) {
throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
}
}
reporters.clear();
if (throwable != null) {
terminationFutures.add(FutureUtils.completedExceptionally(new FlinkException("Could not shut down the metric reporters properly.", throwable)));
}
final CompletableFuture<Void> reporterExecutorShutdownFuture = ExecutorUtils.nonBlockingShutdown(gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS, reporterScheduledExecutor);
terminationFutures.add(reporterExecutorShutdownFuture);
            final CompletableFuture<Void> viewUpdaterExecutorShutdownFuture = ExecutorUtils.nonBlockingShutdown(gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS, viewUpdaterScheduledExecutor);
            terminationFutures.add(viewUpdaterExecutorShutdownFuture);
            FutureUtils.completeAll(terminationFutures)
                    .whenComplete(
                            (Void ignored, Throwable error) -> {
                                if (error != null) {
                                    terminationFuture.completeExceptionally(error);
                                } else {
                                    terminationFuture.complete(null);
                                }
                            });
return terminationFuture;
}
}
}
| 3.26 |
flink_MetricRegistryImpl_getMetricQueryServiceGatewayRpcAddress_rdh
|
/**
* Returns the address under which the {@link MetricQueryService} is reachable.
*
* @return address of the metric query service
*/
@Override
@Nullable
public String getMetricQueryServiceGatewayRpcAddress() {
if (queryService != null) {
return queryService.getSelfGateway(MetricQueryServiceGateway.class).getAddress();
} else {
return null;
}
}
| 3.26 |
flink_MetricRegistryImpl_register_rdh
|
// ------------------------------------------------------------------------
// Metrics (de)registration
// ------------------------------------------------------------------------
@Override
public void register(Metric metric, String metricName, AbstractMetricGroup group) {
synchronized(lock) {
if (isShutdown()) {
LOG.warn("Cannot register metric, because the MetricRegistry has already been shut down.");
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Registering metric {}.{}.", group.getLogicalScope(CharacterFilter.NO_OP_FILTER), metricName);
}
            if (reporters != null) {
                forAllReporters(MetricReporter::notifyOfAddedMetric, metric, metricName, group);
            }
            try {
                if (queryService != null) {
                    queryService.addMetric(metricName, metric, group);
                }
            } catch (Exception e) {
                LOG.warn("Error while registering metric: {}.", metricName, e);
            }
            try {
                if (metric instanceof View) {
                    if (viewUpdater == null) {
                        viewUpdater = new ViewUpdater(viewUpdaterScheduledExecutor);
                    }
                    viewUpdater.notifyOfAddedView((View) metric);
                }
            } catch (Exception e) {
                LOG.warn("Error while registering metric: {}.", metricName, e);
            }
}
}
}
| 3.26 |
flink_MetricRegistryImpl_isShutdown_rdh
|
/**
* Returns whether this registry has been shutdown.
*
* @return true, if this registry was shutdown, otherwise false
*/
public boolean isShutdown() {
synchronized(lock) {
return isShutdown;
}
}
| 3.26 |
flink_MetricRegistryImpl_getQueryService_rdh
|
// ------------------------------------------------------------------------
@VisibleForTesting
@Nullable
MetricQueryService getQueryService() {
return queryService;
}
| 3.26 |
flink_MurmurHashUtils_hashUnsafeBytes_rdh
|
/**
* Hash unsafe bytes.
*
* @param base
* base unsafe object
* @param offset
* offset for unsafe object
* @param lengthInBytes
* length in bytes
* @return hash code
*/
public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes) {
 return hashUnsafeBytes(base, offset, lengthInBytes, DEFAULT_SEED);
}
| 3.26 |
flink_MurmurHashUtils_fmix_rdh
|
// Finalization mix - force all bits of a hash block to avalanche
private static int fmix(int h1, int length) {
h1 ^= length;
return fmix(h1);
}
| 3.26 |
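For reference, a self-contained sketch of the standard MurmurHash3 32-bit finalizer that this kind of fmix step is based on. The constants are the published MurmurHash3 ones; it is assumed, not shown in the row above, that the single-argument fmix(int) overload performs equivalent mixing.

```java
class Murmur3FinalizerSketch {
    // Standard MurmurHash3 32-bit finalization mix: forces all bits to avalanche.
    static int fmix32(int h) {
        h ^= h >>> 16;
        h *= 0x85ebca6b;
        h ^= h >>> 13;
        h *= 0xc2b2ae35;
        h ^= h >>> 16;
        return h;
    }

    public static void main(String[] args) {
        // Nearby inputs produce widely different outputs after the finalizer.
        System.out.printf("%08x%n%08x%n", fmix32(1), fmix32(2));
    }
}
```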
flink_MurmurHashUtils_hashUnsafeBytesByWords_rdh
|
/**
* Hash unsafe bytes, length must be aligned to 4 bytes.
*
* @param base
* base unsafe object
* @param offset
* offset for unsafe object
* @param lengthInBytes
* length in bytes
* @return hash code
*/
public static int hashUnsafeBytesByWords(Object base, long offset, int lengthInBytes) {
return hashUnsafeBytesByWords(base, offset, lengthInBytes, DEFAULT_SEED);
}
| 3.26 |
flink_MurmurHashUtils_hashBytesByWords_rdh
|
/**
* Hash bytes in MemorySegment, length must be aligned to 4 bytes.
*
* @param segment
* segment.
* @param offset
* offset for MemorySegment
* @param lengthInBytes
* length in MemorySegment
* @return hash code
*/
public static int hashBytesByWords(MemorySegment segment, int offset, int lengthInBytes) {
    return hashBytesByWords(segment, offset, lengthInBytes, DEFAULT_SEED);
}
| 3.26 |
flink_SerdeUtils_serializeSplitAssignments_rdh
|
/**
 * Serializes a mapping from subtask ids to lists of assigned splits. The serialized format is
 * as follows:
*
* <pre>
* 4 bytes - number of subtasks
* 4 bytes - split serializer version
* N bytes - [assignment_for_subtask]
* 4 bytes - subtask id
* 4 bytes - number of assigned splits
* N bytes - [assigned_splits]
* 4 bytes - serialized split length
* N bytes - serialized splits
* </pre>
*
* @param splitAssignments
* a mapping from subtask ids to lists of assigned splits.
* @param splitSerializer
* the serializer of the split.
* @param <SplitT>
* the type of the splits.
* @param <C>
* the type of the collection to hold the assigned splits for a subtask.
* @return the serialized bytes of the given subtask to splits assignment mapping.
* @throws IOException
* when serialization failed.
*/
public static <SplitT extends SourceSplit, C extends Collection<SplitT>> byte[] serializeSplitAssignments(
        Map<Integer, C> splitAssignments, SimpleVersionedSerializer<SplitT> splitSerializer) throws IOException {
    try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(baos)) {
out.writeInt(splitAssignments.size());
// Split serializer version.
out.writeInt(splitSerializer.getVersion());
// Write assignments for subtasks.
for (Map.Entry<Integer, C> entry : splitAssignments.entrySet()) {
// Subtask ID
int subtaskId = entry.getKey();
Collection<SplitT> splitsForSubtask = entry.getValue();
// Number of the splits.
out.writeInt(subtaskId);
out.writeInt(splitsForSubtask.size());
for (SplitT split : splitsForSubtask) {
byte[] serializedSplit = splitSerializer.serialize(split);
out.writeInt(serializedSplit.length);
out.write(serializedSplit);
}
}
return baos.toByteArray();
}
}
| 3.26 |
flink_SerdeUtils_deserializeSplitAssignments_rdh
|
/**
* Deserialize the given bytes returned by {@link #serializeSplitAssignments(Map,
* SimpleVersionedSerializer)}.
*
* @param serialized
* the serialized bytes returned by {@link #serializeSplitAssignments(Map,
* SimpleVersionedSerializer)}.
* @param splitSerializer
* the split serializer for the splits.
* @param collectionSupplier
* the supplier for the {@link Collection} instance to hold the
* assigned splits for a subtask.
* @param <SplitT>
* the type of the splits.
* @param <C>
* the type of the collection to hold the assigned splits for a subtask.
* @return A mapping from subtask id to its assigned splits.
* @throws IOException
* when deserialization failed.
*/
public static <SplitT extends SourceSplit, C extends Collection<SplitT>> Map<Integer, C> deserializeSplitAssignments(byte[] serialized, SimpleVersionedSerializer<SplitT> splitSerializer, Function<Integer, C> collectionSupplier) throws IOException {
    try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
            DataInputStream in = new DataInputStream(bais)) {
        int numSubtasks = in.readInt();
        Map<Integer, C> splitsAssignments = new HashMap<>(numSubtasks);
        int serializerVersion = in.readInt();
        for (int i = 0; i < numSubtasks; i++) {
int subtaskId = in.readInt();
int numAssignedSplits = in.readInt();
C assignedSplits = collectionSupplier.apply(numAssignedSplits);
for (int j = 0; j < numAssignedSplits; j++) {
int serializedSplitSize = in.readInt();
byte[] serializedSplit = new byte[serializedSplitSize];
in.readFully(serializedSplit);
SplitT split = splitSerializer.deserialize(serializerVersion, serializedSplit);
assignedSplits.add(split);
}
splitsAssignments.put(subtaskId, assignedSplits);
}
return splitsAssignments;
}
}
| 3.26 |
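A hedged round-trip sketch of the two SerdeUtils methods shown above. The split type, its serializer, and the SerdeUtils import path are illustrative assumptions; only the method signatures follow the snippets.

```java
import org.apache.flink.api.connector.source.SourceSplit;
import org.apache.flink.connector.base.source.utils.SerdeUtils;
import org.apache.flink.core.io.SimpleVersionedSerializer;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;

class SplitAssignmentRoundTrip {
    // Hypothetical split type used only for this example.
    static final class NumberSplit implements SourceSplit {
        final int id;
        NumberSplit(int id) { this.id = id; }
        @Override public String splitId() { return String.valueOf(id); }
    }

    // Hypothetical serializer: a split is just its 4-byte id.
    static final SimpleVersionedSerializer<NumberSplit> SERIALIZER =
            new SimpleVersionedSerializer<NumberSplit>() {
                @Override public int getVersion() { return 1; }
                @Override public byte[] serialize(NumberSplit split) {
                    return ByteBuffer.allocate(4).putInt(split.id).array();
                }
                @Override public NumberSplit deserialize(int version, byte[] serialized) {
                    return new NumberSplit(ByteBuffer.wrap(serialized).getInt());
                }
            };

    public static void main(String[] args) throws IOException {
        Map<Integer, List<NumberSplit>> assignments = new HashMap<>();
        assignments.put(0, Arrays.asList(new NumberSplit(1), new NumberSplit(2)));
        assignments.put(1, Collections.singletonList(new NumberSplit(3)));

        byte[] bytes = SerdeUtils.serializeSplitAssignments(assignments, SERIALIZER);
        Map<Integer, List<NumberSplit>> restored =
                SerdeUtils.deserializeSplitAssignments(bytes, SERIALIZER, ArrayList::new);

        System.out.println(restored.get(0).size() + " splits restored for subtask 0");
    }
}
```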
flink_AbstractUdfStreamOperator_notifyCheckpointComplete_rdh
|
// ------------------------------------------------------------------------
// checkpointing and recovery
// ------------------------------------------------------------------------
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
super.notifyCheckpointComplete(checkpointId);
if (userFunction instanceof CheckpointListener) {
((CheckpointListener) (userFunction)).notifyCheckpointComplete(checkpointId);
}
}
| 3.26 |
flink_AbstractUdfStreamOperator_getUserFunctionParameters_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Since the streaming API does not implement any parametrization of functions via a
* configuration, the config returned here is actually empty.
*
* @return The user function parameters (currently empty)
*/
public Configuration getUserFunctionParameters() {
return new Configuration();
}
| 3.26 |
flink_AbstractUdfStreamOperator_setup_rdh
|
// ------------------------------------------------------------------------
// operator life cycle
// ------------------------------------------------------------------------
@Override
public void setup(StreamTask<?, ?> containingTask, StreamConfig config, Output<StreamRecord<OUT>> output) {
super.setup(containingTask, config, output);
FunctionUtils.setFunctionRuntimeContext(userFunction, getRuntimeContext());
}
| 3.26 |
flink_AbstractUdfStreamOperator_setOutputType_rdh
|
// ------------------------------------------------------------------------
// Output type configuration
// ------------------------------------------------------------------------
@Override
public void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
StreamingFunctionUtils.setOutputType(userFunction, outTypeInfo, executionConfig);
}
| 3.26 |
flink_OpenApiSpecGenerator_injectAsyncOperationResultSchema_rdh
|
/**
 * The {@link AsynchronousOperationResult} contains a generic 'operation' field whose schema can't be
 * properly extracted by swagger. This method injects it manually.
*
* <p>Resulting spec diff:
*
* <pre>
* AsynchronousOperationResult:
* type: object
* properties:
* operation:
* - type: object
* + oneOf:
* + - $ref: '#/components/schemas/AsynchronousOperationInfo'
* + - $ref: '#/components/schemas/SavepointInfo'
* </pre>
*/
private static void injectAsyncOperationResultSchema(final OpenAPI openApi, List<Schema> asyncOperationSchemas) {
final Schema schema = openApi.getComponents().getSchemas().get(AsynchronousOperationResult.class.getSimpleName());
if (schema != null) {
schema.getProperties().put(AsynchronousOperationResult.FIELD_NAME_OPERATION, new ComposedSchema().oneOf(asyncOperationSchemas));
    }
}
| 3.26 |
flink_OpenApiSpecGenerator_overrideIdSchemas_rdh
|
/**
* Various ID classes are effectively internal classes that aren't sufficiently annotated to
* work with automatic schema extraction. This method overrides the schema of these to a string
* regex pattern.
*
* <p>Resulting spec diff:
*
* <pre>
* JobID:
* - type: object
* - properties:
* - upperPart:
* - type: integer
* - format: int64
* - lowerPart:
* - type: integer
* - format: int64
* - bytes:
* - type: array
* - items:
* - type: string
* - format: byte
* + pattern: "[0-9a-f]{32}"
* + type: string
* </pre>
*/
private static void overrideIdSchemas(final OpenAPI openApi) {
final Schema idSchema = new Schema().type("string").pattern("[0-9a-f]{32}");
 openApi.getComponents()
         .addSchemas(JobID.class.getSimpleName(), idSchema)
         .addSchemas(JobVertexID.class.getSimpleName(), idSchema)
         .addSchemas(IntermediateDataSetID.class.getSimpleName(), idSchema)
         .addSchemas(TriggerId.class.getSimpleName(), idSchema)
         .addSchemas(ResourceID.class.getSimpleName(), idSchema);
}
| 3.26 |
flink_AbstractMultipleInputTransformation_getInputTypes_rdh
|
/**
* Returns the {@code TypeInformation} for the elements from the inputs.
*/
public List<TypeInformation<?>> getInputTypes() {
    return inputs.stream().map(Transformation::getOutputType).collect(Collectors.toList());
}
| 3.26 |
flink_AbstractMultipleInputTransformation_getOperatorFactory_rdh
|
/**
* Returns the {@code StreamOperatorFactory} of this Transformation.
*/
public StreamOperatorFactory<OUT> getOperatorFactory() {
return operatorFactory;
}
| 3.26 |
flink_CostEstimator_costOperator_rdh
|
// ------------------------------------------------------------------------
/**
* This method computes the cost of an operator. The cost is composed of cost for input
* shipping, locally processing an input, and running the operator.
*
* <p>It requires at least that all inputs are set and have a proper ship strategy set, which is
* not equal to <tt>NONE</tt>.
*
* @param n
* The node to compute the costs for.
*/
public void costOperator(PlanNode n) {
// initialize costs objects with no costs
 final Costs totalCosts = new Costs();
final long availableMemory = n.getGuaranteedAvailableMemory();
// add the shipping strategy costs
for (Channel channel : n.getInputs()) {
final Costs costs = new Costs();
// Plans that apply the same strategies, but at different points
// are equally expensive. For example, if a partitioning can be
// pushed below a Map function there is often no difference in plan
// costs between the pushed down version and the version that partitions
// after the Mapper. However, in those cases, we want the expensive
// strategy to appear later in the plan, as data reduction often occurs
// by large factors, while blowup is rare and typically by smaller fractions.
 // We achieve this by adding a small penalty to the FORWARD strategy,
// weighted by the current plan depth (steps to the earliest data source).
// that way, later FORWARDS are more expensive than earlier forwards.
// Note that this only applies to the heuristic costs.
switch (channel.getShipStrategy()) {
case NONE :
throw new CompilerException("Cannot determine costs: Shipping strategy has not been set for an input.");
case FORWARD :
// costs.addHeuristicNetworkCost(channel.getMaxDepth());
break;
 case PARTITION_RANDOM :
     addRandomPartitioningCost(channel, costs);
break;
case PARTITION_HASH :
 case PARTITION_CUSTOM :
addHashPartitioningCost(channel, costs);
break;
case PARTITION_RANGE :
addRangePartitionCost(channel, costs);
break;
case BROADCAST :
addBroadcastCost(channel, channel.getReplicationFactor(), costs);
break;
case PARTITION_FORCED_REBALANCE :
addRandomPartitioningCost(channel, costs);
break;
default :
throw new CompilerException("Unknown shipping strategy for input: " + channel.getShipStrategy());
}
 switch (channel.getLocalStrategy()) {
     case NONE :
         break;
     case SORT :
     case COMBININGSORT :
         addLocalSortCost(channel, costs);
         break;
     default :
         throw new CompilerException("Unsupported local strategy for input: " + channel.getLocalStrategy());
 }
 if ((channel.getTempMode() != null) && (channel.getTempMode() != TempMode.NONE)) {
     addArtificialDamCost(channel, 0, costs);
 }
// adjust with the cost weight factor
 if (channel.isOnDynamicPath()) {
     costs.multiplyWith(channel.getCostWeight());
 }
totalCosts.addCosts(costs);
}
 Channel firstInput = null;
Channel secondInput = null;
Costs driverCosts = new Costs();
int costWeight = 1;
// adjust with the cost weight factor
if (n.isOnDynamicPath()) {
costWeight = n.getCostWeight();
}
// get the inputs, if we have some
{
Iterator<Channel> channels = n.getInputs().iterator();
if (channels.hasNext()) {
firstInput = channels.next();
}
if (channels.hasNext()) {
secondInput = channels.next();
}
}
// determine the local costs
switch (n.getDriverStrategy()) {
case NONE :
case UNARY_NO_OP :
case BINARY_NO_OP :
case MAP :
     case MAP_PARTITION :
     case FLAT_MAP :
     case ALL_GROUP_REDUCE :
     case ALL_REDUCE :
         // these operations do not do any actual grouping, since every element is in the
         // same single group
     case CO_GROUP :
     case CO_GROUP_RAW :
     case SORTED_GROUP_REDUCE :
     case SORTED_REDUCE :
         // grouping or co-grouping over sorted streams for free
     case SORTED_GROUP_COMBINE :
         // partial grouping is always local and main memory resident. we should add a
         // relative cpu cost at some point
     case ALL_GROUP_COMBINE :
     case UNION :
         // pipelined local union is for free
         break;
case INNER_MERGE :
case FULL_OUTER_MERGE :
case LEFT_OUTER_MERGE :
     case RIGHT_OUTER_MERGE :
         addLocalMergeCost(firstInput, secondInput, driverCosts, costWeight);
break;
case HYBRIDHASH_BUILD_FIRST :
case RIGHT_HYBRIDHASH_BUILD_FIRST :
case LEFT_HYBRIDHASH_BUILD_FIRST :
case FULL_OUTER_HYBRIDHASH_BUILD_FIRST :
addHybridHashCosts(firstInput, secondInput, driverCosts, costWeight);
break;
case HYBRIDHASH_BUILD_SECOND :
case LEFT_HYBRIDHASH_BUILD_SECOND :
     case RIGHT_HYBRIDHASH_BUILD_SECOND :
     case FULL_OUTER_HYBRIDHASH_BUILD_SECOND :
addHybridHashCosts(secondInput, firstInput, driverCosts, costWeight);
break;
case HYBRIDHASH_BUILD_FIRST_CACHED :
addCachedHybridHashCosts(firstInput, secondInput, driverCosts, costWeight);
break;
case HYBRIDHASH_BUILD_SECOND_CACHED :
addCachedHybridHashCosts(secondInput, firstInput, driverCosts, costWeight);
break;
case NESTEDLOOP_BLOCKED_OUTER_FIRST :
addBlockNestedLoopsCosts(firstInput, secondInput, availableMemory, driverCosts, costWeight);
break;
case NESTEDLOOP_BLOCKED_OUTER_SECOND :
addBlockNestedLoopsCosts(secondInput, firstInput, availableMemory, driverCosts, costWeight);
break;
case NESTEDLOOP_STREAMED_OUTER_FIRST :
addStreamedNestedLoopsCosts(firstInput, secondInput, availableMemory, driverCosts, costWeight);
break;
case NESTEDLOOP_STREAMED_OUTER_SECOND :
addStreamedNestedLoopsCosts(secondInput, firstInput, availableMemory, driverCosts, costWeight);
break;
default :
throw new CompilerException("Unknown local strategy: " + n.getDriverStrategy().name());
 }
 totalCosts.addCosts(driverCosts);
n.setCosts(totalCosts);
}
| 3.26 |
flink_BufferConsumer_skip_rdh
|
/**
*
* @param bytesToSkip
* number of bytes to skip from currentReaderPosition
*/
void skip(int bytesToSkip) {
 writerPosition.update();
 int cachedWriterPosition = writerPosition.getCached();
 int bytesReadable = cachedWriterPosition - currentReaderPosition;
 checkState(bytesToSkip <= bytesReadable, "bytes to skip beyond readable range");
 currentReaderPosition += bytesToSkip;
}
| 3.26 |
flink_BufferConsumer_copy_rdh
|
/**
 * Returns a retained copy with separate indexes. This allows reading from the same {@link MemorySegment} twice.
*
* <p>WARNING: the newly returned {@link BufferConsumer} will have its reader index copied from
* the original buffer. In other words, data already consumed before copying will not be visible
* to the returned copies.
*
* @return a retained copy of self with separate indexes
*/
public BufferConsumer copy() {
return new BufferConsumer(buffer.retainBuffer(), writerPosition.positionMarker, currentReaderPosition);
}
| 3.26 |
flink_BufferConsumer_copyWithReaderPosition_rdh
|
/**
* Returns a retained copy with separate indexes and sets the reader position to the given
* value. This allows to read from the same {@link MemorySegment} twice starting from the
* supplied position.
*
* @param readerPosition
* the new reader position. Can be less than the {@link #currentReaderPosition}, but may not exceed the current writer's position.
* @return a retained copy of self with separate indexes
*/
public BufferConsumer copyWithReaderPosition(int readerPosition) {
return new BufferConsumer(buffer.retainBuffer(), writerPosition.positionMarker, readerPosition);
}
| 3.26 |
flink_BufferConsumer_isFinished_rdh
|
/**
* Checks whether the {@link BufferBuilder} has already been finished.
*
* <p>BEWARE: this method accesses the cached value of the position marker which is only updated
* after calls to {@link #build()} and {@link #skip(int)}!
*
* @return <tt>true</tt> if the buffer was finished, <tt>false</tt> otherwise
*/
public boolean isFinished() {
 return writerPosition.m1();
}
/**
*
* @return sliced {@link Buffer} containing the not yet consumed data. Returned {@link Buffer}
shares the reference counter with the parent {@link BufferConsumer}
| 3.26 |
flink_BufferConsumer_m0_rdh
|
/**
* Returns true if there is new data available for reading.
*/
public boolean m0() {
return currentReaderPosition < writerPosition.getLatest();
}
| 3.26 |
flink_RawByteArrayConverter_create_rdh
|
// --------------------------------------------------------------------------------------------
// Factory method
// --------------------------------------------------------------------------------------------
public static RawByteArrayConverter<?> create(DataType dataType) {
 final LogicalType logicalType = dataType.getLogicalType();
 final TypeSerializer<?> serializer;
 if (logicalType instanceof TypeInformationRawType) {
     serializer = ((TypeInformationRawType<?>) logicalType).getTypeInformation().createSerializer(new ExecutionConfig());
 } else {
     serializer = ((RawType<?>) dataType.getLogicalType()).getTypeSerializer();
}
return new RawByteArrayConverter<>(serializer);
}
| 3.26 |
flink_CollectAggFunction_getArgumentDataTypes_rdh
|
// Planning
// --------------------------------------------------------------------------------------------
@Override
public List<DataType> getArgumentDataTypes() {
return Collections.singletonList(elementDataType);
}
| 3.26 |
flink_SinkFunction_finish_rdh
|
/**
* This method is called at the end of data processing.
*
* <p>The method is expected to flush all remaining buffered data. Exceptions will cause the
* pipeline to be recognized as failed, because the last data items are not processed properly.
* You may use this method to flush remaining buffered elements in the state into transactions
* which you can commit in the last checkpoint.
*
 * <p><b>NOTE:</b> This method does not need to close any resources. You should release external
* resources in the {@link RichSinkFunction#close()} method.
*
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
default void finish() throws Exception {
}
| 3.26 |
flink_SinkFunction_writeWatermark_rdh
|
/**
* Writes the given watermark to the sink. This function is called for every watermark.
*
* <p>This method is intended for advanced sinks that propagate watermarks.
*
* @param watermark
* The watermark.
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
default void writeWatermark(Watermark watermark) throws Exception { }
| 3.26 |
flink_SinkFunction_invoke_rdh
|
/**
* Writes the given value to the sink. This function is called for every record.
*
* <p>You have to override this method when implementing a {@code SinkFunction}, this is a
* {@code default} method for backward compatibility with the old-style method only.
*
* @param value
* The input record.
* @param context
* Additional context about the input record.
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
default void invoke(IN value, Context context) throws Exception {
invoke(value);
}
| 3.26 |
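A minimal sketch of the contract described in the SinkFunction rows above, using a hypothetical buffering sink. The class name and buffering behaviour are illustrative, not from Flink.

```java
import org.apache.flink.streaming.api.functions.sink.SinkFunction;

import java.util.ArrayList;
import java.util.List;

// Hypothetical sink: buffers records in invoke() and flushes them in finish().
public class BufferingPrintSink implements SinkFunction<String> {

    private final List<String> buffer = new ArrayList<>();

    @Override
    public void invoke(String value, Context context) throws Exception {
        // called once per record
        buffer.add(value);
    }

    @Override
    public void finish() throws Exception {
        // flush remaining buffered data at the end of input; no resources are closed here
        buffer.forEach(System.out::println);
        buffer.clear();
    }
}
```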
flink_DoubleValue_toString_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public String toString() {
    return String.valueOf(this.value);
}
| 3.26 |
flink_DoubleValue_read_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
this.value = in.readDouble();
}
| 3.26 |
flink_DoubleValue_m0_rdh
|
/**
* Sets the value of the encapsulated primitive double.
*
* @param value
* the new value of the encapsulated primitive double.
*/
public void m0(double value) {
this.value = value;
}
| 3.26 |
flink_DoubleValue_getBinaryLength_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int getBinaryLength() {
return 8;
}
| 3.26 |
flink_DoubleValue_getValue_rdh
|
/**
* Returns the value of the encapsulated primitive double.
*
* @return the value of the encapsulated primitive double.
*/
public double getValue() {
    return this.value;
}
| 3.26 |
flink_JobExceptionsInfoWithHistory_equals_rdh
|
// hashCode and equals are necessary for the test classes deriving from
// RestResponseMarshallingTestBase
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
 if (o == null || getClass() != o.getClass() || !super.equals(o)) {
     return false;
 }
 RootExceptionInfo that = (RootExceptionInfo) o;
 return getConcurrentExceptions().equals(that.getConcurrentExceptions());
}
| 3.26 |
flink_LongHashPartition_updateIndex_rdh
|
/**
* Update the address in array for given key.
*/
private void updateIndex(long key, int hashCode, long address, int size, MemorySegment dataSegment, int currentPositionInSegment) throws IOException {
assert f2 <= (numBuckets / 2);
 int bucketId = findBucket(hashCode);
 // each bucket occupies 16 bytes (long key + long pointer to data address)
 int bucketOffset = bucketId * SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES;
MemorySegment segment = buckets[bucketOffset >>> segmentSizeBits];
int segOffset = bucketOffset & segmentSizeMask;
long currAddress;
while (true) {
currAddress = segment.getLong(segOffset + 8);
if ((segment.getLong(segOffset) != key) && (currAddress != INVALID_ADDRESS)) {
// hash conflicts, the bucket is occupied by another key
// TODO test Conflict resolution:
// now: +1 +1 +1... cache friendly but more conflict, so we set factor to 0.5
// other1: +1 +2 +3... less conflict, factor can be 0.75
// other2: Secondary hashCode... less and less conflict, but need compute hash again
 bucketId = (bucketId + 1) & numBucketsMask;
if ((segOffset + SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES) < segmentSize) {
// if the new bucket still in current segment, we only need to update offset
// within this segment
segOffset += SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES;
} else {
// otherwise, we should re-calculate segment and offset
bucketOffset = bucketId * 16;
segment = buckets[bucketOffset >>> segmentSizeBits];
segOffset = bucketOffset & segmentSizeMask;
}
} else {
break;
}
}
if (currAddress == INVALID_ADDRESS) {
// this is the first value for this key, put the address in array.
segment.putLong(segOffset, key);
 segment.putLong(segOffset + 8, address);
f2 += 1;
// dataSegment may be null if we only have to rehash bucket area
if (dataSegment != null) {
dataSegment.putLong(currentPositionInSegment, toAddrAndLen(INVALID_ADDRESS, size));
}
if ((f2 * 2) > numBuckets) {
resize();
}
} else {
// there are some values for this key, put the address in the front of them.
dataSegment.putLong(currentPositionInSegment, toAddrAndLen(currAddress, size));
segment.putLong(segOffset + 8, address);
}
}
| 3.26 |
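To make the probing scheme above concrete, here is a self-contained toy version of the same idea: one long key plus one long "address" per 16-byte bucket, +1 linear probing, power-of-two capacity. It illustrates the strategy only; it is not the Flink class and omits resizing and linked-value handling, so the capacity must stay larger than the number of distinct keys.

```java
final class ToyLongTable {
    private static final long INVALID = Long.MIN_VALUE; // marks an empty bucket

    private final long[] keys;
    private final long[] addresses;
    private final int mask;

    ToyLongTable(int capacityPowerOfTwo) {
        keys = new long[capacityPowerOfTwo];
        addresses = new long[capacityPowerOfTwo];
        java.util.Arrays.fill(addresses, INVALID);
        mask = capacityPowerOfTwo - 1;
    }

    void put(long key, long address) {
        int bucket = Long.hashCode(key) & mask;
        while (addresses[bucket] != INVALID && keys[bucket] != key) {
            bucket = (bucket + 1) & mask; // linear probing: +1, +1, ...
        }
        keys[bucket] = key;
        addresses[bucket] = address;
    }

    long get(long key) {
        int bucket = Long.hashCode(key) & mask;
        while (addresses[bucket] != INVALID) {
            if (keys[bucket] == key) {
                return addresses[bucket];
            }
            bucket = (bucket + 1) & mask;
        }
        return INVALID; // not found
    }
}
```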
flink_LongHashPartition_valueIter_rdh
|
/**
* Returns an iterator of BinaryRowData for multiple linked values.
*/
MatchIterator valueIter(long address) {
iterator.set(address);
return iterator;
}
| 3.26 |
flink_LongHashPartition_get_rdh
|
/**
* Returns an iterator for all the values for the given key, or null if no value found.
*/
public MatchIterator get(long key, int hashCode) {
    int bucket = findBucket(hashCode);
    int bucketOffset = bucket << 4;
    MemorySegment segment = buckets[bucketOffset >>> segmentSizeBits];
    int segOffset = bucketOffset & segmentSizeMask;
while (true) {
long address = segment.getLong(segOffset + 8);
if (address != INVALID_ADDRESS) {
if (segment.getLong(segOffset) == key) {
return valueIter(address);
} else {
bucket = (bucket + 1) & numBucketsMask;
             if ((segOffset + 16) < segmentSize) {
                 segOffset += 16;
} else {
bucketOffset = bucket << 4;
segOffset = bucketOffset & segmentSizeMask;
segment = buckets[bucketOffset >>> segmentSizeBits];
}
}
} else {
return valueIter(INVALID_ADDRESS);
}
}
}
| 3.26 |
flink_LongHashPartition_setReadPosition_rdh
|
// ------------------ PagedInputView for read --------------------
@Override
public void setReadPosition(long pointer) {
final int bufferNum = ((int) (pointer >>> this.segmentSizeBits));
final int offset = ((int) (pointer & segmentSizeMask));
this.currentBufferNum = bufferNum;
seekInput(this.partitionBuffers[bufferNum], offset, bufferNum < (partitionBuffers.length - 1) ? segmentSize : f0);
}
| 3.26 |
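The read position above is a single long that packs the segment index into the high bits and the offset into the low bits. A small self-contained illustration, with an assumed 32 KiB segment size (segmentSizeBits = 15); the real partition derives these values from its own segment size.

```java
class ReadPointerSketch {
    public static void main(String[] args) {
        // Assumed values for illustration only.
        int segmentSizeBits = 15;                       // 32 KiB segments
        long segmentSizeMask = (1L << segmentSizeBits) - 1;

        long pointer = (3L << segmentSizeBits) | 1234;  // points into the 4th buffer, offset 1234

        int bufferNum = (int) (pointer >>> segmentSizeBits); // 3
        int offset = (int) (pointer & segmentSizeMask);      // 1234

        System.out.println(bufferNum + " / " + offset);
    }
}
```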
flink_SupportsRowLevelDelete_getRowLevelDeleteMode_rdh
|
/**
 * The planner will rewrite a DELETE statement into a query based on the {@link RowLevelDeleteInfo},
 * keeping the delete query unchanged by default (in DELETED_ROWS mode), or changing the
 * query to the complementary set in REMAINING_ROWS mode.
*
* <p>Take the following SQL as an example:
*
* <pre>{@code DELETE FROM t WHERE y = 2;}</pre>
*
* <p>If returns {@link SupportsRowLevelDelete.RowLevelDeleteMode#DELETED_ROWS}, the sink
* will get the rows to be deleted which match the filter [y = 2].
*
* <p>If returns {@link SupportsRowLevelDelete.RowLevelDeleteMode#REMAINING_ROWS}, the sink
* will get the rows which don't match the filter [y = 2].
*
* <p>Note: All rows will be of RowKind#DELETE when RowLevelDeleteMode is DELETED_ROWS, and
* RowKind#INSERT when RowLevelDeleteMode is REMAINING_ROWS.
*/
default RowLevelDeleteMode getRowLevelDeleteMode() {
return RowLevelDeleteMode.DELETED_ROWS;
}
| 3.26 |
flink_AggregatingStateDescriptor_getAggregateFunction_rdh
|
/**
* Returns the aggregate function to be used for the state.
*/
public AggregateFunction<IN, ACC, OUT> getAggregateFunction() {
return aggFunction;
}
| 3.26 |
flink_DecodingFormat_listReadableMetadata_rdh
|
/**
* Returns the map of metadata keys and their corresponding data types that can be produced by
* this format for reading. By default, this method returns an empty map.
*
* <p>Metadata columns add additional columns to the table's schema. A decoding format is
* responsible to add requested metadata columns at the end of produced rows.
*
* <p>See {@link SupportsReadingMetadata} for more information.
*
* <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link SupportsReadingMetadata} and calls this method in {@link SupportsReadingMetadata#listReadableMetadata()}.
*/
 default Map<String, DataType> listReadableMetadata() {
return Collections.emptyMap();
}
| 3.26 |
flink_DecodingFormat_applyReadableMetadata_rdh
|
/**
* Provides a list of metadata keys that the produced row must contain as appended metadata
* columns. By default, this method throws an exception if metadata keys are defined.
*
* <p>See {@link SupportsReadingMetadata} for more information.
*
* <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link SupportsReadingMetadata} and calls this method in {@link SupportsReadingMetadata#applyReadableMetadata(List, DataType)}.
*/
@SuppressWarnings("unused")
default void applyReadableMetadata(List<String> metadataKeys) {
throw new UnsupportedOperationException("A decoding format must override this method to apply metadata keys.");
}
| 3.26 |
flink_AbstractAutoCloseableRegistry_registerCloseable_rdh
|
/**
 * Registers an {@link AutoCloseable} with the registry. In case the registry is already closed,
 * this method throws an {@link IOException} and closes the passed {@link AutoCloseable}.
*
* @param closeable
* Closeable to register.
* @throws IOException
* exception when the registry was closed before.
*/
public final void registerCloseable(C closeable) throws IOException {
 if (null == closeable) {
return;
}
synchronized(getSynchronizationLock()) {
if (!closed) {
doRegister(closeable, closeableToRef);
         return;
     }
}
IOUtils.closeQuietly(closeable);
throw new IOException("Cannot register Closeable, registry is already closed. Closing argument.");
}
/**
* Removes a {@link Closeable}
| 3.26 |
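A hedged usage sketch with CloseableRegistry, a concrete registry in org.apache.flink.core.fs (the stream variable and class name below are illustrative): register resources as they are opened, then let close() sweep whatever is still registered.

```java
import org.apache.flink.core.fs.CloseableRegistry;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

class CloseableRegistryUsageSketch {
    public static void main(String[] args) throws IOException {
        CloseableRegistry registry = new CloseableRegistry();

        // Register a resource; if the registry were already closed, this call would close the
        // stream and throw an IOException instead.
        InputStream in = new ByteArrayInputStream(new byte[]{1, 2, 3});
        registry.registerCloseable(in);

        // Closing the registry closes every still-registered resource.
        registry.close();
    }
}
```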
flink_AbstractAutoCloseableRegistry_removeCloseableInternal_rdh
|
/**
* Removes a mapping from the registry map, respecting locking.
*/
protected final boolean removeCloseableInternal(R closeable) {
synchronized(getSynchronizationLock()) {
return closeableToRef.remove(closeable) != null;
     }
 }
| 3.26 |
flink_MiniCluster_getHaLeadershipControl_rdh
|
/**
* Returns {@link HaLeadershipControl} if enabled.
*
* <p>{@link HaLeadershipControl} allows granting and revoking leadership of HA components, e.g.
* JobManager. The method return {@link Optional#empty()} if the control is not enabled in
* {@link MiniClusterConfiguration}.
*
* <p>Enabling this feature disables {@link HighAvailabilityOptions#HA_MODE} option.
*/
public Optional<HaLeadershipControl> getHaLeadershipControl() {
synchronized(lock) {
return haServices instanceof HaLeadershipControl ? Optional.of(((HaLeadershipControl) (haServices))) : Optional.empty();
}
}
| 3.26 |
flink_MiniCluster_createRemoteRpcService_rdh
|
/**
* Factory method to instantiate the remote RPC service.
*
* @param configuration
* Flink configuration.
* @param externalAddress
* The external address to access the RPC service.
* @param externalPortRange
* The external port range to access the RPC service.
* @param bindAddress
* The address to bind the RPC service to.
* @param rpcSystem
* @return The instantiated RPC service
*/
protected RpcService createRemoteRpcService(Configuration configuration, String externalAddress, String externalPortRange, String bindAddress, RpcSystem rpcSystem) throws Exception {
return rpcSystem.remoteServiceBuilder(configuration, externalAddress, externalPortRange).withBindAddress(bindAddress).withExecutorConfiguration(RpcUtils.getTestForkJoinExecutorConfiguration()).createAndStart();
}
| 3.26 |
flink_MiniCluster_runDetached_rdh
|
// ------------------------------------------------------------------------
// running jobs
// ------------------------------------------------------------------------
/**
 * This method executes a job in detached mode. The method returns immediately after the job
 * has been submitted, without waiting for the job to complete.
*
* @param job
* The Flink job to execute
* @throws JobExecutionException
* Thrown if anything went amiss during initial job launch, or if
* the job terminally failed.
*/
 public void runDetached(JobGraph job) throws JobExecutionException, InterruptedException {
     checkNotNull(job, "job is null");
final CompletableFuture<JobSubmissionResult> submissionFuture = submitJob(job);
try {
submissionFuture.get();
} catch (ExecutionException e) {
throw new JobExecutionException(job.getJobID(), ExceptionUtils.stripExecutionException(e));
}
}
| 3.26 |
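A hedged sketch of running a job in detached mode on a MiniCluster. The configuration values and the jobGraph parameter are illustrative; building a JobGraph is out of scope here.

```java
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;

class MiniClusterDetachedSketch {
    static void runDetached(JobGraph jobGraph) throws Exception {
        MiniClusterConfiguration config = new MiniClusterConfiguration.Builder()
                .setNumTaskManagers(1)
                .setNumSlotsPerTaskManager(2)
                .build();

        // MiniCluster implements AutoCloseableAsync, so try-with-resources shuts it down.
        try (MiniCluster miniCluster = new MiniCluster(config)) {
            miniCluster.start();
            // returns as soon as the job has been submitted
            miniCluster.runDetached(jobGraph);
        }
    }
}
```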
flink_MiniCluster_start_rdh
|
/**
* Starts the mini cluster, based on the configured properties.
*
* @throws Exception
* This method passes on any exception that occurs during the startup of the
* mini cluster.
*/
public void start() throws Exception {
synchronized(lock) {
checkState(!running, "MiniCluster is already running");
LOG.info("Starting Flink Mini Cluster");
LOG.debug("Using configuration {}", miniClusterConfiguration);
final Configuration configuration = miniClusterConfiguration.getConfiguration();
final boolean useSingleRpcService = miniClusterConfiguration.getRpcServiceSharing() == RpcServiceSharing.SHARED;
try {
             workingDirectory = WorkingDirectory.create(ClusterEntrypointUtils.generateWorkingDirectoryFile(configuration, Optional.of(PROCESS_WORKING_DIR_BASE), "minicluster_" + ResourceID.generate()));
             initializeIOFormatClasses(configuration);
rpcSystem = rpcSystemSupplier.get();
LOG.info("Starting Metrics Registry");metricRegistry = m2(configuration, rpcSystem.deref().getMaximumMessageSizeInBytes(configuration));
// bring up all the RPC services
LOG.info("Starting RPC Service(s)");
final RpcServiceFactory dispatcherResourceManagerComponentRpcServiceFactory;
final RpcService metricQueryServiceRpcService;
if (useSingleRpcService) {
// we always need the 'commonRpcService' for auxiliary calls
commonRpcService = createLocalRpcService(configuration, rpcSystem.deref());
final CommonRpcServiceFactory commonRpcServiceFactory = new CommonRpcServiceFactory(commonRpcService);
taskManagerRpcServiceFactory = commonRpcServiceFactory;
dispatcherResourceManagerComponentRpcServiceFactory = commonRpcServiceFactory;
metricQueryServiceRpcService = MetricUtils.startLocalMetricsRpcService(configuration, rpcSystem.deref());
} else {
// start a new service per component, possibly with custom bind addresses
final String jobManagerExternalAddress = miniClusterConfiguration.getJobManagerExternalAddress();
final String taskManagerExternalAddress = miniClusterConfiguration.getTaskManagerExternalAddress();
final String jobManagerExternalPortRange = miniClusterConfiguration.getJobManagerExternalPortRange();
final String taskManagerExternalPortRange = miniClusterConfiguration.getTaskManagerExternalPortRange();
final String jobManagerBindAddress = miniClusterConfiguration.getJobManagerBindAddress();
final String taskManagerBindAddress =
miniClusterConfiguration.getTaskManagerBindAddress();
dispatcherResourceManagerComponentRpcServiceFactory = new DedicatedRpcServiceFactory(configuration, jobManagerExternalAddress, jobManagerExternalPortRange, jobManagerBindAddress, rpcSystem.deref());
taskManagerRpcServiceFactory = new DedicatedRpcServiceFactory(configuration, taskManagerExternalAddress, taskManagerExternalPortRange, taskManagerBindAddress, rpcSystem.deref());
// we always need the 'commonRpcService' for auxiliary calls
// bind to the JobManager address with port 0
commonRpcService = createRemoteRpcService(configuration, jobManagerBindAddress, 0, rpcSystem.deref());
metricQueryServiceRpcService = MetricUtils.startRemoteMetricsRpcService(configuration, commonRpcService.getAddress(), null, rpcSystem.deref());
}
metricRegistry.startQueryService(metricQueryServiceRpcService, null);
processMetricGroup = MetricUtils.instantiateProcessMetricGroup(metricRegistry, RpcUtils.getHostname(commonRpcService), ConfigurationUtils.getSystemResourceMetricsProbingInterval(configuration));
ioExecutor = Executors.newFixedThreadPool(ClusterEntrypointUtils.getPoolSize(configuration), new ExecutorThreadFactory("mini-cluster-io"));
delegationTokenManager = DefaultDelegationTokenManagerFactory.create(configuration, miniClusterConfiguration.getPluginManager(), commonRpcService.getScheduledExecutor(), ioExecutor);
// Obtaining delegation tokens and propagating them to the local JVM receivers in a
// one-time fashion is required because BlobServer may connect to external file
// systems
delegationTokenManager.obtainDelegationTokens();
delegationTokenReceiverRepository = new DelegationTokenReceiverRepository(configuration, miniClusterConfiguration.getPluginManager());
haServicesFactory = createHighAvailabilityServicesFactory(configuration);
haServices = createHighAvailabilityServices(configuration, ioExecutor);
blobServer = BlobUtils.createBlobServer(configuration,
Reference.borrowed(workingDirectory.getBlobStorageDirectory()), haServices.createBlobStore());
blobServer.start();
heartbeatServices = HeartbeatServices.fromConfiguration(configuration);
blobCacheService = BlobUtils.createBlobCacheService(configuration, Reference.borrowed(workingDirectory.getBlobStorageDirectory()), haServices.createBlobStore(), new InetSocketAddress(InetAddress.getLocalHost(),
blobServer.getPort()));
startTaskManagers();
MetricQueryServiceRetriever metricQueryServiceRetriever = new RpcMetricQueryServiceRetriever(metricRegistry.getMetricQueryServiceRpcService());
setupDispatcherResourceManagerComponents(configuration, dispatcherResourceManagerComponentRpcServiceFactory, metricQueryServiceRetriever);
resourceManagerLeaderRetriever = haServices.getResourceManagerLeaderRetriever();
dispatcherLeaderRetriever = haServices.getDispatcherLeaderRetriever();
clusterRestEndpointLeaderRetrievalService = haServices.getClusterRestEndpointLeaderRetriever();
dispatcherGatewayRetriever = new RpcGatewayRetriever<>(commonRpcService, DispatcherGateway.class, DispatcherId::fromUuid, new ExponentialBackoffRetryStrategy(21, Duration.ofMillis(5L), Duration.ofMillis(20L)));
resourceManagerGatewayRetriever = new RpcGatewayRetriever<>(commonRpcService, ResourceManagerGateway.class, ResourceManagerId::fromUuid, new ExponentialBackoffRetryStrategy(21, Duration.ofMillis(5L), Duration.ofMillis(20L)));
             webMonitorLeaderRetriever = new LeaderRetriever();
resourceManagerLeaderRetriever.start(resourceManagerGatewayRetriever);
dispatcherLeaderRetriever.start(dispatcherGatewayRetriever);
clusterRestEndpointLeaderRetrievalService.start(webMonitorLeaderRetriever);
} catch (Exception e) {
// cleanup everything
try {
close();
} catch (Exception ee) {
e.addSuppressed(ee);
}
throw e;
}
// create a new termination future
terminationFuture = new CompletableFuture<>();
// now officially mark this as running
         running = true;
         LOG.info("Flink Mini Cluster started successfully");
}
}
| 3.26 |
flink_MiniCluster_create_rdh
|
/**
* Create a new {@link TerminatingFatalErrorHandler} for the {@link TaskExecutor} with the
* given index.
*
* @param index
* into the {@link #taskManagers} collection to identify the correct {@link TaskExecutor}.
* @return {@link TerminatingFatalErrorHandler} for the given index
*/
@GuardedBy("lock")
private TerminatingFatalErrorHandler create(int index) {
return new TerminatingFatalErrorHandler(index);
}
| 3.26 |
flink_MiniCluster_getArchivedExecutionGraph_rdh
|
// ------------------------------------------------------------------------
// Accessing jobs
// ------------------------------------------------------------------------
public CompletableFuture<ArchivedExecutionGraph> getArchivedExecutionGraph(JobID jobId) {
return runDispatcherCommand(dispatcherGateway -> dispatcherGateway.requestExecutionGraphInfo(jobId, rpcTimeout).thenApply(ExecutionGraphInfo::getArchivedExecutionGraph));
}
| 3.26 |
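A small usage sketch for the accessor above. The helper class and method names are hypothetical; getState() on ArchivedExecutionGraph is assumed to be the standard accessor for the job's status, and the job is assumed to have already been submitted to this MiniCluster.

import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.JobStatus;
import org.apache.flink.runtime.executiongraph.ArchivedExecutionGraph;
import org.apache.flink.runtime.minicluster.MiniCluster;

// Hypothetical helper, not part of Flink.
final class ArchivedGraphLookup {
    // Blocks on the returned future and reports the job's status.
    static JobStatus statusOf(MiniCluster miniCluster, JobID jobId) throws Exception {
        ArchivedExecutionGraph graph = miniCluster.getArchivedExecutionGraph(jobId).get();
        return graph.getState();
    }
}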
flink_MiniCluster_m2_rdh
|
// ------------------------------------------------------------------------
/**
* Factory method to create the metric registry for the mini cluster.
*
* @param config
* The configuration of the mini cluster
* @param maximumMessageSizeInBytes
* the maximum message size
*/
protected MetricRegistryImpl m2(Configuration config, long maximumMessageSizeInBytes) {
return new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(config, maximumMessageSizeInBytes), ReporterSetup.fromConfiguration(config, miniClusterConfiguration.getPluginManager()));
}
| 3.26 |
flink_MiniCluster_initializeIOFormatClasses_rdh
|
// ------------------------------------------------------------------------
// miscellaneous utilities
// ------------------------------------------------------------------------
private void initializeIOFormatClasses(Configuration configuration) {
// TODO: That we still have to call something like this is a crime against humanity
FileOutputFormat.initDefaultsFromConfiguration(configuration);
}
| 3.26 |
flink_MiniCluster_closeAsync_rdh
|
/**
* Shuts down the mini cluster, failing all currently executing jobs. The mini cluster can be
* started again by calling the {@link #start()} method again.
*
* <p>This method shuts down all started services and components, even if an exception occurs in
* the process of shutting down some component.
*
* @return Future which is completed once the MiniCluster has been completely shut down
*/
@Override
public CompletableFuture<Void> closeAsync() {
return closeInternal(true);
}
| 3.26 |
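A minimal sketch of the shutdown path described above; the helper class is hypothetical.

import java.util.concurrent.CompletableFuture;
import org.apache.flink.runtime.minicluster.MiniCluster;

// Hypothetical helper, not part of Flink.
final class MiniClusterShutdown {
    // Triggers the asynchronous shutdown and waits for it to complete.
    static void stopAndAwait(MiniCluster miniCluster) {
        CompletableFuture<Void> terminationFuture = miniCluster.closeAsync();
        terminationFuture.join();
        // After this point the cluster can be started again via start().
    }
}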
flink_MiniCluster_startTaskManager_rdh
|
/**
* Starts additional TaskManager process.
*
 * <p>When the MiniCluster starts up, it always starts {@link MiniClusterConfiguration#getNumTaskManagers} TaskManagers. All TaskManagers are indexed from
 * 0 to the number of TaskManagers started so far, minus one. This method starts a TaskManager
 * with the next index, which is the number of TaskManagers started so far. The index always
* increases with each new started TaskManager. The indices of terminated TaskManagers are not
* reused after {@link #terminateTaskManager(int)}.
*/
public void startTaskManager() throws Exception {
synchronized(lock) {
final Configuration v28 = miniClusterConfiguration.getConfiguration();
final TaskExecutor taskExecutor = TaskManagerRunner.startTaskManager(v28, new ResourceID(UUID.randomUUID().toString()), taskManagerRpcServiceFactory.createRpcService(), haServices, heartbeatServices, metricRegistry, blobCacheService, useLocalCommunication(), ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES, workingDirectory.createSubWorkingDirectory("tm_" + taskManagers.size()), taskManagerTerminatingFatalErrorHandlerFactory.create(taskManagers.size()), delegationTokenReceiverRepository);
taskExecutor.start();
taskManagers.add(taskExecutor);
}
}
| 3.26 |
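A sketch of scaling the cluster out, illustrating the indexing rule from the javadoc above; the helper class is hypothetical.

import org.apache.flink.runtime.minicluster.MiniCluster;

// Hypothetical helper, not part of Flink.
final class ScaleOutExample {
    // If the cluster currently has N TaskManagers (indices 0..N-1), the first call
    // below creates index N, the next one N + 1, and so on.
    static void addTaskManagers(MiniCluster miniCluster, int count) throws Exception {
        for (int i = 0; i < count; i++) {
            miniCluster.startTaskManager();
        }
    }
}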
flink_MiniCluster_executeJobBlocking_rdh
|
/**
* This method runs a job in blocking mode. The method returns only after the job completed
* successfully, or after it failed terminally.
*
* @param job
* The Flink job to execute
* @return The result of the job execution
* @throws JobExecutionException
* Thrown if anything went amiss during initial job launch, or if
* the job terminally failed.
*/
public JobExecutionResult executeJobBlocking(JobGraph job) throws JobExecutionException, InterruptedException {
checkNotNull(job, "job is null");
final CompletableFuture<JobSubmissionResult> submissionFuture = submitJob(job);
final CompletableFuture<JobResult> jobResultFuture = submissionFuture.thenCompose((JobSubmissionResult ignored) -> requestJobResult(job.getJobID()));
final JobResult jobResult;
try {
jobResult = jobResultFuture.get();
} catch (ExecutionException e) {
throw new JobExecutionException(job.getJobID(),
"Could not retrieve JobResult.", ExceptionUtils.stripExecutionException(e));
}
try {
return jobResult.toJobExecutionResult(Thread.currentThread().getContextClassLoader());
} catch (IOException | ClassNotFoundException e) {
throw new JobExecutionException(job.getJobID(), e);
}
}
| 3.26 |
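A usage sketch for blocking execution, assuming a JobGraph has already been built elsewhere; the helper class is hypothetical.

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.minicluster.MiniCluster;

// Hypothetical helper, not part of Flink.
final class BlockingExecution {
    // Submits the job and blocks until it reaches a terminal state; a terminally
    // failed job surfaces as a JobExecutionException from executeJobBlocking.
    static long runAndGetNetRuntime(MiniCluster miniCluster, JobGraph jobGraph) throws Exception {
        JobExecutionResult result = miniCluster.executeJobBlocking(jobGraph);
        return result.getNetRuntime();
    }
}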
flink_MiniCluster_terminateTaskManager_rdh
|
/**
* Terminates a TaskManager with the given index.
*
* <p>See {@link #startTaskManager()} to understand how TaskManagers are indexed. This method
* terminates a TaskManager with a given index but it does not clear the index. The index stays
* occupied for the lifetime of the MiniCluster and its TaskManager stays terminated. The index
* is not reused if more TaskManagers are started with {@link #startTaskManager()}.
*
* @param index
* index of the TaskManager to terminate
* @return {@link CompletableFuture} of the given TaskManager termination
*/
public CompletableFuture<Void> terminateTaskManager(int index) {
synchronized(lock) {
final TaskExecutor taskExecutor = taskManagers.get(index);
return taskExecutor.closeAsync();
}
}
| 3.26 |
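The counterpart to startTaskManager(): a hypothetical sketch showing that terminating by index does not free the index.

import org.apache.flink.runtime.minicluster.MiniCluster;

// Hypothetical helper, not part of Flink.
final class ScaleInExample {
    // Terminates the TaskManager at the given index and waits for its shutdown.
    // The index stays occupied; a later startTaskManager() call gets a new index
    // instead of reusing this one.
    static void removeTaskManager(MiniCluster miniCluster, int index) {
        miniCluster.terminateTaskManager(index).join();
    }
}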
flink_MiniCluster_createLocalRpcService_rdh
|
/**
* Factory method to instantiate the local RPC service.
*
* @param configuration
* Flink configuration.
* @param rpcSystem
* @return The instantiated RPC service
*/
protected RpcService createLocalRpcService(Configuration configuration, RpcSystem rpcSystem) throws Exception {
return rpcSystem.localServiceBuilder(configuration).withExecutorConfiguration(RpcUtils.getTestForkJoinExecutorConfiguration()).createAndStart();
}
| 3.26 |
flink_MiniCluster_checkRestoreModeForChangelogStateBackend_rdh
|
// HACK: temporary hack to make the randomized changelog state backend tests work with forced
// full snapshots. This option should be removed once changelog state backend supports forced
// full snapshots
private void checkRestoreModeForChangelogStateBackend(JobGraph jobGraph) {
final SavepointRestoreSettings savepointRestoreSettings = jobGraph.getSavepointRestoreSettings();
if (overrideRestoreModeForChangelogStateBackend && (savepointRestoreSettings.getRestoreMode() == RestoreMode.NO_CLAIM)) {
final Configuration conf = new Configuration();
SavepointRestoreSettings.toConfiguration(savepointRestoreSettings, conf);
conf.set(SavepointConfigOptions.RESTORE_MODE, RestoreMode.LEGACY);
jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.fromConfiguration(conf));
}
}
| 3.26 |
flink_MiniCluster_shutDownResourceManagerComponents_rdh
|
// ------------------------------------------------------------------------
// Internal methods
// ------------------------------------------------------------------------
@GuardedBy("lock")
private CompletableFuture<Void> shutDownResourceManagerComponents() {
final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(dispatcherResourceManagerComponents.size());
for (DispatcherResourceManagerComponent dispatcherResourceManagerComponent : dispatcherResourceManagerComponents) {
terminationFutures.add(dispatcherResourceManagerComponent.closeAsync());
}
final FutureUtils.ConjunctFuture<Void> v46 = FutureUtils.completeAll(terminationFutures);
return FutureUtils.runAfterwards(v46, () -> {
Exception exception = null;
synchronized(lock) {
if (resourceManagerLeaderRetriever != null) {
try {
resourceManagerLeaderRetriever.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
resourceManagerLeaderRetriever = null;
}
if (dispatcherLeaderRetriever != null) {
try {
dispatcherLeaderRetriever.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
dispatcherLeaderRetriever = null;
}
if (clusterRestEndpointLeaderRetrievalService != null) {
try {
clusterRestEndpointLeaderRetrievalService.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
clusterRestEndpointLeaderRetrievalService = null;
}
}
if (exception != null) {
throw exception;
}
});
}
| 3.26 |
flink_MiniCluster_isRunning_rdh
|
// ------------------------------------------------------------------------
// life cycle
// ------------------------------------------------------------------------
/**
* Checks if the mini cluster was started and is running.
*/
public boolean isRunning() {
return running;
}
| 3.26 |
flink_TieredStorageConfiguration_getEachTierExclusiveBufferNum_rdh
|
/**
* Get exclusive buffer number of each tier.
*
* @return buffer number of each tier.
*/
public List<Integer> getEachTierExclusiveBufferNum() {
return tierExclusiveBuffers;
}
| 3.26 |
flink_TieredStorageConfiguration_getRemoteStorageBasePath_rdh
|
/**
* Get the base path on remote storage.
*
 * @return the base path if the remote storage path is configured, otherwise null.
*/
public String getRemoteStorageBasePath() {
return remoteStorageBasePath;
}
| 3.26 |
flink_TieredStorageConfiguration_getAccumulatorExclusiveBuffers_rdh
|
/**
* Get exclusive buffer number of accumulator.
*
* <p>The buffer number is used to compare with the subpartition number to determine the type of
* {@link BufferAccumulator}.
*
* <p>If the exclusive buffer number is larger than (subpartitionNum + 1), the accumulator will
* use {@link HashBufferAccumulator}. If the exclusive buffer number is equal to or smaller than
 * (subpartitionNum + 1), the accumulator will use {@link SortBufferAccumulator}.
*
* @return the buffer number.
*/
public int getAccumulatorExclusiveBuffers() {
return accumulatorExclusiveBuffers;
}
| 3.26 |
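The selection rule in the javadoc above can be restated directly in code. This is an illustration of the documented rule only, not the actual decision logic inside Flink's tiered storage classes; all names are hypothetical.

// Hypothetical illustration, not part of Flink.
final class AccumulatorChoice {
    enum Kind { HASH, SORT }

    // More exclusive buffers than (numSubpartitions + 1) selects the hash-based
    // accumulator; otherwise the sort-based accumulator is used.
    static Kind choose(int accumulatorExclusiveBuffers, int numSubpartitions) {
        return accumulatorExclusiveBuffers > numSubpartitions + 1 ? Kind.HASH : Kind.SORT;
    }
}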
flink_TieredStorageConfiguration_getMemoryTierNumBytesPerSegment_rdh
|
/**
* Get the segment size of memory tier.
*
* @return segment size.
*/
public int getMemoryTierNumBytesPerSegment() {
return memoryTierNumBytesPerSegment;
}
| 3.26 |
flink_TieredStorageConfiguration_getTotalExclusiveBufferNum_rdh
|
/**
* Get the total exclusive buffer number.
*
* @return the total exclusive buffer number.
*/
public int getTotalExclusiveBufferNum() {
return ((accumulatorExclusiveBuffers + memoryTierExclusiveBuffers) + diskTierExclusiveBuffers) + (remoteStorageBasePath == null ? 0 : remoteTierExclusiveBuffers);
}
| 3.26 |
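A worked example of the sum above, using hypothetical per-tier buffer counts.

// Hypothetical numbers, for illustration only; not part of Flink.
final class TotalBuffersExample {
    static int total(String remoteStorageBasePath) {
        int accumulator = 4, memoryTier = 2, diskTier = 1, remoteTier = 1;
        // Remote-tier buffers only count when a remote storage base path is configured.
        return accumulator + memoryTier + diskTier
                + (remoteStorageBasePath == null ? 0 : remoteTier);
    }
}

With these numbers, total(null) returns 7 and total("s3://bucket/tiered") returns 8.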
flink_TieredStorageConfiguration_m0_rdh
|
/**
* Maximum time to wait when requesting read buffers from the buffer pool before throwing an
* exception in {@link DiskIOScheduler}.
*
* @return timeout duration.
*/
public Duration m0() {
return diskIOSchedulerRequestTimeout;
}
| 3.26 |
flink_TieredStorageConfiguration_getMemoryTierExclusiveBuffers_rdh
|
/**
* Get exclusive buffer number of memory tier.
*
* @return the buffer number.
*/
public int getMemoryTierExclusiveBuffers() {
return memoryTierExclusiveBuffers;
}
| 3.26 |
flink_TieredStorageConfiguration_getMinReserveDiskSpaceFraction_rdh
|
/**
* Minimum reserved disk space fraction in disk tier.
*
* @return the fraction.
*/
public float getMinReserveDiskSpaceFraction() {
return minReserveDiskSpaceFraction;
}
| 3.26 |
flink_AbstractFsCheckpointStorageAccess_hasDefaultSavepointLocation_rdh
|
// ------------------------------------------------------------------------
// CheckpointStorage implementation
// ------------------------------------------------------------------------
@Override
public boolean hasDefaultSavepointLocation() {
return defaultSavepointDirectory != null;
}
| 3.26 |
flink_AbstractFsCheckpointStorageAccess_encodePathAsReference_rdh
|
// Encoding / Decoding of References
// ------------------------------------------------------------------------
/**
* Encodes the given path as a reference in bytes. The path is encoded as a UTF-8 string and
 * prefixed with a magic number.
*
* @param path
* The path to encode.
* @return The location reference.
*/
public static CheckpointStorageLocationReference encodePathAsReference(Path path) {
byte[] refBytes = path.toString().getBytes(StandardCharsets.UTF_8);
byte[] bytes = new byte[REFERENCE_MAGIC_NUMBER.length + refBytes.length];
System.arraycopy(REFERENCE_MAGIC_NUMBER, 0, bytes, 0, REFERENCE_MAGIC_NUMBER.length);
System.arraycopy(refBytes, 0, bytes, REFERENCE_MAGIC_NUMBER.length, refBytes.length);
return new CheckpointStorageLocationReference(bytes);
}
| 3.26 |
flink_AbstractFsCheckpointStorageAccess_decodePathFromReference_rdh
|
/**
* Decodes the given reference into a path. This method validates that the reference bytes start
* with the correct magic number (as written by {@link #encodePathAsReference(Path)}) and
* converts the remaining bytes back to a proper path.
*
* @param reference
* The bytes representing the reference.
* @return The path decoded from the reference.
* @throws IllegalArgumentException
* Thrown, if the bytes do not represent a proper reference.
*/
public static Path decodePathFromReference(CheckpointStorageLocationReference reference) {
if (reference.isDefaultReference()) {
throw new IllegalArgumentException("Cannot decode default reference");
}
final byte[] bytes = reference.getReferenceBytes();
final int headerLen = REFERENCE_MAGIC_NUMBER.length;
if (bytes.length > headerLen) {
// compare magic number
for (int i = 0; i < headerLen; i++) {
if (bytes[i] != REFERENCE_MAGIC_NUMBER[i]) {
throw new IllegalArgumentException("Reference starts with the wrong magic number");
}
}
// convert to string and path
try {
return new Path(new String(bytes, headerLen, bytes.length - headerLen, StandardCharsets.UTF_8));
} catch (Exception e) {
throw new IllegalArgumentException("Reference cannot be decoded to a path", e);
}
} else {
throw new IllegalArgumentException("Reference too short.");
}
}
| 3.26 |
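A round-trip sketch exercising encodePathAsReference and decodePathFromReference together; the checkpoint path string and class name are made up for illustration.

import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.CheckpointStorageLocationReference;
import org.apache.flink.runtime.state.filesystem.AbstractFsCheckpointStorageAccess;

// Hypothetical example, not part of Flink.
public class ReferenceRoundTrip {
    public static void main(String[] args) {
        Path original = new Path("hdfs:///flink/checkpoints/job-42/chk-17");
        CheckpointStorageLocationReference ref =
                AbstractFsCheckpointStorageAccess.encodePathAsReference(original);
        // Decoding validates the magic number and restores the UTF-8 path;
        // a tampered or default reference would raise IllegalArgumentException.
        Path decoded = AbstractFsCheckpointStorageAccess.decodePathFromReference(ref);
        System.out.println(decoded);
    }
}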
flink_AbstractFsCheckpointStorageAccess_createCheckpointDirectory_rdh
|
/**
* Creates the directory path for the data exclusive to a specific checkpoint.
*
* @param baseDirectory
* The base directory into which the job checkpoints.
* @param checkpointId
* The ID (logical timestamp) of the checkpoint.
*/
protected static Path createCheckpointDirectory(Path baseDirectory, long checkpointId) {
return new Path(baseDirectory, CHECKPOINT_DIR_PREFIX + checkpointId);
}
| 3.26 |
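Since createCheckpointDirectory is protected, it cannot be called from outside the storage access classes; the sketch below only mirrors the layout it produces, assuming CHECKPOINT_DIR_PREFIX is the usual "chk-" prefix.

import org.apache.flink.core.fs.Path;

// Hypothetical illustration, not part of Flink.
final class CheckpointDirLayout {
    // Mirrors the layout above: "<base>/chk-<checkpointId>", assuming the
    // "chk-" prefix used by Flink's file-system checkpoint storage.
    static Path checkpointDirectory(Path baseDirectory, long checkpointId) {
        return new Path(baseDirectory, "chk-" + checkpointId);
    }
}

For example, a base directory of s3://bucket/checkpoints/job and checkpoint id 17 would yield s3://bucket/checkpoints/job/chk-17.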
flink_AbstractFsCheckpointStorageAccess_resolveCheckpointPointer_rdh
|
/**
* Takes the given string (representing a pointer to a checkpoint) and resolves it to a file
* status for the checkpoint's metadata file.
*
* @param checkpointPointer
* The pointer to resolve.
* @return A state handle to checkpoint/savepoint's metadata.
* @throws IOException
* Thrown, if the pointer cannot be resolved, the file system not accessed,
* or the pointer points to a location that does not seem to be a checkpoint/savepoint.
*/
@Internal
public static FsCompletedCheckpointStorageLocation resolveCheckpointPointer(String checkpointPointer) throws IOException {
checkNotNull(checkpointPointer, "checkpointPointer");
checkArgument(!checkpointPointer.isEmpty(), "empty checkpoint pointer");
// check if the pointer is in fact a valid file path
final Path path;
try {
path = new Path(checkpointPointer);
} catch (Exception e) {
throw new IOException((("Checkpoint/savepoint path '" + checkpointPointer) + "' is not a valid file URI. ") + "Either the pointer path is invalid, or the checkpoint was created by a different state backend.");}
// check if the file system can be accessed
final FileSystem v8;
try {
v8 = path.getFileSystem();
} catch (IOException e) {
throw new IOException(("Cannot access file system for checkpoint/savepoint path '" + checkpointPointer) + "'.", e);
}
final FileStatus status;
try {
status = v8.getFileStatus(path);
} catch (FileNotFoundException e) {
throw new FileNotFoundException((((("Cannot find checkpoint or savepoint " + "file/directory '") + checkpointPointer) + "' on file system '") + v8.getUri().getScheme()) + "'.");
}
// if we are here, the file / directory exists
final Path checkpointDir;
final FileStatus metadataFileStatus;
// If this is a directory, we need to find the meta data file
if (status.isDir()) {
checkpointDir = status.getPath();
final Path metadataFilePath = new Path(path, METADATA_FILE_NAME);
try {
metadataFileStatus = v8.getFileStatus(metadataFilePath);
} catch (FileNotFoundException e) {
throw new FileNotFoundException((((("Cannot find meta data file '" + METADATA_FILE_NAME)
+ "' in directory '") + path) + "'. Please try to load the checkpoint/savepoint ") + "directly from the metadata file instead of the directory.");
}
} else {
// this points to a file and we either do no name validation, or
// the name is actually correct, so we can return the path
metadataFileStatus = status;
checkpointDir = status.getPath().getParent();
}
final FileStateHandle metaDataFileHandle = new FileStateHandle(metadataFileStatus.getPath(), metadataFileStatus.getLen());
final String pointer = checkpointDir.makeQualified(v8).toString();
return new FsCompletedCheckpointStorageLocation(v8, checkpointDir, metaDataFileHandle, pointer);
}
| 3.26 |
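A usage sketch for the resolver above; the helper class is hypothetical, and getExternalPointer() is assumed to expose the qualified pointer string built in the snippet. The pointer may be either a checkpoint/savepoint directory or the path of the metadata file itself.

import java.io.IOException;
import org.apache.flink.runtime.state.filesystem.AbstractFsCheckpointStorageAccess;
import org.apache.flink.runtime.state.filesystem.FsCompletedCheckpointStorageLocation;

// Hypothetical helper, not part of Flink.
final class PointerResolution {
    // Resolves the pointer and returns the qualified external pointer of the
    // checkpoint directory that contains the metadata file.
    static String resolve(String checkpointPointer) throws IOException {
        FsCompletedCheckpointStorageLocation location =
                AbstractFsCheckpointStorageAccess.resolveCheckpointPointer(checkpointPointer);
        return location.getExternalPointer();
    }
}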
flink_AbstractFsCheckpointStorageAccess_getDefaultSavepointDirectory_rdh
|
/**
* Gets the default directory for savepoints. Returns null, if no default savepoint directory is
* configured.
*/
@Nullable
public Path getDefaultSavepointDirectory() {
return defaultSavepointDirectory;
}
| 3.26 |
flink_FileRegionBuffer_readInto_rdh
|
// ------------------------------------------------------------------------
// Utils
// ------------------------------------------------------------------------
public Buffer readInto(MemorySegment segment) throws IOException {
final ByteBuffer buffer = segment.wrap(0, bufferSize());
BufferReaderWriterUtil.readByteBufferFully(f0, buffer, position());
return new NetworkBuffer(segment, DummyBufferRecycler.INSTANCE, dataType, isCompressed, bufferSize());
}
| 3.26 |
flink_FileRegionBuffer_getNioBufferReadable_rdh
|
/**
* This method is only called by tests and by event-deserialization, like checkpoint barriers.
* Because such events are not used for bounded intermediate results, this method currently
* executes only in tests.
*/
@Override
public ByteBuffer getNioBufferReadable() {
try {
final ByteBuffer buffer = ByteBuffer.allocateDirect(bufferSize());
BufferReaderWriterUtil.readByteBufferFully(f0, buffer, position());
buffer.flip();
return buffer;
} catch (IOException e) {
// this is not very pretty, but given that this code runs only in tests
// the exception wrapping here is simpler than updating the method signature
// to declare IOExceptions, as would be necessary for a proper "lazy buffer".
throw new FlinkRuntimeException(e.getMessage(), e);
}
}
| 3.26 |
flink_FileRegionBuffer_isBuffer_rdh
|
// ------------------------------------------------------------------------
// Buffer override methods
// ------------------------------------------------------------------------
@Override
public boolean isBuffer() {
return dataType.isBuffer();
}
| 3.26 |
flink_IterableUtils_flatMap_rdh
|
/**
 * Flat-maps the two-dimensional {@link Iterable} into a one-dimensional {@link Iterable} and
 * converts the keys into items.
*
* @param itemGroups
* to flatmap
* @param mapper
* convert the {@link K} into {@link V}
* @param <K>
* type of key in the two-dimensional iterable
* @param <V>
* type of items that are mapped to
* @param <G>
* iterable of {@link K}
* @return flattened one-dimensional {@link Iterable} from the given two-dimensional {@link Iterable}
*/
@Internal
public static <K, V, G extends Iterable<K>> Iterable<V> flatMap(Iterable<G> itemGroups, Function<K, V> mapper) {
return () -> new Iterator<V>() {
private final Iterator<G> groupIterator = itemGroups.iterator();
private Iterator<K> itemIterator;
@Override
public boolean hasNext() {
while ((itemIterator == null) || (!itemIterator.hasNext())) {
if (!groupIterator.hasNext()) {
return false;
} else {
itemIterator = groupIterator.next().iterator();
}
}
return true;
}
@Override
public V next() {
if (hasNext()) {
return mapper.apply(itemIterator.next());
} else {
throw new NoSuchElementException();
}
}
};
}
| 3.26 |
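A small usage example for flatMap; the class name is hypothetical, but the call matches the signature shown above, assuming the mapper parameter is a java.util.function.Function.

import java.util.Arrays;
import java.util.List;
import org.apache.flink.util.IterableUtils;

// Hypothetical example, not part of Flink.
public class FlatMapExample {
    public static void main(String[] args) {
        List<List<Integer>> groups =
                Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3), Arrays.asList(4, 5));
        // Lazily flattens the nested lists and maps each element to a String.
        Iterable<String> flattened = IterableUtils.flatMap(groups, i -> "item-" + i);
        flattened.forEach(System.out::println); // item-1 ... item-5
    }
}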
flink_ResultPartitionType_canBePipelinedConsumed_rdh
|
/**
 * Returns whether this partition's upstream and downstream support being scheduled at the same time.
*/
public boolean canBePipelinedConsumed() {
return (f1 == ConsumingConstraint.CAN_BE_PIPELINED)
|| (f1 == ConsumingConstraint.MUST_BE_PIPELINED);
}
| 3.26 |