name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
---|---|---|
flink_SingleOutputStreamOperator_returns_rdh
|
/**
* Adds a type information hint about the return type of this operator. This method can be used
* in cases where Flink cannot determine automatically what the produced type of a function is.
* That can be the case if the function uses generic type variables in the return type that
* cannot be inferred from the input type.
*
* <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are
* preferable.
*
* @param typeInfo
* type information as a return type hint
* @return This operator with a given return type hint.
*/
public SingleOutputStreamOperator<T> returns(TypeInformation<T> typeInfo) {
requireNonNull(typeInfo, "TypeInformation must not be null");
transformation.setOutputType(typeInfo);
return this;
}
| 3.26 |
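A minimal usage sketch for the `returns(TypeInformation)` hint above (the pipeline and element values are illustrative, not taken from the snippet): a lambda producing a generic `Tuple2` erases its type parameters, so the hint restores them.

```java
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ReturnsHintExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The lambda erases Tuple2's generic parameters, so Flink cannot infer the produced type;
        // the returns(...) hint supplies the missing type information explicitly.
        env.fromElements(1, 2, 3)
                .map(i -> Tuple2.of(i, i * 2))
                .returns(Types.TUPLE(Types.INT, Types.INT))
                .print();

        env.execute("returns() hint example");
    }
}
```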
flink_SingleOutputStreamOperator_setMaxParallelism_rdh
|
/**
* Sets the maximum parallelism of this operator.
*
* <p>The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
* number of key groups used for partitioned state.
*
* @param maxParallelism
* Maximum parallelism
* @return The operator with set maximum parallelism
*/
@PublicEvolving
public SingleOutputStreamOperator<T> setMaxParallelism(int maxParallelism) {
OperatorValidationUtils.validateMaxParallelism(maxParallelism, canBeParallel());
transformation.setMaxParallelism(maxParallelism);
return this;
}
| 3.26 |
flink_SingleOutputStreamOperator_uid_rdh
|
/**
* Sets an ID for this operator.
*
* <p>The specified ID is used to assign the same operator ID across job submissions (for
* example when starting a job from a savepoint).
*
* <p><strong>Important</strong>: this ID needs to be unique per transformation and job.
* Otherwise, job submission will fail.
*
* @param uid
* The unique user-specified ID of this transformation.
* @return The operator with the specified ID.
*/
@PublicEvolving
public SingleOutputStreamOperator<T> uid(String uid) {
transformation.setUid(uid);
return this;
}
| 3.26 |
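A short sketch combining the two setters above on one operator (operator and ID names are illustrative): `uid` pins the operator ID for savepoint compatibility, while `setMaxParallelism` bounds rescaling and fixes the number of key groups for keyed state.

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class UidAndMaxParallelismExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.fromElements("a", "b", "a", "c")
                .keyBy(word -> word)
                .map(word -> word.toUpperCase()) // placeholder keyed operator
                .uid("uppercase-mapper")         // stable ID across job submissions / savepoints
                .setMaxParallelism(128)          // upper bound for rescaling = number of key groups
                .print();

        env.execute("uid / setMaxParallelism example");
    }
}
```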
flink_SingleOutputStreamOperator_setResources_rdh
|
/**
* Sets the resources for this operator, the minimum and preferred resources are the same by
* default.
*
* @param resources
* The resources for this operator.
* @return The operator with set minimum and preferred resources.
*/
private SingleOutputStreamOperator<T> setResources(ResourceSpec resources) {
transformation.setResources(resources, resources);
return this;
}
| 3.26 |
flink_TypeMappingUtils_checkPhysicalLogicalTypeCompatible_rdh
|
/**
* Checks whether the given physical field type and logical field type are compatible at the
* edges of the table ecosystem. Types are still compatible if the physical type is a legacy
* decimal type (converted from Types#BIG_DEC) and the logical type is DECIMAL(38, 18). This is
* to support legacy TypeInformation for {@link TableSource} and {@link org.apache.flink.table.sinks.TableSink}.
*
* @param physicalFieldType
* physical field type
* @param logicalFieldType
* logical field type
* @param physicalFieldName
* physical field name
* @param logicalFieldName
* logical field name
* @param isSource
* whether it is a source or sink, used for logging.
*/
public static void checkPhysicalLogicalTypeCompatible(LogicalType physicalFieldType, LogicalType logicalFieldType, String physicalFieldName, String logicalFieldName, boolean isSource) {
if (isSource) {
checkIfCompatible(physicalFieldType, logicalFieldType, cause -> new ValidationException(String.format("Type %s of table field '%s' does not match with " + "the physical type %s of the '%s' field of the TableSource return type.", logicalFieldType, logicalFieldName, physicalFieldType, physicalFieldName), cause));
} else {
checkIfCompatible(logicalFieldType, physicalFieldType, cause -> new ValidationException(String.format("Type %s of table field '%s' does not match with " + "the physical type %s of the '%s' field of the TableSink consumed type.", logicalFieldType, logicalFieldName, physicalFieldType, physicalFieldName), cause));
}
}
| 3.26 |
flink_TypeMappingUtils_computePhysicalIndicesOrTimeAttributeMarkers_rdh
|
/**
* Computes indices of physical fields corresponding to the selected logical fields of a {@link TableSchema}.
*
* <p>It puts markers (idx < 0) for time attributes extracted from {@link DefinedProctimeAttribute} and {@link DefinedRowtimeAttributes}
*
* <p>{@link TypeMappingUtils#computePhysicalIndices(List, DataType, Function)} should be
* preferred. The time attribute markers should not be used anymore.
*
* @param tableSource
* Used to extract {@link DefinedRowtimeAttributes}, {@link DefinedProctimeAttribute} and {@link TableSource#getProducedDataType()}.
* @param logicalColumns
* Logical columns that describe the physical type.
* @param streamMarkers
* If true puts stream markers otherwise puts batch markers.
* @param nameRemapping
* Additional remapping of a logical to a physical field name.
* TimestampExtractor works with logical names, but accesses physical fields
* @return Physical indices of logical fields selected with {@code projectedLogicalFields} mask.
*/
public static int[] computePhysicalIndicesOrTimeAttributeMarkers(TableSource<?> tableSource, List<TableColumn> logicalColumns, boolean streamMarkers, Function<String, String> nameRemapping) {
Optional<String> proctimeAttribute = getProctimeAttribute(tableSource);
List<String> rowtimeAttributes = getRowtimeAttributes(tableSource);
List<TableColumn> columnsWithoutTimeAttributes =
logicalColumns.stream().filter(col -> (!rowtimeAttributes.contains(col.getName())) && proctimeAttribute.map(attr -> !attr.equals(col.getName())).orElse(true)).collect(Collectors.toList());
Map<TableColumn, Integer> columnsToPhysicalIndices = TypeMappingUtils.computePhysicalIndices(columnsWithoutTimeAttributes.stream(), tableSource.getProducedDataType(), nameRemapping);
return logicalColumns.stream().mapToInt(logicalColumn -> {
if (proctimeAttribute.map(attr -> attr.equals(logicalColumn.getName())).orElse(false)) {
verifyTimeAttributeType(logicalColumn, "Proctime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.PROCTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER;
}
} else if (rowtimeAttributes.contains(logicalColumn.getName())) {
verifyTimeAttributeType(logicalColumn, "Rowtime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.ROWTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER;
}
} else {
return columnsToPhysicalIndices.get(logicalColumn);
}
}).toArray();
}
| 3.26 |
flink_TypeMappingUtils_getRowtimeAttributes_rdh
|
/**
* Returns a list with all rowtime attribute names of the [[TableSource]].
*/
private static List<String> getRowtimeAttributes(TableSource<?> tableSource) {
if (tableSource instanceof DefinedRowtimeAttributes) {
return ((DefinedRowtimeAttributes) (tableSource)).getRowtimeAttributeDescriptors().stream().map(RowtimeAttributeDescriptor::getAttributeName).collect(Collectors.toList());
} else {
return Collections.emptyList();
}
}
| 3.26 |
flink_TypeMappingUtils_computePhysicalIndices_rdh
|
/**
* Computes indices of physical fields corresponding to the selected logical fields of a {@link TableSchema}.
*
* @param logicalColumns
* Logical columns that describe the physical type.
* @param physicalType
* Physical type to retrieve indices from.
* @param nameRemapping
* Additional remapping of a logical to a physical field name.
* TimestampExtractor works with logical names, but accesses physical fields
* @return Physical indices of logical fields selected with {@code projectedLogicalFields} mask.
*/
public static int[] computePhysicalIndices(List<TableColumn> logicalColumns, DataType physicalType, Function<String, String> nameRemapping) {
Map<TableColumn, Integer> physicalIndexLookup = computePhysicalIndices(logicalColumns.stream(), physicalType, nameRemapping);
return logicalColumns.stream().mapToInt(physicalIndexLookup::get).toArray();
}
| 3.26 |
flink_TypeMappingUtils_getProctimeAttribute_rdh
|
/**
* Returns the proctime attribute of the [[TableSource]] if it is defined.
*/
private static Optional<String> getProctimeAttribute(TableSource<?> tableSource) {
if (tableSource instanceof DefinedProctimeAttribute) {
return Optional.ofNullable(((DefinedProctimeAttribute) (tableSource)).getProctimeAttribute());
}
else {
return Optional.empty();
}
}
| 3.26 |
flink_HistoryServerStaticFileServerHandler_respondWithFile_rdh
|
/**
* Response when running with leading JobManager.
*/
private void respondWithFile(ChannelHandlerContext ctx, HttpRequest request, String requestPath) throws IOException, ParseException, RestHandlerException {
// make sure we request the "index.html" in case there is a directory request
if (requestPath.endsWith("/")) {
requestPath = requestPath + "index.html";
}
if (!requestPath.contains(".")) {
// we assume that the path ends in either .html or .js
requestPath = requestPath + ".json";
}
// convert to absolute path
final File file = new File(rootPath, requestPath);
if (!file.exists()) {
// file does not exist. Try to load it with the classloader
ClassLoader cl = HistoryServerStaticFileServerHandler.class.getClassLoader();
try (InputStream resourceStream = cl.getResourceAsStream("web" + requestPath)) {
boolean v4 = false;
try {
if (resourceStream != null) {
URL root = cl.getResource("web");
URL v6 = cl.getResource("web" + requestPath);
if ((root != null) && (v6 != null)) {
URI v7 = new URI(root.getPath()).normalize();
URI requestedURI = new URI(v6.getPath()).normalize();
// Check that we don't load anything from outside of the
// expected scope.
if (!v7.relativize(requestedURI).equals(requestedURI)) {
LOG.debug("Loading missing file from classloader: {}", requestPath);
// ensure that directory to file exists.
file.getParentFile().mkdirs();
Files.copy(resourceStream, file.toPath());
v4 = true;
}
}
}
} catch (Throwable t) {
LOG.error("error while responding", t);
} finally {
if (!v4) {
LOG.debug("Unable to load requested file {} from classloader", requestPath);
throw new NotFoundException("File not found.");
}
}
}
}
StaticFileServerHandler.checkFileValidity(file, rootPath, LOG);
// cache validation
final String ifModifiedSince = request.headers().get(IF_MODIFIED_SINCE);
if ((ifModifiedSince != null) && (!ifModifiedSince.isEmpty())) {
SimpleDateFormat dateFormatter = new SimpleDateFormat(StaticFileServerHandler.HTTP_DATE_FORMAT, Locale.US);
Date ifModifiedSinceDate = dateFormatter.parse(ifModifiedSince);
// Only compare up to the second because the datetime format we send to the client
// does not have milliseconds
long ifModifiedSinceDateSeconds = ifModifiedSinceDate.getTime() / 1000;
long fileLastModifiedSeconds = file.lastModified() / 1000;
if (ifModifiedSinceDateSeconds == fileLastModifiedSeconds) {
if (LOG.isDebugEnabled()) {
LOG.debug(("Responding 'NOT MODIFIED' for file '" + file.getAbsolutePath()) + '\'');
}
StaticFileServerHandler.sendNotModified(ctx);
return;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(("Responding with file '" + file.getAbsolutePath()) + '\'');
}
// Don't need to close this manually. Netty's DefaultFileRegion will take care of it.
final RandomAccessFile raf;
try {
raf = new RandomAccessFile(file, "r");
} catch (FileNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Could not find file {}.", file.getAbsolutePath());
}
HandlerUtils.sendErrorResponse(ctx, request, new ErrorResponseBody("File not found."), NOT_FOUND, Collections.emptyMap());
return;
}
try {
long fileLength = raf.length();
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
StaticFileServerHandler.setContentTypeHeader(response, file);
// the job overview should be updated as soon as possible
if (!requestPath.equals("/joboverview.json")) {
StaticFileServerHandler.setDateAndCacheHeaders(response, file);
}
if (HttpUtil.isKeepAlive(request)) {
response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE);
}
HttpUtil.setContentLength(response, fileLength);
// write the initial line and the header.
ctx.write(response);
// write the content.
ChannelFuture lastContentFuture;
if (ctx.pipeline().get(SslHandler.class) == null) {
ctx.write(new DefaultFileRegion(raf.getChannel(), 0, fileLength), ctx.newProgressivePromise());
lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
lastContentFuture = ctx.writeAndFlush(new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)), ctx.newProgressivePromise());
// HttpChunkedInput will write the end marker (LastHttpContent) for us.
}
// close the connection, if no keep-alive is needed
if (!HttpUtil.isKeepAlive(request)) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
} catch (Exception e) {
raf.close();
LOG.error("Failed to serve file.", e);
throw new RestHandlerException("Internal server error.", INTERNAL_SERVER_ERROR);
}
}
| 3.26 |
flink_HistoryServerStaticFileServerHandler_channelRead0_rdh
|
// ------------------------------------------------------------------------
// Responses to requests
// ------------------------------------------------------------------------
@Override
public void channelRead0(ChannelHandlerContext ctx, RoutedRequest routedRequest) throws Exception {
String requestPath = routedRequest.getPath();
try {
respondWithFile(ctx, routedRequest.getRequest(), requestPath);
} catch (RestHandlerException rhe) {
HandlerUtils.sendErrorResponse(ctx, routedRequest.getRequest(), new ErrorResponseBody(rhe.getMessage()), rhe.getHttpResponseStatus(), Collections.emptyMap());
}
}
| 3.26 |
flink_ComponentMetricGroup_close_rdh
|
/**
* Closes the component group by removing and closing all metrics and subgroups (inherited from
* {@link AbstractMetricGroup}), plus closing and removing all dedicated component subgroups.
*/
@Override
public void close() {
synchronized(this) {
if (!isClosed()) {
// remove all metrics and generic subgroups
super.close();
// remove and close all subcomponent metrics
for (ComponentMetricGroup group : subComponents()) {
group.close();
}
}
}
}
| 3.26 |
flink_Grouping_getInputDataSet_rdh
|
/**
* Returns the input DataSet of a grouping operation, that is the one before the grouping. This
* means that if it is applied directly to the result of a grouping operation, it will cancel
* its effect. As an example, in the following snippet:
*
* <pre>{@code DataSet<X> notGrouped = input.groupBy().getDataSet();
* DataSet<Y> allReduced = notGrouped.reduce()}</pre>
*
* <p>the {@code groupBy()} is as if it never happened, as the {@code notGrouped} DataSet
* corresponds to the input of the {@code groupBy()} (because of the {@code getDataset()}).
*/
@Internal
public DataSet<T> getInputDataSet() {
return this.inputDataSet;
}
| 3.26 |
flink_Grouping_getCustomPartitioner_rdh
|
/**
* Gets the custom partitioner to be used for this grouping, or {@code null}, if none was
* defined.
*
* @return The custom partitioner to be used for this grouping.
*/
@Internal
public Partitioner<?> getCustomPartitioner() {
return this.customPartitioner;
}
| 3.26 |
flink_ByteValue_setValue_rdh
|
/**
* Sets the encapsulated byte to the specified value.
*
* @param value
* the new value of the encapsulated byte.
*/
public void setValue(byte value) {
this.value = value;
}
| 3.26 |
flink_ByteValue_getBinaryLength_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int getBinaryLength() {
return 1;
}
| 3.26 |
flink_ByteValue_read_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
this.value = in.readByte();
}
| 3.26 |
flink_ByteValue_getMaxNormalizedKeyLen_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int getMaxNormalizedKeyLen() {
return 1;
}
| 3.26 |
flink_ByteValue_toString_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public String toString() {
return String.valueOf(this.value);
}
| 3.26 |
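A tiny usage sketch of the mutable `ByteValue` wrapper whose accessors are listed above (assuming the standard `org.apache.flink.types.ByteValue` class):

```java
import org.apache.flink.types.ByteValue;

public class ByteValueExample {
    public static void main(String[] args) {
        ByteValue value = new ByteValue();
        value.setValue((byte) 42);

        // getBinaryLength() is 1: a ByteValue always serializes to exactly one byte
        System.out.println(value + " occupies " + value.getBinaryLength() + " byte(s)");
    }
}
```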
flink_HandlerRequestUtils_getQueryParameter_rdh
|
/**
* Returns the value of a query parameter, or {@code null} if the query parameter is not set.
*
* @throws RestHandlerException
* If the query parameter is repeated.
*/
public static <X, P extends MessageQueryParameter<X>, R extends RequestBody, M extends MessageParameters> X getQueryParameter(final HandlerRequest<R> request, final Class<P> queryParameterClass) throws RestHandlerException {
return getQueryParameter(request, queryParameterClass, null);
}
| 3.26 |
flink_HandlerRequestUtils_fromRequestBodyOrQueryParameter_rdh
|
/**
* Returns {@code requestValue} if it is not null, otherwise returns the query parameter value
* if it is not null, otherwise returns the default value.
*/
public static <T> T fromRequestBodyOrQueryParameter(T requestValue, SupplierWithException<T, RestHandlerException> queryParameterExtractor, T defaultValue, Logger log) throws RestHandlerException {
if (requestValue != null) {
return requestValue;
} else {
T queryParameterValue = queryParameterExtractor.get();
if (queryParameterValue != null) {
log.warn("Configuring the job submission via query parameters is deprecated." + " Please migrate to submitting a JSON request instead.");
return queryParameterValue;
} else {
return defaultValue;
}
}
}
| 3.26 |
flink_HeartbeatManagerImpl_getOwnResourceID_rdh
|
// ----------------------------------------------------------------------------------------------
// Getters
// ----------------------------------------------------------------------------------------------
ResourceID getOwnResourceID() {
return ownResourceID;
}
| 3.26 |
flink_HeartbeatManagerImpl_receiveHeartbeat_rdh
|
// ----------------------------------------------------------------------------------------------
// HeartbeatTarget methods
// ----------------------------------------------------------------------------------------------
@Override
public CompletableFuture<Void> receiveHeartbeat(ResourceID heartbeatOrigin, I heartbeatPayload) {
if (!f0) {
log.debug("Received heartbeat from {}.",
heartbeatOrigin);reportHeartbeat(heartbeatOrigin);
if (heartbeatPayload != null) {
heartbeatListener.reportPayload(heartbeatOrigin, heartbeatPayload);
}
}
return FutureUtils.completedVoidFuture();
}
| 3.26 |
flink_HeartbeatManagerImpl_monitorTarget_rdh
|
// ----------------------------------------------------------------------------------------------
// HeartbeatManager methods
// ----------------------------------------------------------------------------------------------
@Override
public void monitorTarget(ResourceID resourceID, HeartbeatTarget<O> heartbeatTarget) {
if (!f0) {
if (heartbeatTargets.containsKey(resourceID)) {
log.debug("The target with resource ID {} is already been monitored.", resourceID.getStringWithMetadata());
} else {
HeartbeatMonitor<O> heartbeatMonitor = heartbeatMonitorFactory.createHeartbeatMonitor(resourceID, heartbeatTarget, mainThreadExecutor, heartbeatListener, heartbeatTimeoutIntervalMs, failedRpcRequestsUntilUnreachable);
heartbeatTargets.put(resourceID, heartbeatMonitor);
// check if we have stopped in the meantime (concurrent stop operation)
if (f0) {
heartbeatMonitor.cancel();
heartbeatTargets.remove(resourceID);
}
}
}
}
| 3.26 |
flink_SessionManagerImpl_checkSessionCount_rdh
|
// ------------------------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------------------------
private void checkSessionCount() throws SqlGatewayException {
if (maxSessionCount <= 0) {
return;
}
if (sessions.size() >= maxSessionCount) {
String msg = String.format("Failed to create session, the count of active sessions exceeds the max count: %s", maxSessionCount);
LOG.warn(msg);
throw new SqlGatewayException(msg);
}
}
| 3.26 |
flink_GenericDataSourceBase_getUserCodeWrapper_rdh
|
/**
* Gets the class describing the input format.
*
* <p>This method is basically identical to {@link #getFormatWrapper()}.
*
* @return The class describing the input format.
* @see org.apache.flink.api.common.operators.Operator#getUserCodeWrapper()
*/
@Override
public UserCodeWrapper<? extends T> getUserCodeWrapper() {
return this.formatWrapper;
}
| 3.26 |
flink_GenericDataSourceBase_getStatisticsKey_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the key under which statistics about this data source may be obtained from the
* statistics cache.
*
* @return The statistics cache key.
*/
public String getStatisticsKey() {
return this.statisticsKey;
}
| 3.26 |
flink_GenericDataSourceBase_toString_rdh
|
// --------------------------------------------------------------------------------------------
public String toString() {
return this.name;
}
| 3.26 |
flink_GenericDataSourceBase_accept_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Accepts the visitor and applies it this instance. Since the data sources have no inputs, no
* recursive descend happens. The visitors pre-visit method is called and, if returning
* <tt>true</tt>, the post-visit method is called.
*
* @param visitor
* The visitor.
* @see org.apache.flink.util.Visitable#accept(org.apache.flink.util.Visitor)
*/
@Override
public void accept(Visitor<Operator<?>> visitor) {
if (visitor.preVisit(this)) {
visitor.postVisit(this);
}
}
| 3.26 |
flink_GenericDataSourceBase_setStatisticsKey_rdh
|
/**
* Sets the key under which statistics about this data source may be obtained from the
* statistics cache. Useful for testing purposes, when providing mock statistics.
*
* @param statisticsKey
* The key for the statistics object.
*/
public void setStatisticsKey(String statisticsKey) {
this.statisticsKey = statisticsKey;
}
| 3.26 |
flink_GenericDataSourceBase_getFormatWrapper_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the class describing the input format.
*
* @return The class describing the input format.
*/
public UserCodeWrapper<? extends T> getFormatWrapper() {
return this.formatWrapper;
}
| 3.26 |
flink_GenericDataSourceBase_setSplitDataProperties_rdh
|
/**
* Sets properties of input splits for this data source. Split properties can help to generate
* more efficient execution plans. <br>
* <b> IMPORTANT: Providing wrong split data properties can cause wrong results! </b>
*
* @param splitDataProperties
* The data properties of this data source's splits.
*/
public void setSplitDataProperties(SplitDataProperties<OUT> splitDataProperties) {
this.splitProperties = splitDataProperties;
}
| 3.26 |
flink_GenericDataSourceBase_executeOnCollections_rdh
|
// --------------------------------------------------------------------------------------------
protected List<OUT> executeOnCollections(RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
@SuppressWarnings("unchecked")
InputFormat<OUT, InputSplit> inputFormat = ((InputFormat<OUT, InputSplit>) (this.formatWrapper.getUserCodeObject()));
// configure the input format
inputFormat.configure(this.parameters);
// open the input format
if (inputFormat instanceof RichInputFormat) {
((RichInputFormat) (inputFormat)).setRuntimeContext(ctx);
((RichInputFormat) (inputFormat)).openInputFormat();
}
List<OUT> result = new ArrayList<OUT>();
// splits
InputSplit[] splits = inputFormat.createInputSplits(1);
TypeSerializer<OUT> serializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
for (InputSplit split : splits) {
inputFormat.open(split);
while (!inputFormat.reachedEnd()) {
OUT next = inputFormat.nextRecord(serializer.createInstance());
if (next != null) {
result.add(serializer.copy(next));
}
}
inputFormat.close();
}
// close the input format
if (inputFormat instanceof RichInputFormat) {
((RichInputFormat) (inputFormat)).closeInputFormat();
}
return result;
}
| 3.26 |
flink_ChannelStateWriteRequestExecutorFactory_getOrCreateExecutor_rdh
|
/**
*
* @param startExecutor
* It is for test to prevent create too many threads when some unit tests
* create executor frequently.
*/
ChannelStateWriteRequestExecutor getOrCreateExecutor(JobVertexID jobVertexID, int subtaskIndex, CheckpointStorage checkpointStorage, int maxSubtasksPerChannelStateFile, boolean startExecutor) {
synchronized(lock) {
if (executor == null) {
executor = new ChannelStateWriteRequestExecutorImpl(new ChannelStateWriteRequestDispatcherImpl(checkpointStorage, jobID, new ChannelStateSerializerImpl()), maxSubtasksPerChannelStateFile, executor -> {
assert Thread.holdsLock(lock);
checkState(this.executor == executor);
this.executor = null;
}, lock);
if (startExecutor) {
executor.start();
}
}
ChannelStateWriteRequestExecutor currentExecutor = executor;
currentExecutor.registerSubtask(jobVertexID, subtaskIndex);
return currentExecutor;
}
}
| 3.26 |
flink_Task_getExecutionState_rdh
|
// ------------------------------------------------------------------------
// Task Execution
// ------------------------------------------------------------------------
/**
* Returns the current execution state of the task.
*
* @return The current execution state of the task.
*/
public ExecutionState getExecutionState() {
return this.executionState;
}
| 3.26 |
flink_Task_triggerCheckpointBarrier_rdh
|
// ------------------------------------------------------------------------
// Notifications on the invokable
// ------------------------------------------------------------------------
/**
* Calls the invokable to trigger a checkpoint.
*
* @param checkpointID
* The ID identifying the checkpoint.
* @param checkpointTimestamp
* The timestamp associated with the checkpoint.
* @param checkpointOptions
* Options for performing this checkpoint.
*/
public void triggerCheckpointBarrier(final long checkpointID, final long checkpointTimestamp, final CheckpointOptions checkpointOptions) {
final TaskInvokable invokable = this.invokable;
final CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointID, checkpointTimestamp, System.currentTimeMillis());
if (executionState == ExecutionState.RUNNING) {
checkState(invokable instanceof CheckpointableTask, "invokable is not checkpointable");
try {
((CheckpointableTask) (invokable)).triggerCheckpointAsync(checkpointMetaData, checkpointOptions).handle((triggerResult, exception) -> {
if ((exception != null) || (!triggerResult)) {
declineCheckpoint(checkpointID, CheckpointFailureReason.TASK_FAILURE, exception);
return false;
}
return true;
});
} catch (RejectedExecutionException ex) {
// This may happen if the mailbox is closed. It means that the task is shutting
// down, so we just ignore it.
LOG.debug("Triggering checkpoint {} for {} ({}) was rejected by the mailbox", checkpointID, taskNameWithSubtask, executionId);
declineCheckpoint(checkpointID, CheckpointFailureReason.CHECKPOINT_DECLINED_TASK_CLOSING);
} catch (Throwable t) {
if (getExecutionState() == ExecutionState.RUNNING) {
failExternally(new Exception((("Error while triggering checkpoint " + checkpointID) + " for ") + taskNameWithSubtask, t));
} else {
LOG.debug("Encountered error while triggering checkpoint {} for " + "{} ({}) while being not in state running.", checkpointID, taskNameWithSubtask, executionId, t);
}
}
} else {
LOG.debug("Declining checkpoint request for non-running task {} ({}).", taskNameWithSubtask, executionId);// send back a message that we did not do the checkpoint
declineCheckpoint(checkpointID, CheckpointFailureReason.CHECKPOINT_DECLINED_TASK_NOT_READY);
}
}
| 3.26 |
flink_Task_getJobID_rdh
|
// ------------------------------------------------------------------------
// Accessors
// ------------------------------------------------------------------------
@Override
public JobID getJobID() {
return jobId;
}
| 3.26 |
flink_Task_run_rdh
|
/**
* The core work method that bootstraps the task and executes its code.
*/
@Override
public void run() {
try {
doRun();
} finally {
terminationFuture.complete(executionState);
}
}
| 3.26 |
flink_Task_cancelExecution_rdh
|
// ----------------------------------------------------------------------------------------------------------------
// Canceling / Failing the task from the outside
// ----------------------------------------------------------------------------------------------------------------
/**
* Cancels the task execution. If the task is already in a terminal state (such as FINISHED,
* CANCELED, FAILED), or if the task is already canceling this does nothing. Otherwise it sets
* the state to CANCELING, and, if the invokable code is running, starts an asynchronous thread
* that aborts that code.
*
* <p>This method never blocks.
*/
public void cancelExecution() {
LOG.info("Attempting to cancel task {} ({}).", taskNameWithSubtask, executionId);cancelOrFailAndCancelInvokable(ExecutionState.CANCELING, null);
}
| 3.26 |
flink_Task_loadAndInstantiateInvokable_rdh
|
/**
* Instantiates the given task invokable class, passing the given environment (and possibly the
* initial task state) to the task's constructor.
*
* <p>The method will first try to instantiate the task via a constructor accepting both the
* Environment and the TaskStateSnapshot. If no such constructor exists, and there is no initial
* state, the method will fall back to the stateless convenience constructor that accepts only
* the Environment.
*
* @param classLoader
* The classloader to load the class through.
* @param className
* The name of the class to load.
* @param environment
* The task environment.
* @return The instantiated invokable task object.
* @throws Throwable
* Forwards all exceptions that happen during initialization of the task. Also
* throws an exception if the task class misses the necessary constructor.
*/
private static TaskInvokable loadAndInstantiateInvokable(ClassLoader classLoader, String className, Environment environment) throws Throwable {
final Class<? extends TaskInvokable> invokableClass;
try {
invokableClass = Class.forName(className, true, classLoader).asSubclass(TaskInvokable.class);
} catch (Throwable t) {
throw new Exception("Could not load the task's invokable class.", t);
}
Constructor<? extends TaskInvokable> statelessCtor;
try {
statelessCtor = invokableClass.getConstructor(Environment.class);
} catch (NoSuchMethodException ee) {
throw new FlinkException("Task misses proper constructor", ee);
}
// instantiate the class
try {
// noinspection ConstantConditions --> cannot happen
return statelessCtor.newInstance(environment);
} catch (InvocationTargetException e) {
// directly forward exceptions from the eager initialization
throw e.getTargetException();
} catch (Exception e) {
throw new FlinkException("Could not instantiate the task's invokable class.", e);
}
}
| 3.26 |
flink_Task_runWithSystemExitMonitoring_rdh
|
/**
* Monitor user codes from exiting JVM covering user function invocation. This can be done in a
* finer-grained way like enclosing user callback functions individually, but as exit triggered
* by framework is not performed and expected in this invoke function anyhow, we can monitor
* exiting JVM for entire scope.
*/
private void runWithSystemExitMonitoring(RunnableWithException action) throws Exception {
FlinkSecurityManager.monitorUserSystemExitForCurrentThread();
try {
action.run();
}
finally {
FlinkSecurityManager.unmonitorUserSystemExitForCurrentThread();
}
}
| 3.26 |
flink_Task_m1_rdh
|
/**
* Unwrap, enrich and handle fatal errors.
*/
private Throwable m1(Throwable t) {
// unwrap wrapped exceptions to make stack traces more compact
if (t instanceof WrappingRuntimeException) {
t = ((WrappingRuntimeException) (t)).unwrap();
}
TaskManagerExceptionUtils.tryEnrichTaskManagerError(t);
// check if the exception is unrecoverable
if (ExceptionUtils.isJvmFatalError(t) || ((t instanceof OutOfMemoryError) && taskManagerConfig.shouldExitJvmOnOutOfMemoryError())) {
// terminate the JVM immediately
// don't attempt a clean shutdown, because we cannot expect the clean shutdown
// to complete
try {
LOG.error("Encountered fatal error {} - terminating the JVM", t.getClass().getName(), t);
} finally {
Runtime.getRuntime().halt(-1);
}
}
return t;
}
| 3.26 |
flink_Task_transitionState_rdh
|
/**
* Try to transition the execution state from the current state to the new state.
*
* @param currentState
* of the execution
* @param newState
* of the execution
* @return true if the transition was successful, otherwise false
*/
private boolean transitionState(ExecutionState currentState, ExecutionState newState) {
return transitionState(currentState, newState, null);
}
| 3.26 |
flink_Task_isCanceledOrFailed_rdh
|
/**
* Checks whether the task has failed, is canceled, or is being canceled at the moment.
*
* @return True is the task in state FAILED, CANCELING, or CANCELED, false otherwise.
*/
public boolean isCanceledOrFailed() {
return ((executionState == ExecutionState.CANCELING) || (executionState == ExecutionState.CANCELED)) || (executionState == ExecutionState.FAILED);
}
| 3.26 |
flink_Task_deliverOperatorEvent_rdh
|
/**
* Dispatches an operator event to the invokable task.
*
* <p>If the event delivery did not succeed, this method throws an exception. Callers can use
* that exception for error reporting, but need not react with failing this task (this method
* takes care of that).
*
* @throws FlinkException
* This method throws exceptions indicating the reason why delivery did
* not succeed.
*/
public void deliverOperatorEvent(OperatorID operator, SerializedValue<OperatorEvent> evt) throws FlinkException {
final TaskInvokable invokable = this.invokable;
final ExecutionState currentState = this.executionState;
if ((invokable == null) || ((currentState != ExecutionState.RUNNING) && (currentState != ExecutionState.INITIALIZING))) {
throw new TaskNotRunningException("Task is not running, but in state " + currentState);
}
if (invokable instanceof CoordinatedTask) {
try {
((CoordinatedTask) (invokable)).dispatchOperatorEvent(operator, evt);
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
if ((getExecutionState() == ExecutionState.RUNNING) || (getExecutionState() == ExecutionState.INITIALIZING)) {
FlinkException e = new FlinkException("Error while handling operator event", t);
failExternally(e);
throw e;
}
}
}
}
| 3.26 |
flink_Task_requestPartitionProducerState_rdh
|
// ------------------------------------------------------------------------
// Partition State Listeners
// ------------------------------------------------------------------------
@Override
public void requestPartitionProducerState(final IntermediateDataSetID intermediateDataSetId, final ResultPartitionID resultPartitionId, Consumer<? super ResponseHandle> responseConsumer) {
final CompletableFuture<ExecutionState> futurePartitionState = partitionProducerStateChecker.requestPartitionProducerState(jobId, intermediateDataSetId, resultPartitionId);
FutureUtils.assertNoException(futurePartitionState.handle(PartitionProducerStateResponseHandle::new).thenAcceptAsync(responseConsumer, executor));
}
| 3.26 |
flink_Task_failExternally_rdh
|
/**
* Marks task execution failed for an external reason (a reason other than the task code itself
* throwing an exception). If the task is already in a terminal state (such as FINISHED,
* CANCELED, FAILED), or if the task is already canceling this does nothing. Otherwise it sets
* the state to FAILED, and, if the invokable code is running, starts an asynchronous thread
* that aborts that code.
*
* <p>This method never blocks.
*/
@Override
public void failExternally(Throwable cause) {
LOG.info("Attempting to fail task externally {} ({}).", taskNameWithSubtask, executionId); cancelOrFailAndCancelInvokable(ExecutionState.FAILED, cause);
}
| 3.26 |
flink_Task_releaseResources_rdh
|
/**
* Releases resources before task exits. We should also fail the partition to release if the
* task has failed, is canceled, or is being canceled at the moment.
*/
private void releaseResources() {
LOG.debug("Release task {} network resources (state: {}).", taskNameWithSubtask, getExecutionState());
for (ResultPartitionWriter partitionWriter : partitionWriters) {
taskEventDispatcher.unregisterPartition(partitionWriter.getPartitionId());
}
// close network resources
if (isCanceledOrFailed()) {
failAllResultPartitions();
}
closeAllResultPartitions();
closeAllInputGates();
try {
taskStateManager.close();
} catch (Exception e) {
LOG.error("Failed to close task state manager for task {}.", taskNameWithSubtask, e);
}
}
| 3.26 |
flink_Task_m0_rdh
|
/**
* If the task has failed, this method gets the exception that caused this task to fail.
* Otherwise this method returns null.
*
* @return The exception that caused the task to fail, or null, if the task has not failed.
*/
public Throwable m0() {
return failureCause;
}
| 3.26 |
flink_EnvironmentSettings_getUserClassLoader_rdh
|
/**
* Returns the user {@link ClassLoader} to use for code generation, UDF loading and other
* operations requiring reflections on user code.
*/
@Internal
public ClassLoader getUserClassLoader() {
return classLoader;
}
| 3.26 |
flink_EnvironmentSettings_inBatchMode_rdh
|
/**
* Sets that the components should work in a batch mode. Streaming mode by default.
*/
public Builder inBatchMode() {
configuration.set(RUNTIME_MODE, BATCH);
return this;
}
| 3.26 |
flink_EnvironmentSettings_m0_rdh
|
/**
* Tells if the {@link TableEnvironment} should work in a batch or streaming mode.
*/
public boolean m0() {
return configuration.get(RUNTIME_MODE) == STREAMING;
}
| 3.26 |
flink_EnvironmentSettings_inStreamingMode_rdh
|
/**
* Sets that the components should work in a streaming mode. Enabled by default.
*/
public Builder inStreamingMode() {
configuration.set(RUNTIME_MODE, STREAMING);
return this;
}
| 3.26 |
flink_EnvironmentSettings_m1_rdh
|
/**
* Specifies the classloader to use in the planner for operations related to code
* generation, UDF loading, operations requiring reflections on user classes, discovery of
* factories.
*
* <p>By default, this is configured using {@code Thread.currentThread().getContextClassLoader()}.
*
* <p>Modify the {@link ClassLoader} only if you know what you're doing.
*/
public Builder m1(ClassLoader classLoader) {
this.classLoader = classLoader;
return this;
}
| 3.26 |
flink_EnvironmentSettings_newInstance_rdh
|
/**
* Creates a builder for creating an instance of {@link EnvironmentSettings}.
*/
public static Builder newInstance() {
return new Builder();
}
/**
* Creates an instance of {@link EnvironmentSettings} from configuration.
*
* @deprecated use {@link Builder#withConfiguration(Configuration)}
| 3.26 |
flink_EnvironmentSettings_getConfiguration_rdh
|
/**
* Get the underlying {@link Configuration}.
*/
public Configuration getConfiguration() {
return configuration;
}
| 3.26 |
flink_EnvironmentSettings_withConfiguration_rdh
|
/**
* Add extra configuration to {@link EnvironmentSettings}.
*/
public Builder withConfiguration(Configuration configuration) {
this.configuration.addAll(configuration);
return this;
}
| 3.26 |
flink_EnvironmentSettings_withBuiltInCatalogName_rdh
|
/**
* Specifies the name of the initial catalog to be created when instantiating a {@link TableEnvironment}.
*
* <p>This catalog is an in-memory catalog that will be used to store all temporary objects
* (e.g. from {@link TableEnvironment#createTemporaryView(String, Table)} or {@link TableEnvironment#createTemporarySystemFunction(String, UserDefinedFunction)}) that cannot
* be persisted because they have no serializable representation.
*
* <p>It will also be the initial value for the current catalog which can be altered via
* {@link TableEnvironment#useCatalog(String)}.
*
* <p>Default: {@link TableConfigOptions#TABLE_DATABASE_NAME}{@code .defaultValue()}.
*/
public Builder withBuiltInCatalogName(String builtInCatalogName) {
configuration.set(TABLE_CATALOG_NAME, builtInCatalogName);
return this;
}
| 3.26 |
flink_EnvironmentSettings_getBuiltInCatalogName_rdh
|
/**
* Gets the specified name of the initial catalog to be created when instantiating a {@link TableEnvironment}.
*/
public String getBuiltInCatalogName() {
return configuration.get(TABLE_CATALOG_NAME);
}
| 3.26 |
flink_EnvironmentSettings_build_rdh
|
/**
* Returns an immutable instance of {@link EnvironmentSettings}.
*/
public EnvironmentSettings build() {
if (classLoader == null) {
classLoader = Thread.currentThread().getContextClassLoader();
}
return new EnvironmentSettings(configuration, classLoader, catalogStore);
}
| 3.26 |
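A short end-to-end sketch of the builder methods above (catalog and database names are arbitrary examples): build the settings, then hand them to a `TableEnvironment`.

```java
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class EnvironmentSettingsExample {
    public static void main(String[] args) {
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()                         // or .inBatchMode()
                .withBuiltInCatalogName("example_catalog") // illustrative names
                .withBuiltInDatabaseName("example_db")
                .build();

        TableEnvironment tableEnv = TableEnvironment.create(settings);
        tableEnv.executeSql("CREATE TEMPORARY VIEW numbers AS SELECT 1 AS n");
    }
}
```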
flink_EnvironmentSettings_withBuiltInDatabaseName_rdh
|
/**
* Specifies the name of the default database in the initial catalog to be created when
* instantiating a {@link TableEnvironment}.
*
* <p>This database is an in-memory database that will be used to store all temporary
* objects (e.g. from {@link TableEnvironment#createTemporaryView(String, Table)} or {@link TableEnvironment#createTemporarySystemFunction(String, UserDefinedFunction)}) that cannot
* be persisted because they have no serializable representation.
*
* <p>It will also be the initial value for the current database which can be altered via
* {@link TableEnvironment#useDatabase(String)}.
*
* <p>Default: {@link TableConfigOptions#TABLE_DATABASE_NAME}{@code .defaultValue()}.
*/
public Builder withBuiltInDatabaseName(String builtInDatabaseName) {
configuration.set(TABLE_DATABASE_NAME, builtInDatabaseName);
return this;
}
| 3.26 |
flink_EnvironmentSettings_getBuiltInDatabaseName_rdh
|
/**
* Gets the specified name of the default database in the initial catalog to be created when
* instantiating a {@link TableEnvironment}.
*/
public String getBuiltInDatabaseName() {
return configuration.get(TABLE_DATABASE_NAME);
}
| 3.26 |
flink_FileLock_unlock_rdh
|
/**
* Release the file lock.
*
* @throws IOException
* If the FileChannel is closed
*/
public void unlock() throws IOException {
if ((lock != null) && lock.channel().isOpen()) {
lock.release();
}
}
| 3.26 |
flink_FileLock_inTempFolder_rdh
|
/**
* Initialize a FileLock using a file located inside temp folder.
*
* @param fileName
* The name of the locking file
* @return The initialized FileLock
*/
public static FileLock inTempFolder(String fileName) {
return new FileLock(TEMP_DIR, fileName);
}
| 3.26 |
flink_FileLock_tryLock_rdh
|
/**
* Try to acquire a lock on the locking file. This method immediately returns whenever the lock
* is acquired or not.
*
* @return True if successfully acquired the lock
* @throws IOException
* If the file path is invalid
*/
public boolean tryLock() throws IOException {
if (outputStream == null) {
init();
}
try {
lock = outputStream.getChannel().tryLock();
} catch (Exception e) {
return false;
}
return lock != null;
}
| 3.26 |
flink_FileLock_unlockAndDestroy_rdh
|
/**
* Release the file lock, close the fileChannel and FileOutputStream then try deleting the
* locking file if other file lock does not need it, which means the lock will not be used
* anymore.
*
* @throws IOException
* If an I/O error occurs
*/
public void unlockAndDestroy() throws IOException {
try {
unlock();
if (lock != null) {
lock.channel().close();
lock = null;
}
if (outputStream != null) {
outputStream.close();
outputStream = null;
}
} finally {
this.file.delete();
}
}
| 3.26 |
flink_FileLock_init_rdh
|
/**
* Check whether the locking file exists in the file system. Create it if it does not exist.
* Then create a FileOutputStream for it.
*
* @throws IOException
* If the file path is invalid or the parent dir does not exist
*/
private void init() throws IOException {
if (!this.file.exists()) {
this.file.createNewFile();
}
outputStream = new FileOutputStream(this.file);
}
| 3.26 |
flink_FileLock_normalizeFileName_rdh
|
/**
* Normalize the file name, which only allows slash, backslash, digits and letters.
*
* @param fileName
* Original file name
* @return File name with illegal characters stripped
*/
private static String normalizeFileName(String fileName) {
return fileName.replaceAll("[^\\w/\\\\]", "");
}
| 3.26 |
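A usage sketch tying the `FileLock` methods above together; the import assumes the class lives in `org.apache.flink.util` (the package is not shown in the snippets), and the lock file name is made up.

```java
import org.apache.flink.util.FileLock; // assumed package; adjust if the class lives elsewhere

public class FileLockExample {
    public static void main(String[] args) throws Exception {
        FileLock lock = FileLock.inTempFolder("example-app.lock");
        if (lock.tryLock()) {
            try {
                // critical section: only the process holding the lock file gets here
                System.out.println("Acquired the lock, doing exclusive work...");
            } finally {
                // releases the lock, closes the channel/stream and deletes the lock file
                lock.unlockAndDestroy();
            }
        } else {
            System.out.println("Another process holds the lock.");
        }
    }
}
```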
flink_SerdeContext_get_rdh
|
/**
* Retrieve context from {@link SerializerProvider} and {@link DeserializationContext}.
*/
public static SerdeContext get(DatabindContext databindContext) {
final SerdeContext serdeContext = ((SerdeContext) (databindContext.getAttribute(SERDE_CONTEXT_KEY)));
assert serdeContext != null;
return serdeContext;
}
| 3.26 |
flink_RawObjectConverter_create_rdh
|
// --------------------------------------------------------------------------------------------
// Factory method
// --------------------------------------------------------------------------------------------
public static RawObjectConverter<?> create(DataType dataType) {
final LogicalType logicalType = dataType.getLogicalType();
final TypeSerializer<?> serializer;
if (logicalType instanceof TypeInformationRawType) {
serializer = ((TypeInformationRawType<?>) (logicalType)).getTypeInformation().createSerializer(new ExecutionConfig());
} else {
serializer = ((RawType<?>) (dataType.getLogicalType())).getTypeSerializer();
}
return new RawObjectConverter<>(serializer);
}
| 3.26 |
flink_DeltaEvictor_of_rdh
|
/**
* Creates a {@code DeltaEvictor} from the given threshold and {@code DeltaFunction}. Eviction
* is done before the window function.
*
* @param threshold
* The threshold
* @param deltaFunction
* The {@code DeltaFunction}
*/
public static <T, W extends Window> DeltaEvictor<T, W> of(double threshold, DeltaFunction<T> deltaFunction) {
return new DeltaEvictor<>(threshold, deltaFunction);
}
/**
* Creates a {@code DeltaEvictor} from the given threshold, {@code DeltaFunction}. Eviction is
* done before/after the window function based on the value of doEvictAfter.
*
* @param threshold
* The threshold
* @param deltaFunction
* The {@code DeltaFunction}
| 3.26 |
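A hedged sketch of plugging `DeltaEvictor.of(...)` into a windowed pipeline (threshold, delta function and data are illustrative; the `DeltaFunction` import assumes the usual `org.apache.flink.streaming.api.functions.windowing.delta` package):

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.delta.DeltaFunction;
import org.apache.flink.streaming.api.windowing.evictors.DeltaEvictor;

public class DeltaEvictorExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // evict buffered elements whose delta to the newest element exceeds 2.0 (arbitrary threshold)
        DeltaFunction<Double> delta = (oldPoint, newPoint) -> Math.abs(newPoint - oldPoint);

        env.fromElements(1.0, 1.5, 5.0, 5.2, 9.9)
                .keyBy(value -> "all")
                .countWindow(3)
                .evictor(DeltaEvictor.of(2.0, delta))
                .reduce(Double::sum)
                .print();

        env.execute("DeltaEvictor example");
    }
}
```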
flink_PrimitiveArrayTypeInfo_getComponentClass_rdh
|
/**
* Gets the class that represents the component type.
*
* @return The class of the component type.
*/
@PublicEvolving
public Class<?> getComponentClass() {
return this.arrayClass.getComponentType();
}
| 3.26 |
flink_PrimitiveArrayTypeInfo_getComponentType_rdh
|
/**
* Gets the type information of the component type.
*
* @return The type information of the component type.
*/
@PublicEvolving
public TypeInformation<?> getComponentType() {
return BasicTypeInfo.getInfoFor(getComponentClass());
}
| 3.26 |
flink_PrimitiveArrayTypeInfo_isBasicType_rdh
|
// --------------------------------------------------------------------------------------------
@Override
@PublicEvolving
public boolean isBasicType() {
return false;
}
| 3.26 |
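A small sketch querying the accessors above on the predefined `int[]` type-info constant (the behavior noted in comments is what the snippets imply, not verified output):

```java
import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo;

public class PrimitiveArrayTypeInfoExample {
    public static void main(String[] args) {
        PrimitiveArrayTypeInfo<int[]> intArrayType =
                PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO;

        System.out.println(intArrayType.getComponentClass()); // the primitive component class: int
        System.out.println(intArrayType.getComponentType());  // the component's TypeInformation
        System.out.println(intArrayType.isBasicType());       // false: arrays are not basic types
    }
}
```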
flink_ExecNodeBase_inputsContainSingleton_rdh
|
/**
* Whether singleton distribution is required.
*/
protected boolean inputsContainSingleton() {
return getInputProperties().stream().anyMatch(p -> p.getRequiredDistribution().getType() == InputProperty.DistributionType.SINGLETON);
}
| 3.26 |
flink_ExecNodeBase_getPersistedConfig_rdh
|
// Custom filter to exclude node configuration if no consumed options are used
@JsonProperty(value = FIELD_NAME_CONFIGURATION, access = Access.READ_ONLY, index = 2)
@JsonInclude(value = Include.CUSTOM, valueFilter = ConfigurationJsonSerializerFilter.class)
public ReadableConfig getPersistedConfig() {
return persistedConfig;
}
| 3.26 |
flink_ExecNodeBase_translateToFusionCodegenSpecInternal_rdh
|
/**
* Internal method, translates this node into a operator codegen spec generator.
*
* @param planner
* The planner.
* @param config
* per-{@link ExecNode} configuration that contains the merged configuration from
* various layers which all the nodes implementing this method should use, instead of
* retrieving configuration from the {@code planner}. For more details check {@link ExecNodeConfig}.
*/
protected OpFusionCodegenSpecGenerator translateToFusionCodegenSpecInternal(PlannerBase planner, ExecNodeConfig config) {
throw new TableException("This ExecNode doesn't support operator fusion codegen now.");
}
| 3.26 |
flink_ReaderInfo_getLocation_rdh
|
/**
*
* @return the location of the subtask that runs this source reader.
*/
public String getLocation() {
return location;
}
| 3.26 |
flink_ReaderInfo_getSubtaskId_rdh
|
/**
*
* @return the ID of the subtask that runs the source reader.
*/
public int getSubtaskId() {
return subtaskId;
}
| 3.26 |
flink_RowTimeIntervalJoin_getMaxOutputDelay_rdh
|
/**
* Get the maximum interval between receiving a row and emitting it (as part of a joined
* result). This is the time interval by which watermarks need to be held back.
*
* @return the maximum delay for the outputs
*/
public long getMaxOutputDelay() {
return Math.max(leftRelativeSize, rightRelativeSize) + allowedLateness;
}
| 3.26 |
flink_CliInputView_init_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected void init() {
// nothing to do
}
| 3.26 |
flink_CliInputView_insert_rdh
|
// --------------------------------------------------------------------------------------------
private void insert(String binding) {
currentInput.insert(cursorPos, binding);
cursorPos += binding.length();
// reset view
resetMainPart();
}
| 3.26 |
flink_CalculatedTableFactory_create_rdh
|
/**
* Creates a valid {@link CalculatedQueryOperation} operation.
*
* @param callExpr
* call to table function as expression
* @return valid calculated table
*/
QueryOperation create(ResolvedExpression callExpr, List<String> leftTableFieldNames) {
FunctionTableCallVisitor calculatedTableCreator = new FunctionTableCallVisitor(leftTableFieldNames);
return callExpr.accept(calculatedTableCreator);
}
| 3.26 |
flink_StatePartitionStreamProvider_getStream_rdh
|
/**
* Returns a stream with the data of one state partition.
*/
public InputStream getStream() throws IOException {
if (creationException != null) {
throw new IOException(creationException);
}
return stream;
}
| 3.26 |
flink_SupportsProjectionPushDown_applyProjection_rdh
|
/**
* Provides the field index paths that should be used for a projection. The indices are 0-based
* and support fields within (possibly nested) structures if this is enabled via {@link #supportsNestedProjection()}.
*
* <p>In the example mentioned in {@link SupportsProjectionPushDown}, this method would receive:
*
* <ul>
* <li>{@code [[2], [1]]} which is equivalent to {@code [["s"], ["r"]]} if {@link #supportsNestedProjection()} returns false.
* <li>{@code [[2], [1, 0]]} which is equivalent to {@code [["s"], ["r", "d"]]]} if {@link #supportsNestedProjection()} returns true.
* </ul>
*
* <p>Note: Use the passed data type instead of {@link ResolvedSchema#toPhysicalRowDataType()}
* for describing the final output data type when creating {@link TypeInformation}.
*
* @param projectedFields
* field index paths of all fields that must be present in the physically
* produced data
* @param producedDataType
* the final output type of the source, with the projection applied
*/
default void applyProjection(int[][] projectedFields, DataType producedDataType) {
applyProjection(projectedFields);
}
| 3.26 |
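A hypothetical connector fragment (the class name is invented) showing how a source might record a pushed-down projection via the interface variant above; a real connector would additionally implement `ScanTableSource` and use the stored indices when building its reader.

```java
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;

/** Invented example class; only the projection-push-down ability is sketched. */
public class ExampleProjectableSource implements SupportsProjectionPushDown {

    // e.g. [[2], [1]] meaning "emit only columns s and r", as in the Javadoc above
    private int[][] projectedFields;

    @Override
    public boolean supportsNestedProjection() {
        // this sketch only handles top-level fields
        return false;
    }

    @Override
    public void applyProjection(int[][] projectedFields) {
        // remember the projection so the runtime reader emits only these fields
        this.projectedFields = projectedFields;
    }
}
```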
flink_ArchivedExecutionVertex_getTaskNameWithSubtaskIndex_rdh
|
// --------------------------------------------------------------------------------------------
// Accessors
// --------------------------------------------------------------------------------------------
@Override
public String getTaskNameWithSubtaskIndex() {
return this.taskNameWithSubtask;
}
| 3.26 |
flink_FailureEnricherUtils_labelFailure_rdh
|
/**
* Enriches a Throwable by returning the merged label output of a Set of FailureEnrichers.
*
* @param cause
* the Throwable to label
* @param context
* the context of the Throwable
* @param mainThreadExecutor
* the executor to complete the enricher labeling on
* @param failureEnrichers
* a collection of FailureEnrichers to enrich the context with
* @return a CompletableFuture that will complete with a map of labels
*/
public static CompletableFuture<Map<String, String>> labelFailure(final Throwable cause, final Context context, final Executor mainThreadExecutor, final Collection<FailureEnricher> failureEnrichers) {
// list of CompletableFutures to enrich failure with labels from each enricher
final Collection<CompletableFuture<Map<String, String>>> enrichFutures = new ArrayList<>();
for (final FailureEnricher enricher : failureEnrichers) {
enrichFutures.add(enricher.processFailure(cause, context).thenApply(enricherLabels -> {
final Map<String, String> validLabels = new HashMap<>();
enricherLabels.forEach((k, v) -> {
if (!enricher.getOutputKeys().contains(k)) {
LOG.warn("Ignoring label with key {} from enricher {}" + " violating contract, keys allowed {}.", k, enricher.getClass(), enricher.getOutputKeys());} else {
validLabels.put(k, v);
}
});
return validLabels;
}).exceptionally(t -> {
LOG.warn("Enricher {} threw an exception.", enricher.getClass(), t);
return Collections.emptyMap();
}));
}
// combine all CompletableFutures into a single CompletableFuture containing a Map of labels
return FutureUtils.combineAll(enrichFutures).thenApplyAsync(labelsToMerge -> {
final Map<String, String> mergedLabels = new HashMap<>();
for (Map<String, String> labels : labelsToMerge) {
labels.forEach((k, v) -> // merge label with existing, throwing an exception
// if there is a key conflict
mergedLabels.merge(k, v, (first, second) -> {
throw new FlinkRuntimeException(String.format(MERGE_EXCEPTION_MSG, k));
}));
}
return mergedLabels;
}, mainThreadExecutor);
}
| 3.26 |
flink_FailureEnricherUtils_m0_rdh
|
/**
* Filters out invalid {@link FailureEnricher} objects that have duplicate output keys.
*
* @param failureEnrichers
* a set of {@link FailureEnricher} objects to filter
* @return a filtered collection without any duplicate output keys
*/
@VisibleForTesting
static Collection<FailureEnricher> m0(final Set<FailureEnricher> failureEnrichers) {
final Map<String, Set<Class<?>>> enrichersByKey = new HashMap<>();
failureEnrichers.forEach(enricher -> enricher.getOutputKeys().forEach(enricherKey -> enrichersByKey.computeIfAbsent(enricherKey, ignored -> new HashSet<>()).add(enricher.getClass())));
final Set<Class<?>> invalidEnrichers = enrichersByKey.entrySet().stream().filter(entry -> entry.getValue().size() > 1).flatMap(entry -> {
LOG.warn("Following enrichers have registered duplicate output key [%s] and will be ignored: {}.", entry.getValue().stream().map(Class::getName).collect(Collectors.joining(", ")));
return entry.getValue().stream();
}).collect(Collectors.toSet());
return failureEnrichers.stream().filter(enricher -> !invalidEnrichers.contains(enricher.getClass())).collect(Collectors.toList());
}
| 3.26 |
flink_FailureEnricherUtils_getIncludedFailureEnrichers_rdh
|
/**
* Returns a set of failure enricher names included in the given configuration.
*
* @param configuration
* the configuration to get the failure enricher names from
* @return failure enricher names
*/
@VisibleForTesting
static Set<String> getIncludedFailureEnrichers(final Configuration configuration) {
final String includedEnrichersString = configuration.getString(JobManagerOptions.FAILURE_ENRICHERS_LIST, "");
return enricherListPattern.splitAsStream(includedEnrichersString).filter(r -> !r.isEmpty()).collect(Collectors.toSet());
}
| 3.26 |
flink_FailureEnricherUtils_getFailureEnrichers_rdh
|
/**
* Returns a set of validated FailureEnrichers for a given configuration.
*
* @param configuration
* the configuration for the job
* @return a collection of validated FailureEnrichers
*/
public static Collection<FailureEnricher> getFailureEnrichers(final Configuration configuration) {
final PluginManager pluginManager = PluginUtils.createPluginManagerFromRootFolder(configuration);
return getFailureEnrichers(configuration, pluginManager);
}
| 3.26 |
flink_StateUtil_bestEffortDiscardAllStateObjects_rdh
|
/**
* Iterates through the passed state handles and calls discardState() on each handle that is not
* null. All occurring exceptions are suppressed and collected until the iteration is over and
* emitted as a single exception.
*
* @param handlesToDiscard
* State handles to discard. Passed iterable is allowed to deliver null
* values.
* @throws Exception
* exception that is a collection of all suppressed exceptions that were
* caught during iteration
*/
public static void bestEffortDiscardAllStateObjects(Iterable<? extends StateObject> handlesToDiscard) throws Exception {
LambdaUtil.applyToAllWhileSuppressingExceptions(handlesToDiscard, StateObject::discardState);
}
| 3.26 |
flink_StateUtil_discardStateFuture_rdh
|
/**
* Discards the given state future by first trying to cancel it. If this is not possible, then
* the state object contained in the future is calculated and afterwards discarded.
*
* @param stateFuture
* to be discarded
* @throws Exception
* if the discard operation failed
* @return the size of state before cancellation (if available)
*/
public static Tuple2<Long, Long> discardStateFuture(Future<? extends StateObject> stateFuture) throws Exception {
long stateSize = 0;
long checkpointedSize = 0;
if (null != stateFuture) {
if (!stateFuture.cancel(true)) {
try {
// We attempt to get a result, in case the future completed before cancellation.
if ((stateFuture instanceof RunnableFuture<?>) && (!stateFuture.isDone())) {
((RunnableFuture<?>) (stateFuture)).run();
}
StateObject stateObject = stateFuture.get();
if (stateObject != null) {
stateSize = stateObject.getStateSize();
checkpointedSize = getCheckpointedSize(stateObject, stateSize);
stateObject.discardState();
}
} catch (Exception ex) {
LOG.debug("Cancelled execution of snapshot future runnable. Cancellation produced the following " + "exception, which is expected an can be ignored.", ex);
}
} else if (stateFuture.isDone()) {
try {
StateObject v3 = stateFuture.get();
stateSize = v3.getStateSize();
checkpointedSize = getCheckpointedSize(v3, stateSize);
} catch (Exception e) {
// ignored
}
}
}
return Tuple2.of(stateSize, checkpointedSize);
}
| 3.26 |
flink_StateUtil_getStateSize_rdh
|
/**
* Returns the size of a state object.
*
* @param handle
* The handle to the retrieved state
*/
public static long getStateSize(StateObject handle) {
return handle == null ? 0 : handle.getStateSize();
}
| 3.26 |
flink_StateUtil_unexpectedStateHandleException_rdh
|
/**
* Creates a {@link RuntimeException} that signals that an operation did not get the type of
* {@link StateObject} that was expected. This can mostly happen when a different {@link StateBackend} from the one that was used for taking a checkpoint/savepoint is used when
* restoring.
*/
public static RuntimeException unexpectedStateHandleException(Class<? extends StateObject>[] expectedStateHandleClasses, Class<? extends StateObject> actualStateHandleClass) {
return new IllegalStateException(((((("Unexpected state handle type, expected one of: " + Joiner.on(", ").join(expectedStateHandleClasses)) + ", but found: ") + actualStateHandleClass) + ". ") + "This can mostly happen when a different StateBackend from the one ") + "that was used for taking a checkpoint/savepoint is used when restoring.");
}
| 3.26 |
flink_JvmUtils_createThreadInfoSample_rdh
|
/**
* Creates a {@link ThreadInfoSample} for a specific thread. Contains thread traces if
* maxStackTraceDepth > 0.
*
* @param threadId
* The ID of the thread to create the thread dump for.
* @param maxStackTraceDepth
* The maximum number of entries in the stack trace to be collected.
* @return The thread information of a specific thread.
*/
public static Optional<ThreadInfoSample> createThreadInfoSample(long threadId, int maxStackTraceDepth) {
ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
return ThreadInfoSample.from(threadMxBean.getThreadInfo(threadId, maxStackTraceDepth));
}
/**
* Creates a {@link ThreadInfoSample}
| 3.26 |
flink_JvmUtils_createThreadDump_rdh
|
/**
* Creates a thread dump of the current JVM.
*
* @return the thread dump of current JVM
*/
public static Collection<ThreadInfo> createThreadDump() {
ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
return Arrays.asList(threadMxBean.dumpAllThreads(true, true));
}
| 3.26 |
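A small usage sketch for the thread-dump helper above; the import assumes `org.apache.flink.runtime.util.JvmUtils` (the package is not shown in the snippet).

```java
import org.apache.flink.runtime.util.JvmUtils; // assumed package; adjust if the class lives elsewhere

import java.lang.management.ThreadInfo;

public class ThreadDumpExample {
    public static void main(String[] args) {
        // one line per live JVM thread: name and current state
        for (ThreadInfo info : JvmUtils.createThreadDump()) {
            System.out.println(info.getThreadName() + " -> " + info.getThreadState());
        }
    }
}
```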
flink_ReduceTransformation_getInputType_rdh
|
/**
* Returns the {@code TypeInformation} for the elements of the input.
*/
public TypeInformation<IN> getInputType() {
return input.getOutputType();
}
| 3.26 |
flink_CatalogFactory_supportedProperties_rdh
|
/**
*
* @deprecated Implement the {@link Factory} based stack instead.
*/
@Deprecated
default List<String> supportedProperties() {
// Default implementation for catalogs implementing the new {@link Factory} stack instead.
return null;
}
| 3.26 |
flink_CatalogFactory_requiredContext_rdh
|
// --------------------------------------------------------------------------------------------
// Default implementations for legacy {@link TableFactory} stack.
// --------------------------------------------------------------------------------------------
/**
*
* @deprecated Implement the {@link Factory} based stack instead.
*/
@Deprecated
default Map<String, String> requiredContext() {
// Default implementation for catalogs implementing the new {@link Factory} stack instead.
return null;
}
| 3.26 |
flink_CatalogFactory_createCatalog_rdh
|
/**
* Creates and configures a {@link Catalog} using the given context.
*
* <p>An implementation should perform validation and the discovery of further (nested)
* factories in this method.
*/
default Catalog createCatalog(Context context) {
throw new CatalogException("Catalog factories must implement createCatalog(Context)");
}
| 3.26 |
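A hypothetical minimal factory on the new `Factory`-based stack (identifier and class name are invented; real factories also need a `META-INF/services/org.apache.flink.table.factories.Factory` entry to be discoverable), backing the catalog with Flink's `GenericInMemoryCatalog`:

```java
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.factories.CatalogFactory;

import java.util.Collections;
import java.util.Set;

/** Invented example: CREATE CATALOG my_cat WITH ('type' = 'example-in-memory'). */
public class ExampleCatalogFactory implements CatalogFactory {

    @Override
    public String factoryIdentifier() {
        return "example-in-memory";
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        return Collections.emptySet();
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        return Collections.emptySet();
    }

    @Override
    public Catalog createCatalog(Context context) {
        // validation of context.getOptions() would normally happen here
        return new GenericInMemoryCatalog(context.getName());
    }
}
```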