name | code_snippet | score
---|---|---|
flink_PekkoUtils_getRemoteConfig_rdh | /**
* Creates a Pekko config for a remote actor system listening on port on the network interface
* identified by bindAddress.
*
* @param configuration
* instance containing the user provided configuration values
* @param bindAddress
* of the network interface to bind on
* @param port
* to bind to or if 0 then Pekko picks a free port automatically
* @param externalHostname
* The host name to expect for Pekko messages
* @param externalPort
* The port to expect for Pekko messages
* @return Flink's Pekko configuration for remote actor systems
*/
private static Config getRemoteConfig(Configuration configuration, String bindAddress, int port, String externalHostname, int externalPort) {
final ConfigBuilder builder = new ConfigBuilder();
addBaseRemoteConfig(builder, configuration, port, externalPort);
addHostnameRemoteConfig(builder, bindAddress, externalHostname);
addSslRemoteConfig(builder, configuration);
addRemoteForkJoinExecutorConfig(builder, ActorSystemBootstrapTools.getRemoteForkJoinExecutorConfiguration(configuration));
return builder.build();
} | 3.26 |
flink_PekkoUtils_terminateActorSystem_rdh | /**
* Terminates the given {@link ActorSystem} and returns its termination future.
*
* @param actorSystem
* to terminate
* @return Termination future
*/
public static CompletableFuture<Void> terminateActorSystem(ActorSystem actorSystem) {
return ScalaFutureUtils.toJava(actorSystem.terminate()).thenAccept(FunctionUtils.ignoreFn());
} | 3.26 |
flink_PekkoUtils_getConfig_rdh | /**
* Creates a Pekko config with the provided configuration values. If the listening address is
* specified, then the actor system will listen on the respective address.
*
* @param configuration
* instance containing the user provided configuration values
* @param externalAddress
* optional tuple of external address and port to be reachable at. If
* null is given, then a Pekko config for a local actor system will be returned
* @param bindAddress
* optional tuple of bind address and port to be used locally. If null is
* given, the wildcard IP address and the external port will be used. Takes effect only if
* externalAddress is not null.
* @param executorConfig
* config defining the used executor by the default dispatcher
* @return Pekko config
*/
public static Config getConfig(Configuration configuration, @Nullable HostAndPort externalAddress, @Nullable HostAndPort bindAddress, Config executorConfig) {
final Config defaultConfig = PekkoUtils.getBasicConfig(configuration).withFallback(executorConfig);
if (externalAddress != null) {
if (bindAddress != null) {
final Config remoteConfig = PekkoUtils.getRemoteConfig(configuration, bindAddress.getHost(), bindAddress.getPort(), externalAddress.getHost(), externalAddress.getPort());
return remoteConfig.withFallback(defaultConfig);
} else {
final Config remoteConfig = PekkoUtils.getRemoteConfig(configuration, NetUtils.getWildcardIPAddress(), externalAddress.getPort(), externalAddress.getHost(), externalAddress.getPort());
return remoteConfig.withFallback(defaultConfig);
}
}
return defaultConfig;
} | 3.26 |
flink_PekkoUtils_getAddress_rdh | /**
* Returns the address of the given {@link ActorSystem}. The {@link Address} object contains the
* port and the host under which the actor system is reachable.
*
* @param system
* {@link ActorSystem} for which the {@link Address} shall be retrieved
* @return {@link Address} of the given {@link ActorSystem}
*/
public static Address getAddress(ActorSystem system) {
return RemoteAddressExtension.INSTANCE.apply(system).getAddress();
}
/**
* Returns the given {@link ActorRef}'s path string representation with host and port of the
* {@link ActorSystem} in which the actor is running.
*
* @param system
* {@link ActorSystem} in which the given {@link ActorRef} is running
* @param actor
* {@link ActorRef} of the actor for which the URL has to be generated
* @return String containing the {@link ActorSystem} | 3.26 |
flink_PekkoUtils_createLocalActorSystem_rdh | /**
* Creates a local actor system without remoting.
*
* @param configuration
* instance containing the user provided configuration values
* @return The created actor system
*/
public static ActorSystem createLocalActorSystem(Configuration configuration) {
return createActorSystem(getConfig(configuration, null));
} | 3.26 |
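Taken together, the PekkoUtils rows above describe a small lifecycle: create a local (non-remoting) actor system, read its address, and terminate it through the returned future. A minimal usage sketch, not from the dataset; the Pekko imports follow the usual package layout, and the PekkoUtils import is omitted because its internal package is not shown in these snippets.

```java
import org.apache.flink.configuration.Configuration;
import org.apache.pekko.actor.ActorSystem;
import org.apache.pekko.actor.Address;
// import of PekkoUtils omitted: its package is internal to Flink's RPC module and not shown here

public class LocalActorSystemExample {
    public static void main(String[] args) {
        // Local actor system: getConfig(configuration, null) yields a config without remoting.
        ActorSystem system = PekkoUtils.createLocalActorSystem(new Configuration());
        // Address of the actor system (host/port are only meaningful for remote setups).
        Address address = PekkoUtils.getAddress(system);
        System.out.println("Actor system address: " + address);
        // terminateActorSystem returns a CompletableFuture<Void>; block until shutdown completes.
        PekkoUtils.terminateActorSystem(system).join();
    }
}
```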
flink_TestUtils_readCsvResultFiles_rdh | /**
* Reads all the files under the specified path.
*/
public static List<String> readCsvResultFiles(Path path) throws IOException {
File filePath = path.toFile();
// list all the non-hidden files
File[] csvFiles = filePath.listFiles((dir, name) -> !name.startsWith("."));
List<String> result = new ArrayList<>();
if (csvFiles != null) {
for (File file : csvFiles) {
result.addAll(Files.readAllLines(file.toPath()));
}
}
return result;
} | 3.26 |
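A hedged usage sketch for readCsvResultFiles: it lists the non-hidden files in a directory and concatenates their lines. The setup below uses plain java.nio; only the TestUtils class itself is assumed to be importable (its package is not shown in the snippet).

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
// import of TestUtils omitted: its package is not shown in the snippet

public class ReadCsvResultFilesExample {
    public static void main(String[] args) throws Exception {
        Path dir = Files.createTempDirectory("csv-results");
        Files.write(dir.resolve("part-0"), List.of("1,a", "2,b"));
        Files.write(dir.resolve("part-1"), List.of("3,c"));
        // Hidden files (names starting with '.') are skipped by the listFiles filter above.
        Files.write(dir.resolve(".part-0.crc"), List.of("checksum"));

        List<String> lines = TestUtils.readCsvResultFiles(dir);
        System.out.println(lines); // [1,a, 2,b, 3,c] in some file order
    }
}
```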
flink_TypeSerializerInputFormat_getProducedType_rdh | // --------------------------------------------------------------------------------------------
// Typing
// --------------------------------------------------------------------------------------------
@Override
public TypeInformation<T> getProducedType() {
return resultType;
} | 3.26 |
flink_FailureHandlingResultSnapshot_getConcurrentlyFailedExecution_rdh | /**
* All {@link Execution Executions} that failed and are planned to be restarted as part of this
* failure handling.
*
* @return The concurrently failed {@code Executions}.
*/
public Iterable<Execution> getConcurrentlyFailedExecution() {
return Collections.unmodifiableSet(concurrentlyFailedExecutions);
} | 3.26 |
flink_FailureHandlingResultSnapshot_create_rdh | /**
* Creates a {@code FailureHandlingResultSnapshot} based on the passed {@link FailureHandlingResult} and {@link ExecutionVertex ExecutionVertices}.
*
* @param failureHandlingResult
* The {@code FailureHandlingResult} that is used for extracting
* the failure information.
* @param currentExecutionsLookup
* The look-up function for retrieving all the current {@link Execution} instances for a given {@link ExecutionVertexID}.
* @return The {@code FailureHandlingResultSnapshot}.
*/
public static FailureHandlingResultSnapshot create(FailureHandlingResult failureHandlingResult, Function<ExecutionVertexID, Collection<Execution>> currentExecutionsLookup) {
final Execution rootCauseExecution = failureHandlingResult.getFailedExecution().orElse(null);
if ((rootCauseExecution != null) && (!rootCauseExecution.getFailureInfo().isPresent())) {
throw new IllegalArgumentException(String.format("The failed execution %s didn't provide a failure info.", rootCauseExecution.getAttemptId()));
}
final Set<Execution> concurrentlyFailedExecutions = failureHandlingResult.getVerticesToRestart().stream().flatMap(id -> currentExecutionsLookup.apply(id).stream()).filter(execution -> execution != rootCauseExecution).filter(execution -> execution.getFailureInfo().isPresent()).collect(Collectors.toSet());
return new FailureHandlingResultSnapshot(rootCauseExecution, ErrorInfo.handleMissingThrowable(failureHandlingResult.getError()), failureHandlingResult.getTimestamp(), failureHandlingResult.getFailureLabels(), concurrentlyFailedExecutions);
} | 3.26 |
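The interesting part of create() is the stream pipeline that collects the concurrently failed executions: every current execution of the vertices to restart, minus the root-cause execution and minus executions without failure info. Below is a simplified, Flink-free sketch of that filtering with stand-in types; it illustrates the logic only and does not use the real Flink classes.

```java
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

final class Exec {
    final String attemptId;
    final String failureInfo; // null means "no failure info"
    Exec(String attemptId, String failureInfo) { this.attemptId = attemptId; this.failureInfo = failureInfo; }
}

public class ConcurrentFailureFilterSketch {
    static Set<Exec> concurrentlyFailed(Exec rootCause, Set<String> verticesToRestart,
                                        Function<String, Collection<Exec>> currentExecutionsLookup) {
        return verticesToRestart.stream()
                .flatMap(id -> currentExecutionsLookup.apply(id).stream())
                .filter(e -> e != rootCause)        // exclude the root-cause execution itself
                .filter(e -> e.failureInfo != null) // keep only executions that actually failed
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        Exec root = new Exec("a-1", "boom");
        Exec other = new Exec("b-1", "collateral failure");
        Exec healthy = new Exec("c-1", null);
        Map<String, Collection<Exec>> executions = Map.of(
                "vertexA", List.of(root), "vertexB", List.of(other), "vertexC", List.of(healthy));
        System.out.println(concurrentlyFailed(root, executions.keySet(), executions::get).size()); // 1
    }
}
```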
flink_FailureHandlingResultSnapshot_getTimestamp_rdh | /**
* The time the failure occurred.
*
* @return The time of the failure.
*/
public long getTimestamp() {
return timestamp;
} | 3.26 |
flink_FailureHandlingResultSnapshot_getRootCauseExecution_rdh | /**
* Returns the {@link Execution} that handled the root cause for this failure. An empty {@code Optional} will be returned if it's a global failure.
*
* @return The {@link Execution} that handled the root cause for this failure.
*/
public Optional<Execution> getRootCauseExecution() {
return Optional.ofNullable(rootCauseExecution);
} | 3.26 |
flink_FailureHandlingResultSnapshot_getFailureLabels_rdh | /**
* Returns the labels future associated with the failure.
*
* @return the CompletableFuture map of String labels
*/
public CompletableFuture<Map<String, String>> getFailureLabels() {
return failureLabels;
} | 3.26 |
flink_PredefinedOptions_getValue_rdh | /**
* Gets an option value according to the pre-defined values. If not defined, returns the default
* value.
*
* @param option
* the option.
* @param <T>
* the option value type.
* @return the value if defined, otherwise return the default value.
*/
@Nullable
@SuppressWarnings("unchecked")
<T> T getValue(ConfigOption<T> option) {
Object value = options.get(option.key());
if (value == null) {
value = option.defaultValue();
}
if (value == null) {
return null;
}
return ((T) (value));
} | 3.26 |
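getValue() is the usual lookup-with-default pattern over a raw options map keyed by ConfigOption#key(). A self-contained sketch of the same pattern; the map and the hypothetical option below are mine and not part of PredefinedOptions.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

public class PredefinedValueLookupExample {
    // Pre-defined values keyed by the option key, mirroring the 'options' map in the snippet.
    private static final Map<String, Object> OPTIONS = new HashMap<>();

    @SuppressWarnings("unchecked")
    static <T> T getValue(ConfigOption<T> option) {
        // Fall back to the option's own default when no pre-defined value exists.
        Object value = OPTIONS.getOrDefault(option.key(), option.defaultValue());
        return (T) value;
    }

    public static void main(String[] args) {
        ConfigOption<Integer> maxThreads =
                ConfigOptions.key("example.max-threads").intType().defaultValue(4); // hypothetical option
        OPTIONS.put(maxThreads.key(), 8);
        System.out.println(getValue(maxThreads)); // 8; would print 4 if the map had no entry
    }
}
```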
flink_FileCompactStrategy_enableCompactionOnCheckpoint_rdh | /**
* Optional, compaction will be triggered when N checkpoints have passed since the last
* triggering; -1 by default, indicating no compaction on checkpoint.
*/
public FileCompactStrategy.Builder enableCompactionOnCheckpoint(int numCheckpointsBeforeCompaction) {
checkArgument(numCheckpointsBeforeCompaction > 0, "Number of checkpoints before compaction should be more than 0.");
this.numCheckpointsBeforeCompaction = numCheckpointsBeforeCompaction;
return this;
} | 3.26 |
flink_FileCompactStrategy_setSizeThreshold_rdh | /**
* Optional, compaction will be triggered when the total size of compacting files reaches
* the threshold. -1 by default, indicating the size is unlimited.
*/
public FileCompactStrategy.Builder setSizeThreshold(long sizeThreshold) {
this.sizeThreshold = sizeThreshold;
return this;
} | 3.26 |
flink_FileCompactStrategy_setNumCompactThreads_rdh | /**
* Optional, the count of compacting threads in a compactor operator, 1 by default.
*/
public FileCompactStrategy.Builder setNumCompactThreads(int numCompactThreads) {
checkArgument(numCompactThreads > 0, "Compact threads should be more than 0.");
this.numCompactThreads = numCompactThreads;
return this;
} | 3.26 |
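The three Builder rows above are typically chained when configuring file compaction for the file sink. A hedged configuration sketch: the Builder.newBuilder() factory and the terminal build() call are assumptions that do not appear in these snippets, and the package name is the one used by the file connector as far as I know.

```java
import org.apache.flink.connector.file.sink.compactor.FileCompactStrategy;

public class FileCompactStrategyExample {
    public static FileCompactStrategy buildStrategy() {
        return FileCompactStrategy.Builder.newBuilder() // assumed static factory
                .enableCompactionOnCheckpoint(3)        // compact every 3 checkpoints
                .setSizeThreshold(64 * 1024 * 1024)     // or once 64 MiB of files are pending
                .setNumCompactThreads(2)                // two compacting threads per operator
                .build();                               // assumed terminal call
    }
}
```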
flink_DagConnection_getDataExchangeMode_rdh | /**
* Gets the data exchange mode to use for this connection.
*
* @return The data exchange mode to use for this connection.
*/
public ExecutionMode getDataExchangeMode() {
if (dataExchangeMode == null) {
throw new IllegalStateException("This connection does not have the data exchange mode set");
}
return dataExchangeMode;
} | 3.26 |
flink_DagConnection_toString_rdh | // --------------------------------------------------------------------------------------------
public String toString() {
StringBuilder buf = new StringBuilder(50);
buf.append("Connection: ");
if (this.source == null) {
buf.append("null");
} else {
buf.append(this.source.getOperator().getName());
buf.append('(').append(this.source.getOperatorName()).append(')');
}
buf.append(" -> ");
if (this.shipStrategy != null) {
buf.append('[');
buf.append(this.shipStrategy.name());
buf.append(']').append(' ');
}
if (this.target == null) {
buf.append("null");
} else {
buf.append(this.target.getOperator().getName());
buf.append('(').append(this.target.getOperatorName()).append(')');
}
return buf.toString();
} | 3.26 |
flink_DagConnection_getMaterializationMode_rdh | // --------------------------------------------------------------------------------------------
public TempMode getMaterializationMode() {
return this.materializationMode;
} | 3.26 |
flink_DagConnection_getShipStrategy_rdh | /**
* Gets the shipping strategy for this connection.
*
* @return The connection's shipping strategy.
*/
public ShipStrategyType getShipStrategy() {
return this.shipStrategy;
} | 3.26 |
flink_DagConnection_getSource_rdh | /**
* Gets the source of the connection.
*
* @return The source Node.
*/
public OptimizerNode getSource() {
return this.source;
} | 3.26 |
flink_DagConnection_setShipStrategy_rdh | /**
* Sets the shipping strategy for this connection.
*
* @param strategy
* The shipping strategy to be applied to this connection.
*/
public void setShipStrategy(ShipStrategyType strategy) {
this.shipStrategy = strategy;
} | 3.26 |
flink_DagConnection_markBreaksPipeline_rdh | /**
* Marks that this connection should do a decoupled data exchange (such as batched) rather than
* pipeline data. Connections are marked as pipeline breakers to avoid deadlock situations.
*/
public void markBreaksPipeline() {
this.breakPipeline = true;
} | 3.26 |
flink_DagConnection_getInterestingProperties_rdh | /**
* Gets the interesting properties object for this pact connection. If the interesting
* properties for this connection have not yet been set, this method returns null.
*
* @return The collection of all interesting properties, or null, if they have not yet been set.
*/
public InterestingProperties getInterestingProperties() {
return this.interestingProps;
} | 3.26 |
flink_DagConnection_getEstimatedOutputSize_rdh | // --------------------------------------------------------------------------------------------
// Estimates
// --------------------------------------------------------------------------------------------
@Override
public long getEstimatedOutputSize() {
return this.source.getEstimatedOutputSize();
} | 3.26 |
flink_DagConnection_setInterestingProperties_rdh | /**
* Sets the interesting properties for this pact connection.
*
* @param props
* The interesting properties.
*/
public void setInterestingProperties(InterestingProperties props) {
if (this.interestingProps == null) {
this.interestingProps = props;
} else {
throw new IllegalStateException("Interesting Properties have already been set.");
}
} | 3.26 |
flink_PushLocalAggIntoScanRuleBase_isInputRefOnly_rdh | /**
* Currently, we only support pushing down an aggregate above a calc which has input refs only.
*
* @param calc
* BatchPhysicalCalc
* @return true if OK to be pushed down
*/
protected boolean isInputRefOnly(BatchPhysicalCalc calc) {
RexProgram program = calc.getProgram();
// check if condition exists. All filters should have been pushed down.
if (program.getCondition() != null) {
return false;
}
return (!program.getProjectList().isEmpty()) && program.getProjectList().stream().map(calc.getProgram()::expandLocalRef).allMatch(RexInputRef.class::isInstance);
} | 3.26 |
flink_StreamOperatorStateContext_isRestored_rdh | /**
* Returns true if the states provided by this context are restored from a checkpoint/savepoint.
*/
default boolean isRestored() {
return getRestoredCheckpointId().isPresent();
} | 3.26 |
flink_SafetyNetCloseableRegistry_doClose_rdh | /**
* This implementation does not throw any exception during closing, for backward compatibility.
*/
@Override
protected void doClose(List<Closeable> toClose) throws IOException {
try {
IOUtils.closeAllQuietly(toClose);
} finally {
synchronized(REAPER_THREAD_LOCK) {
--GLOBAL_SAFETY_NET_REGISTRY_COUNT;
if (0 == GLOBAL_SAFETY_NET_REGISTRY_COUNT) {
REAPER_THREAD.interrupt();
REAPER_THREAD = null;
}
}
}
} | 3.26 |
flink_SubpartitionRemoteCacheManager_flushBuffers_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void flushBuffers() {
synchronized(allBuffers) {
List<Tuple2<Buffer, Integer>> allBuffersToFlush = new ArrayList<>(allBuffers);
allBuffers.clear();
if (allBuffersToFlush.isEmpty()) {
return;
}
PartitionFileWriter.SubpartitionBufferContext subpartitionBufferContext = new PartitionFileWriter.SubpartitionBufferContext(subpartitionId, Collections.singletonList(new PartitionFileWriter.SegmentBufferContext(segmentId, allBuffersToFlush, false)));
flushCompletableFuture = partitionFileWriter.write(partitionId, Collections.singletonList(subpartitionBufferContext));
}
} | 3.26 |
flink_SubpartitionRemoteCacheManager_startSegment_rdh | // ------------------------------------------------------------------------
// Called by RemoteCacheManager
// ------------------------------------------------------------------------
void startSegment(int segmentId) {
synchronized(allBuffers) {
checkState(allBuffers.isEmpty(), "There are un-flushed buffers.");
this.segmentId = segmentId;
}
} | 3.26 |
flink_ForwardHashExchangeProcessor_updateOriginalEdgeInMultipleInput_rdh | /**
* Add new exchange node between the input node and the target node for the given edge, and
* reconnect the edges. So that the transformations can be connected correctly.
*/
private void updateOriginalEdgeInMultipleInput(BatchExecMultipleInput multipleInput, int edgeIdx, BatchExecExchange newExchange) {
ExecEdge originalEdge = multipleInput.getOriginalEdges().get(edgeIdx);
ExecNode<?> inputNode = originalEdge.getSource();
ExecNode<?> targetNode = originalEdge.getTarget();
int edgeIdxInTargetNode = targetNode.getInputEdges().indexOf(originalEdge);
checkArgument(edgeIdxInTargetNode >= 0);
List<ExecEdge> newEdges = new ArrayList<>(targetNode.getInputEdges());
// connect input node to new exchange node
ExecEdge newEdge1 = new ExecEdge(inputNode, newExchange, originalEdge.getShuffle(), originalEdge.getExchangeMode());
newExchange.setInputEdges(Collections.singletonList(newEdge1));
// connect new exchange node to target node
ExecEdge newEdge2 = new ExecEdge(newExchange, targetNode, originalEdge.getShuffle(), originalEdge.getExchangeMode());
newEdges.set(edgeIdxInTargetNode, newEdge2);
targetNode.setInputEdges(newEdges);
// update the originalEdge in MultipleInput; this is needed for multiple operator fusion
// codegen
multipleInput.getOriginalEdges().set(edgeIdx, newEdge2);
} | 3.26 |
flink_ForwardHashExchangeProcessor_addExchangeAndReconnectEdge_rdh | // TODO This implementation should be updated once FLINK-21224 is finished.
private ExecEdge addExchangeAndReconnectEdge(ReadableConfig tableConfig, ExecEdge edge, InputProperty inputProperty, boolean strict, boolean visitChild) {
ExecNode<?> target = edge.getTarget();
ExecNode<?> source = edge.getSource();
if (source instanceof CommonExecExchange) {
return edge;
}
// only Calc, Correlate and Sort can propagate sort property and distribution property
if (visitChild && (((((source instanceof BatchExecCalc) || (source instanceof BatchExecPythonCalc)) || (source instanceof BatchExecSort)) || (source instanceof BatchExecCorrelate)) || (source instanceof BatchExecPythonCorrelate))) {
ExecEdge newEdge = addExchangeAndReconnectEdge(tableConfig, source.getInputEdges().get(0), inputProperty, strict, true);
source.setInputEdges(Collections.singletonList(newEdge));
}
BatchExecExchange exchange = createExchangeWithKeepInputAsIsDistribution(tableConfig, inputProperty, strict, ((RowType) (edge.getOutputType())));
ExecEdge newEdge = new ExecEdge(source, exchange, edge.getShuffle(), edge.getExchangeMode());
exchange.setInputEdges(Collections.singletonList(newEdge));
return new ExecEdge(exchange, target, edge.getShuffle(), edge.getExchangeMode());
} | 3.26 |
flink_SqlClient_main_rdh | // --------------------------------------------------------------------------------------------
public static void main(String[] args) {
startClient(args, DEFAULT_TERMINAL_FACTORY);
} | 3.26 |
flink_DayTimeIntervalType_needsDefaultDayPrecision_rdh | // --------------------------------------------------------------------------------------------
private boolean needsDefaultDayPrecision(DayTimeResolution resolution) {
switch (resolution) {
case HOUR :
case HOUR_TO_MINUTE :
case HOUR_TO_SECOND :
case MINUTE :
case MINUTE_TO_SECOND :
case SECOND :
return true;
default :
return false;
}
} | 3.26 |
flink_WindowOperator_registerCleanupTimer_rdh | /**
* Registers a timer to cleanup the content of the window.
*
* @param window
* the window whose state to discard
*/
private void registerCleanupTimer(W window) {
long cleanupTime = toEpochMillsForTimer(cleanupTime(window), shiftTimeZone);
if (cleanupTime == Long.MAX_VALUE) {
// don't set a GC timer for "end of time"
return;
}
if (windowAssigner.isEventTime()) {
triggerContext.registerEventTimeTimer(cleanupTime);
} else {
triggerContext.registerProcessingTimeTimer(cleanupTime);
}
} | 3.26 |
flink_WindowOperator_getNumLateRecordsDropped_rdh | // ------------------------------------------------------------------------------
// Visible For Testing
// ------------------------------------------------------------------------------
protected Counter getNumLateRecordsDropped() {
return numLateRecordsDropped;
} | 3.26 |
flink_WindowOperator_cleanupTime_rdh | /**
* Returns the cleanup time for a window, which is {@code window.maxTimestamp +
* allowedLateness}. In case this leads to a value greater than {@link Long#MAX_VALUE} then a
* cleanup time of {@link Long#MAX_VALUE} is returned.
*
* @param window
* the window whose cleanup time we are computing.
*/
private long cleanupTime(W window) {
if (windowAssigner.isEventTime()) {
long v15 = Math.max(0, window.maxTimestamp() + allowedLateness);
return v15 >= window.maxTimestamp() ? v15 : Long.MAX_VALUE;
} else {
return Math.max(0, window.maxTimestamp());
}
} | 3.26 |
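The event-time branch of cleanupTime() guards against long overflow: if window.maxTimestamp() + allowedLateness wraps around, the clamped result ends up smaller than maxTimestamp, and Long.MAX_VALUE is returned instead, which registerCleanupTimer() then treats as "don't set a GC timer". A small worked example of that arithmetic; the method and variable names below are mine.

```java
public class CleanupTimeOverflowExample {
    // Mirrors the event-time branch of cleanupTime() from the snippet above.
    static long cleanupTime(long maxTimestamp, long allowedLateness) {
        long candidate = Math.max(0, maxTimestamp + allowedLateness);
        // If the sum overflowed, 'candidate' moved backwards; report "end of time" instead.
        return candidate >= maxTimestamp ? candidate : Long.MAX_VALUE;
    }

    public static void main(String[] args) {
        System.out.println(cleanupTime(1_000L, 500L));              // 1500: normal case
        System.out.println(cleanupTime(Long.MAX_VALUE - 10, 100L)); // overflow -> Long.MAX_VALUE
        System.out.println(cleanupTime(Long.MAX_VALUE, 0L));        // "end of time" stays MAX_VALUE
    }
}
```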
flink_DoubleCounter_add_rdh | // ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(double value) {
localValue += value;
} | 3.26 |
flink_DoubleCounter_toString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "DoubleCounter " +
this.localValue;
} | 3.26 |
flink_StreamCompressionDecorator_decorateWithCompression_rdh | /**
* IMPORTANT: For streams returned by this method, {@link InputStream#close()} is not propagated
* to the inner stream. The inner stream must be closed separately.
*
* @param stream
* the stream to decorate.
* @return an input stream that is decorated by the compression scheme.
*/
public final InputStream decorateWithCompression(InputStream stream) throws IOException {
return m0(new NonClosingInputStreamDecorator(stream));
} | 3.26 |
flink_StreamCompressionDecorator_m0_rdh | /**
* Decorates the stream by wrapping it into a stream that applies a compression.
*
* <p>IMPORTANT: For streams returned by this method, {@link OutputStream#close()} is not
* propagated to the inner stream. The inner stream must be closed separately.
*
* @param stream
* the stream to decorate.
* @return an output stream that is decorated by the compression scheme.
*/
public final OutputStream m0(OutputStream stream) throws IOException {
return m0(new NonClosingOutputStreamDecorator(stream));
} | 3.26 |
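A hedged round-trip sketch for the two decoration methods shown above: the essential contract is that close() on the decorated stream does not close the inner stream, so the inner stream must be closed separately. The sketch keeps the snippet's renamed m0 for the output-stream variant, and SnappyStreamCompressionDecorator.INSTANCE plus the org.apache.flink.runtime.state package are assumptions about an available concrete subclass.

```java
import java.io.*;
import org.apache.flink.runtime.state.SnappyStreamCompressionDecorator; // assumed concrete subclass
import org.apache.flink.runtime.state.StreamCompressionDecorator;       // assumed package

public class CompressionDecoratorExample {
    public static void main(String[] args) throws IOException {
        StreamCompressionDecorator decorator = SnappyStreamCompressionDecorator.INSTANCE;

        ByteArrayOutputStream inner = new ByteArrayOutputStream();
        OutputStream compressed = decorator.m0(inner); // output-stream decoration (m0 above)
        compressed.write("hello state".getBytes());
        compressed.close();  // finishes compression but does NOT close 'inner'
        inner.close();       // the inner stream has to be closed separately

        InputStream decompressed =
                decorator.decorateWithCompression(new ByteArrayInputStream(inner.toByteArray()));
        System.out.println(new String(decompressed.readAllBytes())); // hello state (Java 9+)
    }
}
```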
flink_CircularElement_endMarker_rdh | /**
* Gets the element that is passed as marker for the end of data.
*
* @return The element that is passed as marker for the end of data.
*/
static <T> CircularElement<T> endMarker() {
@SuppressWarnings("unchecked")
CircularElement<T> c = ((CircularElement<T>) (EOF_MARKER));
return c;
} | 3.26 |
flink_CircularElement_spillingMarker_rdh | /**
* Gets the element that is passed as marker for signal beginning of spilling.
*
* @return The element that is passed as marker for signal beginning of spilling.
*/
static <T> CircularElement<T> spillingMarker() {
@SuppressWarnings("unchecked")
CircularElement<T> c = ((CircularElement<T>) (SPILLING_MARKER));
return c;
} | 3.26 |
flink_ExecNodeConfig_getStateRetentionTime_rdh | /**
*
* @return The duration until state which was not updated will be retained.
*/
public long getStateRetentionTime() {
return get(ExecutionConfigOptions.IDLE_STATE_RETENTION).toMillis();
} | 3.26 |
flink_ExecNodeConfig_isCompiled_rdh | /**
*
* @return Whether the {@link ExecNode} translation happens as part of a plan compilation.
*/
public boolean isCompiled() {
return isCompiled;
} | 3.26 |
flink_ExecNodeConfig_shouldSetUid_rdh | /**
*
* @return Whether transformations should set a UID.
*/
public boolean shouldSetUid() {
final UidGeneration uidGeneration = get(ExecutionConfigOptions.TABLE_EXEC_UID_GENERATION);
switch (uidGeneration) {
case PLAN_ONLY :
return isCompiled && (!get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_TRANSFORMATION_UIDS));
case ALWAYS :
return true;
case DISABLED :
return false;
default :
throw new IllegalArgumentException("Unknown UID generation strategy: " + uidGeneration);
}
} | 3.26 |
flink_FromClasspathEntryClassInformationProvider_getJarFile_rdh | /**
* Always returns an empty {@code Optional} because this implementation relies on the JAR
* archive being available on either the user or the system classpath.
*
* @return An empty {@code Optional}.
*/
@Override
public Optional<File> getJarFile() {
return Optional.empty();
}
/**
* Returns the job class name if it could be derived from the specified classpath or was
* explicitly specified.
*
* @return The job class name or an empty {@code Optional} | 3.26 |
flink_FromClasspathEntryClassInformationProvider_createWithJobClassAssumingOnSystemClasspath_rdh | /**
* Creates a {@code FromClasspathEntryClassInformationProvider} assuming that the passed job
* class is available on the system classpath.
*
* @param jobClassName
* The job class name working as the entry point.
* @return The {@code FromClasspathEntryClassInformationProvider} providing the job class found.
*/
public static FromClasspathEntryClassInformationProvider createWithJobClassAssumingOnSystemClasspath(String jobClassName) {
return new FromClasspathEntryClassInformationProvider(jobClassName);
} | 3.26 |
flink_SqlCreateTable_getColumnSqlString_rdh | /**
* Returns the projection format of the DDL columns (including computed columns), i.e. the
* following DDL:
*
* <pre>
* create table tbl1(
* col1 int,
* col2 varchar,
* col3 as to_timestamp(col2)
* ) with (
* 'connector' = 'csv'
* )
* </pre>
*
* <p>is equivalent to the query "col1, col2, to_timestamp(col2) as col3"; caution that the
* "computed column" operands have been reversed.
*/
public String getColumnSqlString() {
SqlPrettyWriter writer = new SqlPrettyWriter(SqlPrettyWriter.config().withDialect(AnsiSqlDialect.DEFAULT).withAlwaysUseParentheses(true).withSelectListItemsOnSeparateLines(false).withIndentation(0));
writer.startList("", "");
for (SqlNode column : columnList) {
writer.sep(",");
SqlTableColumn tableColumn = ((SqlTableColumn) (column));
if (tableColumn instanceof SqlComputedColumn) {
SqlComputedColumn computedColumn = ((SqlComputedColumn) (tableColumn));
computedColumn.getExpr().unparse(writer, 0, 0);
writer.keyword("AS");
}
tableColumn.getName().unparse(writer, 0, 0);
}
return writer.toString();
} | 3.26 |
flink_SqlCreateTable_m0_rdh | /**
* Returns the column constraints plus the table constraints.
*/
public List<SqlTableConstraint> m0() {
return SqlConstraintValidator.getFullConstraints(tableConstraints, columnList);
} | 3.26 |
flink_IterationIntermediateTask_initialize_rdh | // --------------------------------------------------------------------------------------------
@Override
protected void initialize() throws Exception {
super.initialize();
// set the last output collector of this task to reflect the iteration intermediate state
// update
// a) workset update
// b) solution set update
// c) none
Collector<OT> delegate = getLastOutputCollector();
if (isWorksetUpdate) {
// sanity check: we should not have a solution set and workset update at the same time
// in an intermediate task
if (isSolutionSetUpdate) {
throw new IllegalStateException("Plan bug: Intermediate task performs workset and solutions set update.");
}
Collector<OT> outputCollector = createWorksetUpdateOutputCollector(delegate);
// we need the WorksetUpdateOutputCollector separately to count the collected elements
if (isWorksetIteration) {
worksetUpdateOutputCollector = ((WorksetUpdateOutputCollector<OT>) (outputCollector));
}
setLastOutputCollector(outputCollector);
} else if (isSolutionSetUpdate) {
setLastOutputCollector(createSolutionSetUpdateOutputCollector(delegate));
}
} | 3.26 |
flink_LogicalRelDataTypeConverter_toLogicalTypeNotNull_rdh | // --------------------------------------------------------------------------------------------
// RelDataType to LogicalType
// --------------------------------------------------------------------------------------------
private static LogicalType toLogicalTypeNotNull(RelDataType relDataType, DataTypeFactory dataTypeFactory) {
// dataTypeFactory is a preparation for catalog user-defined types
switch (relDataType.getSqlTypeName()) {
case BOOLEAN :
return new BooleanType(false);
case TINYINT :
return new TinyIntType(false);
case SMALLINT :
return new SmallIntType(false);
case INTEGER :
return new IntType(false);
case BIGINT :
return new BigIntType(false);
case DECIMAL :
if (relDataType.getScale() < 0) {
// negative scale is not supported, normalize it
return new DecimalType(false, relDataType.getPrecision() - relDataType.getScale(), 0);
}
return new DecimalType(false, relDataType.getPrecision(), relDataType.getScale());
case FLOAT :
return new FloatType(false);
case DOUBLE :
return new DoubleType(false);
case DATE :
return new DateType(false);
case TIME :
return new TimeType(false, relDataType.getPrecision());
case TIMESTAMP :
return new TimestampType(false, getTimestampKind(relDataType), relDataType.getPrecision());
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return new LocalZonedTimestampType(false, getTimestampKind(relDataType), relDataType.getPrecision());
case INTERVAL_YEAR :
case INTERVAL_YEAR_MONTH :
case INTERVAL_MONTH :
return new YearMonthIntervalType(false, getYearMonthResolution(relDataType), relDataType.getPrecision());
case INTERVAL_DAY :
case INTERVAL_DAY_HOUR :
case INTERVAL_DAY_MINUTE :
case INTERVAL_DAY_SECOND :
case INTERVAL_HOUR :
case INTERVAL_HOUR_MINUTE :
case INTERVAL_HOUR_SECOND :
case INTERVAL_MINUTE :
case INTERVAL_MINUTE_SECOND :
return new DayTimeIntervalType(false, getDayTimeResolution(relDataType), relDataType.getPrecision(), relDataType.getScale());
case INTERVAL_SECOND :
return new DayTimeIntervalType(false, getDayTimeResolution(relDataType), DayTimeIntervalType.DEFAULT_DAY_PRECISION, relDataType.getScale());
case CHAR :
if (relDataType.getPrecision() == 0) {
return CharType.ofEmptyLiteral();
}
return new CharType(false, relDataType.getPrecision());
case VARCHAR :
if (relDataType.getPrecision() == 0) {
return VarCharType.ofEmptyLiteral();
}
return new VarCharType(false, relDataType.getPrecision());
case BINARY :
if (relDataType.getPrecision() == 0) {
return BinaryType.ofEmptyLiteral();
}
return new BinaryType(false, relDataType.getPrecision());
case VARBINARY :
if (relDataType.getPrecision() == 0) {
return VarBinaryType.ofEmptyLiteral();
}
return new VarBinaryType(false, relDataType.getPrecision());
case NULL :
return new NullType();
case SYMBOL :
return new SymbolType<>(false);
case MULTISET :
return new MultisetType(false, toLogicalType(relDataType.getComponentType(), dataTypeFactory));
case ARRAY :
return new ArrayType(false, toLogicalType(relDataType.getComponentType(), dataTypeFactory));
case MAP :
return new MapType(false, toLogicalType(relDataType.getKeyType(), dataTypeFactory), toLogicalType(relDataType.getValueType(), dataTypeFactory));
case DISTINCT :
throw new TableException("DISTINCT type is currently not supported.");
case ROW :
return new RowType(false, relDataType.getFieldList().stream().map(f -> new RowField(f.getName(), toLogicalType(f.getType(), dataTypeFactory))).collect(Collectors.toList()));
case STRUCTURED :
case OTHER :
if (relDataType instanceof StructuredRelDataType) {
return ((StructuredRelDataType) (relDataType)).getStructuredType();
} else if (relDataType instanceof RawRelDataType) {
return ((RawRelDataType) (relDataType)).getRawType();
}
// fall through
case REAL :
case TIME_WITH_LOCAL_TIME_ZONE :
case ANY :
case CURSOR :
case COLUMN_LIST :
case DYNAMIC_STAR :
case GEOMETRY :
case SARG :
default :
throw new TableException("Unsupported RelDataType: " + relDataType);
}
} | 3.26 |
flink_RawType_restore_rdh | // --------------------------------------------------------------------------------------------
/**
* Restores a raw type from the components of a serialized string representation.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public static RawType<?> restore(ClassLoader classLoader, String className, String serializerString) {
try {
final Class<?> clazz = Class.forName(className, true, classLoader);
final byte[] bytes = EncodingUtils.decodeBase64ToBytes(serializerString);
final DataInputDeserializer inputDeserializer = new DataInputDeserializer(bytes);
final TypeSerializerSnapshot<?> snapshot = TypeSerializerSnapshot.readVersionedSnapshot(inputDeserializer, classLoader);
return ((RawType<?>) (new RawType(clazz, snapshot.restoreSerializer())));
} catch (Throwable t) {
throw new ValidationException(String.format("Unable to restore the RAW type of class '%s' with serializer snapshot '%s'.", className, serializerString), t);
}
} | 3.26 |
flink_RawType_m1_rdh | /**
* Returns the serialized {@link TypeSerializerSnapshot} in Base64 encoding of this raw type.
*/
public String m1() {
if (serializerString == null) {
final DataOutputSerializer outputSerializer = new DataOutputSerializer(128);
try {
TypeSerializerSnapshot.writeVersionedSnapshot(outputSerializer, serializer.snapshotConfiguration());
serializerString = EncodingUtils.encodeBytesToBase64(outputSerializer.getCopyOfBuffer());
return serializerString;
} catch (Exception e) {
throw new TableException(String.format("Unable to generate a string representation of the serializer snapshot of '%s' " + "describing the class '%s' for the RAW type.", serializer.getClass().getName(), clazz.toString()), e);
}
}
return serializerString;
} | 3.26 |
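restore() and the serializer-string getter (renamed m1 here, getSerializerString() in upstream Flink) are designed to round-trip: the Base64 snapshot string plus the class name are enough to rebuild the RawType. A hedged sketch of that round trip, assuming the public RawType constructor shown in restore() and Flink's stock IntSerializer; the package names are assumptions.

```java
import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.table.types.logical.RawType;

public class RawTypeRoundTripExample {
    public static void main(String[] args) {
        // Build a RAW type for Integer using a well-known serializer.
        RawType<Integer> raw = new RawType<>(Integer.class, IntSerializer.INSTANCE);
        // Base64-encoded serializer snapshot (m1() in the snippet above).
        String snapshotString = raw.m1();
        // Rebuild the type from class name + snapshot string.
        RawType<?> restored = RawType.restore(
                RawTypeRoundTripExample.class.getClassLoader(), Integer.class.getName(), snapshotString);
        System.out.println(restored.asSerializableString());
    }
}
```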
flink_ParameterizedTestExtension_createContextForParameters_rdh | // -------------------------------- Helper functions -------------------------------------------
private Stream<TestTemplateInvocationContext> createContextForParameters(Stream<Object[]> parameterValueStream, String testNameTemplate, ExtensionContext context) {
// Search fields annotated by @Parameter
final List<Field> parameterFields = AnnotationSupport.findAnnotatedFields(context.getRequiredTestClass(), Parameter.class);
// Use constructor parameter style
if (parameterFields.isEmpty()) {
return parameterValueStream.map(parameterValue -> new ConstructorParameterResolverInvocationContext(testNameTemplate, parameterValue));
}
// Use field injection style
for (Field parameterField : parameterFields) {
final int index = parameterField.getAnnotation(Parameter.class).value();
context.getStore(NAMESPACE).put(getParameterFieldStoreKey(index), parameterField);
}
return parameterValueStream.map(parameterValue -> new FieldInjectingInvocationContext(testNameTemplate, parameterValue));
} | 3.26 |
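A hedged example of a test class driven by this extension, using the field-injection style handled in the snippet above (a @Parameter field plus a @Parameters factory and @TestTemplate methods). The annotation package names follow Flink's testutils layout as I understand it and should be treated as assumptions.

```java
import org.apache.flink.testutils.junit.extensions.parameterized.Parameter;            // assumed package
import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension;
import org.apache.flink.testutils.junit.extensions.parameterized.Parameters;

import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.extension.ExtendWith;

import java.util.Arrays;
import java.util.Collection;

@ExtendWith(ParameterizedTestExtension.class)
class SquareTest {

    // Field-injection style: the extension stores this field under the @Parameter index.
    @Parameter
    public int input;

    @Parameters(name = "input = {0}")
    public static Collection<Object[]> parameters() {
        return Arrays.asList(new Object[][] {{1}, {2}, {3}});
    }

    @TestTemplate
    void squareIsNonNegative() {
        org.junit.jupiter.api.Assertions.assertTrue(input * input >= 0);
    }
}
```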
flink_FlinkHints_getQueryBlockAliasHints_rdh | /**
* Get all query block alias hints.
*
* <p>Because query block alias hints are propagated from the root to the leaves, one node may
* contain multiple alias hints. But only the first one is the real query block name where
* this node is.
*/
public static List<RelHint> getQueryBlockAliasHints(List<RelHint> allHints) {
return allHints.stream().filter(hint -> hint.hintName.equals(FlinkHints.HINT_ALIAS)).collect(Collectors.toList());
} | 3.26 |
flink_FlinkHints_resolveSubQuery_rdh | /**
* Resolve the RelNode of the sub query in conditions.
*/
private static RexNode resolveSubQuery(RexNode rexNode, Function<RelNode, RelNode> resolver) {
return rexNode.accept(new RexShuttle() {
@Override
public RexNode m0(RexSubQuery subQuery) {
RelNode oldRel = subQuery.rel;
RelNode newRel = resolver.apply(oldRel);
if (oldRel != newRel) {
return super.visitSubQuery(subQuery.clone(newRel));
}
return subQuery;
}
});
} | 3.26 |
flink_FlinkHints_getAllJoinHints_rdh | /**
* Get all join hints.
*/
public static List<RelHint> getAllJoinHints(List<RelHint> allHints) {
return allHints.stream().filter(hint -> JoinStrategy.isJoinStrategy(hint.hintName)).collect(Collectors.toList());
} | 3.26 |
flink_FlinkHints_getTableName_rdh | /**
* Returns the qualified name of a table scan, otherwise returns empty.
*/
public static Optional<String> getTableName(RelOptTable table) {
if (table == null) {
return Optional.empty();
}
String tableName;
if (table instanceof FlinkPreparingTableBase) {
tableName = StringUtils.join(((FlinkPreparingTableBase) (table)).getNames(), '.');
} else {
throw new TableException(String.format("Could not get the table name with the unknown table class `%s`", table.getClass().getCanonicalName()));
}
return Optional.of(tableName);
} | 3.26 |
flink_FlinkHints_getHintedOptions_rdh | // ~ Tools ------------------------------------------------------------------
/**
* Returns the OPTIONS hint options from the given list of table hints {@code tableHints}, never
* null.
*/
public static Map<String, String> getHintedOptions(List<RelHint> tableHints) {
return tableHints.stream().filter(hint -> hint.hintName.equalsIgnoreCase(HINT_NAME_OPTIONS)).findFirst().map(hint -> hint.kvOptions).orElse(Collections.emptyMap());
}
/**
* Merges the dynamic table options from {@code hints} and static table options from table
* definition {@code props}.
*
* <p>The options in {@code hints} would override the ones in {@code props} if they have the
* same option key.
*
* @param hints
* Dynamic table options, usually from the OPTIONS hint
* @param props
* Static table options defined in DDL or connect API
* @return New options with merged dynamic table options, or the old {@code props} | 3.26 |
flink_FlinkHints_clearJoinHintsOnUnmatchedNodes_rdh | /**
* Clear the join hints on some nodes where these hints should not be attached.
*/
public static RelNode clearJoinHintsOnUnmatchedNodes(RelNode root) {
return root.accept(new ClearJoinHintsOnUnmatchedNodesShuttle(root.getCluster().getHintStrategies()));
} | 3.26 |
flink_SqlRowOperator_inferReturnType_rdh | // ~ Methods ----------------------------------------------------------------
@Override
public RelDataType inferReturnType(SqlOperatorBinding opBinding) {
// ----- FLINK MODIFICATION BEGIN -----
// The type of a ROW(e1,e2) expression is a record with the types
// {e1type,e2type}. According to the standard, field names are
// implementation-defined.
int fieldCount = opBinding.getOperandCount();
return opBinding.getTypeFactory().createStructType(StructKind.PEEK_FIELDS_NO_EXPAND, new AbstractList<RelDataType>() {
@Override
public RelDataType get(int index) {
return opBinding.getOperandType(index);
}
@Override
public int size() {
return fieldCount;
}
}, new AbstractList<String>() {
@Override
public String get(int index) {
return SqlUtil.deriveAliasFromOrdinal(index);
}
@Override
public int size() {
return fieldCount;
}
});
// ----- FLINK MODIFICATION END -----
} | 3.26 |
flink_AsyncSinkBaseBuilder_setMaxRecordSizeInBytes_rdh | /**
*
* @param maxRecordSizeInBytes
* the maximum size of each record in bytes. If a record larger
* than this is passed to the sink, it will throw an {@code IllegalArgumentException}.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxRecordSizeInBytes(long maxRecordSizeInBytes) {
this.maxRecordSizeInBytes = maxRecordSizeInBytes;
return ((ConcreteBuilderT) (this));
} | 3.26 |
flink_AsyncSinkBaseBuilder_setMaxBatchSizeInBytes_rdh | /**
*
* @param maxBatchSizeInBytes
* a flush will be attempted if the most recent call to write
* introduces an element to the buffer such that the total size of the buffer is greater
* than or equal to this threshold value. If this happens, the maximum number of elements
* from the head of the buffer whose total size is smaller than {@code maxBatchSizeInBytes} will be selected and flushed.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBatchSizeInBytes(long maxBatchSizeInBytes) {
this.maxBatchSizeInBytes = maxBatchSizeInBytes;
return ((ConcreteBuilderT) (this));
} | 3.26 |
flink_AsyncSinkBaseBuilder_setMaxBufferedRequests_rdh | /**
*
* @param maxBufferedRequests
* the maximum buffer length. Callbacks to add elements to the buffer
* and calls to write will block if this length has been reached and will only unblock if
* elements from the buffer have been removed for flushing.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBufferedRequests(int maxBufferedRequests) {
this.maxBufferedRequests = maxBufferedRequests;
return ((ConcreteBuilderT) (this));
} | 3.26 |
flink_AsyncSinkBaseBuilder_setMaxInFlightRequests_rdh | /**
*
* @param maxInFlightRequests
* maximum number of uncompleted calls to submitRequestEntries that
* the SinkWriter will allow at any given point. Once this point has been reached, writes and
* callbacks to add elements to the buffer may block until one or more requests to
* submitRequestEntries completes.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxInFlightRequests(int maxInFlightRequests) {
this.maxInFlightRequests = maxInFlightRequests;
return ((ConcreteBuilderT) (this));
} | 3.26 |
flink_AsyncSinkBaseBuilder_setMaxTimeInBufferMS_rdh | /**
*
* @param maxTimeInBufferMS
* the maximum amount of time an element may remain in the buffer. In
* most cases elements are flushed as a result of the batch size (in bytes or number) being
* reached or during a snapshot. However, there are scenarios where an element may remain in
* the buffer forever or for a long period of time. To mitigate this, a timer is constantly
* active in the buffer such that: while the buffer is not empty, it will flush every
* maxTimeInBufferMS milliseconds.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxTimeInBufferMS(long maxTimeInBufferMS) {
this.maxTimeInBufferMS = maxTimeInBufferMS;
return ((ConcreteBuilderT) (this));
} | 3.26 |
flink_AsyncSinkBaseBuilder_setMaxBatchSize_rdh | /**
*
* @param maxBatchSize
* maximum number of elements that may be passed in a list to be written
* downstream.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBatchSize(int maxBatchSize) {
this.maxBatchSize = maxBatchSize;
return ((ConcreteBuilderT) (this));
} | 3.26 |
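The six setters above are normally chained on a concrete connector builder. In the sketch below, MySinkBuilder is a hypothetical subclass standing in for a real connector builder (the abstract build() signature and the base-class package are assumptions); the values are arbitrary illustrations of each knob.

```java
import org.apache.flink.connector.base.sink.AsyncSinkBase;        // assumed package
import org.apache.flink.connector.base.sink.AsyncSinkBaseBuilder;

public class AsyncSinkBuilderUsage {

    // Hypothetical concrete builder: a real connector builder would add its own options
    // and produce a real sink from build().
    static class MySinkBuilder extends AsyncSinkBaseBuilder<String, String, MySinkBuilder> {
        @Override
        public AsyncSinkBase<String, String> build() {
            throw new UnsupportedOperationException("sketch only");
        }
    }

    public static void main(String[] args) {
        new MySinkBuilder()
                .setMaxBatchSize(500)                     // at most 500 records per request
                .setMaxBatchSizeInBytes(5 * 1024 * 1024)  // flush once 5 MiB are buffered
                .setMaxInFlightRequests(16)               // cap concurrent submitRequestEntries calls
                .setMaxBufferedRequests(10_000)           // writes block beyond this buffer length
                .setMaxTimeInBufferMS(5_000)              // flush at least every 5 seconds
                .setMaxRecordSizeInBytes(1024 * 1024);    // reject single records above 1 MiB
    }
}
```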
flink_WatermarkAssignerChangelogNormalizeTransposeRule_buildTreeInOrder_rdh | /**
* Build a new {@link RelNode} tree in the given nodes order which is in bottom-up direction.
*/
@SafeVarargs
private final RelNode buildTreeInOrder(RelNode leafNode, Tuple2<RelNode, RelTraitSet>... nodeAndTraits) {
checkArgument(nodeAndTraits.length >= 1);
RelNode inputNode = leafNode;
RelNode v47 = null;
for (Tuple2<RelNode, RelTraitSet> nodeAndTrait : nodeAndTraits) {
v47 = nodeAndTrait.f0;
if (v47 instanceof StreamPhysicalExchange) {
v47 = ((StreamPhysicalExchange) (v47)).copy(nodeAndTrait.f1, inputNode, nodeAndTrait.f1.getTrait(FlinkRelDistributionTraitDef.INSTANCE()));
} else if (v47 instanceof StreamPhysicalChangelogNormalize) {
final List<String> inputNodeFields = inputNode.getRowType().getFieldNames();
final List<String> currentNodeFields = v47.getRowType().getFieldNames();
int[] remappedUniqueKeys = Arrays.stream(((StreamPhysicalChangelogNormalize) (v47)).uniqueKeys()).map(ukIdx -> inputNodeFields.indexOf(currentNodeFields.get(ukIdx))).toArray();
v47 = ((StreamPhysicalChangelogNormalize) (v47)).copy(nodeAndTrait.f1, inputNode, remappedUniqueKeys);
} else {
v47 = v47.copy(nodeAndTrait.f1, Collections.singletonList(inputNode));
}
inputNode = v47;
}
return v47;
} | 3.26 |
flink_TaskManagerRunner_createRpcService_rdh | /**
* Create a RPC service for the task manager.
*
* @param configuration
* The configuration for the TaskManager.
* @param haServices
* to use for the task manager hostname retrieval
*/
@VisibleForTesting
static RpcService createRpcService(final Configuration configuration, final HighAvailabilityServices haServices, final RpcSystem rpcSystem) throws Exception {
checkNotNull(configuration);
checkNotNull(haServices);
return RpcUtils.createRemoteRpcService(rpcSystem, configuration, determineTaskManagerBindAddress(configuration, haServices, rpcSystem), configuration.getString(TaskManagerOptions.RPC_PORT), configuration.getString(TaskManagerOptions.BIND_HOST), configuration.getOptional(TaskManagerOptions.RPC_BIND_PORT));
} | 3.26 |
flink_TaskManagerRunner_main_rdh | // --------------------------------------------------------------------------------------------
// Static entry point
// --------------------------------------------------------------------------------------------
public static void main(String[] args) throws Exception {
// startup checks and logging
EnvironmentInformation.logEnvironmentInfo(LOG, "TaskManager", args);
SignalHandler.register(LOG);
JvmShutdownSafeguard.installAsShutdownHook(LOG);
long maxOpenFileHandles = EnvironmentInformation.getOpenFileHandlesLimit();
if (maxOpenFileHandles != (-1L)) {
LOG.info("Maximum number of open file descriptors is {}.", maxOpenFileHandles);
} else {
LOG.info("Cannot determine the maximum number of open file descriptors");
}
runTaskManagerProcessSecurely(args);
} | 3.26 |
flink_TaskManagerRunner_onFatalError_rdh | // --------------------------------------------------------------------------------------------
// FatalErrorHandler methods
// --------------------------------------------------------------------------------------------
@Override
public void onFatalError(Throwable exception) {
TaskManagerExceptionUtils.tryEnrichTaskManagerError(exception);
LOG.error("Fatal error occurred while executing the TaskManager. Shutting it down...", exception);
if (ExceptionUtils.isJvmFatalOrOutOfMemoryError(exception)) {
terminateJVM();
} else {
m0(Result.FAILURE);
FutureUtils.orTimeout(terminationFuture, FATAL_ERROR_SHUTDOWN_TIMEOUT_MS, TimeUnit.MILLISECONDS, String.format("Waiting for TaskManager shutting down timed out after %s ms.", FATAL_ERROR_SHUTDOWN_TIMEOUT_MS));
}
} | 3.26 |
flink_TaskManagerRunner_start_rdh | // --------------------------------------------------------------------------------------------
// Lifecycle management
// --------------------------------------------------------------------------------------------
public void start() throws Exception {
synchronized(lock) {
startTaskManagerRunnerServices();
taskExecutorService.start();
}
} | 3.26 |
flink_TaskManagerRunner_createTaskExecutorService_rdh | // --------------------------------------------------------------------------------------------
// Static utilities
// --------------------------------------------------------------------------------------------
public static TaskExecutorService createTaskExecutorService(Configuration configuration, ResourceID resourceID, RpcService rpcService, HighAvailabilityServices highAvailabilityServices, HeartbeatServices heartbeatServices, MetricRegistry metricRegistry, BlobCacheService blobCacheService, boolean localCommunicationOnly, ExternalResourceInfoProvider externalResourceInfoProvider, WorkingDirectory workingDirectory, FatalErrorHandler fatalErrorHandler, DelegationTokenReceiverRepository delegationTokenReceiverRepository) throws Exception {
final TaskExecutor taskExecutor = m2(configuration, resourceID, rpcService, highAvailabilityServices, heartbeatServices, metricRegistry, blobCacheService, localCommunicationOnly, externalResourceInfoProvider, workingDirectory, fatalErrorHandler, delegationTokenReceiverRepository);
return TaskExecutorToServiceAdapter.createFor(taskExecutor);
} | 3.26 |
flink_TaskManagerRunner_m1_rdh | // export the termination future for caller to know it is terminated
public CompletableFuture<Result> m1() {
return terminationFuture;
} | 3.26 |
flink_SpillingThread_mergeChannelList_rdh | /**
* Merges the given sorted runs to a smaller number of sorted runs.
*
* @param channelIDs
* The IDs of the sorted runs that need to be merged.
* @param allReadBuffers
* @param writeBuffers
* The buffers to be used by the writers.
* @return A list of the IDs of the merged channels.
* @throws IOException
* Thrown, if the readers or writers encountered an I/O problem.
*/
private List<ChannelWithBlockCount> mergeChannelList(final List<ChannelWithBlockCount> channelIDs, final List<MemorySegment> allReadBuffers, final List<MemorySegment> writeBuffers) throws IOException {
// A channel list with length maxFanIn<sup>i</sup> can be merged to maxFanIn files in i-1
// rounds where every merge
// is a full merge with maxFanIn input channels. A partial round includes merges with fewer
// than maxFanIn
// inputs. It is most efficient to perform the partial round first.
final double scale = Math.ceil(Math.log(channelIDs.size()) / Math.log(this.maxFanIn)) - 1;
final int numStart = channelIDs.size();
final int numEnd = ((int) (Math.pow(this.maxFanIn, scale)));
final int numMerges = ((int) (Math.ceil((numStart - numEnd) / ((double) (this.maxFanIn - 1)))));
final int numNotMerged = numEnd - numMerges;
final int numToMerge = numStart - numNotMerged;
// unmerged channel IDs are copied directly to the result list
final List<ChannelWithBlockCount> mergedChannelIDs = new ArrayList<>(numEnd);
mergedChannelIDs.addAll(channelIDs.subList(0, numNotMerged));
final int channelsToMergePerStep = ((int) (Math.ceil(numToMerge / ((double) (numMerges)))));
// allocate the memory for the merging step
final List<List<MemorySegment>> readBuffers = new ArrayList<>(channelsToMergePerStep);
getSegmentsForReaders(readBuffers, allReadBuffers, channelsToMergePerStep);
final List<ChannelWithBlockCount> channelsToMergeThisStep = new ArrayList<>(channelsToMergePerStep);
int channelNum = numNotMerged;
while (isRunning() && (channelNum < channelIDs.size())) {
channelsToMergeThisStep.clear();
for (int v42 = 0; (v42 < channelsToMergePerStep) && (channelNum < channelIDs.size()); v42++ , channelNum++) {
channelsToMergeThisStep.add(channelIDs.get(channelNum));
}
mergedChannelIDs.add(mergeChannels(channelsToMergeThisStep, readBuffers, writeBuffers));
}
return mergedChannelIDs;
} | 3.26 |
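The planning arithmetic at the top of mergeChannelList() is easiest to follow with concrete numbers. With 10 sorted runs and a fan-in of 4, the partial first round passes 2 runs through untouched and performs 2 merges of 4 runs each, leaving exactly 4 runs for a final full merge. A worked example mirroring the snippet's formulas; the variable names match the snippet.

```java
public class MergePlanExample {
    public static void main(String[] args) {
        int maxFanIn = 4;
        int numStart = 10;

        double scale = Math.ceil(Math.log(numStart) / Math.log(maxFanIn)) - 1;           // 1.0
        int numEnd = (int) Math.pow(maxFanIn, scale);                                    // 4
        int numMerges = (int) Math.ceil((numStart - numEnd) / (double) (maxFanIn - 1));  // 2
        int numNotMerged = numEnd - numMerges;                                           // 2
        int numToMerge = numStart - numNotMerged;                                        // 8
        int channelsToMergePerStep = (int) Math.ceil(numToMerge / (double) numMerges);   // 4

        System.out.printf("pass through %d runs, do %d merges of up to %d runs -> %d runs left%n",
                numNotMerged, numMerges, channelsToMergePerStep, numEnd);
    }
}
```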
flink_SpillingThread_disposeSortBuffers_rdh | /**
* Releases the memory that is registered for in-memory sorted run generation.
*/
private void disposeSortBuffers(boolean releaseMemory) {
CircularElement<E> element;
while ((element = this.dispatcher.poll(SortStage.READ)) != null) {
element.getBuffer().dispose();
if (releaseMemory) {
this.memManager.release(element.getMemory());
}
}
} | 3.26 |
flink_SpillingThread_mergeChannels_rdh | /**
* Merges the sorted runs described by the given Channel IDs into a single sorted run. The
* merging process uses the given read and write buffers.
*
* @param channelIDs
* The IDs of the runs' channels.
* @param readBuffers
* The buffers for the readers that read the sorted runs.
* @param writeBuffers
* The buffers for the writer that writes the merged channel.
* @return The ID and number of blocks of the channel that describes the merged run.
*/
private ChannelWithBlockCount mergeChannels(List<ChannelWithBlockCount> channelIDs, List<List<MemorySegment>> readBuffers, List<MemorySegment> writeBuffers) throws IOException {
// the list with the readers, to be closed at shutdown
final List<FileIOChannel> channelAccesses = new ArrayList<>(channelIDs.size());
// the list with the target iterators
final MergeIterator<E> mergeIterator = getMergingIterator(channelIDs, readBuffers, channelAccesses, null);
// create a new channel writer
final FileIOChannel.ID mergedChannelID = this.ioManager.createChannel();
spillChannelManager.registerChannelToBeRemovedAtShutdown(mergedChannelID);
final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(mergedChannelID);
spillChannelManager.registerOpenChannelToBeRemovedAtShutdown(writer);
final ChannelWriterOutputView output = new ChannelWriterOutputView(writer, writeBuffers, this.memManager.getPageSize());
openSpillingBehaviour();
spillingBehaviour.mergeRecords(mergeIterator, output);
output.close();
final int numBlocksWritten = output.getBlockCount();
// register merged result to be removed at shutdown
spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(writer);
// remove the merged channel readers from the clear-at-shutdown list
for (FileIOChannel v49 : channelAccesses) {
v49.closeAndDelete();
spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(v49);
}
return new ChannelWithBlockCount(mergedChannelID, numBlocksWritten);
} | 3.26 |
flink_SpillingThread_getMergingIterator_rdh | // ------------------------------------------------------------------------
// Result Merging
// ------------------------------------------------------------------------
/**
* Returns an iterator that iterates over the merged result from all given channels.
*
* @param channelIDs
* The channels that are to be merged and returned.
* @param inputSegments
* The buffers to be used for reading. The list contains for each channel
* one list of input segments. The size of the <code>inputSegments</code> list must be equal
* to that of the <code>channelIDs</code> list.
* @return An iterator over the merged records of the input channels.
* @throws IOException
* Thrown, if the readers encounter an I/O problem.
*/
private MergeIterator<E> getMergingIterator(final List<ChannelWithBlockCount> channelIDs, final List<List<MemorySegment>> inputSegments, List<FileIOChannel> readerList, MutableObjectIterator<E> largeRecords) throws IOException {
// create one iterator per channel id
LOG.debug("Performing merge of {} sorted streams.", channelIDs.size());
final List<MutableObjectIterator<E>> iterators = new ArrayList<>(channelIDs.size() + 1);
for (int i = 0; i < channelIDs.size(); i++) {
final ChannelWithBlockCount channel = channelIDs.get(i);
final List<MemorySegment> segsForChannel = inputSegments.get(i);
// create a reader. if there are multiple segments for the reader, issue multiple
// together per I/O request
final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channel.getChannel());
readerList.add(reader);
spillChannelManager.registerOpenChannelToBeRemovedAtShutdown(reader);
spillChannelManager.unregisterChannelToBeRemovedAtShutdown(channel.getChannel());
// wrap channel reader as a view, to get block spanning record deserialization
final ChannelReaderInputView inView = new ChannelReaderInputView(reader, segsForChannel, channel.getBlockCount(), false);
iterators.add(new ChannelReaderInputViewIterator<>(inView, null, this.serializer));
}
if (largeRecords != null) {
iterators.add(largeRecords);
}
return new MergeIterator<>(iterators, this.comparator);
} | 3.26 |
flink_SpillingThread_m0_rdh | /**
* Entry point of the thread.
*/
@Override
public void m0() throws IOException, InterruptedException {
// ------------------- In-Memory Cache ------------------------
final Queue<CircularElement<E>> cache = new ArrayDeque<>();
boolean cacheOnly = readCache(cache);
// check whether the thread was canceled
if (!isRunning()) {
return;
}
MutableObjectIterator<E> largeRecords = null;
// check if we can stay in memory with the large record handler
if ((cacheOnly && (largeRecordHandler != null)) && largeRecordHandler.hasData()) {
List<MemorySegment> memoryForLargeRecordSorting = new ArrayList<>();
CircularElement<E> circElement;
while ((circElement = this.dispatcher.poll(SortStage.READ)) != null) {
circElement.getBuffer().dispose();
memoryForLargeRecordSorting.addAll(circElement.getMemory());
}
if (memoryForLargeRecordSorting.isEmpty()) {
cacheOnly = false;
LOG.debug("Going to disk-based merge because of large records.");
} else {
LOG.debug("Sorting large records, to add them to in-memory merge.");
largeRecords = largeRecordHandler.finishWriteAndSortKeys(memoryForLargeRecordSorting);
}
}
// ------------------- In-Memory Merge ------------------------
if (cacheOnly) {
mergeInMemory(cache, largeRecords);
return;
}
// ------------------- Spilling Phase ------------------------
List<ChannelWithBlockCount> channelIDs = startSpilling(cache);
// ------------------- Merging Phase ------------------------
mergeOnDisk(channelIDs);
} | 3.26 |
flink_SpillingThread_getSegmentsForReaders_rdh | /**
* Divides the given collection of memory buffers among {@code numChannels} sublists.
*
* @param target
* The list into which the lists with buffers for the channels are put.
* @param memory
* A list containing the memory buffers to be distributed. The buffers are not
* removed from this list.
* @param numChannels
* The number of channels for which to allocate buffers. Must not be zero.
*/
private void getSegmentsForReaders(List<List<MemorySegment>> target, List<MemorySegment> memory, int numChannels) {
// determine the memory to use per channel and the number of buffers
final int numBuffers = memory.size();
final int buffersPerChannelLowerBound = numBuffers / numChannels;
final int numChannelsWithOneMore = numBuffers % numChannels;
final Iterator<MemorySegment> segments = memory.iterator();
// collect memory for the channels that get one segment more
for (int i = 0; i < numChannelsWithOneMore; i++) {
final ArrayList<MemorySegment> segs = new ArrayList<>(buffersPerChannelLowerBound + 1);
target.add(segs);
for (int k = buffersPerChannelLowerBound; k >= 0; k--) {
segs.add(segments.next());
}
}
// collect memory for the remaining channels
for (int i = numChannelsWithOneMore; i < numChannels; i++) {
final ArrayList<MemorySegment> segs = new ArrayList<>(buffersPerChannelLowerBound);
target.add(segs);
for (int k = buffersPerChannelLowerBound; k > 0; k--) {
segs.add(segments.next());
}
}
} | 3.26 |
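To make the split concrete: with 10 buffers and 3 channels the lower bound is 3 and the remainder is 1, so the channels receive 4, 3 and 3 buffers. The same arithmetic rerun on plain integers (a self-contained illustration, not the Flink method):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class SegmentDistributionSketch {
    // Mirrors the scheme above: the first (numBuffers % numChannels) channels get one extra buffer.
    static List<List<Integer>> distribute(List<Integer> memory, int numChannels) {
        List<List<Integer>> target = new ArrayList<>();
        int numBuffers = memory.size();
        int lowerBound = numBuffers / numChannels;
        int withOneMore = numBuffers % numChannels;
        Iterator<Integer> it = memory.iterator();
        for (int i = 0; i < withOneMore; i++) {
            List<Integer> segs = new ArrayList<>(lowerBound + 1);
            for (int k = 0; k <= lowerBound; k++) {
                segs.add(it.next());
            }
            target.add(segs);
        }
        for (int i = withOneMore; i < numChannels; i++) {
            List<Integer> segs = new ArrayList<>(lowerBound);
            for (int k = 0; k < lowerBound; k++) {
                segs.add(it.next());
            }
            target.add(segs);
        }
        return target;
    }

    public static void main(String[] args) {
        List<Integer> buffers = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            buffers.add(i);
        }
        System.out.println(distribute(buffers, 3)); // [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
    }
}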
flink_SqlWindowTableFunction_checkTableAndDescriptorOperands_rdh | /**
* Checks whether the heading operands are in the form {@code (ROW, DESCRIPTOR, DESCRIPTOR
* ..., other params)}, returning whether successful, and throwing if any columns are not
* found.
*
* @param callBinding
* The call binding
* @param descriptorCount
* The number of descriptors following the first operand (e.g. the
* table)
* @return true if validation passes; throws if any columns are not found
*/
boolean checkTableAndDescriptorOperands(SqlCallBinding callBinding, int descriptorCount) {
final SqlNode operand0 = callBinding.operand(0);
final SqlValidator validator = callBinding.getValidator();
final RelDataType type = validator.getValidatedNodeType(operand0);
if (type.getSqlTypeName() != SqlTypeName.ROW) {
return false;
}
for (int i = 1; i < (descriptorCount + 1); i++) {
final SqlNode operand = callBinding.operand(i);
if (operand.getKind() != SqlKind.DESCRIPTOR) {
return false;
}
validateColumnNames(validator, type.getFieldNames(), ((SqlCall) (operand)).getOperandList());
}
return true;
} | 3.26
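For orientation, here is how a hypothetical windowing call lines up with the operand positions these checks inspect (illustrative only; the exact TVF and interval syntax may differ slightly, and the chained usage below is a guess at a typical caller, not code from this class):

// TUMBLE(TABLE Orders, DESCRIPTOR(order_time), INTERVAL '10' MINUTES)
//   operand 0: TABLE Orders           -> must validate to a ROW type
//   operand 1: DESCRIPTOR(order_time) -> must be a DESCRIPTOR whose column exists in Orders
//   operand 2: INTERVAL '10' MINUTES  -> left to checkIntervalOperands (next entry)
boolean valid =
        checkTableAndDescriptorOperands(callBinding, 1)   // operands 0..1
                && checkIntervalOperands(callBinding, 2); // operands 2..n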
flink_SqlWindowTableFunction_checkIntervalOperands_rdh | /**
* Checks whether the operands starting from position {@code startPos} are all of type
* {@code INTERVAL}, returning whether successful.
*
* @param callBinding
* The call binding
* @param startPos
* The start position to validate (starting index is 0)
* @return true if validation passes
*/
boolean checkIntervalOperands(SqlCallBinding callBinding, int startPos) {
final SqlValidator validator = callBinding.getValidator();
for (int i = startPos; i < callBinding.getOperandCount(); i++) {
final RelDataType type = validator.getValidatedNodeType(callBinding.operand(i));
if (!SqlTypeUtil.isInterval(type)) {
return false;
}
}
return true;
} | 3.26 |
flink_SqlWindowTableFunction_checkTimeColumnDescriptorOperand_rdh | /**
* Checks whether the type that the time column descriptor operand refers to is valid.
*
* @param callBinding
* The call binding
* @param pos
* The position of the descriptor at the operands of the call
* @return true if validation passes, false otherwise
*/
Optional<RuntimeException> checkTimeColumnDescriptorOperand(SqlCallBinding callBinding, int pos) {
SqlValidator validator = callBinding.getValidator();
SqlNode operand0 = callBinding.operand(0);
RelDataType type = validator.getValidatedNodeType(operand0);
List<SqlNode> operands = ((SqlCall) (callBinding.operand(pos))).getOperandList();
SqlIdentifier identifier = ((SqlIdentifier) (operands.get(0)));
String columnName = identifier.getSimple();
SqlNameMatcher matcher = validator.getCatalogReader().nameMatcher();
for (RelDataTypeField field : type.getFieldList()) {
if (matcher.matches(field.getName(), columnName)) {
RelDataType fieldType = field.getType();
if (FlinkTypeFactory.isTimeIndicatorType(fieldType)) {
return Optional.empty();
} else {
LogicalType timeAttributeType = FlinkTypeFactory.toLogicalType(fieldType);
if (!canBeTimeAttributeType(timeAttributeType)) {
ValidationException exception = new ValidationException(String.format("The window function %s requires the timecol to be TIMESTAMP or TIMESTAMP_LTZ, but is %s.\n" + "Besides, the timecol must be a time attribute type in streaming mode.", callBinding.getOperator().getAllowedSignatures(), field.getType()));
return Optional.of(exception);
} else {
return Optional.empty();
}
}
}
}
IllegalArgumentException error = new IllegalArgumentException(String.format("Can't find the time attribute field '%s' in the input schema %s.", columnName, type.getFullTypeString()));
return Optional.of(error);
} | 3.26 |
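The timecol rule above boils down to the column's logical type being TIMESTAMP or TIMESTAMP_LTZ. A rough restatement of that check (an illustration, not the actual canBeTimeAttributeType helper; import paths assumed from flink-table-common):

import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;

final class TimeAttributeTypeSketch {
    // TIMESTAMP(p) and TIMESTAMP_LTZ(p) columns qualify; anything else is rejected with the
    // ValidationException shown above.
    static boolean looksLikeTimeAttributeType(LogicalType type) {
        return type.is(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE)
                || type.is(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
    }
}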
flink_SqlWindowTableFunction_argumentMustBeScalar_rdh | /**
* {@inheritDoc }
*
* <p>Overrides because the first parameter of table-value function windowing is an explicit
* TABLE parameter, which is not scalar.
*/
@Override
public boolean argumentMustBeScalar(int ordinal) {
return ordinal != 0;
}
/**
* Helper for {@link #ARG0_TABLE_FUNCTION_WINDOWING} | 3.26 |
flink_AbstractSqlCallContext_getLiteralValueAs_rdh | /**
* Bridges to {@link ValueLiteralExpression#getValueAs(Class)}.
*/
@SuppressWarnings("unchecked")protected static <T> T getLiteralValueAs(LiteralValueAccessor accessor, Class<T> clazz) {
Preconditions.checkArgument(!clazz.isPrimitive());
Object convertedValue = null;
if (clazz == Duration.class) {
final long longVal = accessor.getValueAs(Long.class);
convertedValue = Duration.ofMillis(longVal);
} else if (clazz == Period.class) {
final long longVal = accessor.getValueAs(Long.class);
if ((longVal <= Integer.MAX_VALUE) && (longVal >= Integer.MIN_VALUE)) {
convertedValue = Period.ofMonths(((int) (longVal)));
}
} else if (clazz == LocalDate.class) {
final DateString dateString = accessor.getValueAs(DateString.class);
convertedValue = LocalDate.parse(dateString.toString());
} else if (clazz == LocalTime.class) {
final TimeString timeString = accessor.getValueAs(TimeString.class);
convertedValue = LocalTime.parse(timeString.toString());
} else if (clazz == LocalDateTime.class) {
final TimestampString timestampString = accessor.getValueAs(TimestampString.class);
convertedValue = LocalDateTime.parse(timestampString.toString().replace(' ', 'T'));
} else if (clazz == Instant.class) {
// timestamp string is in UTC, convert back to an instant
final TimestampString timestampString = accessor.getValueAs(TimestampString.class);
convertedValue = LocalDateTime.parse(timestampString.toString().replace(' ', 'T')).atOffset(ZoneOffset.UTC).toInstant();
}
if (convertedValue != null) {
return ((T) (convertedValue));
}
return accessor.getValueAs(clazz);
} | 3.26 |
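The Instant branch above relies on Calcite's TimestampString rendering the value in UTC as "yyyy-MM-dd HH:mm:ss" (optionally with fractions), so the space is swapped for 'T' before parsing. The same conversion can be reproduced with the standard library alone (a standalone illustration with a made-up literal value):

import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneOffset;

public class LiteralConversionSketch {
    public static void main(String[] args) {
        String calciteTimestamp = "2024-05-01 12:30:00"; // hypothetical literal rendering
        Instant instant = LocalDateTime.parse(calciteTimestamp.replace(' ', 'T'))
                .atOffset(ZoneOffset.UTC)
                .toInstant();
        System.out.println(instant); // 2024-05-01T12:30:00Z
    }
}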
flink_InputTypeStrategies_constraint_rdh | /**
* Strategy for an argument that must fulfill a given constraint.
*/
public static ConstraintArgumentTypeStrategy constraint(String constraintMessage, Predicate<List<DataType>> evaluator) {
return new ConstraintArgumentTypeStrategy(constraintMessage, evaluator);
} | 3.26 |
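A possible use of this strategy, assuming the predicate receives the data types of all call arguments (as the Predicate<List<DataType>> parameter suggests) and the usual flink-table-common import paths:

import java.util.List;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.inference.ArgumentTypeStrategy;
import org.apache.flink.table.types.inference.InputTypeStrategies;

final class ConstraintSketch {
    // Rejects calls whose first argument is declared nullable.
    static final ArgumentTypeStrategy NOT_NULL =
            InputTypeStrategies.constraint(
                    "The first argument must be declared NOT NULL.",
                    (List<DataType> args) -> !args.get(0).getLogicalType().isNullable());
}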
flink_InputTypeStrategies_commonType_rdh | /**
* An {@link InputTypeStrategy} that expects {@code count} arguments that have a common type.
*/
public static InputTypeStrategy commonType(int count) {
return new CommonInputTypeStrategy(ConstantArgumentCount.of(count));
} | 3.26 |
flink_InputTypeStrategies_sequence_rdh | /**
* Strategy for a named function signature like {@code f(s STRING, n NUMERIC)} using a sequence
* of {@link ArgumentTypeStrategy}s.
*/
public static InputTypeStrategy sequence(List<String> argumentNames, List<ArgumentTypeStrategy> strategies) {
return new SequenceInputTypeStrategy(strategies, argumentNames);
} | 3.26 |
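A sketch of a named signature f(s <character string>, n NUMERIC) built with this list-based overload, reusing the imports from the constraint sketch above plus java.util.Arrays and LogicalTypeFamily; passing false to logical(...) (defined further down) asks for NOT NULL arguments and is done here purely for illustration:

InputTypeStrategy namedSignature =
        InputTypeStrategies.sequence(
                Arrays.asList("s", "n"),
                Arrays.<ArgumentTypeStrategy>asList(
                        InputTypeStrategies.logical(LogicalTypeFamily.CHARACTER_STRING, false),
                        InputTypeStrategies.logical(LogicalTypeFamily.NUMERIC, false)));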
flink_InputTypeStrategies_or_rdh | /**
* Strategy for a disjunction of multiple {@link ArgumentTypeStrategy}s into one like {@code f(NUMERIC || STRING)}.
*
* <p>Some {@link ArgumentTypeStrategy}s cannot contribute an inferred type that is different
* from the input type (e.g. {@link #LITERAL}). Therefore, the order {@code f(X || Y)} or {@code f(Y || X)} matters as it defines the precedence in case the result must be casted to a more
* specific type.
*
* <p>This strategy aims to infer a type that is equal to the input type (to prevent unnecessary
* casting) or (if this is not possible) the first more specific, casted type.
*/
public static OrArgumentTypeStrategy or(ArgumentTypeStrategy... strategies) {
return new OrArgumentTypeStrategy(Arrays.asList(strategies));
} | 3.26
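For example, an argument that must either be a literal (the LITERAL strategy referenced in the javadoc) or belong to the NUMERIC family could be sketched as follows, under the same import assumptions as above:

OrArgumentTypeStrategy literalOrNumeric =
        InputTypeStrategies.or(
                InputTypeStrategies.LITERAL,
                InputTypeStrategies.logical(LogicalTypeFamily.NUMERIC, false));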
flink_InputTypeStrategies_and_rdh | /**
* Strategy for a conjunction of multiple {@link ArgumentTypeStrategy}s into one like {@code f(NUMERIC && LITERAL)}.
*
* <p>Some {@link ArgumentTypeStrategy}s cannot contribute an inferred type that is different
* from the input type (e.g. {@link #LITERAL}). Therefore, the order {@code f(X && Y)} or {@code f(Y && X)} matters as it defines the precedence in case the result must be casted to a more
* specific type.
*
* <p>This strategy aims to infer the first more specific, casted type or (if this is not
* possible) a type that has been inferred from all {@link ArgumentTypeStrategy}s.
*/
public static AndArgumentTypeStrategy and(ArgumentTypeStrategy... strategies) {
return new AndArgumentTypeStrategy(Arrays.asList(strategies));
} | 3.26 |
flink_InputTypeStrategies_compositeSequence_rdh | /**
* A strategy that lets you apply other strategies to subsequences of the actual arguments.
*
* <p>The {@link #sequence(ArgumentTypeStrategy...)} strategy should be preferred in most cases.
* Use this strategy only if you need to apply common logic to a subsequence of the arguments.
*/
public static SubsequenceStrategyBuilder compositeSequence() {
return new SubsequenceStrategyBuilder();
} | 3.26
flink_InputTypeStrategies_repeatingSequence_rdh | /**
* Arbitrarily often repeating sequence of argument type strategies.
*/
public static InputTypeStrategy repeatingSequence(ArgumentTypeStrategy... strategies) {
return new RepeatingSequenceInputTypeStrategy(Arrays.asList(strategies));
} | 3.26 |
flink_InputTypeStrategies_symbol_rdh | /**
* Strategy for a symbol argument of a specific {@link TableSymbol} enum, with value being one
* of the provided variants.
*
* <p>A symbol is implied to be a literal argument.
*/
@SafeVarargs
@SuppressWarnings("unchecked")
public static <T extends Enum<? extends TableSymbol>> SymbolArgumentTypeStrategy<T> symbol(T firstAllowedVariant, T... otherAllowedVariants) {
return new SymbolArgumentTypeStrategy<T>(((Class<T>) (firstAllowedVariant.getClass())), Stream.concat(Stream.of(firstAllowedVariant), Arrays.stream(otherAllowedVariants)).collect(Collectors.toSet()));
} | 3.26 |
flink_InputTypeStrategies_explicitSequence_rdh | /**
* Strategy for a named function signature of explicitly defined types like {@code f(s STRING, i
* INT)}. Implicit casts will be inserted if possible.
*
* <p>This is equivalent to using {@link #sequence(String[], ArgumentTypeStrategy[])} and {@link #explicit(DataType)}.
*/
public static InputTypeStrategy explicitSequence(String[] argumentNames, DataType[] expectedDataTypes) {
final List<ArgumentTypeStrategy> strategies = Arrays.stream(expectedDataTypes).map(InputTypeStrategies::explicit).collect(Collectors.toList());
return new SequenceInputTypeStrategy(strategies, Arrays.asList(argumentNames));
} | 3.26 |
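A minimal usage sketch for the signature f(s STRING, i INT), assuming DataTypes from org.apache.flink.table.api and the other imports used in the sketches above:

InputTypeStrategy fStringInt =
        InputTypeStrategies.explicitSequence(
                new String[] {"s", "i"},
                new DataType[] {DataTypes.STRING(), DataTypes.INT()});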
flink_InputTypeStrategies_varyingSequence_rdh | /**
* Strategy for a varying named function signature like {@code f(i INT, str STRING, num
* NUMERIC...)} using a sequence of {@link ArgumentTypeStrategy}s. The first n - 1 arguments
* must be constant. The n-th argument can occur 0, 1, or more times.
*/
public static InputTypeStrategy varyingSequence(String[] argumentNames, ArgumentTypeStrategy[] strategies) {
return new VaryingSequenceInputTypeStrategy(Arrays.asList(strategies), Arrays.asList(argumentNames));
} | 3.26 |
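A sketch of a CONCAT_WS-like signature f(separator STRING, str STRING...), where the trailing argument may repeat zero or more times; explicit(...) and DataTypes are assumed as in the sketches above:

InputTypeStrategy concatWsLike =
        InputTypeStrategies.varyingSequence(
                new String[] {"separator", "str"},
                new ArgumentTypeStrategy[] {
                    InputTypeStrategies.explicit(DataTypes.STRING()),
                    InputTypeStrategies.explicit(DataTypes.STRING())
                });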
flink_InputTypeStrategies_wildcardWithCount_rdh | /**
* Strategy that does not perform any modification or validation of the input. It checks the
* argument count though.
*/
public static InputTypeStrategy wildcardWithCount(ArgumentCount argumentCount) {
return new WildcardInputTypeStrategy(argumentCount);
} | 3.26 |
flink_InputTypeStrategies_commonMultipleArrayType_rdh | /**
* An {@link InputTypeStrategy} that expects {@code minCount} arguments that have a common array
* type.
*/
public static InputTypeStrategy commonMultipleArrayType(int minCount) {
return new CommonArrayInputTypeStrategy(ConstantArgumentCount.from(minCount));
} | 3.26 |
flink_InputTypeStrategies_logical_rdh | /**
* Strategy for an argument that corresponds to a given {@link LogicalTypeFamily} and
* nullability. Implicit casts will be inserted if possible.
*/
public static FamilyArgumentTypeStrategy logical(LogicalTypeFamily expectedFamily, boolean expectedNullability) {
return new FamilyArgumentTypeStrategy(expectedFamily, expectedNullability);
} | 3.26 |