name | code_snippet | score
---|---|---|
flink_AbstractPythonFunctionOperator_m1_rdh | /**
* Advances the watermark of all managed timer services, potentially firing event time timers.
* It also ensures that the fired timers are processed in the Python user-defined functions.
*/
private void m1(Watermark watermark) throws Exception {
if (getTimeServiceManager().isPresent()) {
InternalTimeServiceManager<?> timeServiceManager = getTimeServiceManager().get();
timeServiceManager.advanceWatermark(watermark);
while (!isBundleFinished()) {
invokeFinishBundle();
timeServiceManager.advanceWatermark(watermark);
}
}
} | 3.26 |
flink_AbstractPythonFunctionOperator_checkInvokeFinishBundleByCount_rdh | /**
* Checks whether to invoke finishBundle by elements count. Called in processElement.
*/
protected void checkInvokeFinishBundleByCount() throws Exception {
if (elementCount >= maxBundleSize) {
invokeFinishBundle();
}
} | 3.26 |
flink_AbstractPythonFunctionOperator_isBundleFinished_rdh | /**
* Returns whether the bundle is finished.
*/
public boolean isBundleFinished() {
return elementCount == 0;
} | 3.26 |
flink_AbstractPythonFunctionOperator_getConfiguration_rdh | /**
* Returns the {@link Configuration}.
*/
public Configuration getConfiguration() {
return config;
} | 3.26 |
flink_AbstractPythonFunctionOperator_checkInvokeFinishBundleByTime_rdh | /**
* Checks whether to invoke finishBundle by timeout.
*/
private void checkInvokeFinishBundleByTime() throws Exception {
long now = getProcessingTimeService().getCurrentProcessingTime();
if ((now - lastFinishBundleTime) >= maxBundleTimeMills) {
    invokeFinishBundle();
}
} | 3.26 |
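The two checks above, by element count and by elapsed time, together form the bundle-flush policy of the Python operator: a bundle is finished as soon as either limit is hit. Below is a minimal, self-contained sketch of that policy; the class and method names (`MiniBundleTracker`, `flush`) are illustrative and not part of the Flink API.

```java
// Minimal sketch of the count/time bundle-flush policy; names are illustrative, not Flink APIs.
public class MiniBundleTracker {
    private final int maxBundleSize;
    private final long maxBundleTimeMillis;
    private int elementCount = 0;
    private long lastFinishBundleTime = System.currentTimeMillis();

    public MiniBundleTracker(int maxBundleSize, long maxBundleTimeMillis) {
        this.maxBundleSize = maxBundleSize;
        this.maxBundleTimeMillis = maxBundleTimeMillis;
    }

    /** Called for every element; flushes once the size limit is reached. */
    public void onElement() {
        elementCount++;
        if (elementCount >= maxBundleSize) {
            flush();
        }
    }

    /** Called from a periodic timer; flushes once the bundle has been open too long. */
    public void onTimer(long now) {
        if (now - lastFinishBundleTime >= maxBundleTimeMillis) {
            flush();
        }
    }

    private void flush() {
        // In the real operator this is where invokeFinishBundle() would hand the buffered
        // elements to the Python process; here we only reset the counters.
        elementCount = 0;
        lastFinishBundleTime = System.currentTimeMillis();
    }
}
```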
flink_ShuffleMaster_close_rdh | /**
* Closes this shuffle master service which should release all resources. A shuffle master will
* only be closed when the cluster is shut down.
 */
@Override
default void close() throws Exception {
} | 3.26 |
flink_ShuffleMaster_registerJob_rdh | /**
* Registers the target job together with the corresponding {@link JobShuffleContext} to this
 * shuffle master. Through the shuffle context, one can obtain basic information such as the job
 * ID and the job configuration. It also enables the ShuffleMaster to notify the JobMaster about
 * lost result partitions, so that the JobMaster can identify and reproduce unavailable partitions earlier.
*
* @param context
* the corresponding shuffle context of the target job.
*/
default void registerJob(JobShuffleContext context) {
} | 3.26 |
flink_ShuffleMaster_unregisterJob_rdh | /**
* Unregisters the target job from this shuffle master, which means the corresponding job has
* reached a global termination state and all the allocated resources except for the cluster
* partitions can be cleared.
*
* @param jobID
* ID of the target job to be unregistered.
*/
default void unregisterJob(JobID jobID) {
} | 3.26 |
flink_ShuffleMaster_start_rdh | /**
 * Starts this shuffle master as a service. One can do initialization here, for example obtaining
 * access to and connecting to an external system.
*/
default void start() throws Exception {
} | 3.26 |
flink_ShuffleMaster_computeShuffleMemorySizeForTask_rdh | /**
 * Computes the shuffle memory size for a task with the given {@link TaskInputsOutputsDescriptor}.
*
* @param taskInputsOutputsDescriptor
* describes task inputs and outputs information for shuffle
* memory calculation.
* @return shuffle memory size for a task with the given {@link TaskInputsOutputsDescriptor}.
*/
default MemorySize computeShuffleMemorySizeForTask(TaskInputsOutputsDescriptor taskInputsOutputsDescriptor) {
return MemorySize.ZERO;
} | 3.26 |
flink_BernoulliSampler_sample_rdh | /**
 * Samples the input elements; for each input element, a Bernoulli trial decides whether it is kept.
 *
 * @param input
 * 		Elements to be sampled.
 * @return The sampled result, which is lazily computed upon the input elements.
 */
@Override
public Iterator<T> sample(final Iterator<T> input) {
    if (fraction == 0) {
        return emptyIterable;
    }
    return new SampledIterator<T>() {
        T current = null;

        @Override
        public boolean hasNext() {
            if (current == null) {
                current = m0();
            }
            return current != null;
        }

        @Override
        public T next() {
            if (current == null) {
                return m0();
            } else {
                T result = current;
                current = null;
                return result;
            }
        }

        private T m0() {
            if (fraction <= THRESHOLD) {
                double rand = random.nextDouble();
                double u = Math.max(rand, EPSILON);
                int gap = (int) (Math.log(u) / Math.log(1 - fraction));
                int elementCount = 0;
                if (input.hasNext()) {
                    T element = input.next();
                    while (input.hasNext() && (elementCount < gap)) {
                        element = input.next();
                        elementCount++;
                    }
                    if (elementCount < gap) {
                        return null;
                    } else {
                        return element;
                    }
                } else {
                    return null;
                }
            } else {
                while (input.hasNext()) {
                    T element = input.next();
                    if (random.nextDouble() <= fraction) {
                        return element;
                    }
                }
                return null;
            }
        }
    };
} | 3.26 |
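The low-fraction branch of `m0()` above relies on gap sampling: instead of flipping a coin per element, it draws a geometrically distributed number of elements to skip, `floor(log(u) / log(1 - fraction))`. A standalone sketch of the same idea (class name and stream length are hypothetical):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

// Standalone illustration of the geometric "gap" trick used for small sampling fractions.
public class GapSamplingDemo {
    public static void main(String[] args) {
        final int streamLength = 1_000_000;
        final double fraction = 0.01;          // sampling probability p
        final Random random = new Random(42);
        final List<Integer> sampled = new ArrayList<>();
        int i = 0;
        while (i < streamLength) {
            double u = Math.max(random.nextDouble(), 1e-9);
            // gap ~ Geometric(p): number of elements to skip before the next accepted one
            int gap = (int) (Math.log(u) / Math.log(1 - fraction));
            i += gap;
            if (i < streamLength) {
                sampled.add(i);                 // accept this element
            }
            i++;                                // move past the accepted element
        }
        // The expected sample size is roughly fraction * streamLength (about 10,000 here).
        System.out.println("sampled " + sampled.size() + " of " + streamLength + " elements");
    }
}
```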
flink_AsyncTableFunctionProvider_of_rdh | /**
* Helper method for creating a static provider.
*/
static <T> AsyncTableFunctionProvider<T> of(AsyncTableFunction<T> asyncTableFunction) {
return () -> asyncTableFunction;
} | 3.26 |
flink_HsFullSpillingStrategy_checkRelease_rdh | /**
 * Releases each subpartition's spilled buffers from the head. Each subpartition fairly releases a
 * fixed number of buffers and retains the rest; if a subpartition does not have that many
 * qualified buffers, all of them are released.
*/
private void checkRelease(HsSpillingInfoProvider spillingInfoProvider, int poolSize, Decision.Builder builder) {
if (spillingInfoProvider.getNumTotalRequestedBuffers() < (poolSize * releaseThreshold)) {
// In case situation changed since onMemoryUsageChanged() returns Optional#empty()
return;
}
    int releaseNum = (int) (poolSize * releaseBufferRatio);
    int numSubpartitions = spillingInfoProvider.getNumSubpartitions();
    int expectedSubpartitionReleaseNum = releaseNum / numSubpartitions;
    TreeMap<Integer, Deque<BufferIndexAndChannel>> bufferToRelease = new TreeMap<>();
    for (int v9 = 0; v9 < numSubpartitions; v9++) {
        Deque<BufferIndexAndChannel> buffersInOrder = spillingInfoProvider.getBuffersInOrder(v9, SpillStatus.SPILL, ConsumeStatusWithId.ALL_ANY);
        // if this subpartition has fewer spilled buffers than the expected release number, release all of them
        int subpartitionReleaseNum = Math.min(buffersInOrder.size(), expectedSubpartitionReleaseNum);
        int subpartitionSurvivedNum = buffersInOrder.size() - subpartitionReleaseNum;
        while ((subpartitionSurvivedNum--) != 0) {
            buffersInOrder.pollLast();
        }
        bufferToRelease.put(v9, buffersInOrder);
    }
    // collect results in order
    for (int i = 0; i < numSubpartitions; i++) {
        Deque<BufferIndexAndChannel> v14 = bufferToRelease.get(i);
        if ((v14 != null) && (!v14.isEmpty())) {
            builder.addBufferToRelease(i, bufferToRelease.getOrDefault(i, new ArrayDeque<>()));
        }
    }
} | 3.26 |
flink_HsFullSpillingStrategy_onBufferFinished_rdh | // For the case of buffer finished, whenever the number of unSpillBuffers reaches
// numBuffersTriggerSpillingRatio times currentPoolSize, make a decision based on global
// information. Otherwise, no need to take action.
@Override
public Optional<Decision> onBufferFinished(int numTotalUnSpillBuffers, int currentPoolSize) {
return numTotalUnSpillBuffers < (numBuffersTriggerSpillingRatio * currentPoolSize) ? Optional.of(Decision.NO_ACTION) : Optional.empty();
} | 3.26 |
flink_HsFullSpillingStrategy_m0_rdh | // For the case of buffer consumed, there is no need to take action for HsFullSpillingStrategy.
@Override
public Optional<Decision> m0(BufferIndexAndChannel consumedBuffer) {
return Optional.of(Decision.NO_ACTION);
} | 3.26 |
flink_ZooKeeperJobGraphStoreWatcher_fromEvent_rdh | /**
* Returns a JobID for the event's path.
*/
private JobID fromEvent(PathChildrenCacheEvent event) {
    return JobID.fromHexString(ZKPaths.getNodeFromPath(event.getData().getPath()));
} | 3.26 |
flink_TransitiveClosureNaive_join_rdh | /**
 * left: Path (z,x) - x is reachable by z
 * right: Edge (x,y) - edge x-->y exists
 * out: Path (z,y) - y is reachable by z
*/
@Override
public Tuple2<Long, Long> join(Tuple2<Long, Long> left, Tuple2<Long, Long> right) throws Exception {
return new Tuple2<Long, Long>(left.f0, right.f1);
} | 3.26 |
flink_ExecutorUtils_gracefulShutdown_rdh | /**
 * Gracefully shuts down the given {@link ExecutorService}s. The call waits up to the given timeout
 * for all ExecutorServices to terminate. If an ExecutorService does not terminate within this time,
 * it will be shut down hard.
*
* @param timeout
* to wait for the termination of all ExecutorServices
* @param unit
* of the timeout
* @param executorServices
* to shut down
*/
public static void gracefulShutdown(long timeout, TimeUnit unit,
ExecutorService... executorServices) {
for (ExecutorService executorService : executorServices) {
executorService.shutdown();
}
    boolean wasInterrupted = false;
    final long endTime = unit.toMillis(timeout) + System.currentTimeMillis();
long timeLeft = unit.toMillis(timeout);
boolean hasTimeLeft = timeLeft > 0L;
for (ExecutorService executorService : executorServices) {
        if (wasInterrupted || (!hasTimeLeft)) {
            executorService.shutdownNow();
        } else {
try {
if (!executorService.awaitTermination(timeLeft, TimeUnit.MILLISECONDS)) {
LOG.warn("ExecutorService did not terminate in time. Shutting it down now.");
executorService.shutdownNow();
}
} catch (InterruptedException e) {
LOG.warn("Interrupted while shutting down executor services. Shutting all " + "remaining ExecutorServices down now.", e);
executorService.shutdownNow();
wasInterrupted = true;
Thread.currentThread().interrupt();
}
            timeLeft = endTime - System.currentTimeMillis();
            hasTimeLeft = timeLeft > 0L;
}
}
} | 3.26 |
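A hypothetical usage of the helper above: give two thread pools a bounded amount of time to drain before they are forcibly stopped. The import of `ExecutorUtils` is an assumption about its package; adjust it to wherever the class lives in your build.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.flink.util.ExecutorUtils; // package assumed for this sketch

public class ShutdownExample {
    public static void main(String[] args) {
        ExecutorService ioPool = Executors.newFixedThreadPool(4);
        ExecutorService cpuPool = Executors.newWorkStealingPool();

        ioPool.submit(() -> System.out.println("io task"));
        cpuPool.submit(() -> System.out.println("cpu task"));

        // Waits up to the timeout for termination; pools that do not finish in time are shut
        // down hard, and an interrupt escalates to shutdownNow() for all remaining pools.
        ExecutorUtils.gracefulShutdown(10, TimeUnit.SECONDS, ioPool, cpuPool);
    }
}
```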
flink_ExecutorUtils_m0_rdh | /**
* Shuts the given {@link ExecutorService} down in a non-blocking fashion. The shut down will be
* executed by a thread from the common fork-join pool.
*
* <p>The executor services will be shut down gracefully for the given timeout period.
* Afterwards {@link ExecutorService#shutdownNow()} will be called.
*
* @param timeout
* before {@link ExecutorService#shutdownNow()} is called
* @param unit
* time unit of the timeout
* @param executorServices
* to shut down
* @return Future which is completed once the {@link ExecutorService} are shut down
*/
public static CompletableFuture<Void> m0(long timeout, TimeUnit unit, ExecutorService... executorServices) {
return CompletableFuture.supplyAsync(() -> {
gracefulShutdown(timeout, unit, executorServices);
return null;
});
} | 3.26 |
flink_TestLoggerResource_asSingleTestResource_rdh | /**
* Enables the use of {@link TestLoggerResource} for try-with-resources statement.
*/
public static SingleTestResource asSingleTestResource(String loggerName,
Level level) throws Throwable {
return new SingleTestResource(loggerName, level);
} | 3.26 |
flink_ExecutionVertex_notifyPendingDeployment_rdh | // --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
void notifyPendingDeployment(Execution execution) {
// only forward this notification if the execution is still the current execution
// otherwise we have an outdated execution
if (isCurrentExecution(execution)) {
getExecutionGraphAccessor().getExecutionDeploymentListener().onStartedDeployment(execution.getAttemptId(), execution.getAssignedResourceLocation().getResourceID());
}
} | 3.26 |
flink_ExecutionVertex_executionFinished_rdh | // --------------------------------------------------------------------------------------------
// Notifications from the Execution Attempt
// --------------------------------------------------------------------------------------------
void executionFinished(Execution execution) {
getJobVertex().executionVertexFinished();
} | 3.26 |
flink_ExecutionVertex_finishPartitionsIfNeeded_rdh | /**
* Mark partition finished if needed.
*
* @return list of finished partitions.
*/
@VisibleForTesting
public List<IntermediateResultPartition> finishPartitionsIfNeeded() {
    List<IntermediateResultPartition> finishedPartitions = null;
    MarkPartitionFinishedStrategy markPartitionFinishedStrategy = getExecutionGraphAccessor().getMarkPartitionFinishedStrategy();
for (IntermediateResultPartition partition : f0.values()) {
if (markPartitionFinishedStrategy.needMarkPartitionFinished(partition.getResultType())) {
partition.markFinished();
if (finishedPartitions == null) {
finishedPartitions = new LinkedList<>();
}
finishedPartitions.add(partition);
}
}
if (finishedPartitions == null) {
return Collections.emptyList();
} else {
return finishedPartitions;
}
} | 3.26 |
flink_ExecutionVertex_toString_rdh | // --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
@Override
public String toString() {
return getTaskNameWithSubtaskIndex();
} | 3.26 |
flink_ExecutionVertex_cancel_rdh | /**
* Cancels this ExecutionVertex.
*
* @return A future that completes once the execution has reached its final state.
*/
public CompletableFuture<?> cancel() {
// to avoid any case of mixup in the presence of concurrent calls,
// we copy a reference to the stack to make sure both calls go to the same Execution
    final Execution exec = currentExecution;
exec.cancel();
return exec.getReleaseFuture();
} | 3.26 |
flink_ExecutionVertex_createNewExecution_rdh | // --------------------------------------------------------------------------------------------
Execution createNewExecution(final long timestamp) {
return new Execution(getExecutionGraphAccessor().getFutureExecutor(), this, nextAttemptNumber++, timestamp, timeout);
} | 3.26 |
flink_ExecutionVertex_addConsumedPartitionGroup_rdh | // --------------------------------------------------------------------------------------------
// Graph building
// --------------------------------------------------------------------------------------------
public void addConsumedPartitionGroup(ConsumedPartitionGroup consumedPartitions) {
getExecutionGraphAccessor().getEdgeManager().connectVertexWithConsumedPartitionGroup(executionVertexId, consumedPartitions);
} | 3.26 |
flink_ExecutionVertex_notifyStateTransition_rdh | /**
* Simply forward this notification.
*/
void notifyStateTransition(Execution execution, ExecutionState previousState, ExecutionState newState) {
// only forward this notification if the execution is still the current execution
// otherwise we have an outdated execution
if (isCurrentExecution(execution)) {
getExecutionGraphAccessor().notifyExecutionChange(execution, previousState,
newState);
}
} | 3.26 |
flink_ExecutionVertex_resetForNewExecution_rdh | // --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
/**
* Archives the current Execution and creates a new Execution for this vertex.
*/
public void resetForNewExecution() {
resetForNewExecutionInternal(System.currentTimeMillis());
} | 3.26 |
flink_ExecutionVertex_markFailed_rdh | /**
* This method marks the task as failed, but will make no attempt to remove task execution from
* the task manager. It is intended for cases where the task is known not to be deployed yet.
*
* @param t
* The exception that caused the task to fail.
*/
public void markFailed(Throwable t) {
    currentExecution.markFailed(t);
} | 3.26 |
flink_ExecutionVertex_getTaskNameWithSubtaskIndex_rdh | /**
* Creates a simple name representation in the style 'taskname (x/y)', where 'taskname' is the
* name as returned by {@link #getTaskName()}, 'x' is the parallel subtask index as returned by
* {@link #getParallelSubtaskIndex()}{@code + 1}, and 'y' is the total number of tasks, as
* returned by {@link #getTotalNumberOfParallelSubtasks()}.
*
* @return A simple name representation in the form 'myTask (2/7)'
*/
@Override
public String getTaskNameWithSubtaskIndex() {
return this.taskNameWithSubtask;
} | 3.26 |
flink_SinkContextUtil_forTimestamp_rdh | /**
* Creates a {@link SinkFunction.Context} that throws an exception when trying to access the
* current watermark or processing time.
 */
public static Context forTimestamp(long timestamp) {
return new SinkFunction.Context() {
@Override
public long currentProcessingTime() {
throw new RuntimeException("Not implemented");
}
@Override
public long currentWatermark() {
throw new RuntimeException("Not implemented");
}
@Override
public Long timestamp() {
return timestamp;
}
};
} | 3.26 |
flink_PlannerFactoryUtil_createPlanner_rdh | /**
* Discovers a planner factory and creates a planner instance.
*/
public static Planner createPlanner(Executor executor, TableConfig tableConfig, ClassLoader userClassLoader, ModuleManager moduleManager, CatalogManager catalogManager, FunctionCatalog functionCatalog) {
final PlannerFactory plannerFactory = FactoryUtil.discoverFactory(Thread.currentThread().getContextClassLoader(), PlannerFactory.class, PlannerFactory.DEFAULT_IDENTIFIER);
final Context context = new DefaultPlannerContext(executor, tableConfig, userClassLoader, moduleManager, catalogManager, functionCatalog);
return plannerFactory.create(context);
} | 3.26 |
flink_CachingAsyncLookupFunction_updateLatestLoadTime_rdh | // --------------------------------- Helper functions ----------------------------
private synchronized void updateLatestLoadTime(long loadTime) {
if (latestLoadTime == UNINITIALIZED) {
        cacheMetricGroup.latestLoadTimeGauge(() -> latestLoadTime);
}
latestLoadTime = loadTime;
} | 3.26 |
flink_ManagedTableListener_isManagedTable_rdh | /**
* Check a resolved catalog table is Flink's managed table or not.
*/
public static boolean isManagedTable(@Nullable
Catalog catalog, CatalogBaseTable table) {
if ((catalog == null) || (!catalog.supportsManagedTable())) {
// catalog not support managed table
return false;
    }
    if ((table.getTableKind() != TableKind.TABLE) || (!(table instanceof CatalogTable))) {
// view is not managed table
return false;
}
Map<String, String> options;
try {
options = table.getOptions();
} catch (TableException ignore) {
// exclude abnormal tables, such as InlineCatalogTable that does not have the options
return false;
}
// check legacy connector, here we need to check the factory, other properties are dummy
if (TableFactoryUtil.isLegacyConnectorOptions(catalog, new Configuration(), true, ObjectIdentifier.of("dummy_catalog", "dummy_database", "dummy_table"), ((CatalogTable) (table)), true)) {
// legacy connector is not managed table
return false;
}
if (!StringUtils.isNullOrWhitespaceOnly(options.get(FactoryUtil.CONNECTOR.key()))) {
// with connector is not managed table
return false;
}
if (table instanceof ResolvedCatalogBaseTable) {
table = ((ResolvedCatalogBaseTable<?>) (table)).getOrigin();
}
// ConnectorCatalogTable is not managed table
return !(table instanceof ConnectorCatalogTable);
} | 3.26 |
flink_ManagedTableListener_notifyTableDrop_rdh | /**
* Notify for dropping managed table.
*/
public void notifyTableDrop(@Nullable
Catalog catalog, ObjectIdentifier identifier, ResolvedCatalogBaseTable<?> table, boolean isTemporary, boolean ignoreIfNotExists) {
    if (isManagedTable(catalog, table)) {
discoverManagedTableFactory(classLoader).onDropTable(createTableFactoryContext(identifier, ((ResolvedCatalogTable) (table)), isTemporary), ignoreIfNotExists);
}
} | 3.26 |
flink_ManagedTableListener_notifyTableCompaction_rdh | /**
* Notify compaction for managed table.
*/
public Map<String, String> notifyTableCompaction(@Nullable
Catalog catalog, ObjectIdentifier identifier, ResolvedCatalogBaseTable<?> table, CatalogPartitionSpec partitionSpec, boolean isTemporary) {
if (isManagedTable(catalog, table)) {
if (RuntimeExecutionMode.STREAMING.equals(config.get(ExecutionOptions.RUNTIME_MODE))) {
throw new ValidationException("Compact managed table only works under batch mode.");
}
return discoverManagedTableFactory(classLoader).onCompactTable(createTableFactoryContext(identifier, ((ResolvedCatalogTable) (table)), isTemporary), partitionSpec);
}
throw new ValidationException("Only managed table supports compaction");
} | 3.26 |
flink_ManagedTableListener_notifyTableCreation_rdh | /**
* Notify for creating managed table.
*/
public ResolvedCatalogBaseTable<?> notifyTableCreation(@Nullable
Catalog catalog, ObjectIdentifier identifier, ResolvedCatalogBaseTable<?> table, boolean isTemporary, boolean ignoreIfExists) {
if (isManagedTable(catalog, table)) {
ResolvedCatalogTable managedTable = m0(identifier, table, isTemporary);
discoverManagedTableFactory(classLoader).onCreateTable(createTableFactoryContext(identifier, managedTable, isTemporary), ignoreIfExists);
return managedTable;
}
return table;
} | 3.26 |
flink_ManagedTableListener_m0_rdh | /**
* Enrich options for creating managed table.
*/
private ResolvedCatalogTable m0(ObjectIdentifier identifier, ResolvedCatalogBaseTable<?> table, boolean isTemporary) {
if (!(table instanceof ResolvedCatalogTable)) {
throw new UnsupportedOperationException("Managed table only supports catalog table, unsupported table type: " + table.getClass());
    }
    ResolvedCatalogTable resolvedTable = ((ResolvedCatalogTable) (table));
    Map<String, String> newOptions = discoverManagedTableFactory(classLoader).enrichOptions(createTableFactoryContext(identifier, resolvedTable, isTemporary));
return resolvedTable.copy(newOptions);
} | 3.26 |
flink_AliasOperationUtils_createAliasList_rdh | /**
* Creates a list of valid alias expressions. Resulting expression might still contain {@link UnresolvedReferenceExpression}.
*
* @param aliases
* aliases to validate
* @param child
* relational operation on top of which to apply the aliases
* @return validated list of aliases
*/
static List<Expression> createAliasList(List<Expression> aliases, QueryOperation child) {
ResolvedSchema childSchema = child.getResolvedSchema();
    if (aliases.size() > childSchema.getColumnCount()) {
        throw new ValidationException("Aliasing more fields than we actually have.");
    }
    List<ValueLiteralExpression> fieldAliases = aliases.stream().map(f -> f.accept(aliasLiteralValidator)).collect(Collectors.toList());
    List<String> childNames = childSchema.getColumnNames();
    return IntStream.range(0, childNames.size()).mapToObj(idx -> {
        UnresolvedReferenceExpression oldField = unresolvedRef(childNames.get(idx));
        if (idx < fieldAliases.size()) {
            ValueLiteralExpression alias = fieldAliases.get(idx);
            return unresolvedCall(BuiltInFunctionDefinitions.AS, oldField, alias);
        } else {
            return oldField;
        }
    }).collect(Collectors.toList());
} | 3.26 |
flink_Created_startScheduling_rdh | /**
* Starts the scheduling by going into the {@link WaitingForResources} state.
*/
void startScheduling() {
context.goToWaitingForResources(null);
} | 3.26 |
flink_PekkoRpcServiceUtils_extractMaximumFramesize_rdh | // ------------------------------------------------------------------------
// RPC service configuration
// ------------------------------------------------------------------------
public static long extractMaximumFramesize(Configuration configuration) {
String maxFrameSizeStr = configuration.getString(AkkaOptions.FRAMESIZE);
    String configStr = String.format(SIMPLE_CONFIG_TEMPLATE, maxFrameSizeStr);
    Config config = ConfigFactory.parseString(configStr);
return config.getBytes(MAXIMUM_FRAME_SIZE_PATH);
} | 3.26 |
flink_PekkoRpcServiceUtils_createRemoteRpcService_rdh | // ------------------------------------------------------------------------
// RPC instantiation
// ------------------------------------------------------------------------
static PekkoRpcService createRemoteRpcService(Configuration configuration, @Nullable
String externalAddress, String externalPortRange, @Nullable
String bindAddress, @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
Optional<Integer> bindPort) throws Exception {
final PekkoRpcServiceBuilder rpcServiceBuilder = PekkoRpcServiceUtils.remoteServiceBuilder(configuration, externalAddress, externalPortRange);
if (bindAddress != null) {
rpcServiceBuilder.withBindAddress(bindAddress);
}
bindPort.ifPresent(rpcServiceBuilder::withBindPort);
return rpcServiceBuilder.createAndStart();
} | 3.26 |
flink_PekkoRpcServiceUtils_getRpcUrl_rdh | /**
*
* @param hostname
* The hostname or address where the target RPC service is listening.
* @param port
* The port where the target RPC service is listening.
* @param endpointName
* The name of the RPC endpoint.
* @param addressResolution
* Whether to try address resolution of the given hostname or not. This
* allows to fail fast in case that the hostname cannot be resolved.
* @param protocol
* True, if security/encryption is enabled, false otherwise.
* @return The RPC URL of the specified RPC endpoint.
*/
public static String getRpcUrl(String hostname, int port, String endpointName, AddressResolution addressResolution, Protocol protocol) throws UnknownHostException {
checkNotNull(hostname, "hostname is null");
checkNotNull(endpointName, "endpointName is null");
checkArgument(isValidClientPort(port), "port must be in [1, 65535]");
if (addressResolution == AddressResolution.TRY_ADDRESS_RESOLUTION) {
// Fail fast if the hostname cannot be resolved
// noinspection ResultOfMethodCallIgnored
InetAddress.getByName(hostname);
}
final String hostPort = NetUtils.unresolvedHostAndPortToNormalizedString(hostname, port);
return internalRpcUrl(endpointName, Optional.of(new RemoteAddressInformation(hostPort, protocol)));
} | 3.26 |
flink_NFACompiler_createLoopingGroupPatternState_rdh | /**
* Create the states for the group pattern as a looping one.
*
* @param groupPattern
* the group pattern to create the states for
* @param sinkState
* the state that the group pattern being converted should point to
* @return the first state of the states of the group pattern
*/
private State<T> createLoopingGroupPatternState(final GroupPattern<T, ?> groupPattern, final State<T> sinkState) {
final IterativeCondition<T> proceedCondition = getTrueFunction();
    Pattern<T, ?> oldCurrentPattern = f0;
    Pattern<T, ?> oldFollowingPattern = followingPattern;
    GroupPattern<T, ?> oldGroupPattern = currentGroupPattern;
    final State<T> dummyState = createState(StateType.Normal, true);
    State<T> lastSink = dummyState;
    currentGroupPattern = groupPattern;
    f0 = groupPattern.getRawPattern();
    lastSink = createMiddleStates(lastSink);
    lastSink = convertPattern(lastSink);
    lastSink.addProceed(sinkState, proceedCondition);
    dummyState.addProceed(lastSink, proceedCondition);
    f0 = oldCurrentPattern;
    followingPattern = oldFollowingPattern;
    currentGroupPattern = oldGroupPattern;
    return lastSink;
} | 3.26 |
flink_NFACompiler_createSingletonState_rdh | /**
* Creates a simple single state. For an OPTIONAL state it also consists of a similar state
* without the PROCEED edge, so that for each PROCEED transition branches in computation
* state graph can be created only once.
*
* @param ignoreCondition
* condition that should be applied to IGNORE transition
* @param sinkState
* state that the state being converted should point to
* @param proceedState
* state that the state being converted should proceed to
* @param isOptional
* whether the state being converted is optional
* @return the created state
*/
@SuppressWarnings("unchecked")
private State<T> createSingletonState(final State<T> sinkState, final State<T> proceedState, final IterativeCondition<T> takeCondition, final IterativeCondition<T> ignoreCondition, final boolean isOptional) {
if (f0 instanceof GroupPattern) {
return createGroupPatternState(((GroupPattern) (f0)), sinkState, proceedState, isOptional);
}
final State<T> singletonState = createState(StateType.Normal, true);
// if event is accepted then all notPatterns previous to the optional states are no
// longer valid
final State<T> sink = copyWithoutTransitiveNots(sinkState);
singletonState.addTake(sink, takeCondition);
// if no element accepted the previous nots are still valid.
final IterativeCondition<T> v57 = getTrueFunction();
if (isOptional) {
if (f0.getQuantifier().hasProperty(QuantifierProperty.GREEDY)) {
final IterativeCondition<T> untilCondition = ((IterativeCondition<T>) (f0.getUntilCondition()));
if (untilCondition != null) {
singletonState.addProceed(originalStateMap.get(proceedState.getName()), new RichAndCondition<>(v57, untilCondition));
}
singletonState.addProceed(proceedState, untilCondition != null ? new RichAndCondition<>(v57, new RichNotCondition<>(untilCondition))
: v57);
            } else {
singletonState.addProceed(proceedState, v57);
}
        }
        if (ignoreCondition != null) {
final State<T> ignoreState;
if (isOptional || isHeadOfOptionalGroupPattern(f0)) {
                ignoreState = createState(StateType.Normal, false);
ignoreState.addTake(sink, takeCondition);
ignoreState.addIgnore(ignoreCondition);
addStopStates(ignoreState);
} else {
ignoreState = singletonState;
}
singletonState.addIgnore(ignoreState, ignoreCondition);
}
return singletonState;
} | 3.26 |
flink_NFACompiler_compileFactory_rdh | /**
* Compiles the given pattern into a {@link NFAFactory}. The NFA factory can be used to
* create multiple NFAs.
*/
void compileFactory() {
Pattern<T, ?> lastPattern = f0;
checkPatternNameUniqueness();
checkPatternSkipStrategy();
// we're traversing the pattern from the end to the beginning --> the first state is the
// final state
State<T> sinkState = createEndingState();
// add all the normal states
sinkState = createMiddleStates(sinkState);
// add the beginning state
createStartState(sinkState);
// check the window times between events for pattern
m0();
if (((lastPattern.getQuantifier().getConsumingStrategy() == ConsumingStrategy.NOT_FOLLOW) && ((!windowTimes.containsKey(lastPattern.getName())) || (windowTimes.get(lastPattern.getName()) <= 0))) && (getWindowTime() == 0)) {
throw new MalformedPatternException("NotFollowedBy is not supported without windowTime as a last part of a Pattern!");
    }
} | 3.26 |
flink_NFACompiler_m1_rdh | /**
* Creates a "complex" state consisting of given number of states with same {@link IterativeCondition}.
*
* @param sinkState
* the state that the created state should point to
* @param proceedState
* state that the state being converted should proceed to
* @param times
* number of times the state should be copied
* @return the first state of the "complex" state, next state should point to it
*/
@SuppressWarnings("unchecked")
private State<T> m1(final State<T> sinkState, final State<T> proceedState, Times times) {
    State<T> lastSink = sinkState;
    setCurrentGroupPatternFirstOfLoop(false);
final IterativeCondition<T> untilCondition = ((IterativeCondition<T>) (f0.getUntilCondition()));
    final IterativeCondition<T> innerIgnoreCondition = extendWithUntilCondition(getInnerIgnoreCondition(f0), untilCondition, false);
final IterativeCondition<T> takeCondition = extendWithUntilCondition(getTakeCondition(f0), untilCondition, true);
if (f0.getQuantifier().hasProperty(QuantifierProperty.GREEDY) && (times.getFrom() != times.getTo())) {
if (untilCondition != null) {
State<T> v52 = copy(sinkState);
originalStateMap.put(sinkState.getName(), v52);
}
updateWithGreedyCondition(sinkState, takeCondition);
}
    for (int i = times.getFrom(); i < times.getTo(); i++) {
        lastSink = createSingletonState(lastSink, proceedState, takeCondition, innerIgnoreCondition, true);
        addStopStateToLooping(lastSink);
    }
    for (int i = 0; i < (times.getFrom() - 1); i++) {
lastSink = createSingletonState(lastSink, null, takeCondition, innerIgnoreCondition, false);
addStopStateToLooping(lastSink);
}
// we created the intermediate states in the loop, now we create the start of the loop.
setCurrentGroupPatternFirstOfLoop(true);
return createSingletonState(lastSink, proceedState, takeCondition, getIgnoreCondition(f0), isPatternOptional(f0));
} | 3.26 |
flink_NFACompiler_createLooping_rdh | /**
* Creates the given state as a looping one. Looping state is one with TAKE edge to itself
* and PROCEED edge to the sinkState. It also consists of a similar state without the
* PROCEED edge, so that for each PROCEED transition branches in computation state graph can
* be created only once.
*
* @param sinkState
* the state that the converted state should point to
* @return the first state of the created complex state
*/
@SuppressWarnings("unchecked")
private State<T> createLooping(final State<T> sinkState) {
if (f0 instanceof GroupPattern) {
return createLoopingGroupPatternState(((GroupPattern) (f0)), sinkState);
}
final IterativeCondition<T> v71 = ((IterativeCondition<T>) (f0.getUntilCondition()));
    final IterativeCondition<T> ignoreCondition = extendWithUntilCondition(getInnerIgnoreCondition(f0), v71, false);
    final IterativeCondition<T> takeCondition = extendWithUntilCondition(getTakeCondition(f0), v71, true);
IterativeCondition<T> proceedCondition = getTrueFunction();
final State<T> loopingState = createState(StateType.Normal, true);
if (f0.getQuantifier().hasProperty(QuantifierProperty.GREEDY)) {
if (v71 != null) {
            State<T> sinkStateCopy = copy(sinkState);
            loopingState.addProceed(sinkStateCopy, new RichAndCondition<>(proceedCondition, v71));
originalStateMap.put(sinkState.getName(), sinkStateCopy);
}
loopingState.addProceed(sinkState, v71 !=
null ? new RichAndCondition<>(proceedCondition, new RichNotCondition<>(v71)) : proceedCondition);
updateWithGreedyCondition(sinkState, getTakeCondition(f0));
} else {
loopingState.addProceed(sinkState, proceedCondition);
    }
    loopingState.addTake(takeCondition);
addStopStateToLooping(loopingState);
if (ignoreCondition != null) {
final State<T> ignoreState = createState(StateType.Normal, false);
ignoreState.addTake(loopingState, takeCondition);
ignoreState.addIgnore(ignoreCondition);
loopingState.addIgnore(ignoreState, ignoreCondition);
addStopStateToLooping(ignoreState);
}
return loopingState;
}
/**
* This method extends the given condition with stop(until) condition if necessary. The
* until condition needs to be applied only if both of the given conditions are not null.
*
* @param condition
* the condition to extend
* @param untilCondition
* the until condition to join with the given condition
* @param isTakeCondition
* whether the {@code condition} is for {@code TAKE} | 3.26 |
flink_NFACompiler_m0_rdh | /**
* Check pattern window times between events.
*/
private void m0() {
windowTime.ifPresent(windowTime -> {
        if (windowTimes.values().stream().anyMatch(time -> time > windowTime)) {
throw new MalformedPatternException("The window length between the previous and current event cannot be larger than the window length between the first and last event for a Pattern.");
}
});
} | 3.26 |
flink_NFACompiler_createEndingState_rdh | /**
* Creates the dummy Final {@link State} of the NFA graph.
*
* @return dummy Final state
*/
private State<T> createEndingState() {
State<T> endState = createState(ENDING_STATE_NAME, StateType.Final);
windowTime = Optional.ofNullable(f0.getWindowTime()).map(Time::toMilliseconds);
return endState;
} | 3.26 |
flink_NFACompiler_checkPatternNameUniqueness_rdh | /**
* Check if the given pattern's name is already used or not. If yes, it throws a {@link MalformedPatternException}.
*
* @param pattern
* The pattern to be checked
*/
private void checkPatternNameUniqueness(final Pattern pattern) {
if (pattern instanceof GroupPattern) {
        Pattern patternToCheck = ((GroupPattern) (pattern)).getRawPattern();
        while (patternToCheck != null) {
            checkPatternNameUniqueness(patternToCheck);
            patternToCheck = patternToCheck.getPrevious();
        }
    } else {
        stateNameHandler.checkNameUniqueness(pattern.getName());
    }
} | 3.26 |
flink_NFACompiler_createGroupPatternState_rdh | /**
* Create all the states for the group pattern.
*
* @param groupPattern
* the group pattern to create the states for
* @param sinkState
* the state that the group pattern being converted should point to
* @param proceedState
* the state that the group pattern being converted should proceed to
* @param isOptional
* whether the group pattern being converted is optional
* @return the first state of the states of the group pattern
*/
    private State<T> createGroupPatternState(final GroupPattern<T, ?> groupPattern, final State<T> sinkState, final State<T> proceedState, final boolean isOptional) {
        final IterativeCondition<T> proceedCondition = getTrueFunction();
        Pattern<T, ?> oldCurrentPattern = f0;
        Pattern<T, ?> oldFollowingPattern = followingPattern;
        GroupPattern<T, ?> oldGroupPattern = currentGroupPattern;
State<T> lastSink = sinkState;
currentGroupPattern = groupPattern;
f0 = groupPattern.getRawPattern();
lastSink = createMiddleStates(lastSink);
lastSink = convertPattern(lastSink);
if (isOptional) {
// for the first state of a group pattern, its PROCEED edge should point to
// the following state of that group pattern
lastSink.addProceed(proceedState, proceedCondition);
}
f0 = oldCurrentPattern;
followingPattern = oldFollowingPattern;
currentGroupPattern = oldGroupPattern;
return lastSink;
} | 3.26 |
flink_NFACompiler_isCurrentGroupPatternFirstOfLoop_rdh | /**
* Checks if the current group pattern is the head of the TIMES/LOOPING quantifier or not a
* TIMES/LOOPING quantifier pattern.
*/
private boolean isCurrentGroupPatternFirstOfLoop() {
if (firstOfLoopMap.containsKey(currentGroupPattern)) {
return firstOfLoopMap.get(currentGroupPattern);
} else {
return true;
}
}
/**
* Checks if the given pattern is the head pattern of the current group pattern.
*
* @param pattern
* the pattern to be checked
* @return {@code true} iff the given pattern is in a group pattern and it is the head
pattern of the group pattern, {@code false} | 3.26 |
flink_NFACompiler_copyWithoutTransitiveNots_rdh | /**
* This method creates an alternative state that is target for TAKE transition from an
* optional State. Accepting an event in optional State discards all not Patterns that were
* present before it.
*
* <p>E.g for a Pattern
* begin("a").notFollowedBy("b").followedByAny("c").optional().followedByAny("d") a sequence
* like : {a c b d} is a valid match, but {a b d} is not.
*
 * <p><b>NOTICE:</b> This method creates a copy only if necessary.
 *
 * @param sinkState
 * 		a state to copy without transitive nots
 * @return a copy of the state, or the state itself if no modifications were needed
*/
private State<T> copyWithoutTransitiveNots(final State<T> sinkState) {
final List<Tuple2<IterativeCondition<T>, String>> currentNotCondition = getCurrentNotCondition();
if (currentNotCondition.isEmpty() || (!f0.getQuantifier().hasProperty(QuantifierProperty.OPTIONAL))) {
// we do not create an alternative path if we are NOT in an OPTIONAL state or there
// is no NOTs prior to
// the optional state
return sinkState;
}
final State<T> copyOfSink = createState(sinkState.getName(), sinkState.getStateType());
for (StateTransition<T> tStateTransition : sinkState.getStateTransitions()) {
if (tStateTransition.getAction() == StateTransitionAction.PROCEED) {
                State<T> targetState = tStateTransition.getTargetState();
                boolean remove = false;
if (targetState.isStop()) {
for (Tuple2<IterativeCondition<T>, String> notCondition : currentNotCondition) {
                        if (targetState.getName().equals(notCondition.f1)) {
                            remove = true;
}
}
} else {
targetState = copyWithoutTransitiveNots(tStateTransition.getTargetState());
}
if (!remove) {
copyOfSink.addStateTransition(tStateTransition.getAction(), targetState, tStateTransition.getCondition());
}
} else {
copyOfSink.addStateTransition(tStateTransition.getAction(), tStateTransition.getTargetState().equals(tStateTransition.getSourceState()) ? copyOfSink : tStateTransition.getTargetState(), tStateTransition.getCondition());
}
}
return copyOfSink;
} | 3.26 |
flink_NFACompiler_getCurrentNotCondition_rdh | /**
* Retrieves list of conditions resulting in Stop state and names of the corresponding NOT
* patterns.
*
* <p>A current not condition can be produced in two cases:
*
* <ol>
* <li>the previous pattern is a {@link Quantifier.ConsumingStrategy#NOT_FOLLOW}
* <li>exists a backward path of {@link Quantifier.QuantifierProperty#OPTIONAL} patterns
* to {@link Quantifier.ConsumingStrategy#NOT_FOLLOW}
* </ol>
*
* <p><b>WARNING:</b> for more info on the second case see: {@link NFAFactoryCompiler#copyWithoutTransitiveNots(State)}
*
* @return list of not conditions with corresponding names
*/
private List<Tuple2<IterativeCondition<T>, String>> getCurrentNotCondition() {
List<Tuple2<IterativeCondition<T>, String>> notConditions = new ArrayList<>();
Pattern<T, ? extends T> previousPattern = f0;
while ((previousPattern.getPrevious() != null) && (previousPattern.getPrevious().getQuantifier().hasProperty(QuantifierProperty.OPTIONAL) || (previousPattern.getPrevious().getQuantifier().getConsumingStrategy() == ConsumingStrategy.NOT_FOLLOW))) {
previousPattern = previousPattern.getPrevious();
if (previousPattern.getQuantifier().getConsumingStrategy() == ConsumingStrategy.NOT_FOLLOW) {
final IterativeCondition<T> notCondition = getTakeCondition(previousPattern);
notConditions.add(Tuple2.of(notCondition, previousPattern.getName()));
}
}
return notConditions;
} | 3.26 |
flink_NFACompiler_setCurrentGroupPatternFirstOfLoop_rdh | /**
* Marks the current group pattern as the head of the TIMES quantifier or not.
*
* @param isFirstOfLoop
* whether the current group pattern is the head of the TIMES
* quantifier
*/
@SuppressWarnings("unchecked")
private void setCurrentGroupPatternFirstOfLoop(boolean isFirstOfLoop) {
if (f0 instanceof GroupPattern) {
firstOfLoopMap.put(((GroupPattern<T, ?>) (f0)), isFirstOfLoop);
}
} | 3.26 |
flink_NFACompiler_checkPatternSkipStrategy_rdh | /**
* Check pattern after match skip strategy.
*/
private void checkPatternSkipStrategy() {
if (afterMatchSkipStrategy.getPatternName().isPresent()) {
String patternName = afterMatchSkipStrategy.getPatternName().get();
Pattern<T, ?> pattern = f0;
while ((pattern.getPrevious() != null) && (!pattern.getName().equals(patternName))) {
pattern = pattern.getPrevious();
}
// pattern name match check.
if (!pattern.getName().equals(patternName)) {
throw new MalformedPatternException("The pattern name specified in AfterMatchSkipStrategy " + "can not be found in the given Pattern");
}
}
} | 3.26 |
flink_NFACompiler_canProduceEmptyMatches_rdh | /**
* Verifies if the provided pattern can possibly generate empty match. Example of patterns that
* can possibly generate empty matches are: A*, A?, A* B? etc.
*
* @param pattern
* pattern to check
* @return true if empty match could potentially match the pattern, false otherwise
*/
public static boolean canProduceEmptyMatches(final Pattern<?, ?> pattern) {
    NFAFactoryCompiler<?> compiler = new NFAFactoryCompiler<>(checkNotNull(pattern));
compiler.compileFactory();
State<?> startState = compiler.getStates().stream().filter(State::isStart).findFirst().orElseThrow(() ->
new IllegalStateException("Compiler produced no start state. It is a bug. File a jira."));
Set<State<?>> visitedStates = new HashSet<>();
final Stack<State<?>> statesToCheck = new Stack<>();
statesToCheck.push(startState);
while (!statesToCheck.isEmpty()) {
final State<?> currentState = statesToCheck.pop();
if (visitedStates.contains(currentState)) {
continue;
} else {
visitedStates.add(currentState);
}
        for (StateTransition<?> transition : currentState.getStateTransitions()) {
if (transition.getAction() == StateTransitionAction.PROCEED) {
if (transition.getTargetState().isFinal()) {
return true;
} else {
statesToCheck.push(transition.getTargetState());
}
}
}
}
return false;
} | 3.26 |
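Stripped of the CEP-specific types, `canProduceEmptyMatches` is a plain reachability check: starting from the start state, follow only PROCEED transitions (which consume no input) and see whether a final state can be reached. A generic sketch of that traversal, with hypothetical types and names in place of the Flink CEP classes:

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Generic sketch: follow only "free" edges (PROCEED transitions) from a start node and ask
// whether a final node is reachable. Types and names are illustrative, not Flink CEP classes.
public class EpsilonReachability {
    public static boolean finalReachable(String start, Set<String> finals, Map<String, List<String>> proceedEdges) {
        Set<String> visited = new HashSet<>();
        Deque<String> stack = new ArrayDeque<>();
        stack.push(start);
        while (!stack.isEmpty()) {
            String current = stack.pop();
            if (!visited.add(current)) {
                continue;             // already expanded
            }
            if (finals.contains(current)) {
                return true;          // an empty match is possible
            }
            for (String next : proceedEdges.getOrDefault(current, List.of())) {
                stack.push(next);
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Map<String, List<String>> edges = new HashMap<>();
        edges.put("start", List.of("a"));
        edges.put("a", List.of("final")); // e.g. A* B?: every state is optional
        System.out.println(finalReachable("start", Set.of("final"), edges)); // true
    }
}
```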
flink_NFACompiler_isPatternOptional_rdh | /**
* Checks if the given pattern is optional. If the given pattern is the head of a group
* pattern, the optional status depends on the group pattern.
*/
private boolean isPatternOptional(Pattern<T, ?> pattern) {
return pattern.getQuantifier().hasProperty(QuantifierProperty.OPTIONAL);
} | 3.26 |
flink_NFACompiler_createState_rdh | /**
* Creates a state with {@link State.StateType#Normal} and adds it to the collection of
* created states. Should be used instead of instantiating with new operator.
*
* @param name
* the name of the state
* @param stateType
* the type of the state
* @return the created state
*/
private State<T> createState(String name, State.StateType stateType) {
    String stateName = stateNameHandler.getUniqueInternalName(name);
    State<T> state = new State<>(stateName, stateType);
states.add(state);
return state;
} | 3.26 |
flink_NFACompiler_getTrueFunction_rdh | /**
*
 * @return A true function, extended with the stop (until) condition if necessary.
*/
@SuppressWarnings("unchecked")
private IterativeCondition<T> getTrueFunction() {
IterativeCondition<T> trueCondition = BooleanConditions.trueFunction();
if ((currentGroupPattern != null) && (currentGroupPattern.getUntilCondition() != null)) {
trueCondition = extendWithUntilCondition(trueCondition, ((IterativeCondition<T>) (currentGroupPattern.getUntilCondition())), true);
}
return trueCondition;
} | 3.26 |
flink_NFACompiler_createMiddleStates_rdh | /**
* Creates all the states between Start and Final state.
*
* @param sinkState
* the state that last state should point to (always the Final state)
* @return the next state after Start in the resulting graph
*/
private State<T> createMiddleStates(final State<T> sinkState) {
State<T> lastSink = sinkState;
while (f0.getPrevious() != null) {
if (f0.getQuantifier().getConsumingStrategy() == ConsumingStrategy.NOT_FOLLOW) {
// skip notFollow patterns, they are converted into edge conditions
            if (((f0.getWindowTime(WithinType.PREVIOUS_AND_CURRENT) != null) || (getWindowTime() > 0)) && lastSink.isFinal()) {
final State<T> notFollow = createState(StateType.Pending, true);
final IterativeCondition<T> notCondition = getTakeCondition(f0);
final State<T> stopState = createStopState(notCondition, f0.getName());
notFollow.addProceed(stopState, notCondition);
notFollow.addIgnore(new RichNotCondition<>(notCondition));
lastSink = notFollow;
}
} else if (f0.getQuantifier().getConsumingStrategy() == ConsumingStrategy.NOT_NEXT) {
final State<T> notNext = createState(StateType.Normal, true);
final IterativeCondition<T> notCondition = getTakeCondition(f0);
final State<T> stopState = createStopState(notCondition, f0.getName());
if (lastSink.isFinal()) {
// so that the proceed to final is not fired
notNext.addIgnore(lastSink, new RichNotCondition<>(notCondition));
} else {
notNext.addProceed(lastSink, new RichNotCondition<>(notCondition));
}
notNext.addProceed(stopState, notCondition);
lastSink = notNext;
} else {
lastSink = convertPattern(lastSink);
}
// we traverse the pattern graph backwards
followingPattern = f0;
f0 = f0.getPrevious();
final Time currentWindowTime = f0.getWindowTime();
if ((currentWindowTime != null) && (currentWindowTime.toMilliseconds() < windowTime.orElse(Long.MAX_VALUE))) {
// the window time is the global minimum of all window times of each state
windowTime = Optional.of(currentWindowTime.toMilliseconds());
}
}
return lastSink;
}
/**
* Creates the Start {@link State} | 3.26 |
flink_AdaptiveScheduler_hasDesiredResources_rdh | // ----------------------------------------------------------------
@Override
    public boolean hasDesiredResources() {
final Collection<? extends SlotInfo> freeSlots = declarativeSlotPool.getFreeSlotInfoTracker().getFreeSlotsInformation();
return hasDesiredResources(desiredResources, freeSlots);
} | 3.26 |
flink_AdaptiveScheduler_computeVertexParallelismStoreForExecution_rdh | /**
* Creates the parallelism store that should be used to build the {@link ExecutionGraph}, which
* will respect the vertex parallelism of the passed {@link JobGraph} in all execution modes.
*
* @param jobGraph
* The job graph for execution.
* @param executionMode
* The mode of scheduler execution.
* @param defaultMaxParallelismFunc
* a function for computing a default max parallelism if none
* is specified on a given vertex
* @return The parallelism store.
*/
@VisibleForTesting
static VertexParallelismStore computeVertexParallelismStoreForExecution(JobGraph jobGraph, SchedulerExecutionMode executionMode, Function<JobVertex, Integer> defaultMaxParallelismFunc) {
if (executionMode == SchedulerExecutionMode.REACTIVE) {
return computeReactiveModeVertexParallelismStore(jobGraph.getVertices(), defaultMaxParallelismFunc, false);
}
return SchedulerBase.computeVertexParallelismStore(jobGraph.getVertices(), defaultMaxParallelismFunc);
} | 3.26 |
flink_AdaptiveScheduler_computeReactiveModeVertexParallelismStore_rdh | /**
* Creates the parallelism store for a set of vertices, optionally with a flag to leave the
* vertex parallelism unchanged. If the flag is set, the parallelisms must be valid for
* execution.
*
* <p>We need to set parallelism to the max possible value when requesting resources, but when
* executing the graph we should respect what we are actually given.
*
* @param vertices
* The vertices to store parallelism information for
* @param adjustParallelism
* Whether to adjust the parallelism
* @param defaultMaxParallelismFunc
* a function for computing a default max parallelism if none
* is specified on a given vertex
* @return The parallelism store.
*/
@VisibleForTesting
static VertexParallelismStore computeReactiveModeVertexParallelismStore(Iterable<JobVertex> vertices, Function<JobVertex, Integer> defaultMaxParallelismFunc, boolean adjustParallelism) {
DefaultVertexParallelismStore store = new DefaultVertexParallelismStore();
for (JobVertex vertex : vertices) {
// if no max parallelism was configured by the user, we calculate and set a default
final int maxParallelism = (vertex.getMaxParallelism() == JobVertex.MAX_PARALLELISM_DEFAULT) ? defaultMaxParallelismFunc.apply(vertex) : vertex.getMaxParallelism();
// If the parallelism has already been adjusted, respect what has been configured in the
// vertex. Otherwise, scale it to the max parallelism to attempt to be "as parallel as
// possible"
final int parallelism;
if (adjustParallelism) {
parallelism = maxParallelism;
} else {
parallelism = vertex.getParallelism();
}
VertexParallelismInformation parallelismInfo = // Allow rescaling if the new desired max parallelism
// is not less than what was declared here during scheduling.
// This prevents the situation where more resources are requested
// based on the computed default, when actually fewer are necessary.
new DefaultVertexParallelismInfo(parallelism, maxParallelism, newMax -> newMax >=
maxParallelism ? Optional.empty() : Optional.of("Cannot lower max parallelism in Reactive mode."));
store.setParallelismInfo(vertex.getID(), parallelismInfo);
}
return store;
} | 3.26 |
flink_AdaptiveScheduler_computeVertexParallelismStore_rdh | /**
* Creates the parallelism store that should be used for determining scheduling requirements,
* which may choose different parallelisms than set in the {@link JobGraph} depending on the
* execution mode.
*
* @param jobGraph
* The job graph for execution.
* @param executionMode
* The mode of scheduler execution.
* @return The parallelism store.
*/
private static VertexParallelismStore computeVertexParallelismStore(JobGraph jobGraph, SchedulerExecutionMode executionMode) {
if (executionMode == SchedulerExecutionMode.REACTIVE) {
return computeReactiveModeVertexParallelismStore(jobGraph.getVertices(), SchedulerBase::getDefaultMaxParallelism, true);
}
return SchedulerBase.computeVertexParallelismStore(jobGraph);
} | 3.26 |
flink_AdaptiveScheduler_transitionToState_rdh | // ----------------------------------------------------------------
/**
* Transition the scheduler to another state. This method guards against state transitions while
* there is already a transition ongoing. This effectively means that you can not call this
* method from a State constructor or State#onLeave.
*
* @param targetState
* State to transition to
* @param <T>
* Type of the target state
* @return A target state instance
*/
@VisibleForTesting
    <T extends State> T transitionToState(StateFactory<T> targetState) {
        Preconditions.checkState(!isTransitioningState, "State transitions must not be triggered while another state transition is in progress.");
Preconditions.checkState(state.getClass() != targetState.getStateClass(), "Attempted to transition into the very state the scheduler is already in.");
componentMainThreadExecutor.assertRunningInMainThread();
try {
isTransitioningState = true;
LOG.debug("Transition from state {} to {}.", state.getClass().getSimpleName(), targetState.getStateClass().getSimpleName());
final JobStatus previousJobStatus = state.getJobStatus();
state.onLeave(targetState.getStateClass());
T targetStateInstance = targetState.getState();
state = targetStateInstance;
final JobStatus newJobStatus = state.getJobStatus();
if (previousJobStatus != newJobStatus) {
final long v48 = System.currentTimeMillis();
jobStatusListeners.forEach(listener -> listener.jobStatusChanges(jobInformation.getJobID(), newJobStatus, v48));
}
return targetStateInstance;
} finally {
isTransitioningState = false;
}
} | 3.26 |
flink_AdaptiveScheduler_shouldRescale_rdh | /**
 * In regular mode, rescales the job if the added resources meet {@link JobManagerOptions#MIN_PARALLELISM_INCREASE}.
 * In force mode, rescales if the parallelism has changed.
 */
@Override
public boolean shouldRescale(ExecutionGraph executionGraph, boolean forceRescale) {
final Optional<VertexParallelism> maybeNewParallelism = slotAllocator.determineParallelism(jobInformation, declarativeSlotPool.getAllSlotsInformation());
return maybeNewParallelism.filter(vertexParallelism -> {
RescalingController rescalingControllerToUse = (forceRescale) ? forceRescalingController : rescalingController;
return rescalingControllerToUse.shouldRescale(getCurrentParallelism(executionGraph), vertexParallelism);
}).isPresent();
} | 3.26 |
flink_AdaptiveScheduler_checkIdleSlotTimeout_rdh | /**
* Check for slots that are idle for more than {@link JobManagerOptions#SLOT_IDLE_TIMEOUT} and
* release them back to the ResourceManager.
*/
private void checkIdleSlotTimeout() {
if (getState().getJobStatus().isGloballyTerminalState()) {
// Job has reached the terminal state, so we can return all slots to the ResourceManager
// to speed things up because we no longer need them. This optimization lets us skip
// waiting for the slot pool service to close.
for (SlotInfo slotInfo : declarativeSlotPool.getAllSlotsInformation()) {
declarativeSlotPool.releaseSlot(slotInfo.getAllocationId(), new FlinkException("Returning slots to their owners, because the job has reached a globally terminal state."));
}
return;
} else if (getState().getJobStatus().isTerminalState()) {
// do nothing
// prevent idleness check running again while scheduler was already shut down
// don't release slots because JobMaster may want to hold on to slots in case
// it re-acquires leadership
            return;
        }
declarativeSlotPool.releaseIdleSlots(System.currentTimeMillis());
getMainThreadExecutor().schedule(this::checkIdleSlotTimeout, slotIdleTimeout.toMillis(), TimeUnit.MILLISECONDS);
} | 3.26 |
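The idle-slot check above re-schedules itself at the end of each run rather than using a fixed-rate timer, so the loop stops on its own once the job is in a terminal state. A generic, self-contained sketch of that self-rescheduling pattern (all names are illustrative):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Each run re-schedules the next check, so the loop ends naturally once terminated.
public class IdleCheckLoop {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final long checkIntervalMillis;
    private volatile boolean terminated = false;

    public IdleCheckLoop(long checkIntervalMillis) {
        this.checkIntervalMillis = checkIntervalMillis;
    }

    public void start() {
        scheduler.schedule(this::checkIdle, checkIntervalMillis, TimeUnit.MILLISECONDS);
    }

    private void checkIdle() {
        if (terminated) {
            return;                    // terminal state reached: do not re-schedule
        }
        releaseIdleResources(System.currentTimeMillis());
        scheduler.schedule(this::checkIdle, checkIntervalMillis, TimeUnit.MILLISECONDS);
    }

    private void releaseIdleResources(long now) {
        // placeholder for "release resources idle longer than the timeout"
    }

    public void terminate() {
        terminated = true;
        scheduler.shutdown();
    }
}
```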
flink_PekkoInvocationHandler_ask_rdh | /**
* Sends the message to the RPC endpoint and returns a future containing its response.
*
* @param message
* to send to the RPC endpoint
* @param timeout
* time to wait until the response future is failed with a {@link TimeoutException}
* @return Response future
*/
protected CompletableFuture<?> ask(Object message, Duration timeout) {
final CompletableFuture<?> response = ScalaFutureUtils.toJava(Patterns.ask(rpcEndpoint, message, timeout.toMillis()));
return guardCompletionWithContextClassLoader(response, flinkClassLoader);
} | 3.26 |
flink_PekkoInvocationHandler_createRpcInvocationMessage_rdh | /**
* Create the RpcInvocation message for the given RPC.
*
* @param declaringClassName
* of the RPC
* @param methodName
* of the RPC
* @param isLocalRpcInvocation
* whether the RPC must be sent as a local message
* @param parameterTypes
* of the RPC
* @param args
* of the RPC
* @return RpcInvocation message which encapsulates the RPC details
* @throws IOException
* if we cannot serialize the RPC invocation parameters
*/
private RpcInvocation createRpcInvocationMessage(final String declaringClassName, final String methodName, final boolean isLocalRpcInvocation, final Class<?>[] parameterTypes, final Object[] args) throws IOException {
final RpcInvocation rpcInvocation;
    if (isLocal && ((!forceRpcInvocationSerialization) || isLocalRpcInvocation)) {
rpcInvocation = new LocalRpcInvocation(declaringClassName, methodName, parameterTypes, args);
} else {
rpcInvocation = new RemoteRpcInvocation(declaringClassName, methodName, parameterTypes, args);
}
return rpcInvocation;
} | 3.26 |
flink_PekkoInvocationHandler_tell_rdh | // ------------------------------------------------------------------------
// Helper methods
// ------------------------------------------------------------------------
/**
* Sends the message to the RPC endpoint.
*
* @param message
* to send to the RPC endpoint.
*/
protected void tell(Object message) {
rpcEndpoint.tell(message, ActorRef.noSender());
} | 3.26 |
flink_RpcSystem_load_rdh | /**
* Loads the RpcSystem.
*
* @param config
* Flink configuration
* @return loaded RpcSystem
*/
static RpcSystem load(Configuration config) {
final PriorityQueue<RpcSystemLoader> rpcSystemLoaders = new PriorityQueue<>(Comparator.comparingInt(RpcSystemLoader::getLoadPriority));
ServiceLoader.load(RpcSystemLoader.class).forEach(rpcSystemLoaders::add);
final Iterator<RpcSystemLoader> iterator = rpcSystemLoaders.iterator();
Exception loadError = null;
while (iterator.hasNext()) {
final RpcSystemLoader next = iterator.next();
try {
return next.loadRpcSystem(config);
} catch (Exception e) {
loadError = ExceptionUtils.firstOrSuppressed(e, loadError);
}
}
throw new RpcLoaderException("Could not load RpcSystem.", loadError);
} | 3.26 |
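load() collects every RpcSystemLoader provided via ServiceLoader, orders them by priority, and only fails if none of them can produce an RpcSystem, chaining the individual failures together. A stripped-down sketch of the same pattern for an arbitrary SPI (all names here are illustrative, not Flink API):

import java.util.Comparator;
import java.util.PriorityQueue;
import java.util.ServiceLoader;

public class PriorityServiceLoading {

    // Illustrative SPI: lower priority values are tried first.
    public interface Loader<T> {
        int getLoadPriority();
        T load() throws Exception;
    }

    public static <T> T loadFirstAvailable(Class<? extends Loader<T>> spi) {
        PriorityQueue<Loader<T>> loaders =
                new PriorityQueue<>(Comparator.comparingInt(Loader::getLoadPriority));
        ServiceLoader.load(spi).forEach(loaders::add);

        Exception loadError = null;
        Loader<T> next;
        while ((next = loaders.poll()) != null) {
            try {
                return next.load();
            } catch (Exception e) {
                // Keep the first failure as the primary cause; attach later ones as suppressed.
                if (loadError == null) {
                    loadError = e;
                } else {
                    loadError.addSuppressed(e);
                }
            }
        }
        throw new IllegalStateException("No loader could be used.", loadError);
    }
}

One design note: draining the queue with poll() guarantees priority order, whereas iterating a PriorityQueue (as the Flink snippet does) only reflects the underlying heap layout, which is not necessarily priority order once more than two loaders are present.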
flink_RpcSystem_close_rdh | /**
* Hook to cleanup resources, like common thread pools or classloaders.
*/
@Override
default void close() {
} | 3.26 |
flink_TumblingEventTimeWindows_of_rdh | /**
* Creates a new {@code TumblingEventTimeWindows} {@link WindowAssigner} that assigns elements
* to time windows based on the element timestamp, offset and a staggering offset, depending on
* the staggering policy.
*
* @param size
* The size of the generated windows.
* @param offset
* The global offset by which window starts are shifted.
* @param windowStagger
* The utility that produces the staggering offset at runtime.
*/
@PublicEvolving
public static TumblingEventTimeWindows of(Time size, Time offset, WindowStagger windowStagger) {
return new TumblingEventTimeWindows(size.toMilliseconds(), offset.toMilliseconds(), windowStagger);
} | 3.26 |
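For context, this assigner is what a keyed window operation is parameterized with on the DataStream API. A minimal usage sketch (the source wiring is omitted and left as a placeholder; the two-argument overload used here is assumed to fall back to an aligned stagger):

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class TumblingWindowSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Assumed: a source of (key, value) pairs with event-time timestamps and watermarks already assigned.
        DataStream<Tuple2<String, Long>> input = createInput(env);

        input
                .keyBy(value -> value.f0)
                // 5-second tumbling event-time windows, with window starts shifted by a 1-second offset.
                .window(TumblingEventTimeWindows.of(Time.seconds(5), Time.seconds(1)))
                .sum(1)
                .print();

        env.execute("tumbling-window-sketch");
    }

    private static DataStream<Tuple2<String, Long>> createInput(StreamExecutionEnvironment env) {
        // Placeholder: a real job would attach a source plus a WatermarkStrategy here.
        throw new UnsupportedOperationException("source wiring omitted in this sketch");
    }
}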
flink_OnMainThreadJobManagerRunnerRegistry_getWrappedDelegate_rdh | /**
* Returns the delegated {@link JobManagerRunnerRegistry}. This method can be used to work around
* the main thread safeguard.
*/
@Override
public JobManagerRunnerRegistry getWrappedDelegate() {
return this.delegate;
} | 3.26 |
flink_MetricFetcherImpl_queryMetrics_rdh | /**
* Query the metrics from the given QueryServiceGateway.
*
* @param queryServiceGateway
* to query for metrics
*/
private CompletableFuture<Void> queryMetrics(final MetricQueryServiceGateway queryServiceGateway) {
LOG.debug("Query metrics for {}.", queryServiceGateway.getAddress());
return queryServiceGateway.queryMetrics(timeout).thenComposeAsync((MetricDumpSerialization.MetricSerializationResult result) -> {
metrics.addAll(deserializer.deserialize(result));
return FutureUtils.completedVoidFuture();
}, executor);
} | 3.26 |
flink_MetricFetcherImpl_update_rdh | /**
* This method can be used to signal this MetricFetcher that the metrics are still in use and
* should be updated.
*/
@Override
public void update() {
synchronized(this) {
long currentTime = System.currentTimeMillis();
// Before all querying metric tasks are completed, new metric updating tasks cannot
// be added. This is to avoid resource waste or other problems, such as OOM, caused by
// adding too many useless querying tasks. See FLINK-29134.
if (((currentTime - lastUpdateTime) > updateInterval) && fetchMetricsFuture.isDone()) {
lastUpdateTime = currentTime;
fetchMetricsFuture = fetchMetrics();
}
}
} | 3.26 |
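update() gates a refresh on two conditions: the update interval has elapsed and the previous asynchronous fetch has completed, so slow query targets cannot pile up fetch tasks. The same gate in isolation, with illustrative names and a no-op refresh standing in for the real metric query:

import java.util.concurrent.CompletableFuture;

public class ThrottledRefresher {

    private final long minIntervalMillis;
    private long lastRefreshTime;
    private CompletableFuture<Void> inFlight = CompletableFuture.completedFuture(null);

    public ThrottledRefresher(long minIntervalMillis) {
        this.minIntervalMillis = minIntervalMillis;
    }

    // Hypothetical refresh action; a real implementation would query remote endpoints asynchronously.
    protected CompletableFuture<Void> doRefresh() {
        return CompletableFuture.completedFuture(null);
    }

    // Starts a new refresh only if the interval has elapsed and the previous refresh has finished.
    public synchronized void requestRefresh() {
        long now = System.currentTimeMillis();
        if ((now - lastRefreshTime) > minIntervalMillis && inFlight.isDone()) {
            lastRefreshTime = now;
            inFlight = doRefresh();
        }
    }
}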
flink_MetricFetcherImpl_retrieveAndQueryMetrics_rdh | /**
* Retrieves and queries the specified QueryServiceGateway.
*
* @param queryServiceAddress
* specifying the QueryServiceGateway
*/
private CompletableFuture<Void> retrieveAndQueryMetrics(String queryServiceAddress) {
LOG.debug("Retrieve metric query service gateway for {}", queryServiceAddress);
final CompletableFuture<MetricQueryServiceGateway> queryServiceGatewayFuture = queryServiceRetriever.retrieveService(queryServiceAddress);
return queryServiceGatewayFuture.thenComposeAsync(this::queryMetrics, executor);
} | 3.26 |
flink_MetricFetcherImpl_m0_rdh | /**
* Returns the MetricStore containing all stored metrics.
*
* @return MetricStore containing all stored metrics.
*/
@Override
public MetricStore m0() {
return metrics;
} | 3.26 |
flink_ScalaCsvOutputFormat_m0_rdh | // --------------------------------------------------------------------------------------------
@Override
public String m0() {
return ((("CsvOutputFormat (path: " + this.getOutputFilePath()) + ", delimiter: ") + this.fieldDelimiter) + ")";
} | 3.26 |
flink_ScalaCsvOutputFormat_open_rdh | // --------------------------------------------------------------------------------------------
@Override
public void open(int taskNumber, int numTasks) throws IOException {
super.open(taskNumber, numTasks);
this.wrt = (this.charsetName == null) ? new OutputStreamWriter(new BufferedOutputStream(this.stream, 4096)) : new OutputStreamWriter(new BufferedOutputStream(this.stream, 4096), this.charsetName);
} | 3.26 |
flink_ScalaCsvOutputFormat_setQuoteStrings_rdh | /**
* Configures whether the output format should quote string values. String values are fields of
* type {@link String} and {@link org.apache.flink.types.StringValue}, as well as all subclasses
* of the latter.
*
* <p>By default, strings are not quoted.
*
* @param quoteStrings
* Flag indicating whether string fields should be quoted.
*/
public void setQuoteStrings(boolean quoteStrings) {
this.quoteStrings = quoteStrings;
} | 3.26 |
flink_ScalaCsvOutputFormat_setAllowNullValues_rdh | /**
* Configures the format to either allow null values (writing an empty field), or to throw an
* exception when encountering a null field.
*
* <p>By default, null values are allowed.
*
* @param allowNulls
* Flag to indicate whether the output format should accept null values.
*/
public void setAllowNullValues(boolean allowNulls) {
this.allowNullValues = allowNulls;
} | 3.26 |
flink_ScalaCsvOutputFormat_setInputType_rdh | /**
* The purpose of this method is solely to check whether the data type to be processed is in
* fact a tuple type.
*/
@Override
public void setInputType(TypeInformation<?> type, ExecutionConfig executionConfig) {
if (!type.isTupleType()) {
throw new InvalidProgramException("The " + ScalaCsvOutputFormat.class.getSimpleName() + " can only be used to write tuple data sets.");
}
} | 3.26 |
flink_ScalaCsvOutputFormat_setCharsetName_rdh | /**
* Sets the charset with which the CSV strings are written to the file. If not specified, the
* output format uses the systems default character encoding.
*
* @param charsetName
* The name of charset to use for encoding the output.
*/
public void setCharsetName(String charsetName) {
this.charsetName = charsetName;
} | 3.26 |
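Taken together, setQuoteStrings, setAllowNullValues and setCharsetName cover the main knobs of this output format. A hedged configuration sketch; the constructor arguments (output path, record delimiter, field delimiter) and the Scala tuple type parameter are assumptions based on similar Flink CSV output formats, not confirmed by this excerpt:

import org.apache.flink.core.fs.Path;

public class CsvOutputConfigSketch {

    public static ScalaCsvOutputFormat<scala.Tuple2<String, Integer>> configure() {
        // Import of ScalaCsvOutputFormat omitted; its package is not shown in this excerpt.
        // Assumed constructor: (output path, record delimiter, field delimiter).
        ScalaCsvOutputFormat<scala.Tuple2<String, Integer>> format =
                new ScalaCsvOutputFormat<>(new Path("file:///tmp/out.csv"), "\n", ",");
        format.setQuoteStrings(true);      // wrap String/StringValue fields in quotes
        format.setAllowNullValues(false);  // fail fast on null fields instead of writing empty ones
        format.setCharsetName("UTF-8");    // encode output explicitly instead of using the platform default
        return format;
    }
}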
flink_CoGroupWithSolutionSetFirstDriver_initialize_rdh | // --------------------------------------------------------------------------------------------
@Override
@SuppressWarnings("unchecked")
public void initialize() {
final TypeComparator<IT1> solutionSetComparator;
// grab a handle to the hash table from the iteration broker
if (taskContext instanceof AbstractIterativeTask) {
AbstractIterativeTask<?, ?> iterativeTaskContext = ((AbstractIterativeTask<?, ?>) (taskContext));
String identifier = iterativeTaskContext.brokerKey();
Object table = SolutionSetBroker.instance().get(identifier);
if (table instanceof CompactingHashTable) {
this.hashTable = ((CompactingHashTable<IT1>) (table));
solutionSetSerializer = this.hashTable.getBuildSideSerializer();
solutionSetComparator = this.hashTable.getBuildSideComparator().duplicate();
} else if (table instanceof JoinHashMap) {
this.objectMap = ((JoinHashMap<IT1>) (table));
solutionSetSerializer = this.objectMap.getBuildSerializer();
solutionSetComparator = this.objectMap.getBuildComparator().duplicate();
} else {
throw new RuntimeException("Unrecognized solution set index: " + table);
}
} else {
throw new RuntimeException("The task context of this driver is no iterative task context.");
}
TaskConfig config = taskContext.getTaskConfig();
ClassLoader classLoader = taskContext.getUserCodeClassLoader();
TypeComparatorFactory<IT2> probeSideComparatorFactory = config.getDriverComparator(0, classLoader);
this.probeSideSerializer = taskContext.<IT2>getInputSerializer(0).getSerializer();
this.probeSideComparator = probeSideComparatorFactory.createComparator();
ExecutionConfig executionConfig = taskContext.getExecutionConfig();
objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (objectReuseEnabled) {
solutionSideRecord = solutionSetSerializer.createInstance();
}
TypePairComparatorFactory<IT1, IT2> factory = taskContext.getTaskConfig().getPairComparatorFactory(taskContext.getUserCodeClassLoader());
pairComparator = factory.createComparator21(solutionSetComparator, this.probeSideComparator);
} | 3.26 |
flink_CoGroupWithSolutionSetFirstDriver_setup_rdh | // --------------------------------------------------------------------------------------------
@Override
public void setup(TaskContext<CoGroupFunction<IT1, IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
} | 3.26 |
flink_TableFunctionResultFuture_setInput_rdh | /**
* Sets the input row from the left table, which will be used to cross join with the result of
* the right table.
*/
public void setInput(Object input) {
this.input = input;
} | 3.26 |
flink_TableFunctionResultFuture_getResultFuture_rdh | /**
* Gets the internal collector which is used to emit the final row.
*/
public ResultFuture<?> getResultFuture() {
return this.resultFuture;
} | 3.26 |
flink_TableFunctionResultFuture_setResultFuture_rdh | /**
* Sets the current collector, which is used to emit the final row.
*/
public void setResultFuture(ResultFuture<?> resultFuture) {
this.resultFuture = resultFuture;
} | 3.26 |
flink_SqlConstraintValidator_validateAndChangeColumnNullability_rdh | /**
* Check constraints and change the nullability of primary key columns.
*
* @throws SqlValidateException
* if duplicate primary key constraints are encountered, or a
* constraint is enforced or unique.
*/
public static void validateAndChangeColumnNullability(List<SqlTableConstraint> tableConstraints, SqlNodeList columnList) throws SqlValidateException {
List<SqlTableConstraint> fullConstraints = getFullConstraints(tableConstraints, columnList);
if (fullConstraints.stream().filter(SqlTableConstraint::isPrimaryKey).count() > 1) {
throw new SqlValidateException(fullConstraints.get(1).getParserPosition(), "Duplicate primary key definition");
}
for (SqlTableConstraint constraint : fullConstraints) {
validate(constraint);
Set<String> primaryKeyColumns = Arrays.stream(constraint.getColumnNames()).collect(Collectors.toSet());
// rewrite primary key's nullability to false
// e.g. CREATE TABLE tbl (`a` STRING PRIMARY KEY NOT ENFORCED, ...) or
// CREATE TABLE tbl (`a` STRING, PRIMARY KEY(`a`) NOT ENFORCED) will change `a`
// to STRING NOT NULL
for (SqlNode column : columnList) {
SqlTableColumn tableColumn = ((SqlTableColumn) (column));
if ((tableColumn instanceof SqlTableColumn.SqlRegularColumn) && primaryKeyColumns.contains(tableColumn.getName().getSimple())) {
SqlTableColumn.SqlRegularColumn regularColumn = ((SqlTableColumn.SqlRegularColumn) (column));
SqlDataTypeSpec notNullType = regularColumn.getType().withNullable(false);
regularColumn.setType(notNullType);
}
}
}
} | 3.26 |
flink_SqlConstraintValidator_validate_rdh | /**
* Check table constraint.
*/
private static void validate(SqlTableConstraint constraint) throws SqlValidateException {
if (constraint.isUnique()) {
throw new SqlValidateException(constraint.getParserPosition(), "UNIQUE constraint is not supported yet");
}
if (constraint.isEnforced()) {
throw new SqlValidateException(constraint.getParserPosition(), ("Flink doesn't support ENFORCED mode for PRIMARY KEY constraint. ENFORCED/NOT ENFORCED " + "controls if the constraint checks are performed on the incoming/outgoing data. ") + "Flink does not own the data therefore the only supported mode is the NOT ENFORCED mode");
}
} | 3.26 |
flink_SqlConstraintValidator_getFullConstraints_rdh | /**
* Returns the column constraints plus the table constraints.
*/
public static List<SqlTableConstraint> getFullConstraints(List<SqlTableConstraint> tableConstraints, SqlNodeList columnList) {
List<SqlTableConstraint> ret = new ArrayList<>();
columnList.forEach(column -> {
SqlTableColumn tableColumn = ((SqlTableColumn) (column));
if (tableColumn instanceof SqlTableColumn.SqlRegularColumn) {
SqlTableColumn.SqlRegularColumn regularColumn = ((SqlTableColumn.SqlRegularColumn) (tableColumn));
regularColumn.getConstraint().map(ret::add);
}
});
ret.addAll(tableConstraints);
return ret;
} | 3.26 |
flink_UnionOperator_translateToDataFlow_rdh | /**
* Returns the BinaryNodeTranslation of the Union.
*
* @param input1
* The first input of the union, as a common API operator.
* @param input2
* The second input of the union, as a common API operator.
* @return The common API union operator.
*/
@Override
protected Union<T> translateToDataFlow(Operator<T> input1, Operator<T> input2) {
return new Union<T>(input1, input2, f0);
} | 3.26 |
flink_ResourceManagerUtils_parseRestBindPortFromWebInterfaceUrl_rdh | /**
* Parse the port from the webInterfaceUrl.
*
* @param webInterfaceUrl
* The web interface url to be parsed
* @return the parsed rest port or -1 if failed
*/
public static Integer parseRestBindPortFromWebInterfaceUrl(String webInterfaceUrl) {
if (webInterfaceUrl != null) {
final int lastColon = webInterfaceUrl.lastIndexOf(':');
if (lastColon == -1) {
return -1;
} else {
try {
return Integer.parseInt(webInterfaceUrl.substring(lastColon + 1));
} catch (NumberFormatException e) {
return -1;
}
}
} else {
return -1;
}
} | 3.26 |
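A small usage sketch of the parsing helper above, covering the successful path and the three fallback cases (no colon, non-numeric port, null input):

public class RestPortParsingExample {

    public static void main(String[] args) {
        // Import of ResourceManagerUtils omitted; its package is not shown in this excerpt.
        System.out.println(ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl("http://jobmanager-host:8081")); // 8081
        System.out.println(ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl("jobmanager-host"));             // -1 (no colon)
        System.out.println(ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl("host:not-a-port"));             // -1 (not a number)
        System.out.println(ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl(null));                          // -1 (null input)
    }
}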
flink_TypeComparator_compareAgainstReference_rdh | // --------------------------------------------------------------------------------------------
@SuppressWarnings("rawtypes")
public int compareAgainstReference(Comparable[] keys) {
throw new UnsupportedOperationException("Workaround hack.");
} | 3.26 |
flink_CliClient_executeInInteractiveMode_rdh | /**
* Opens the interactive CLI shell.
*/
public void executeInInteractiveMode() {
executeInInteractiveMode(null);
} | 3.26 |
flink_CliClient_executeInNonInteractiveMode_rdh | /**
* Opens the non-interactive CLI shell.
*/
public void executeInNonInteractiveMode(String content) {
try {
terminal = terminalFactory.get();
executeFile(content, terminal.output(), ExecutionMode.NON_INTERACTIVE_EXECUTION);
} finally {
closeTerminal();
}
} | 3.26 |