| name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
flink_RestartPipelinedRegionFailoverStrategy_getRegionsToRestart_rdh
|
/**
* All 'involved' regions are proposed to be restarted. The 'involved' regions are calculated
* with rules below: 1. The region containing the failed task is always involved 2. If an input
* result partition of an involved region is not available, i.e. Missing or Corrupted, the
* region containing the partition producer task is involved 3. If a region is involved, all of
* its consumer regions are involved
*/
private Set<SchedulingPipelinedRegion> getRegionsToRestart(SchedulingPipelinedRegion failedRegion) {
Set<SchedulingPipelinedRegion> regionsToRestart = Collections.newSetFromMap(new IdentityHashMap<>());
Set<SchedulingPipelinedRegion> visitedRegions = Collections.newSetFromMap(new IdentityHashMap<>());
Set<ConsumedPartitionGroup> visitedConsumedResultGroups = Collections.newSetFromMap(new IdentityHashMap<>());
Set<ConsumerVertexGroup> visitedConsumerVertexGroups = Collections.newSetFromMap(new IdentityHashMap<>());
// start from the failed region to visit all involved regions
Queue<SchedulingPipelinedRegion> regionsToVisit = new ArrayDeque<>();
visitedRegions.add(failedRegion);
regionsToVisit.add(failedRegion);
while (!regionsToVisit.isEmpty()) {
SchedulingPipelinedRegion v10 = regionsToVisit.poll();
// an involved region should be restarted
regionsToRestart.add(v10);
// if a needed input result partition is not available, its producer region is involved
for (IntermediateResultPartitionID consumedPartitionId : getConsumedPartitionsToVisit(v10, visitedConsumedResultGroups)) {
if (!resultPartitionAvailabilityChecker.isAvailable(consumedPartitionId)) {
SchedulingResultPartition consumedPartition = topology.getResultPartition(consumedPartitionId);
SchedulingPipelinedRegion producerRegion = topology.getPipelinedRegionOfVertex(consumedPartition.getProducer().getId());
if (!visitedRegions.contains(producerRegion)) {
visitedRegions.add(producerRegion);
regionsToVisit.add(producerRegion);
}
}
}
// all consumer regions of an involved region should be involved
for (ExecutionVertexID consumerVertexId : getConsumerVerticesToVisit(v10, visitedConsumerVertexGroups)) {
SchedulingPipelinedRegion consumerRegion = topology.getPipelinedRegionOfVertex(consumerVertexId);
if (!visitedRegions.contains(consumerRegion)) {
visitedRegions.add(consumerRegion);
regionsToVisit.add(consumerRegion);
}
}
}
return regionsToRestart;
}
| 3.26 |
flink_RestartPipelinedRegionFailoverStrategy_getTasksNeedingRestart_rdh
|
// task failure handling
// ------------------------------------------------------------------------
/**
* Returns a set of IDs corresponding to the set of vertices that should be restarted. In this
* strategy, all task vertices in 'involved' regions are proposed to be restarted. The
* 'involved' regions are calculated with rules below: 1. The region containing the failed task
* is always involved 2. If an input result partition of an involved region is not available,
* i.e. Missing or Corrupted, the region containing the partition producer task is involved 3.
* If a region is involved, all of its consumer regions are involved
*
* @param executionVertexId
* ID of the failed task
* @param cause
* cause of the failure
* @return set of IDs of vertices to restart
*/
@Override
public Set<ExecutionVertexID> getTasksNeedingRestart(ExecutionVertexID executionVertexId, Throwable cause) {
final SchedulingPipelinedRegion failedRegion = topology.getPipelinedRegionOfVertex(executionVertexId);
if (failedRegion == null) {
// TODO: show the task name in the log
throw new IllegalStateException("Can not find the failover region for task " + executionVertexId, cause);
}
// if the failure cause is data consumption error, mark the corresponding data partition to
// be failed,
// so that the failover process will try to recover it
Optional<PartitionException> dataConsumptionException = ExceptionUtils.findThrowable(cause, PartitionException.class);
if (dataConsumptionException.isPresent()) {
resultPartitionAvailabilityChecker.markResultPartitionFailed(dataConsumptionException.get().getPartitionId().getPartitionId());
}
// calculate the tasks to restart based on the result of regions to restart
Set<ExecutionVertexID> tasksToRestart = new HashSet<>();
for (SchedulingPipelinedRegion region : getRegionsToRestart(failedRegion)) {
for (SchedulingExecutionVertex vertex : region.getVertices()) {
// we do not need to restart tasks which are already in the initial state
if (vertex.getState() != ExecutionState.CREATED) {
tasksToRestart.add(vertex.getId());
}
}
}
// the previous failed partition will be recovered. remove its failed state from the checker
if (dataConsumptionException.isPresent()) {
resultPartitionAvailabilityChecker.removeResultPartitionFromFailedState(dataConsumptionException.get().getPartitionId().getPartitionId());
}
return tasksToRestart;
}
| 3.26 |
flink_RestartPipelinedRegionFailoverStrategy_getFailoverRegion_rdh
|
// ------------------------------------------------------------------------
// testing
// ------------------------------------------------------------------------
/**
* Returns the failover region that contains the given execution vertex.
*
* @return the failover region that contains the given execution vertex
*/
@VisibleForTesting
public SchedulingPipelinedRegion getFailoverRegion(ExecutionVertexID vertexID) {
return topology.getPipelinedRegionOfVertex(vertexID);
}
| 3.26 |
flink_WindowMapState_put_rdh
|
/**
* Associates a new value with the given key.
*
* @param key
* The key of the mapping
* @param value
* The new value of the mapping
* @throws Exception
* Thrown if the system cannot access the state.
*/
public void put(W window, RowData key, UV value) throws Exception {
windowState.setCurrentNamespace(window);
windowState.put(key, value);
}
| 3.26 |
flink_WindowMapState_contains_rdh
|
/**
* Returns whether there exists the given mapping.
*
* @param key
* The key of the mapping
* @return True if there exists a mapping whose key equals the given key
* @throws Exception
* Thrown if the system cannot access the state.
*/
public boolean contains(W window, RowData key) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.contains(key);
}
| 3.26 |
flink_WindowMapState_remove_rdh
|
/**
* Deletes the mapping of the given key.
*
* @param key
* The key of the mapping
* @throws Exception
* Thrown if the system cannot access the state.
*/
public void remove(W window, RowData key) throws Exception {
windowState.setCurrentNamespace(window);
windowState.remove(key);
}
| 3.26 |
flink_WindowMapState_putAll_rdh
|
/**
* Copies all of the mappings from the given map into the state.
*
* @param map
* The mappings to be stored in this state
* @throws Exception
* Thrown if the system cannot access the state.
*/
public void putAll(W window, Map<RowData, UV> map) throws Exception {
windowState.setCurrentNamespace(window);
windowState.putAll(map);
}
| 3.26 |
flink_WindowMapState_get_rdh
|
/**
* Returns the current value associated with the given key.
*
* @param key
* The key of the mapping
* @return The value of the mapping with the given key
* @throws Exception
* Thrown if the system cannot access the state.
*/
public UV get(W window, RowData key) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.get(key);
}
| 3.26 |
flink_WindowMapState_entries_rdh
|
/**
* Returns all the mappings in the state.
*
* @return An iterable view of all the key-value pairs in the state.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterable<Map.Entry<RowData, UV>> entries(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.entries();
}
| 3.26 |
flink_WindowMapState_values_rdh
|
/**
* Returns all the values in the state.
*
* @return An iterable view of all the values in the state.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterable<UV> values(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.values();
}
| 3.26 |
flink_WindowMapState_iterator_rdh
|
/**
* Iterates over all the mappings in the state.
*
* @return An iterator over all the mappings in the state
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterator<Map.Entry<RowData, UV>> iterator(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.iterator();
}
| 3.26 |
flink_WindowMapState_keys_rdh
|
/**
* Returns all the keys in the state.
*
* @return An iterable view of all the keys in the state.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterable<RowData> keys(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.keys();
}
| 3.26 |
flink_WindowMapState_isEmpty_rdh
|
/**
* Returns true if this state contains no key-value mappings, otherwise false.
*
* @return True if this state contains no key-value mappings, otherwise false.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public boolean isEmpty(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.isEmpty();
}
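All WindowMapState methods above follow the same pattern: set the current namespace to the given window, then delegate to the underlying keyed map state. A minimal, self-contained illustration of that pattern (plain Java, not Flink code; the class and names below are hypothetical):

import java.util.HashMap;
import java.util.Map;

// Illustration only: a namespace-scoped map mimicking "setCurrentNamespace then delegate".
final class NamespacedMap<N, K, V> {
    private final Map<N, Map<K, V>> byNamespace = new HashMap<>();
    private Map<K, V> current;

    void setCurrentNamespace(N namespace) {
        // all subsequent operations act on the map of this namespace (here: the window)
        current = byNamespace.computeIfAbsent(namespace, n -> new HashMap<>());
    }

    void put(K key, V value) { current.put(key, value); }
    V get(K key) { return current.get(key); }
    boolean isEmpty() { return current.isEmpty(); }
}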
| 3.26 |
flink_MainThreadValidatorUtil_isRunningInExpectedThread_rdh
|
/**
* Returns true iff the current thread is equal to the provided expected thread and logs
* violations.
*
* @param expected
* the expected main thread.
* @return true iff the current thread is equal to the provided expected thread.
*/
public static boolean isRunningInExpectedThread(@Nullable Thread expected) {
Thread actual = Thread.currentThread();
if (expected != actual) {
String violationMsg = ((("Violation of main thread constraint detected: expected <" + expected) +
"> but running in <") + actual) + ">.";
LOG.warn(violationMsg, new Exception(violationMsg));
return false;
}
return true;
}
| 3.26 |
flink_FileSystemSafetyNet_wrapWithSafetyNetWhenActivated_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
static FileSystem wrapWithSafetyNetWhenActivated(FileSystem fs) {
SafetyNetCloseableRegistry reg = REGISTRIES.get();
return reg != null ? new SafetyNetWrapperFileSystem(fs, reg) : fs;
}
| 3.26 |
flink_FileSystemSafetyNet_closeSafetyNetAndGuardedResourcesForThread_rdh
|
/**
* Closes the safety net for a thread. This closes all remaining unclosed streams that were
* opened by safety-net-guarded file systems. After this method was called, no streams can be
* opened any more from any FileSystem instance that was obtained while the thread was guarded
* by the safety net.
*
* <p>This method should be called at the very end of a guarded thread.
*/
@Internal
public static void closeSafetyNetAndGuardedResourcesForThread() {
SafetyNetCloseableRegistry registry = REGISTRIES.get();
if (null != registry) {
REGISTRIES.remove();
IOUtils.closeQuietly(registry);
}
}
| 3.26 |
flink_FileSystemSafetyNet_initializeSafetyNetForThread_rdh
|
// ------------------------------------------------------------------------
// Activating / Deactivating
// ------------------------------------------------------------------------
/**
* Activates the safety net for a thread. {@link FileSystem} instances obtained by the thread
* that called this method will be guarded, meaning that their created streams are tracked and
* can be closed via the safety net closing hook.
*
* <p>This method should be called at the beginning of a thread that should be guarded.
*
* @throws IllegalStateException
* Thrown, if a safety net was already registered for the thread.
*/
@Internal
public static void initializeSafetyNetForThread() {
SafetyNetCloseableRegistry oldRegistry = REGISTRIES.get();
checkState(null == oldRegistry, "Found an existing FileSystem safety net for this thread: %s "
+ "This may indicate an accidental repeated initialization, or a leak of the "
+ "(Inheritable)ThreadLocal through a ThreadPool.", oldRegistry);
SafetyNetCloseableRegistry newRegistry = new SafetyNetCloseableRegistry();
REGISTRIES.set(newRegistry);
}
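Together with closeSafetyNetAndGuardedResourcesForThread() above, the intended usage is a begin/end guard around the body of a worker thread. A hedged usage sketch; the URI, path and write calls are arbitrary examples (assumes Flink's FileSystem, FSDataOutputStream, Path and java.net.URI):

// Hedged sketch: guard a thread's FileSystem streams with the safety net.
FileSystemSafetyNet.initializeSafetyNetForThread();
try {
    FileSystem fs = FileSystem.get(URI.create("file:///tmp/safety-net-example"));
    try (FSDataOutputStream out =
            fs.create(new Path("file:///tmp/safety-net-example/data"), FileSystem.WriteMode.OVERWRITE)) {
        out.write(new byte[] {1, 2, 3});
    }
    // any stream accidentally left open by this thread is tracked by the registry
} finally {
    // closes remaining guarded streams and deactivates the safety net for this thread
    FileSystemSafetyNet.closeSafetyNetAndGuardedResourcesForThread();
}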
| 3.26 |
flink_AsyncSinkWriterStateSerializer_serialize_rdh
|
/**
* Serializes state in the form of
* [DATA_IDENTIFIER,NUM_OF_ELEMENTS,SIZE1,REQUEST1,SIZE2,REQUEST2....].
*/
@Override
public byte[] serialize(BufferedRequestState<RequestEntryT> obj) throws IOException {
Collection<RequestEntryWrapper<RequestEntryT>> bufferState = obj.getBufferedRequestEntries();
try (final ByteArrayOutputStream baos = new ByteArrayOutputStream();
final DataOutputStream out = new DataOutputStream(baos)) {
out.writeLong(DATA_IDENTIFIER);
out.writeInt(bufferState.size());
for (RequestEntryWrapper<RequestEntryT> wrapper : bufferState) {
out.writeLong(wrapper.getSize());
serializeRequestToStream(wrapper.getRequestEntry(), out);
}
return baos.toByteArray();
}
}
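Since the layout is spelled out above, the read path is its mirror image. A hedged sketch of a matching deserializer; the deserializeRequestFromStream helper and the RequestEntryWrapper/BufferedRequestState constructors used here are assumptions, not necessarily the exact Flink signatures (assumes java.io.ByteArrayInputStream, java.io.DataInputStream, java.util.ArrayList, java.util.List):

// Hedged sketch mirroring [DATA_IDENTIFIER, NUM_OF_ELEMENTS, SIZE1, REQUEST1, SIZE2, REQUEST2, ...].
public BufferedRequestState<RequestEntryT> deserializeSketch(byte[] serialized) throws IOException {
    try (final ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
            final DataInputStream in = new DataInputStream(bais)) {
        final long identifier = in.readLong();     // DATA_IDENTIFIER (could be validated here)
        final int numberOfElements = in.readInt(); // NUM_OF_ELEMENTS
        final List<RequestEntryWrapper<RequestEntryT>> entries = new ArrayList<>(numberOfElements);
        for (int i = 0; i < numberOfElements; i++) {
            final long requestSize = in.readLong();                                    // SIZEi
            final RequestEntryT entry = deserializeRequestFromStream(requestSize, in); // REQUESTi
            entries.add(new RequestEntryWrapper<>(entry, requestSize));
        }
        return new BufferedRequestState<>(entries);
    }
}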
| 3.26 |
flink_KubernetesStateHandleStore_releaseAndTryRemove_rdh
|
/**
* Removes the key from the state ConfigMap; the corresponding state on external storage is
* removed as well. The {@link RetrievableStateHandle} stored under the given key, if any, is
* discarded as part of the removal.
*
* @param key
* Key to be removed from ConfigMap
* @return True if the state handle isn't listed anymore.
* @throws Exception
* if removing the key or discarding the state failed
*/
@Override
public boolean releaseAndTryRemove(String key) throws Exception {
checkNotNull(key, "Key in ConfigMap.");
final AtomicReference<RetrievableStateHandle<T>> stateHandleRefer = new AtomicReference<>();
final AtomicBoolean stateHandleDoesNotExist = new AtomicBoolean(false);
return updateConfigMap(configMap -> {
final String content = configMap.getData().get(key);
if (content != null) {
try {
final StateHandleWithDeleteMarker<T> result = deserializeStateHandle(content);
if (!result.isMarkedForDeletion()) {
// Mark the ConfigMap entry as deleting. This basically
// starts a "removal transaction" that allows us to retry
// the removal if needed.
configMap.getData().put(key, serializeStateHandle(result.toDeleting()));
}
stateHandleRefer.set(result.getInner());
} catch (IOException e) {
m1(key, configMapName, e);
// Remove entry from the config map as we can't recover from
// this (the serialization would fail on the retry as well).
Objects.requireNonNull(configMap.getData().remove(key));
}
return Optional.of(configMap);
} else {
stateHandleDoesNotExist.set(true);
}
return Optional.empty();
}).thenCompose(updated -> {
if (updated && (stateHandleRefer.get() != null)) {
try {
stateHandleRefer.get().discardState();
return updateConfigMap(configMap -> {
// Now we can safely commit the "removal
// transaction" by removing the entry from the
// ConfigMap.
configMap.getData().remove(key);
return Optional.of(configMap);
});
} catch (Exception e) {
throw new CompletionException(e);
}
}
return CompletableFuture.completedFuture(stateHandleDoesNotExist.get() || updated);
}).get();
}
| 3.26 |
flink_KubernetesStateHandleStore_addEntry_rdh
|
/**
* Adds entry into the ConfigMap. If the entry already exists and contains delete marker, we try
* to finish the removal before the actual update.
*/
private Optional<KubernetesConfigMap> addEntry(KubernetesConfigMap configMap, String key, byte[] serializedStateHandle) throws Exception {
final String oldBase64Content = configMap.getData().get(key);
final String newBase64Content = m0(serializedStateHandle);
if (oldBase64Content != null) {
try {
final StateHandleWithDeleteMarker<T> stateHandle = deserializeStateHandle(oldBase64Content);
if (stateHandle.isMarkedForDeletion()) {
// This might be a left-over after the fail-over. As the remove operation is
// idempotent let's try to finish it.
if (!releaseAndTryRemove(key)) {
throw new IllegalStateException("Unable to remove the marked as deleting entry.");
}
} else {
// It could happen that the kubernetes client retries a transaction that has
// already succeeded due to network issues. So we simply ignore when the
// new content is same as the existing one.
if (oldBase64Content.equals(newBase64Content)) {
return Optional.of(configMap);
}
throw getKeyAlreadyExistException(key);
}
} catch (IOException e) {
// Just log the invalid entry, it will be overridden
// by the update code path below.
m1(key, configMapName, e);
}
}
configMap.getData().put(key, newBase64Content);
return Optional.of(configMap);
}
| 3.26 |
flink_KubernetesStateHandleStore_replaceEntry_rdh
|
/**
* Replace the entry in the ConfigMap. If the entry already exists and contains delete marker,
* we treat it as non-existent and perform the best effort removal.
*/
private Optional<KubernetesConfigMap> replaceEntry(KubernetesConfigMap configMap, String key, byte[] serializedStateHandle, AtomicReference<RetrievableStateHandle<T>> oldStateHandleRef) throws NotExistException {
final String content = configMap.getData().get(key);
if (content != null) {
try {
final StateHandleWithDeleteMarker<T> stateHandle = deserializeStateHandle(content);
oldStateHandleRef.set(stateHandle.getInner());
if (stateHandle.isMarkedForDeletion()) {
final NotExistException exception = getKeyNotExistException(key);
try {
// Try to finish the removal. We don't really care whether this succeeds or
// not, from the "replace" point of view, the entry doesn't exist.
releaseAndTryRemove(key);
} catch (Exception e) {
exception.addSuppressed(e);
}
throw exception;
}
} catch (IOException e) {
// Just log the invalid entry, it will be removed by the update code path below.
m1(key, configMapName, e);
}
configMap.getData().put(key, m0(serializedStateHandle));
return Optional.of(configMap);
}
throw getKeyNotExistException(key);
}
| 3.26 |
flink_KubernetesStateHandleStore_addAndLock_rdh
|
/**
* Creates a state handle and stores it in the ConfigMap. We can guarantee that only the leader
* updates the ConfigMap, since "Get (check the leader) and Update (write back to the ConfigMap)"
* is a transactional operation.
*
* @param key
* Key in ConfigMap
* @param state
* State to be added
* @throws AlreadyExistException
* if the name already exists
* @throws PossibleInconsistentStateException
* if the write-to-Kubernetes operation failed. This
* indicates that it's not clear whether the new state was successfully written to
* Kubernetes or not. No state was discarded. Proper error handling has to be applied on the
* caller's side.
* @throws Exception
* if persisting state or writing state handle failed
*/
@Override
public RetrievableStateHandle<T> addAndLock(String key, T state) throws PossibleInconsistentStateException, Exception {
checkNotNull(key, "Key in ConfigMap.");
checkNotNull(state, "State.");
final RetrievableStateHandle<T> storeHandle = storage.store(state);
final byte[] serializedStoreHandle = serializeOrDiscard(new StateHandleWithDeleteMarker<>(storeHandle));
// initialize flag to serve the failure case
boolean discardState = true;
try {
// a successful operation will result in the state not being discarded
discardState = !updateConfigMap(cm -> {
try {
return addEntry(cm, key, serializedStoreHandle);
} catch (Exception e) {
throw new CompletionException(e);
}
}).get();
return storeHandle;
} catch (Exception ex) {
final Optional<PossibleInconsistentStateException> possibleInconsistentStateException = ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class);
if (possibleInconsistentStateException.isPresent()) {
// it's unclear whether the state handle metadata was written to the ConfigMap -
// hence, we don't discard the data
discardState = false;
throw possibleInconsistentStateException.get();
}
throw ExceptionUtils.findThrowable(ex, AlreadyExistException.class).orElseThrow(() -> ex);
} finally {
if (discardState) {
storeHandle.discardState();
}
}
}
| 3.26 |
flink_KubernetesStateHandleStore_getAllAndLock_rdh
|
/**
* Gets all available state handles from Kubernetes.
*
* @return All state handles from ConfigMap.
*/
@Override
public List<Tuple2<RetrievableStateHandle<T>, String>> getAllAndLock() {
return kubeClient.getConfigMap(configMapName).map(configMap -> {
final List<Tuple2<RetrievableStateHandle<T>, String>> stateHandles = new ArrayList<>();
configMap.getData().entrySet().stream().filter(entry -> configMapKeyFilter.test(entry.getKey())).forEach(entry -> {
try {
final StateHandleWithDeleteMarker<T> result = deserializeStateHandle(entry.getValue());
if (!result.isMarkedForDeletion()) {
stateHandles.add(new Tuple2<>(result.getInner(), entry.getKey()));
}
} catch (IOException e) {
LOG.warn("ConfigMap {} contained corrupted data. Ignoring the key {}.", configMapName, entry.getKey());
}
});
return stateHandles;
}).orElse(Collections.emptyList());
}
| 3.26 |
flink_KubernetesStateHandleStore_getAndLock_rdh
|
/**
* Gets the {@link RetrievableStateHandle} stored in the given ConfigMap.
*
* @param key
* Key in ConfigMap
* @return The retrieved state handle from the specified ConfigMap and key
* @throws IOException
* if the method failed to deserialize the stored state handle
* @throws NotExistException
* when the name does not exist
* @throws Exception
* if get state handle from ConfigMap failed
*/
@Override
public RetrievableStateHandle<T> getAndLock(String key) throws Exception {
checkNotNull(key, "Key in ConfigMap.");
final Optional<KubernetesConfigMap> optional = kubeClient.getConfigMap(configMapName);
if (optional.isPresent()) {
final KubernetesConfigMap configMap = optional.get();
if (configMap.getData().containsKey(key)) {
final StateHandleWithDeleteMarker<T> result = deserializeStateHandle(configMap.getData().get(key));
if (result.isMarkedForDeletion()) {
throw getKeyMarkedAsDeletedException(key);
}
return result.getInner();
} else {
throw getKeyNotExistException(key);
}
} else {
throw getConfigMapNotExistException();
}
}
| 3.26 |
flink_KubernetesStateHandleStore_replace_rdh
|
/**
* Replaces a state handle in the ConfigMap and discards the old state handle. We do not lock the
* resource version and then replace it in Kubernetes, since the ConfigMap is periodically updated
* by the leader and the resource version changes very fast. We use a "check-existence and update"
* transactional operation instead.
*
* @param key
* Key in ConfigMap
* @param resourceVersion
* resource version when checking existence via {@link #exists}.
* @param state
* State to be added
* @throws NotExistException
* if the name does not exist
* @throws PossibleInconsistentStateException
* if a failure occurred during the update operation.
* It's unclear whether the operation actually succeeded or not. No state was discarded. The
* method's caller should handle this case properly.
* @throws Exception
* if persisting state or writing state handle failed
*/
@Override
public void replace(String key, StringResourceVersion resourceVersion, T state) throws Exception {
checkNotNull(key, "Key in ConfigMap.");
checkNotNull(state, "State.");
final RetrievableStateHandle<T> newStateHandle = storage.store(state);
final byte[] serializedStateHandle = serializeOrDiscard(new StateHandleWithDeleteMarker<>(newStateHandle));
// initialize flags to serve the failure case
boolean discardOldState = false;
boolean discardNewState = true;
// We don't want to greedily pull the old state handle as we have to do that anyway in
// replaceEntry method for check of delete markers.
final AtomicReference<RetrievableStateHandle<T>> oldStateHandleRef = new AtomicReference<>();
try {
final boolean success = updateConfigMap(cm -> {
try {
return replaceEntry(cm, key, serializedStateHandle, oldStateHandleRef);
} catch (NotExistException e) {
throw new CompletionException(e);
}
}).get();
// swap subject for deletion in case of success
discardOldState = success;
discardNewState = !success;
} catch (Exception ex) {
final Optional<PossibleInconsistentStateException> possibleInconsistentStateException = ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class);
if (possibleInconsistentStateException.isPresent()) {
// it's unclear whether the state handle metadata was written to the ConfigMap -
// hence, we don't discard any data
discardNewState = false;
throw possibleInconsistentStateException.get();
}
throw ExceptionUtils.findThrowable(ex, NotExistException.class).orElseThrow(() -> ex);
} finally {
if (discardNewState) {
newStateHandle.discardState();
}
if (discardOldState) {
Objects.requireNonNull(oldStateHandleRef.get(), "state handle should have been set on success").discardState();
}
}
}
| 3.26 |
flink_KubernetesStateHandleStore_clearEntries_rdh
|
/**
* Remove all the filtered keys in the ConfigMap.
*
* @throws Exception
* when removing the keys failed
*/
@Override
public void clearEntries() throws Exception {
updateConfigMap(configMap -> {
configMap.getData().keySet().removeIf(configMapKeyFilter);
return Optional.of(configMap);
}).get();
}
| 3.26 |
flink_KubernetesStateHandleStore_getAllHandles_rdh
|
/**
* Return a list of all valid keys for state handles.
*
* @return List of valid state handle keys in Kubernetes ConfigMap
* @throws Exception
* if get state handle names from ConfigMap failed.
*/
@Override
public Collection<String> getAllHandles() throws Exception {
return kubeClient.getConfigMap(configMapName).map(configMap -> configMap.getData().keySet().stream().filter(configMapKeyFilter).filter(k -> {
try {
final String content = Objects.requireNonNull(configMap.getData().get(k));
return !deserializeStateHandle(content).isMarkedForDeletion();
} catch (IOException e) {
return false;
}
}).collect(Collectors.toList())).orElseThrow(this::getConfigMapNotExistException);
}
| 3.26 |
flink_KubernetesStateHandleStore_exists_rdh
|
/**
* Returns the resource version of the ConfigMap.
*
* @param key
* Key in ConfigMap
* @return resource version in {@link StringResourceVersion} format.
* @throws Exception
* if the check existence operation failed
*/
@Override
public StringResourceVersion exists(String key) throws Exception {
checkNotNull(key, "Key in ConfigMap.");
return kubeClient.getConfigMap(configMapName).map(configMap -> {
final String content = configMap.getData().get(key);
if (content != null) {
try {
final StateHandleWithDeleteMarker<T> stateHandle = deserializeStateHandle(content);
if (stateHandle.isMarkedForDeletion()) {
return StringResourceVersion.notExisting();
}
} catch (IOException e) {
// Any calls to add or replace will try to remove this resource,
// so we can simply treat it as non-existent.
return StringResourceVersion.notExisting();
}
return StringResourceVersion.valueOf(configMap.getResourceVersion());
}
return StringResourceVersion.notExisting();
}).orElseThrow(this::getConfigMapNotExistException);
}
| 3.26 |
flink_RestClusterClientConfiguration_getRetryMaxAttempts_rdh
|
/**
*
* @see RestOptions#RETRY_MAX_ATTEMPTS
*/
public int getRetryMaxAttempts() {
return retryMaxAttempts;
}
| 3.26 |
flink_RestClusterClientConfiguration_getAwaitLeaderTimeout_rdh
|
/**
*
* @see RestOptions#AWAIT_LEADER_TIMEOUT
*/
public long getAwaitLeaderTimeout() {
return awaitLeaderTimeout;
}
| 3.26 |
flink_RestClusterClientConfiguration_getRetryDelay_rdh
|
/**
*
* @see RestOptions#RETRY_DELAY
*/
public long getRetryDelay() {
return f0;
}
| 3.26 |
flink_TaskSlotTableImpl_notifyTimeout_rdh
|
// ---------------------------------------------------------------------
// TimeoutListener methods
// ---------------------------------------------------------------------
@Override
public void notifyTimeout(AllocationID key, UUID ticket) {
checkStarted();
if (f0 != null) {
f0.timeoutSlot(key, ticket);
}
}
| 3.26 |
flink_TaskSlotTableImpl_getTaskSlot_rdh
|
// ---------------------------------------------------------------------
// Internal methods
// ---------------------------------------------------------------------
@Nullable
private TaskSlot<T> getTaskSlot(AllocationID allocationId) {
Preconditions.checkNotNull(allocationId);
return allocatedSlots.get(allocationId);
}
| 3.26 |
flink_TaskSlotTableImpl_createSlotReport_rdh
|
// ---------------------------------------------------------------------
// Slot report methods
// ---------------------------------------------------------------------
@Override
public SlotReport createSlotReport(ResourceID resourceId) {
List<SlotStatus> slotStatuses = new ArrayList<>();
for (int i = 0; i < numberSlots; i++) {
SlotID slotId = new SlotID(resourceId, i);
SlotStatus slotStatus;
if (taskSlots.containsKey(i)) {
TaskSlot<T> taskSlot = taskSlots.get(i);
slotStatus = new SlotStatus(slotId, taskSlot.getResourceProfile(), taskSlot.getJobId(), taskSlot.getAllocationId());
} else {
slotStatus = new SlotStatus(slotId, defaultSlotResourceProfile, null, null);
}
slotStatuses.add(slotStatus);
}
for (TaskSlot<T> taskSlot : allocatedSlots.values()) {
if (isDynamicIndex(taskSlot.getIndex())) {
SlotStatus slotStatus = new SlotStatus(new SlotID(resourceId, taskSlot.getIndex()), taskSlot.getResourceProfile(), taskSlot.getJobId(), taskSlot.getAllocationId());
slotStatuses.add(slotStatus);
}
}
final SlotReport slotReport = new SlotReport(slotStatuses);
return slotReport;
}
| 3.26 |
flink_TaskSlotTableImpl_addTask_rdh
|
// ---------------------------------------------------------------------
// Task methods
// ---------------------------------------------------------------------
@Override
public boolean addTask(T task) throws SlotNotFoundException, SlotNotActiveException {
checkRunning();
Preconditions.checkNotNull(task);
TaskSlot<T> taskSlot = getTaskSlot(task.getAllocationId());
if (taskSlot != null) {
if (taskSlot.isActive(task.getJobID(), task.getAllocationId())) {
if (taskSlot.add(task)) {
taskSlotMappings.put(task.getExecutionId(), new TaskSlotMapping<>(task, taskSlot));
return true;
} else {
return false;
}
} else {
throw new SlotNotActiveException(task.getJobID(), task.getAllocationId());
}
} else {
throw new SlotNotFoundException(task.getAllocationId());
}
}
| 3.26 |
flink_DriverStrategy_getDriverClass_rdh
|
// --------------------------------------------------------------------------------------------
public Class<? extends Driver<?, ?>> getDriverClass() {
return this.driverClass;
}
| 3.26 |
flink_CliTableResultView_getRow_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected String[] getRow(String[] resultRow) {
return resultRow;
}
| 3.26 |
flink_CliTableResultView_updatePage_rdh
|
// --------------------------------------------------------------------------------------------
private void updatePage() {
// retrieve page
final int v13 = (page == LAST_PAGE) ? pageCount : page;
final List<RowData> rows;
try {
rows = materializedResult.retrievePage(v13);
} catch (SqlExecutionException e) {
close(e);
return;
}
// convert page
final List<String[]> stringRows =
rows.stream().map(resultDescriptor.getRowDataStringConverter()::convert).collect(Collectors.toList());
// update results
if (previousResultsPage == v13) {
// only use the previous results if the current page number has not changed
// this allows for updated results when the key space remains constant
previousResults = results;
} else {
previousResults = null;
previousResultsPage = v13;
}
results = stringRows;
// check if selected row is still valid
if (selectedRow != NO_ROW_SELECTED) {
if (selectedRow >= results.size()) {
selectedRow = NO_ROW_SELECTED;
}
}
// reset view
resetAllParts();
}
| 3.26 |
flink_StatsSummarySnapshot_getAverage_rdh
|
/**
* Calculates the average over all seen values.
*
* @return Average over all seen values.
*/
public long getAverage() {
if (count == 0) {
return 0;
} else {
return sum / count;
}
}
| 3.26 |
flink_StatsSummarySnapshot_getQuantile_rdh
|
/**
* Returns the value for the given quantile based on the represented histogram statistics or
* {@link Double#NaN} if the histogram was not built.
*
* @param quantile
* Quantile to calculate the value for
* @return Value for the given quantile
*/
public double getQuantile(double quantile) {
return histogram == null ? Double.NaN : histogram.getQuantile(quantile);
}
| 3.26 |
flink_StatsSummarySnapshot_getMinimum_rdh
|
/**
* Returns the minimum seen value.
*
* @return The current minimum value.
*/
public long getMinimum() {
return min;
}
| 3.26 |
flink_ArrowFieldWriter_reset_rdh
|
/**
* Resets the state of the writer to write the next batch of fields.
*/
public void reset() {
valueVector.reset();
count = 0;
}
| 3.26 |
flink_ArrowFieldWriter_getCount_rdh
|
/**
* Returns the current count of elements written.
*/
public int getCount() {
return count;
}
| 3.26 |
flink_ArrowFieldWriter_write_rdh
|
/**
* Writes the specified ordinal of the specified row.
*/
public void write(IN row, int ordinal) {
doWrite(row, ordinal);
count += 1;
}
| 3.26 |
flink_ArrowFieldWriter_finish_rdh
|
/**
* Finishes the writing of the current row batch.
*/
public void finish() {
valueVector.setValueCount(count);
}
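The reset/write/finish methods form a per-batch lifecycle. A hedged sketch of how a caller might drive one writer for a single column, relying only on the methods shown in these snippets (the helper class and method names are hypothetical):

import java.util.List;

// Hedged lifecycle sketch: write one batch of rows through an ArrowFieldWriter.
final class ArrowBatchSketch {
    static <IN> void writeBatch(ArrowFieldWriter<IN> writer, List<IN> rows, int ordinal) {
        writer.reset();                 // clear the vector, count goes back to 0
        for (IN row : rows) {
            writer.write(row, ordinal); // delegates to doWrite and increments count
        }
        writer.finish();                // fixes the value count on the underlying vector
    }
}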
| 3.26 |
flink_EventId_snapshotConfiguration_rdh
|
// -----------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<EventId> snapshotConfiguration() {
return new EventIdSerializerSnapshot();
}
| 3.26 |
flink_PartitionTable_startTrackingPartitions_rdh
|
/**
* Starts the tracking of the given partitions for the given key.
*/
public void startTrackingPartitions(K key, Collection<ResultPartitionID> newPartitionIds) {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(newPartitionIds);
if (newPartitionIds.isEmpty()) {
return;
}
trackedPartitionsPerKey.compute(key, (ignored, partitionIds) -> {
if (partitionIds == null) {
partitionIds = CollectionUtil.newHashSetWithExpectedSize(8);
}
partitionIds.addAll(newPartitionIds);
return partitionIds;
});
}
| 3.26 |
flink_PartitionTable_hasTrackedPartitions_rdh
|
/**
* Returns whether any partitions are being tracked for the given key.
*/
public boolean hasTrackedPartitions(K key) {
return trackedPartitionsPerKey.containsKey(key);
}
| 3.26 |
flink_PartitionTable_stopTrackingPartitions_rdh
|
/**
* Stops the tracking of the given set of partitions for the given key.
*/
public void stopTrackingPartitions(K key, Collection<ResultPartitionID> partitionIds) {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(partitionIds);
// If the key is unknown we do not fail here, in line with
// ShuffleEnvironment#releaseFinishedPartitions
trackedPartitionsPerKey.computeIfPresent(key, (ignored, resultPartitionIDS) -> {
resultPartitionIDS.removeAll(partitionIds);
return resultPartitionIDS.isEmpty() ? null : resultPartitionIDS;
});
}
| 3.26 |
flink_StreamingJoinOperator_processElement_rdh
|
/**
* Process an input element and output incremental joined records, retraction messages will be
* sent in some scenarios.
*
* <p>The following pseudo code describes the core logic of this method. The logic of this
* method is complex, so we provide the pseudo code to help understand it. The pseudo code
* should be kept in sync with the real logic of the method.
*
* <p>Note: "+I" represents "INSERT", "-D" represents "DELETE", "+U" represents "UPDATE_AFTER",
* "-U" represents "UPDATE_BEFORE". We forward input RowKind if it is inner join, otherwise, we
* always send insert and delete for simplification. We can optimize this to send -U & +U
* instead of D & I in the future (see FLINK-17337). They are equivalent in this join case. It
* may need some refactoring if we want to send -U & +U, so we still keep -D & +I for now for
* simplification. See {@code FlinkChangelogModeInferenceProgram.SatisfyModifyKindSetTraitVisitor}.
*
* <pre>
* if input record is accumulate
* | if input side is outer
* | | if there is no matched rows on the other side, send +I[record+null], state.add(record, 0)
* | | if there are matched rows on the other side
* | | | if other side is outer
* | | | | if the matched num in the matched rows == 0, send -D[null+other]
* | | | | if the matched num in the matched rows > 0, skip
* | | | | otherState.update(other, old + 1)
* | | | endif
* | | | send +I[record+other]s, state.add(record, other.size)
* | | endif
* | endif
* | if input side not outer
* | | state.add(record)
* | | if there is no matched rows on the other side, skip
* | | if there are matched rows on the other side
* | | | if other side is outer
* | | | | if the matched num in the matched rows == 0, send -D[null+other]
* | | | | if the matched num in the matched rows > 0, skip
* | | | | otherState.update(other, old + 1)
* | | | | send +I[record+other]s
* | | | else
* | | | | send +I/+U[record+other]s (using input RowKind)
* | | | endif
* | | endif
* | endif
* endif
*
* if input record is retract
* | state.retract(record)
* | if there is no matched rows on the other side
* | | if input side is outer, send -D[record+null]
* | endif
* | if there are matched rows on the other side, send -D[record+other]s if outer, send -D/-U[record+other]s if inner.
* | | if other side is outer
* | | | if the matched num in the matched rows == 0, this should never happen!
* | | | if the matched num in the matched rows == 1, send +I[null+other]
* | | | if the matched num in the matched rows > 1, skip
* | | | otherState.update(other, old - 1)
* | | endif
* | endif
* endif
* </pre>
*
* @param input
* the input element
* @param inputSideStateView
* state of input side
* @param otherSideStateView
* state of other side
* @param inputIsLeft
* whether input side is left side
*/
private void processElement(RowData input, JoinRecordStateView inputSideStateView, JoinRecordStateView otherSideStateView, boolean inputIsLeft) throws Exception {
boolean inputIsOuter = (inputIsLeft) ? leftIsOuter : rightIsOuter;
boolean otherIsOuter = (inputIsLeft) ? rightIsOuter : leftIsOuter;
boolean isAccumulateMsg = RowDataUtil.isAccumulateMsg(input);
RowKind inputRowKind = input.getRowKind();
input.setRowKind(RowKind.INSERT);// erase RowKind for later state updating
AssociatedRecords associatedRecords = AssociatedRecords.of(input, inputIsLeft, otherSideStateView, joinCondition);
if (isAccumulateMsg) {
// record is accumulate
if (inputIsOuter) {
// input side is outer
OuterJoinRecordStateView inputSideOuterStateView = ((OuterJoinRecordStateView) (inputSideStateView));
if (associatedRecords.isEmpty()) {
// there is no matched rows on the other side
// send +I[record+null]
outRow.setRowKind(RowKind.INSERT);
outputNullPadding(input, inputIsLeft);
// state.add(record, 0)
inputSideOuterStateView.addRecord(input, 0);
} else {
// there are matched rows on the other side
if (otherIsOuter) {
// other side is outer
OuterJoinRecordStateView otherSideOuterStateView = ((OuterJoinRecordStateView) (otherSideStateView));
for (OuterRecord outerRecord : associatedRecords.getOuterRecords()) {
RowData other = outerRecord.record;
// if the matched num in the matched rows == 0
if (outerRecord.numOfAssociations == 0) {
// send -D[null+other]
outRow.setRowKind(RowKind.DELETE);
outputNullPadding(other, !inputIsLeft);
}
// ignore matched number > 0
// otherState.update(other, old + 1)
otherSideOuterStateView.updateNumOfAssociations(other, outerRecord.numOfAssociations + 1);
}
}
// send +I[record+other]s
outRow.setRowKind(RowKind.INSERT);
for (RowData other : associatedRecords.getRecords()) {
output(input, other, inputIsLeft);
}
// state.add(record, other.size)
inputSideOuterStateView.addRecord(input, associatedRecords.size());
}
} else {
// input side not outer
// state.add(record)
inputSideStateView.addRecord(input);
if (!associatedRecords.isEmpty()) {
// if there are matched rows on the other side
if (otherIsOuter) {
// if other side is outer
OuterJoinRecordStateView otherSideOuterStateView = ((OuterJoinRecordStateView) (otherSideStateView));
for (OuterRecord outerRecord : associatedRecords.getOuterRecords()) {
if (outerRecord.numOfAssociations == 0) {
// if the matched num in the matched rows == 0
// send -D[null+other]
outRow.setRowKind(RowKind.DELETE);
outputNullPadding(outerRecord.record, !inputIsLeft);
}
// otherState.update(other, old + 1)
otherSideOuterStateView.updateNumOfAssociations(outerRecord.record, outerRecord.numOfAssociations + 1);
}
// send +I[record+other]s
outRow.setRowKind(RowKind.INSERT);
} else {
// send +I/+U[record+other]s (using input RowKind)
outRow.setRowKind(inputRowKind);
}
for (RowData other : associatedRecords.getRecords()) {
output(input, other, inputIsLeft);
}
}
// skip when there is no matched rows on the other side
}
} else {
// input record is retract
// state.retract(record)
inputSideStateView.retractRecord(input);
if (associatedRecords.isEmpty()) {
// there is no matched rows on the other side
if (inputIsOuter) {
// input side is outer
// send -D[record+null]
outRow.setRowKind(RowKind.DELETE);
outputNullPadding(input, inputIsLeft);
}
// nothing to do when input side is not outer
} else {
// there are matched rows on the other side
if (inputIsOuter) {
// send -D[record+other]s
outRow.setRowKind(RowKind.DELETE);
} else {
// send -D/-U[record+other]s (using input RowKind)
outRow.setRowKind(inputRowKind);
}
for (RowData other : associatedRecords.getRecords()) {
output(input, other, inputIsLeft);
}
// if other side is outer
if (otherIsOuter) {
OuterJoinRecordStateView otherSideOuterStateView = ((OuterJoinRecordStateView) (otherSideStateView));
for (OuterRecord outerRecord : associatedRecords.getOuterRecords()) {
if (outerRecord.numOfAssociations == 1) {
// send +I[null+other]
outRow.setRowKind(RowKind.INSERT);
outputNullPadding(outerRecord.record, !inputIsLeft);
}
// nothing else to do when number of associations > 1
// otherState.update(other, old - 1)
otherSideOuterStateView.updateNumOfAssociations(outerRecord.record, outerRecord.numOfAssociations - 1);
}
}
}
}
}
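A short worked trace of the pseudo code above, assuming a left outer join (left side is outer, right side is not) where left row L arrives before its first matching right row R:

1. +I(L) arrives on the left, no match on the right: emit +I[L, null] and store L with numOfAssociations = 0.
2. +I(R) arrives on the right and matches L: store R; because L still has numOfAssociations == 0, emit -D[L, null] to retract the null-padded result, update L's count to 1, then emit +I[L, R].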
| 3.26 |
flink_StreamingJoinOperator_output_rdh
|
// -------------------------------------------------------------------------------------
private void output(RowData inputRow, RowData otherRow, boolean inputIsLeft) {
if (inputIsLeft) {
outRow.replace(inputRow, otherRow);
} else {
outRow.replace(otherRow, inputRow);
}
collector.collect(outRow);
}
| 3.26 |
flink_OutputFormatProvider_of_rdh
|
/**
* Helper method for creating a static provider with a provided sink parallelism.
*/
static OutputFormatProvider of(OutputFormat<RowData> outputFormat, Integer sinkParallelism) {
return new OutputFormatProvider() {
@Override
public OutputFormat<RowData> createOutputFormat() {
return outputFormat;
}
@Override
public Optional<Integer> getParallelism() {
return Optional.ofNullable(sinkParallelism);
}
};
}
| 3.26 |
flink_LinkedOptionalMap_unwrapOptionals_rdh
|
/**
* Assuming all the entries of this map are present (keys and values), this method returns a
* map with these keys and values, stripped of their Optional wrappers. Note that if any of the
* keys or values are absent, this method throws an {@link IllegalStateException}.
*/
public LinkedHashMap<K, V> unwrapOptionals() {
final LinkedHashMap<K, V> unwrapped = CollectionUtil.newLinkedHashMapWithExpectedSize(underlyingMap.size());
for (Entry<String, KeyValue<K, V>> entry : underlyingMap.entrySet()) {
String namedKey = entry.getKey();
KeyValue<K, V> kv = entry.getValue();
if (kv.key == null) {
throw new IllegalStateException(("Missing key '" + namedKey) + "'");
}
if (kv.value == null) {
throw new IllegalStateException(("Missing value for the key '" + namedKey) + "'");
}
unwrapped.put(kv.key, kv.value);
}
return unwrapped;
}
| 3.26 |
flink_LinkedOptionalMap_isOrderedSubset_rdh
|
/**
* Returns {@code true} if the key names present at @left appear in prefix order at @right.
*/
public boolean isOrderedSubset() {
return isOrderedSubset;
}
| 3.26 |
flink_LinkedOptionalMap_absentKeysOrValues_rdh
|
/**
* Returns the key names of any keys or values that are absent.
*/
public Set<String> absentKeysOrValues() {
return underlyingMap.entrySet().stream().filter(LinkedOptionalMap::keyOrValueIsAbsent).map(Entry::getKey).collect(Collectors.toCollection(LinkedHashSet::new));
}
| 3.26 |
flink_LinkedOptionalMap_keyNames_rdh
|
/**
* Returns the key names added to this map.
*/
public Set<String> keyNames() {
return underlyingMap.keySet();
}
| 3.26 |
flink_LinkedOptionalMap_mergeRightIntoLeft_rdh
|
/**
* Tries to merge the keys and the values of @right into @left.
*/
public static <K, V> MergeResult<K, V> mergeRightIntoLeft(LinkedOptionalMap<K, V> left, LinkedOptionalMap<K, V> right) {
LinkedOptionalMap<K, V> merged = new LinkedOptionalMap<>(left);
merged.putAll(right);
return new MergeResult<>(merged, isLeftPrefixOfRight(left, right));
}
| 3.26 |
flink_LinkedOptionalMap_size_rdh
|
// --------------------------------------------------------------------------------------------------------
// API
// --------------------------------------------------------------------------------------------------------
public int size() {
return underlyingMap.size();
}
| 3.26 |
flink_LinkedOptionalMap_hasAbsentKeysOrValues_rdh
|
/**
* Checks whether there are entries with absent keys or values.
*/
public boolean hasAbsentKeysOrValues() {
for (Entry<String, KeyValue<K, V>> entry : underlyingMap.entrySet()) {
if (keyOrValueIsAbsent(entry)) {
return true;
}
}
return false;
}
| 3.26 |
flink_LogicalTableScan_create_rdh
|
// END FLINK MODIFICATION
/**
* Creates a LogicalTableScan.
*
* @param cluster
* Cluster
* @param relOptTable
* Table
* @param hints
* The hints
*/
public static LogicalTableScan create(RelOptCluster cluster, final RelOptTable relOptTable, List<RelHint> hints) {
final Table table = relOptTable.unwrap(Table.class);
final RelTraitSet traitSet = cluster.traitSetOf(Convention.NONE).replaceIfs(RelCollationTraitDef.INSTANCE, () -> {
if (table != null) {
return table.getStatistic().getCollations();
}
return ImmutableList.of();
});
return new LogicalTableScan(cluster, traitSet, hints, relOptTable);
}
| 3.26 |
flink_LogicalTableScan_explainTerms_rdh
|
// BEGIN FLINK MODIFICATION
// {@link #explainTerms} method should consider hints due to CALCITE-4581.
// This file should be removed once CALCITE-4581 is fixed.
@Override
public RelWriter explainTerms(RelWriter pw) {
return super.explainTerms(pw).itemIf("hints", getHints(), !getHints().isEmpty());
}
| 3.26 |
flink_ChannelReaderInputView_close_rdh
|
/**
* Closes this InputView, closing the underlying reader and returning all memory segments.
*
* @return A list containing all memory segments originally supplied to this view.
* @throws IOException
* Thrown, if the underlying reader could not be properly closed.
*/
@Override
public List<MemorySegment> close() throws IOException {
if (this.closed) {
throw new IllegalStateException("Already closed.");
}
this.closed = true;
// re-collect all memory segments
ArrayList<MemorySegment> list = this.freeMem;
final MemorySegment current = getCurrentSegment();
if (current != null) {
list.add(current);
}
clear();
// close the reader and gather all segments
final LinkedBlockingQueue<MemorySegment> queue = this.reader.getReturnQueue();
this.reader.close();
while (list.size() < this.numSegments) {
final MemorySegment m = queue.poll();
if (m == null) {
// we get null if the queue is empty. that should not be the case if the reader was
// properly closed.
throw new RuntimeException("Bug in ChannelReaderInputView: MemorySegments lost.");
}
list.add(m);
}
return list;
}
| 3.26 |
flink_ChannelReaderInputView_nextSegment_rdh
|
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
* Gets the next segment from the asynchronous block reader. If more requests are to be issued,
* the method first sends a new request with the current memory segment. If no more requests are
* pending, the method adds the segment to the reader's return queue, which thereby effectively
* collects all memory segments. Secondly, the method fetches the next non-consumed segment
* returned by the reader. If no further segments are available, this method throws an {@link EOFException}.
*
* @param current
* The memory segment used for the next request.
* @return The memory segment to read from next.
* @throws EOFException
* Thrown, if no further segments are available.
* @throws IOException
* Thrown, if an I/O error occurred while reading
* @see AbstractPagedInputView#nextSegment(org.apache.flink.core.memory.MemorySegment)
*/
@Override
protected MemorySegment nextSegment(MemorySegment current) throws IOException {
// check if we are at our end
if (this.inLastBlock) {
throw new EOFException();
}
// send a request first. if we have only a single segment, this same segment will be the one
// obtained in
// the next lines
if (current != null) {
sendReadRequest(current);
}
// get the next segment
final MemorySegment seg = this.reader.getNextReturnedBlock();
// check the header
if (seg.getShort(0) != ChannelWriterOutputView.HEADER_MAGIC_NUMBER) {
throw new IOException("The current block does not belong to a ChannelWriterOutputView / " + "ChannelReaderInputView: Wrong magic number.");
}
if ((seg.getShort(ChannelWriterOutputView.HEADER_FLAGS_OFFSET) &
ChannelWriterOutputView.FLAG_LAST_BLOCK) != 0) {
// last block
this.numRequestsRemaining = 0;
this.inLastBlock = true;
}
return seg;
}
| 3.26 |
flink_ChannelReaderInputView_sendReadRequest_rdh
|
/**
* Sends a new read request, if further requests remain. Otherwise, this method adds the
* segment directly to the reader's return queue.
*
* @param seg
* The segment to use for the read request.
* @throws IOException
* Thrown, if the reader is in error.
*/
protected void sendReadRequest(MemorySegment seg) throws IOException {
if (this.numRequestsRemaining != 0) {
this.reader.readBlock(seg);
if (this.numRequestsRemaining != (-1)) {
this.numRequestsRemaining--;
}
} else {
// directly add it to the end of the return queue
this.freeMem.add(seg);
}
}
| 3.26 |
flink_SpecificInputTypeStrategies_windowTimeIndicator_rdh
|
/**
* See {@link WindowTimeIndictorInputTypeStrategy}.
*/
public static InputTypeStrategy windowTimeIndicator() {
return new WindowTimeIndictorInputTypeStrategy(null);
}
| 3.26 |
flink_LocalInputPreferredSlotSharingStrategy_getExecutionVertices_rdh
|
/**
* The vertices are topologically sorted since {@link DefaultExecutionTopology#getVertices}
* are topologically sorted.
*/
private LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> getExecutionVertices() {
final LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> vertices = new LinkedHashMap<>();
for (SchedulingExecutionVertex executionVertex : topology.getVertices()) {
final List<SchedulingExecutionVertex> executionVertexGroup = vertices.computeIfAbsent(executionVertex.getId().getJobVertexId(), k -> new ArrayList<>());
executionVertexGroup.add(executionVertex);
}
return vertices;
}
| 3.26 |
flink_LocalInputPreferredSlotSharingStrategy_build_rdh
|
/**
* Build ExecutionSlotSharingGroups for all vertices in the topology. The
* ExecutionSlotSharingGroup of a vertex is determined in order below:
*
* <p>1. try finding an existing group of the corresponding co-location constraint.
*
* <p>2. try finding an available group of its producer vertex if the producer is in the
* same slot sharing group.
*
* <p>3. try finding any available group.
*
* <p>4. create a new group.
*/
private Map<ExecutionVertexID, ExecutionSlotSharingGroup> build() {
final LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> allVertices =
getExecutionVertices();
// loop on job vertices so that an execution vertex will not be added into a group
// if that group better fits another execution vertex
for (List<SchedulingExecutionVertex> executionVertices : allVertices.values()) {
final List<SchedulingExecutionVertex> remaining = tryFindOptimalAvailableExecutionSlotSharingGroupFor(executionVertices);
findAvailableOrCreateNewExecutionSlotSharingGroupFor(remaining);
updateConstraintToExecutionSlotSharingGroupMap(executionVertices);
}
return executionSlotSharingGroupMap;
}
| 3.26 |
flink_CompactCoordinator_coordinate_rdh
|
/**
* Do stable compaction coordination.
*/
private void coordinate(long checkpointId, Map<String, List<Path>> partFiles) {
Function<Path, Long> sizeFunc = path -> {
try {
return fileSystem.getFileStatus(path).getLen();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
// We need a stable compaction algorithm.
Map<String, List<List<Path>>> compactUnits = new HashMap<>();
partFiles.forEach((p, files) -> {
// Sort files for stable compaction algorithm.
files.sort(Comparator.comparing(Path::getPath));
compactUnits.put(p, BinPacking.pack(files, sizeFunc, targetFileSize));
});
// Now, send this stable pack list to compactor.
// NOTE, use broadcast emitting (Because it needs to emit checkpoint barrier),
// operators will pick its units by unit id and task id.
int unitId = 0;
for (Map.Entry<String, List<List<Path>>> unitsEntry : compactUnits.entrySet()) {
String partition = unitsEntry.getKey();
for (List<Path> unit : unitsEntry.getValue()) {
output.collect(new StreamRecord<>(new CompactionUnit(unitId, partition, unit)));
unitId++;
}
}
LOG.debug("Coordinate checkpoint-{}, compaction units are: {}", checkpointId, compactUnits);
// Emit checkpoint barrier
output.collect(new StreamRecord<>(new EndCompaction(checkpointId)));
}
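BinPacking.pack itself is not part of this snippet. Purely as an illustration of size-based packing over the already-sorted file list, a minimal greedy sketch could look like the following; this is an assumption about the strategy, not Flink's actual BinPacking implementation (assumes java.util.ArrayList, java.util.List, java.util.function.Function and Flink's Path):

// Greedy sketch: close a unit once its accumulated size reaches the target file size.
static List<List<Path>> packBySize(List<Path> sortedFiles, Function<Path, Long> sizeFunc, long targetFileSize) {
    List<List<Path>> units = new ArrayList<>();
    List<Path> current = new ArrayList<>();
    long currentSize = 0L;
    for (Path file : sortedFiles) {
        current.add(file);
        currentSize += sizeFunc.apply(file);
        if (currentSize >= targetFileSize) {
            units.add(current);
            current = new ArrayList<>();
            currentSize = 0L;
        }
    }
    if (!current.isEmpty()) {
        units.add(current); // last, possibly undersized unit
    }
    return units;
}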
| 3.26 |
flink_SplitsAssignment_assignment_rdh
|
/**
*
* @return A mapping from subtask ID to their split assignment.
*/
public Map<Integer, List<SplitT>> assignment() {
return assignment;
}
| 3.26 |
flink_StateMetaInfoSnapshotReadersWriters_getWriter_rdh
|
/**
* Returns the writer for {@link StateMetaInfoSnapshot}.
*/
@Nonnull
public static StateMetaInfoWriter getWriter() {
return CurrentWriterImpl.INSTANCE;
}
| 3.26 |
flink_StateMetaInfoSnapshotReadersWriters_getReader_rdh
|
/**
* Returns a reader for {@link StateMetaInfoSnapshot} with the requested state type and version
* number.
*
* @param readVersion
* the format version to read.
* @return the requested reader.
*/
@Nonnull
public static StateMetaInfoReader getReader(int readVersion) {
checkArgument(readVersion <= CURRENT_STATE_META_INFO_SNAPSHOT_VERSION, "Unsupported read version for state meta info [%s]", readVersion);
if (readVersion < 6) {
// versions before 5 still had different state meta info formats between keyed /
// operator state
throw new UnsupportedOperationException(String.format("No longer supported version [%d]. Please upgrade first to Flink 1.16. ", readVersion));
}
return CurrentReaderImpl.INSTANCE;
}
| 3.26 |
flink_LogicalTypeCasts_supportsAvoidingCast_rdh
|
/**
* Returns whether the source type can be safely interpreted as the target type. This allows
* avoiding casts by ignoring some logical properties. This is basically a relaxed {@link LogicalType#equals(Object)}.
*
* <p>In particular this means:
*
* <p>Atomic, non-string types (INT, BOOLEAN, ...) and user-defined structured types must be
* fully equal (i.e. {@link LogicalType#equals(Object)}). However, a NOT NULL type can be stored
* in NULL type but not vice versa.
*
* <p>Atomic, string types must be contained in the target type (e.g. CHAR(2) is contained in
* VARCHAR(3), but VARCHAR(2) is not contained in CHAR(3)). Same for binary strings.
*
* <p>Constructed types (ARRAY, ROW, MAP, etc.) and user-defined distinct type must be of same
* kind but ignore field names and other logical attributes. Structured and row kinds are
* compatible. However, all the children types ({@link LogicalType#getChildren()}) must be
* compatible.
*/
public static boolean supportsAvoidingCast(LogicalType sourceType, LogicalType targetType) {
final CastAvoidanceChecker checker = new CastAvoidanceChecker(sourceType);
return targetType.accept(checker);
}
| 3.26 |
flink_LogicalTypeCasts_supportsReinterpretCast_rdh
|
/**
* Returns whether the source type can be reinterpreted as the target type.
*
* <p>Reinterpret casts correspond to the SQL reinterpret_cast and represent the logic behind a
* {@code REINTERPRET_CAST(sourceType AS targetType)} operation.
*/
public static boolean supportsReinterpretCast(LogicalType sourceType, LogicalType targetType) {
if (sourceType.getTypeRoot() == targetType.getTypeRoot()) {
return true;
}
switch (sourceType.getTypeRoot()) {
case INTEGER :
switch (targetType.getTypeRoot()) {
case DATE :
case TIME_WITHOUT_TIME_ZONE :
case INTERVAL_YEAR_MONTH :
return true;
default : return false;
}
case BIGINT :
switch (targetType.getTypeRoot()) {
case TIMESTAMP_WITHOUT_TIME_ZONE :
case INTERVAL_DAY_TIME :
return true;
default :
return false;
}
case DATE :
case TIME_WITHOUT_TIME_ZONE :
case INTERVAL_YEAR_MONTH :
switch (targetType.getTypeRoot()) {
case INTEGER :
case BIGINT :
return true;
default :
return false;
}
case TIMESTAMP_WITHOUT_TIME_ZONE :
case INTERVAL_DAY_TIME :
return targetType.getTypeRoot() == BIGINT;
default :
return false;
}
}
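A hedged usage sketch of the rules enumerated in the switch above, assuming the IntType, DateType and BigIntType classes from flink-table's logical type model:

// Reinterpret casts are only allowed between the pairs listed in the switch above.
boolean intToDate = LogicalTypeCasts.supportsReinterpretCast(new IntType(), new DateType());     // true: INTEGER -> DATE
boolean intToBigint = LogicalTypeCasts.supportsReinterpretCast(new IntType(), new BigIntType()); // false: not in the INTEGER branch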
| 3.26 |
flink_LogicalTypeCasts_supportsExplicitCast_rdh
|
/**
* Returns whether the source type can be casted to the target type.
*
* <p>Explicit casts correspond to the SQL cast specification and represent the logic behind a
* {@code CAST(sourceType AS targetType)} operation. For example, it allows for converting most
* types of the {@link LogicalTypeFamily#PREDEFINED} family to types of the {@link LogicalTypeFamily#CHARACTER_STRING} family.
*/
public static boolean supportsExplicitCast(LogicalType sourceType, LogicalType targetType) {
return supportsCasting(sourceType, targetType, true);
}
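A brief illustration of the explicit-cast check, using the numeric-to-string case mentioned in the Javadoc; the concrete type instances are assumptions for the example.

    // CAST(INT AS STRING) is allowed explicitly even though it would not happen implicitly.
    boolean intToString = LogicalTypeCasts.supportsExplicitCast(new IntType(), new VarCharType(VarCharType.MAX_LENGTH));  // true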
| 3.26 |
flink_LogicalTypeCasts_supportsCasting_rdh
|
// --------------------------------------------------------------------------------------------
    private static boolean supportsCasting(LogicalType sourceType, LogicalType targetType, boolean allowExplicit) {
// a NOT NULL type cannot store a NULL type
// but it might be useful to cast explicitly with knowledge about the data
if ((sourceType.isNullable() && (!targetType.isNullable())) && (!allowExplicit)) {
return false;
}
// ignore nullability during compare
if (sourceType.copy(true).equals(targetType.copy(true))) {
return true;
}
final LogicalTypeRoot sourceRoot = sourceType.getTypeRoot();
final LogicalTypeRoot targetRoot = targetType.getTypeRoot();
        if (sourceRoot == NULL) {
// null can be cast to an arbitrary type
return true;
} else if ((sourceRoot == DISTINCT_TYPE) && (targetRoot == DISTINCT_TYPE)) {
// the two distinct types are not equal (from initial invariant), casting is not
// possible
return false;
} else if (sourceRoot == DISTINCT_TYPE) {
return supportsCasting(((DistinctType) (sourceType)).getSourceType(), targetType, allowExplicit);
} else if (targetRoot == DISTINCT_TYPE) {
return supportsCasting(sourceType, ((DistinctType) (targetType)).getSourceType(), allowExplicit);
} else if (sourceType.is(INTERVAL) && targetType.is(EXACT_NUMERIC)) {
// cast between interval and exact numeric is only supported if interval has a single
// field
return isSingleFieldInterval(sourceType);
} else if (sourceType.is(EXACT_NUMERIC) && targetType.is(INTERVAL)) {
// cast between interval and exact numeric is only supported if interval has a single
// field
            return isSingleFieldInterval(targetType);
        } else if ((sourceType.is(CONSTRUCTED) || sourceType.is(STRUCTURED_TYPE)) && (targetType.is(CONSTRUCTED) || targetType.is(STRUCTURED_TYPE))) {
if (sourceType.is(CONSTRUCTED) || targetType.is(CONSTRUCTED)) {
return m1(sourceType, targetType, allowExplicit);
}
return m0(sourceType, targetType, (s, t) -> supportsCasting(s, t, allowExplicit));
} else if ((((sourceRoot == RAW) && (!targetType.is(BINARY_STRING))) && (!targetType.is(CHARACTER_STRING))) || (targetRoot == RAW)) {
// the two raw types are not equal (from initial invariant), casting is not possible
return false;
} else if ((sourceRoot == SYMBOL) || (targetRoot == SYMBOL)) {
// the two symbol types are not equal (from initial invariant), casting is not possible
return false;
}
if (implicitCastingRules.get(targetRoot).contains(sourceRoot)) {
return true;
}
if (allowExplicit) {
return explicitCastingRules.get(targetRoot).contains(sourceRoot);
}
        return false;
    }
| 3.26 |
flink_PlanReference_fromJsonString_rdh
|
/**
* Create a reference starting from a JSON string.
*/
public static PlanReference fromJsonString(String jsonString) {
Objects.requireNonNull(jsonString, "Json string cannot be null");
return new ContentPlanReference(jsonString);
}
| 3.26 |
flink_PlanReference_fromResource_rdh
|
/**
* Create a reference from a file in the classpath.
*/
    public static PlanReference fromResource(ClassLoader classLoader, String resourcePath) {
        Objects.requireNonNull(classLoader, "ClassLoader cannot be null");
        Objects.requireNonNull(resourcePath, "Resource path cannot be null");
        return new ResourcePlanReference(classLoader, resourcePath);
}
| 3.26 |
flink_PlanReference_fromFile_rdh
|
/**
* Create a reference starting from a file path.
*/
public static PlanReference fromFile(File file) {
Objects.requireNonNull(file, "File cannot be null");
return new FilePlanReference(file);
}
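A combined usage sketch for the three PlanReference factories above; the JSON content, resource path, and file path are placeholders.

    PlanReference fromString = PlanReference.fromJsonString(compiledPlanJson);           // 'compiledPlanJson' is a placeholder String
    PlanReference fromClasspath = PlanReference.fromResource(
            Thread.currentThread().getContextClassLoader(), "plans/example-plan.json");  // placeholder resource path
    PlanReference fromDisk = PlanReference.fromFile(new File("/tmp/example-plan.json")); // placeholder file path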
| 3.26 |
flink_OggJsonFormatFactory_validateDecodingFormatOptions_rdh
|
/**
* Validator for ogg decoding format.
     */
    private static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions);
}
| 3.26 |
flink_OggJsonFormatFactory_validateEncodingFormatOptions_rdh
|
/**
* Validator for ogg encoding format.
*/
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
}
| 3.26 |
flink_RowTimeMiniBatchAssginerOperator_getMiniBatchStart_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Method to get the mini-batch start for a watermark.
*/
private static long getMiniBatchStart(long watermark, long interval) {
        return watermark - ((watermark + interval) % interval);
}
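A worked example of the formula above (plain arithmetic, no Flink classes involved): the watermark is rounded down to the closest multiple of the interval.

    long interval = 5L;
    assert 9L  - ((9L  + interval) % interval) == 5L;   // watermark 9  -> mini-batch start 5
    assert 10L - ((10L + interval) % interval) == 10L;  // watermark 10 -> mini-batch start 10
    assert 13L - ((13L + interval) % interval) == 10L;  // watermark 13 -> mini-batch start 10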
| 3.26 |
flink_ResourceManagerServiceImpl_startNewLeaderResourceManager_rdh
|
// ------------------------------------------------------------------------
// Internal
// ------------------------------------------------------------------------
@GuardedBy("lock")
private void startNewLeaderResourceManager(UUID newLeaderSessionID) throws Exception {
stopLeaderResourceManager();
this.leaderSessionID = newLeaderSessionID;
this.leaderResourceManager = resourceManagerFactory.createResourceManager(f0, newLeaderSessionID);
        final ResourceManager<?> newLeaderResourceManager = this.leaderResourceManager;
previousResourceManagerTerminationFuture.thenComposeAsync(ignore -> {
synchronized(lock) {
return startResourceManagerIfIsLeader(newLeaderResourceManager);
}
}, handleLeaderEventExecutor).thenAcceptAsync(isStillLeader -> {
if (isStillLeader) {
leaderElection.confirmLeadership(newLeaderSessionID, newLeaderResourceManager.getAddress());
            }
        }, ioExecutor);
}
| 3.26 |
flink_ResourceManagerServiceImpl_startResourceManagerIfIsLeader_rdh
|
/**
* Returns a future that completes as {@code true} if the resource manager is still leader and
* started, and {@code false} if it's no longer leader.
*/
@GuardedBy("lock")
private CompletableFuture<Boolean> startResourceManagerIfIsLeader(ResourceManager<?> resourceManager) {
        if (isLeader(resourceManager)) {
            resourceManager.start();
            forwardTerminationFuture(resourceManager);
return resourceManager.getStartedFuture().thenApply(ignore -> true);
} else {
return CompletableFuture.completedFuture(false);
}
}
| 3.26 |
flink_ResourceManagerServiceImpl_start_rdh
|
// ------------------------------------------------------------------------
// ResourceManagerService
// ------------------------------------------------------------------------
@Override
public void start() throws Exception {
synchronized(lock) {
if (running) {
LOG.debug("Resource manager service has already started.");
return;
}
running = true;
}
LOG.info("Starting resource manager service.");
leaderElection.startLeaderElection(this);
}
| 3.26 |
flink_ResourceManagerServiceImpl_grantLeadership_rdh
|
// ------------------------------------------------------------------------
// LeaderContender
// ------------------------------------------------------------------------
@Override
public void grantLeadership(UUID newLeaderSessionID) {
handleLeaderEventExecutor.execute(() -> {
synchronized(lock) {
if (!running) {
LOG.info("Resource manager service is not running. Ignore granting leadership with session ID {}.", newLeaderSessionID);
return;
}
LOG.info("Resource manager service is granted leadership with session id {}.", newLeaderSessionID);
try {
startNewLeaderResourceManager(newLeaderSessionID);
            } catch (Throwable t) {
fatalErrorHandler.onFatalError(new FlinkException("Cannot start resource manager.", t));
}
}
});
}
| 3.26 |
flink_PriorityQueueSetFactory_m0_rdh
|
/**
* Creates a {@link KeyGroupedInternalPriorityQueue}.
*
     * @param stateName
     * unique name associated with this queue.
     * @param byteOrderedElementSerializer
     * a serializer whose format is lexicographically
     * ordered in alignment with elementPriorityComparator.
     * @param allowFutureMetadataUpdates
     * whether to allow metadata updates in the future or not.
* @param <T>
* type of the stored elements.
* @return the queue with the specified unique name.
*/
    default <T extends HeapPriorityQueueElement & PriorityComparable<? super T> & Keyed<?>>
            KeyGroupedInternalPriorityQueue<T> m0(
                    @Nonnull String stateName,
                    @Nonnull TypeSerializer<T> byteOrderedElementSerializer,
                    boolean allowFutureMetadataUpdates) {
if (allowFutureMetadataUpdates) {
throw new UnsupportedOperationException(this.getClass().getName() + " doesn't support to allow to update future metadata.");
} else {
return create(stateName, byteOrderedElementSerializer);
}
}
| 3.26 |
flink_ExecutorNotifier_notifyReadyAsync_rdh
|
/**
     * Call the given callable periodically and notify the {@link #executorToNotify} to execute the
     * handler with each result.
*
     * <p>Note that when this method is invoked multiple times, it is possible that multiple
     * callables are executed concurrently, and so are the handlers. For example, even assuming that
     * both the workerExecutor and the executorToNotify are single threaded, the following code may
     * still throw a <code>ConcurrentModificationException</code>.
*
* <pre>{@code final List<Integer> list = new ArrayList<>();
*
     * // The callable adds an integer 1 to the list. While this appears to work at first glance,
     * // a ConcurrentModificationException may be thrown because the caller and the
* // handler may modify the list at the same time.
* notifier.notifyReadyAsync(
* () -> list.add(1),
* (ignoredValue, ignoredThrowable) -> list.add(2));}</pre>
*
     * <p>Instead, the above logic should be implemented as:
*
     * <pre>{@code // Modify the state in the handler.
     * notifier.notifyReadyAsync(() -> 1, (v, ignoredThrowable) -> {
     *     list.add(v);
     *     list.add(2);
     * });}</pre>
*
     * @param callable
     * the callable to execute before notifying the executorToNotify.
* @param handler
* the handler that handles the result from the callable.
* @param initialDelayMs
* the initial delay in ms before invoking the given callable.
* @param periodMs
* the interval in ms to invoke the callable.
     */
    public <T> void notifyReadyAsync(
            Callable<T> callable,
            BiConsumer<T, Throwable> handler,
            long initialDelayMs,
            long periodMs) {
workerExecutor.scheduleAtFixedRate(() -> {
try {
T result = callable.call();
executorToNotify.execute(() -> handler.accept(result, null));
} catch (Throwable t) {
executorToNotify.execute(() -> handler.accept(null, t));
}
}, initialDelayMs, periodMs, TimeUnit.MILLISECONDS);
}
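A hedged usage sketch of the periodic variant shown above; `discoverSplits`, `assignSplits`, and `handleError` are hypothetical stand-ins for caller-specific logic.

    // Poll for new work on the worker executor every second; the handler runs on executorToNotify,
    // so coordinator state should only be touched inside the handler.
    notifier.notifyReadyAsync(
            () -> discoverSplits(),          // runs on workerExecutor
            (splits, error) -> {             // runs on executorToNotify
                if (error != null) {
                    handleError(error);
                } else {
                    assignSplits(splits);
                }
            },
            0L,                              // initial delay in ms
            1000L);                          // period in ms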
| 3.26 |
flink_RequestedLocalProperties_reset_rdh
|
/**
* This method resets the local properties to a state where no properties are given.
*/
public void reset() {
this.ordering = null;
this.groupedFields = null;
}
| 3.26 |
flink_RequestedLocalProperties_getGroupedFields_rdh
|
/**
* Gets the grouped fields.
*
* @return The grouped fields, or <code>null</code> if nothing is grouped.
*/
public FieldSet getGroupedFields() {
return this.groupedFields;
}
| 3.26 |
flink_RequestedLocalProperties_hashCode_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
final int prime = 31;
int v10 = 1;
v10 = (prime * v10) + (this.ordering == null ? 0 : this.ordering.hashCode());
v10 = (prime * v10) + (this.groupedFields == null ? 0 : this.groupedFields.hashCode());
return v10;
}
| 3.26 |
flink_RequestedLocalProperties_getOrdering_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the key order.
*
* @return The key order, or <code>null</code> if nothing is ordered.
*/
public Ordering getOrdering() {
return ordering;
}
| 3.26 |
flink_RequestedLocalProperties_isTrivial_rdh
|
/**
     * Checks whether the properties in this object are trivial, i.e. only standard values.
*/
    public boolean isTrivial() {
        return (ordering == null) && (this.groupedFields == null);
}
| 3.26 |
flink_RequestedLocalProperties_filterBySemanticProperties_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Filters these properties by what can be preserved by the given SemanticProperties when
* propagated down to the given input.
*
* @param props
* The SemanticProperties which define which fields are preserved.
* @param input
* The index of the operator's input.
* @return The filtered RequestedLocalProperties
*/
    public RequestedLocalProperties filterBySemanticProperties(SemanticProperties props, int input) {
        // no semantic properties, all local properties are filtered
if (props == null) {
throw new NullPointerException("SemanticProperties may not be null.");
}
if (this.ordering != null) {
            Ordering newOrdering = new Ordering();
for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {
int v2 = this.ordering.getInvolvedIndexes().get(i);
int sourceField = props.getForwardingSourceField(input, v2);
if (sourceField >= 0) {
newOrdering.appendOrdering(sourceField, this.ordering.getType(i), this.ordering.getOrder(i));
} else {
return null;
}
}
return new RequestedLocalProperties(newOrdering);
        } else if (this.groupedFields != null) {
FieldSet newGrouping = new FieldSet();
// check, whether the local key grouping is preserved
            for (Integer targetField : this.groupedFields) {
int sourceField = props.getForwardingSourceField(input, targetField);
if (sourceField >= 0) {
newGrouping = newGrouping.addField(sourceField);
} else {
return null;
}
}
return new RequestedLocalProperties(newGrouping);
} else {
return null;
}
}
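A hedged sketch of the push-down behaviour described above; SingleInputSemanticProperties and FieldSet are assumed to come from Flink's common operator APIs, and the forwarded-field setup is purely illustrative.

    // The operator forwards input field 3 unchanged into output field 0.
    SingleInputSemanticProperties props = new SingleInputSemanticProperties();
    props.addForwardedField(3, 0);

    // A request to group the operator's output on field 0 ...
    RequestedLocalProperties requested = new RequestedLocalProperties(new FieldSet(0));
    // ... translates into a request to group input 0 on field 3.
    RequestedLocalProperties pushedDown = requested.filterBySemanticProperties(props, 0);
    // If output field 0 were not forwarded from any input field, the result would be null instead.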
| 3.26 |
flink_RequestedLocalProperties_setOrdering_rdh
|
/**
* Sets the order for these interesting local properties.
*
* @param ordering
* The order to set.
*/
public void setOrdering(Ordering ordering) {
this.ordering = ordering;
}
| 3.26 |
flink_RequestedLocalProperties_isMetBy_rdh
|
/**
     * Checks whether this set of properties, interpreted as interesting properties, is met by the given properties.
*
* @param other
* The properties for which to check whether they meet these properties.
* @return True, if the properties are met, false otherwise.
     */
    public boolean isMetBy(LocalProperties other) {
if (this.ordering != null) {
// we demand an ordering
return (other.getOrdering() != null) && this.ordering.isMetBy(other.getOrdering());
        } else if (this.groupedFields != null) {
// check if the other fields are unique
            if ((other.getGroupedFields() != null)
                    && other.getGroupedFields().isValidUnorderedPrefix(this.groupedFields)) {
                return true;
            } else {
return other.areFieldsUnique(this.groupedFields);
}
} else {
return true;
}
}
| 3.26 |
flink_RequestedLocalProperties_m0_rdh
|
/**
* Sets the fields that are grouped in these data properties.
*
* @param groupedFields
* The fields that are grouped in these data properties.
*/
public void m0(FieldSet groupedFields) {
this.groupedFields = groupedFields;
}
| 3.26 |
flink_ExponentialDelayRestartBackoffTimeStrategy_calculateJitterBackoffMS_rdh
|
/**
* Calculate jitter offset to avoid thundering herd scenario. The offset range increases with
* the number of restarts.
*
     * <p>E.g. for a backoff time of 8 with jitter factor 0.25, it generates a random number in the range [-2, 2].
*
* @return random value in interval [-n, n], where n represents jitter * current backoff
*/
private long calculateJitterBackoffMS() {
if (jitterFactor == 0) {
return 0;
} else {
long offset = ((long) (currentBackoffMS * jitterFactor));
return ThreadLocalRandom.current().nextLong(-offset, offset + 1);
}
}
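A worked example of the jitter range from the Javadoc above; the values are illustrative and the code is plain Java (java.util.concurrent.ThreadLocalRandom).

    long currentBackoffMS = 8L;
    double jitterFactor = 0.25;
    long offset = (long) (currentBackoffMS * jitterFactor);                   // 2
    long jitter = ThreadLocalRandom.current().nextLong(-offset, offset + 1);  // uniform in [-2, 2]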
| 3.26 |
flink_SqlConstraintEnforcement_symbol_rdh
|
/**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
}
| 3.26 |
flink_TimeIndicatorTypeInfo_createSerializer_rdh
|
// this replaces the effective serializer by a LongSerializer
// it is a hacky but efficient solution to keep the object creation overhead low but still
// be compatible with the corresponding SqlTimestampTypeInfo
@Override
    @SuppressWarnings("unchecked")
    public TypeSerializer<Timestamp> createSerializer(ExecutionConfig executionConfig) {
return ((TypeSerializer) (LongSerializer.INSTANCE));
}
| 3.26 |
flink_SingleElementIterator_set_rdh
|
/**
* Resets the element. After this call, the iterator has one element available, which is the
* given element.
*
* @param current
* The element to make available to the iterator.
*/
public void set(E current) {
this.current = current;
this.available = true;
}
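A hedged usage sketch; it assumes, as in Flink's runtime utilities, that SingleElementIterator also implements Iterable, and `process` is a placeholder for downstream handling.

    // Reuse a single iterator instance across records to avoid per-record allocations.
    SingleElementIterator<String> singleton = new SingleElementIterator<>();
    singleton.set("first-record");
    for (String value : singleton) {   // iterates exactly once, yielding "first-record"
        process(value);
    }
    singleton.set("second-record");    // makes the iterator usable again for the next record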
| 3.26 |