name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
flink_PartitionRequestListenerManager_removeExpiration_rdh | /**
* Removes the expired partition request listeners and adds them to the given timeoutListeners.
*
* @param now
* the timestamp
* @param timeout
* the timeout in milliseconds
* @param timeoutListeners
* the expired partition request listeners
*/
public void removeExpiration(long now, long timeout, Collection<PartitionRequestListener> timeoutListeners) {
Iterator<Map.Entry<InputChannelID, PartitionRequestListener>> iterator = f0.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<InputChannelID, PartitionRequestListener> entry = iterator.next();
PartitionRequestListener partitionRequestListener = entry.getValue();
if ((now - partitionRequestListener.getCreateTimestamp()) > timeout) {
timeoutListeners.add(partitionRequestListener);
iterator.remove();
}
}
} | 3.26 |
flink_ProcessingTimeoutTrigger_of_rdh | /**
* Creates a new {@link ProcessingTimeoutTrigger} that fires when the inner trigger is fired or
* when the timeout timer fires.
*
* <p>For example: {@code ProcessingTimeoutTrigger.of(CountTrigger.of(3), 100, false, true)}
* will create a CountTrigger with a timeout of 100 millis. So, if the first record arrives at
* time {@code t} and the second record arrives at time {@code t+50}, the trigger will fire
* when the third record arrives or when the time is {@code t+100} (timeout).
*
* @param nestedTrigger
* the nested {@link Trigger}
* @param timeout
* the timeout interval
* @param resetTimerOnNewRecord
* each time a new element arrives, reset the timer and start a new
* one
* @param shouldClearOnTimeout
* whether to call {@link Trigger#clear(Window, TriggerContext)}
* when the processing-time timer fires
* @param <T>
* The type of the element.
* @param <W>
* The type of {@link Window Windows} on which this trigger can operate.
* @return {@link ProcessingTimeoutTrigger} with the above configuration.
*/
public static <T, W extends Window> ProcessingTimeoutTrigger<T, W> of(Trigger<T, W> nestedTrigger, Duration timeout, boolean resetTimerOnNewRecord, boolean shouldClearOnTimeout) {
return new ProcessingTimeoutTrigger<>(nestedTrigger, timeout.toMillis(), resetTimerOnNewRecord, shouldClearOnTimeout);
} | 3.26 |
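A minimal usage sketch for the factory above, assuming a keyed stream `input`, a processing-time window assigner, and a reduce function are defined elsewhere; those names and the time values are placeholders, not part of the snippet.

```java
// Hedged sketch: `input`, the key selector, and the window size are illustrative only.
input.keyBy(record -> record.getKey())
        .window(TumblingProcessingTimeWindows.of(Time.seconds(30)))
        .trigger(ProcessingTimeoutTrigger.of(
                CountTrigger.of(3),          // fire once 3 elements have arrived ...
                Duration.ofMillis(100),      // ... or 100 ms after the first element
                false,                       // do not restart the timer on every new record
                true))                       // clear the nested trigger state on timeout
        .reduce((a, b) -> b);
```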
flink_RootExceptionHistoryEntry_fromGlobalFailure_rdh | /**
* Creates a {@code RootExceptionHistoryEntry} based on the passed {@link ErrorInfo}. No
* concurrent failures will be added.
*
* @param errorInfo
* The failure information that shall be used to initialize the {@code RootExceptionHistoryEntry}.
* @return The {@code RootExceptionHistoryEntry} instance.
* @throws NullPointerException
* if {@code errorInfo} is {@code null} or the passed info does not
* contain a {@code Throwable}.
* @throws IllegalArgumentException
* if the passed {@code timestamp} is not bigger than {@code 0}.
*/
public static RootExceptionHistoryEntry fromGlobalFailure(ErrorInfo errorInfo) {
Preconditions.checkNotNull(errorInfo, "errorInfo");
return fromGlobalFailure(errorInfo.getException(), errorInfo.getTimestamp(), FailureEnricherUtils.EMPTY_FAILURE_LABELS, Collections.emptyList());
} | 3.26 |
flink_RootExceptionHistoryEntry_fromFailureHandlingResultSnapshot_rdh | /**
* Creates a {@code RootExceptionHistoryEntry} based on the passed {@link FailureHandlingResultSnapshot}.
*
* @param snapshot
* The reason for the failure.
* @return The {@code RootExceptionHistoryEntry} instance.
* @throws NullPointerException
* if {@code cause} or {@code failingTaskName} are {@code null}.
* @throws IllegalArgumentException
* if the {@code timestamp} of the passed {@code FailureHandlingResult} is not bigger than {@code 0}.
*/
public static RootExceptionHistoryEntry fromFailureHandlingResultSnapshot(FailureHandlingResultSnapshot snapshot) {
String failingTaskName = null;
TaskManagerLocation taskManagerLocation = null;
if (snapshot.getRootCauseExecution().isPresent()) {
final Execution rootCauseExecution = snapshot.getRootCauseExecution().get();
failingTaskName = rootCauseExecution.getVertexWithAttempt();
taskManagerLocation = rootCauseExecution.getAssignedResourceLocation();
}
return createRootExceptionHistoryEntry(snapshot.getRootCause(), snapshot.getTimestamp(), snapshot.getFailureLabels(), failingTaskName, taskManagerLocation, snapshot.getConcurrentlyFailedExecution());
} | 3.26 |
flink_BinaryInMemorySortBuffer_getIterator_rdh | // -------------------------------------------------------------------------
/**
* Gets an iterator over all records in this buffer in their logical order.
*
* @return An iterator returning the records in their logical order.
*/
public MutableObjectIterator<BinaryRowData> getIterator() {
return new MutableObjectIterator<BinaryRowData>() {
private final int size = size();
private int current = 0;
private int currentSegment = 0;
private int currentOffset = 0;
private MemorySegment currentIndexSegment = sortIndex.get(0);
@Override
public BinaryRowData next(BinaryRowData target) {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer = this.currentIndexSegment.getLong(this.currentOffset);
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(target, pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
@Override
public BinaryRowData next() {
throw new RuntimeException("Not support!");
}
};
} | 3.26 |
flink_BinaryInMemorySortBuffer_reset_rdh | // -------------------------------------------------------------------------
// Memory Segment
// -------------------------------------------------------------------------
/**
* Resets the sort buffer back to the state where it is empty. All contained data is discarded.
*/
public void reset() {
// reset all offsets
this.numRecords = 0;
this.currentSortIndexOffset = 0;
this.currentDataBufferOffset = 0;
this.sortIndexBytes = 0;
// return all memory
returnToSegmentPool();
// grab first buffers
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
this.f0.reset();
} | 3.26 |
flink_BinaryInMemorySortBuffer_createBuffer_rdh | /**
* Creates a memory sorter that works in `insert` mode.
*/
public static BinaryInMemorySortBuffer createBuffer(NormalizedKeyComputer normalizedKeyComputer, AbstractRowDataSerializer<RowData> inputSerializer, BinaryRowDataSerializer serializer, RecordComparator comparator, MemorySegmentPool memoryPool) {
checkArgument(memoryPool.freePages() >= MIN_REQUIRED_BUFFERS);
int totalNumBuffers = memoryPool.freePages();
ArrayList<MemorySegment> v1 = new ArrayList<>(16);
return new BinaryInMemorySortBuffer(normalizedKeyComputer, inputSerializer, serializer, comparator, v1, new SimpleCollectingOutputView(v1, memoryPool, memoryPool.pageSize()), memoryPool, totalNumBuffers);
} | 3.26 |
flink_BinaryInMemorySortBuffer_write_rdh | /**
* Writes a given record to this sort buffer. The written record will be appended and take the
* last logical position.
*
* @param record
* The record to be written.
* @return True, if the record was successfully written, false, if the sort buffer was full.
* @throws IOException
* Thrown, if an error occurred while serializing the record into the
* buffers.
*/
public boolean write(RowData record) throws IOException {
// check whether we need a new memory segment for the sort index
if (!checkNextIndexOffset()) {
return false;
}
// serialize the record into the data buffers
int skip;
try {
skip = this.inputSerializer.serializeToPages(record, this.f0);
} catch (EOFException e) {
return false;
}
final long newOffset = this.f0.getCurrentOffset();
long currOffset = currentDataBufferOffset + skip;
writeIndexAndNormalizedKey(record, currOffset);
this.currentDataBufferOffset = newOffset;
return true;
} | 3.26 |
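The write/read cycle of the buffer above can be sketched as follows; the key computer, comparator, serializers, and memory pool are assumed to be created elsewhere, and `hasMoreInput()`/`nextRecord()` are hypothetical helpers.

```java
// Hedged sketch of the append -> iterate -> reset cycle.
BinaryInMemorySortBuffer buffer =
        BinaryInMemorySortBuffer.createBuffer(
                keyComputer, inputSerializer, binarySerializer, comparator, memoryPool);

// append records until the buffer reports that it is full
while (hasMoreInput() && buffer.write(nextRecord())) {
    // the record was serialized into the data buffers and indexed
}

// read the records back in their logical order
MutableObjectIterator<BinaryRowData> iterator = buffer.getIterator();
BinaryRowData reuse = binarySerializer.createInstance();
while (iterator.next(reuse) != null) {
    // consume the row
}

buffer.reset(); // discard all contained data and reuse the memory
```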
flink_MergingSharedSlotProfileRetrieverFactory_getSlotProfile_rdh | /**
* Computes a {@link SlotProfile} of an execution slot sharing group.
*
* <p>The preferred locations of the {@link SlotProfile} is a union of the preferred
* locations of all executions sharing the slot. The input locations within the bulk are
* ignored to avoid cyclic dependencies within the region, e.g. in case of all-to-all
* pipelined connections, so that the allocations do not block each other.
*
* <p>The preferred {@link AllocationID}s of the {@link SlotProfile} are all previous {@link AllocationID}s of all executions sharing the slot.
*
* <p>The {@link SlotProfile} also refers to all reserved {@link AllocationID}s of the job.
*
* @param executionSlotSharingGroup
* executions sharing the slot.
* @param physicalSlotResourceProfile
* {@link ResourceProfile} of the slot.
* @return {@link SlotProfile} to allocate for the {@code executionSlotSharingGroup}.
*/
@Override
public SlotProfile getSlotProfile(ExecutionSlotSharingGroup executionSlotSharingGroup, ResourceProfile physicalSlotResourceProfile) {
Collection<AllocationID> priorAllocations = new HashSet<>();
Collection<TaskManagerLocation> preferredLocations = new ArrayList<>();
for (ExecutionVertexID execution : executionSlotSharingGroup.getExecutionVertexIds()) {
priorAllocationIdRetriever.apply(execution).ifPresent(priorAllocations::add);
preferredLocations.addAll(preferredLocationsRetriever.getPreferredLocations(execution, producersToIgnore));
}
return SlotProfile.priorAllocation(physicalSlotResourceProfile, physicalSlotResourceProfile, preferredLocations, priorAllocations, reservedAllocationIds);
} | 3.26 |
flink_TableFactoryService_findAllInternal_rdh | /**
* Finds all table factories of the given class, property map, and classloader.
*
* @param factoryClass
* desired factory class
* @param properties
* properties that describe the factory configuration
* @param classLoader
* classloader for service loading
* @param <T>
* factory class type
* @return all matching factories
*/
private static <T extends TableFactory> List<T> findAllInternal(Class<T> factoryClass, Map<String, String> properties, Optional<ClassLoader> classLoader) {
List<TableFactory> tableFactories = discoverFactories(classLoader);
return filter(tableFactories, factoryClass, properties);
} | 3.26 |
flink_TableFactoryService_extractWildcardPrefixes_rdh | /**
* Extracts the prefixes of property keys that end with a wildcard (e.g., "format.*" yields "format.").
*/
private static List<String> extractWildcardPrefixes(List<String> propertyKeys) {
return propertyKeys.stream().filter(p -> p.endsWith("*")).map(s -> s.substring(0, s.length() - 1)).collect(Collectors.toList());
} | 3.26 |
flink_TableFactoryService_discoverFactories_rdh | /**
* Searches for factories using Java service providers.
*
* @return all factories in the classpath
*/
private static List<TableFactory> discoverFactories(Optional<ClassLoader> classLoader) {
try {
List<TableFactory> result = new LinkedList<>();
ClassLoader cl = classLoader.orElse(Thread.currentThread().getContextClassLoader());
ServiceLoader.load(TableFactory.class, cl).iterator().forEachRemaining(result::add);
return result;
} catch (ServiceConfigurationError e) {
LOG.error("Could not load service provider for table factories.", e);
throw new TableException("Could not load service provider for table factories.", e);
}
} | 3.26 |
flink_TableFactoryService_normalizeSupportedProperties_rdh | /**
* Prepares the supported properties of a factory to be used for match operations.
*/
private static Tuple2<List<String>, List<String>> normalizeSupportedProperties(TableFactory factory) {
List<String> supportedProperties = factory.supportedProperties();
if (supportedProperties == null) {
throw new TableException(String.format("Supported properties of factory '%s' must not be null.", factory.getClass().getName()));
}
List<String> supportedKeys = supportedProperties.stream().map(String::toLowerCase).collect(Collectors.toList());
// extract wildcard prefixes
List<String> wildcards = extractWildcardPrefixes(supportedKeys);
return Tuple2.of(supportedKeys, wildcards);
} | 3.26 |
flink_TableFactoryService_filterByContext_rdh | /**
* Filters for factories with matching context.
*
* @return all matching factories
*/
private static <T extends TableFactory> List<T> filterByContext(Class<T> factoryClass, Map<String, String> properties, List<T> classFactories) {
List<T> matchingFactories = new ArrayList<>();
ContextBestMatched<T> bestMatched = null;
for (T factory : classFactories) {
Map<String, String> requestedContext = normalizeContext(factory);
Map<String, String> plainContext = new HashMap<>(requestedContext);
// we remove the version for now until we have the first backwards compatibility case
// with the version we can provide mappings in case the format changes
plainContext.remove(CONNECTOR_PROPERTY_VERSION);
plainContext.remove(FORMAT_PROPERTY_VERSION);
plainContext.remove(FactoryUtil.PROPERTY_VERSION.key());
// check if required context is met
Map<String, Tuple2<String, String>> mismatchedProperties = new HashMap<>();
Map<String, String> missingProperties = new HashMap<>();
for (Map.Entry<String, String> e : plainContext.entrySet()) {
if (properties.containsKey(e.getKey())) {
String fromProperties = properties.get(e.getKey());
if (!Objects.equals(fromProperties, e.getValue())) {
mismatchedProperties.put(e.getKey(), new Tuple2<>(e.getValue(), fromProperties));
}
} else {
missingProperties.put(e.getKey(), e.getValue());
}
}
int matchedSize = (plainContext.size() - mismatchedProperties.size()) - missingProperties.size();
if (matchedSize == plainContext.size()) {
matchingFactories.add(factory);
} else if ((bestMatched == null) || (matchedSize > bestMatched.matchedSize)) {
bestMatched = new ContextBestMatched<>(factory, matchedSize, mismatchedProperties, missingProperties);
}
}
if (matchingFactories.isEmpty()) {
String bestMatchedMessage = null;
if ((bestMatched != null) && (bestMatched.matchedSize > 0)) {
StringBuilder builder = new StringBuilder();
builder.append(bestMatched.factory.getClass().getName());
if (bestMatched.missingProperties.size() > 0) {
builder.append("\nMissing properties:");
bestMatched.missingProperties.forEach((k, v) -> builder.append("\n").append(k).append("=").append(v));
}
if (bestMatched.mismatchedProperties.size() > 0) {
builder.append("\nMismatched properties:");
bestMatched.mismatchedProperties.entrySet().stream().filter(e -> e.getValue().f1 != null).forEach(e -> builder.append(String.format("\n'%s' expects '%s', but is '%s'", e.getKey(), e.getValue().f0, e.getValue().f1)));
}
bestMatchedMessage = builder.toString();
}
// noinspection unchecked
throw new NoMatchingTableFactoryException("Required context properties mismatch.", bestMatchedMessage, factoryClass, ((List<TableFactory>) (classFactories)), properties);
}
return matchingFactories;
} | 3.26 |
flink_TableFactoryService_findSingleInternal_rdh | /**
* Finds a table factory of the given class, property map, and classloader.
*
* @param factoryClass
* desired factory class
* @param properties
* properties that describe the factory configuration
* @param classLoader
* classloader for service loading
* @param <T>
* factory class type
* @return the matching factory
*/
private static <T extends TableFactory> T findSingleInternal(Class<T> factoryClass, Map<String, String> properties, Optional<ClassLoader> classLoader) {
List<TableFactory> tableFactories = discoverFactories(classLoader);
List<T> filtered = filter(tableFactories, factoryClass, properties);
if (filtered.size() > 1) {
throw new AmbiguousTableFactoryException(filtered, factoryClass, tableFactories, properties);
} else {
return filtered.get(0);
}
} | 3.26 |
flink_TableFactoryService_findAll_rdh | /**
* Finds all table factories of the given class and property map.
*
* @param factoryClass
* desired factory class
* @param propertyMap
* properties that describe the factory configuration
* @param <T>
* factory class type
* @return all the matching factories
*/
public static <T extends TableFactory> List<T> findAll(Class<T> factoryClass, Map<String, String> propertyMap) {
return findAllInternal(factoryClass, propertyMap, Optional.empty());
} | 3.26 |
flink_TableFactoryService_filter_rdh | /**
* Filters found factories by factory class and with matching context.
*/
private static <T extends TableFactory> List<T> filter(List<TableFactory> foundFactories, Class<T> factoryClass, Map<String, String> properties) {
Preconditions.checkNotNull(factoryClass);
Preconditions.checkNotNull(properties);
List<T> classFactories = filterByFactoryClass(factoryClass, properties, foundFactories);
List<T> contextFactories = filterByContext(factoryClass, properties, classFactories);
return filterBySupportedProperties(factoryClass, properties, classFactories, contextFactories);
} | 3.26 |
flink_TableFactoryService_normalizeContext_rdh | /**
* Prepares the properties of a context to be used for match operations.
*/
private static Map<String, String> normalizeContext(TableFactory factory) {
Map<String, String> requiredContext = factory.requiredContext();
if (requiredContext == null) {
throw new TableException(String.format("Required context of factory '%s' must not be null.", factory.getClass().getName()));
}
return requiredContext.keySet().stream().collect(Collectors.toMap(String::toLowerCase, requiredContext::get));
} | 3.26 |
flink_TableFactoryService_filterBySupportedProperties_rdh | /**
* Filters the matching class factories by supported properties.
*/
private static <T extends TableFactory> List<T> filterBySupportedProperties(Class<T> factoryClass, Map<String, String> properties, List<T> classFactories, List<T> contextFactories) {
final List<String> plainGivenKeys = new LinkedList<>();
properties.keySet().forEach(k -> {
// replace arrays with wildcard
String key = k.replaceAll(".\\d+", ".#");
// ignore duplicates
if (!plainGivenKeys.contains(key)) {
plainGivenKeys.add(key);
}
});
List<T> supportedFactories = new LinkedList<>();
Tuple2<T, List<String>> bestMatched = null;
for (T factory : contextFactories) {
Set<String> requiredContextKeys = normalizeContext(factory).keySet();
Tuple2<List<String>, List<String>> tuple2 = normalizeSupportedProperties(factory);
// ignore context keys
List<String> givenContextFreeKeys = plainGivenKeys.stream().filter(p -> !requiredContextKeys.contains(p)).collect(Collectors.toList());
boolean v29 = true;
List<String> unsupportedKeys = new ArrayList<>();
for (String k : givenContextFreeKeys) {
if (!(tuple2.f0.contains(k) || tuple2.f1.stream().anyMatch(k::startsWith))) {
v29 = false;
unsupportedKeys.add(k);
}
}
if (v29) {
supportedFactories.add(factory);
} else if ((bestMatched == null) || (unsupportedKeys.size() < bestMatched.f1.size())) {
bestMatched = new Tuple2<>(factory, unsupportedKeys);
}
}
if (supportedFactories.isEmpty()) {
String bestMatchedMessage = null;
if (bestMatched != null) {
bestMatchedMessage = String.format("%s\nUnsupported property keys:\n%s", bestMatched.f0.getClass().getName(), String.join("\n", bestMatched.f1));
}
// noinspection unchecked
throw new NoMatchingTableFactoryException("No factory supports all properties.", bestMatchedMessage, factoryClass, ((List<TableFactory>) (classFactories)), properties);
}
return supportedFactories;
} | 3.26 |
flink_TableFactoryService_find_rdh | /**
* Finds a table factory of the given class, property map, and classloader.
*
* @param factoryClass
* desired factory class
* @param propertyMap
* properties that describe the factory configuration
* @param classLoader
* classloader for service loading
* @param <T>
* factory class type
* @return the matching factory
*/
public static <T extends TableFactory> T find(Class<T> factoryClass, Map<String, String> propertyMap, ClassLoader classLoader) {
Preconditions.checkNotNull(classLoader);
return findSingleInternal(factoryClass, propertyMap, Optional.of(classLoader));
} | 3.26 |
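A hedged usage sketch of the lookup entry point above; `MyConnectorFactory` and the property keys are placeholders rather than real Flink identifiers, and the factory must be registered as a Java service provider to be discovered.

```java
// Hypothetical lookup: the keys must match MyConnectorFactory#requiredContext()
// and be covered by MyConnectorFactory#supportedProperties().
Map<String, String> properties = new HashMap<>();
properties.put("connector.type", "my-connector");
properties.put("connector.path", "/tmp/data");

MyConnectorFactory factory =
        TableFactoryService.find(
                MyConnectorFactory.class,
                properties,
                Thread.currentThread().getContextClassLoader());
```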
flink_IteratorSourceReaderBase_start_rdh | // ------------------------------------------------------------------------
@Override
public void start() {
// request a split if we don't have one
if (remainingSplits.isEmpty()) {
context.sendSplitRequest();
}
start(context);
} | 3.26 |
flink_NoOpResultSubpartitionView_getNextBuffer_rdh | /**
* A dummy implementation of the {@link ResultSubpartitionView}.
*/
public class NoOpResultSubpartitionView implements ResultSubpartitionView {
@Nullable
public BufferAndBacklog getNextBuffer() {
return null;
} | 3.26 |
flink_CheckpointStatsCache_tryGet_rdh | /**
* Tries to look up a checkpoint by its ID in the cache.
*
* @param checkpointId
* ID of the checkpoint to look up.
* @return The checkpoint or <code>null</code> if checkpoint not found.
*/
public AbstractCheckpointStats tryGet(long checkpointId) {
if (cache != null) {
return cache.getIfPresent(checkpointId);
} else {
return null;
}
} | 3.26 |
flink_CheckpointStatsCache_tryAdd_rdh | /**
* Tries to add the checkpoint to the cache.
*
* @param checkpoint
* Checkpoint to be added.
*/
public void tryAdd(AbstractCheckpointStats checkpoint) {
// Don't add in progress checkpoints as they will be replaced by their
// completed/failed version eventually.
if (((cache != null) && (checkpoint != null)) && (!checkpoint.getStatus().isInProgress())) {
cache.put(checkpoint.getCheckpointId(), checkpoint);
}
} | 3.26 |
flink_KvStateLocationRegistry_notifyKvStateUnregistered_rdh | /**
* Notifies the registry about an unregistered KvState instance.
*
* @param jobVertexId
* JobVertexID the KvState instance belongs to
* @param keyGroupRange
* Key group range the KvState instance belongs to
* @param registrationName
* Name under which the KvState has been registered
* @throws IllegalArgumentException
* If another operator registered the state instance
* @throws IllegalArgumentException
* If the registration name is not known
*/
public void notifyKvStateUnregistered(JobVertexID jobVertexId, KeyGroupRange keyGroupRange, String registrationName) {
KvStateLocation location = lookupTable.get(registrationName);
if (location != null) {
// Duplicate name if vertex IDs don't match
if (!location.getJobVertexId().equals(jobVertexId)) {
throw new IllegalArgumentException((((("Another operator (" + location.getJobVertexId()) + ") registered the KvState ") + "under '") + registrationName) + "'.");
}
location.unregisterKvState(keyGroupRange);
if (location.getNumRegisteredKeyGroups() == 0) {
lookupTable.remove(registrationName);
}
} else {
throw new IllegalArgumentException((("Unknown registration name '" + registrationName) + "'. ") + "Probably registration/unregistration race.");
}
} | 3.26 |
flink_KvStateLocationRegistry_notifyKvStateRegistered_rdh | /**
* Notifies the registry about a registered KvState instance.
*
* @param jobVertexId
* JobVertexID the KvState instance belongs to
* @param keyGroupRange
* Key group range the KvState instance belongs to
* @param registrationName
* Name under which the KvState has been registered
* @param kvStateId
* ID of the registered KvState instance
* @param kvStateServerAddress
* Server address where to find the KvState instance
* @throws IllegalArgumentException
* If JobVertexID does not belong to job
* @throws IllegalArgumentException
* If state has been registered with same name by another
* operator.
* @throws IndexOutOfBoundsException
* If key group index is out of bounds.
*/
public void notifyKvStateRegistered(JobVertexID jobVertexId, KeyGroupRange keyGroupRange, String registrationName, KvStateID kvStateId, InetSocketAddress kvStateServerAddress) {
KvStateLocation location = lookupTable.get(registrationName);
if (location == null) {
// First registration for this operator, create the location info
ExecutionJobVertex vertex = jobVertices.get(jobVertexId);
if (vertex != null) {
int parallelism = vertex.getMaxParallelism();
location = new KvStateLocation(jobId, jobVertexId, parallelism, registrationName);
lookupTable.put(registrationName, location);
} else {
throw new IllegalArgumentException("Unknown JobVertexID " + jobVertexId);
}
}
// Duplicated name if vertex IDs don't match
if (!location.getJobVertexId().equals(jobVertexId)) {
IllegalStateException duplicate = new IllegalStateException(((("Registration name clash. KvState with name '" + registrationName) + "' has already been registered by another operator (") + location.getJobVertexId()) + ").");
ExecutionJobVertex vertex = jobVertices.get(jobVertexId);
if (vertex != null) {
vertex.fail(new SuppressRestartsException(duplicate));
}
throw duplicate;
}
location.registerKvState(keyGroupRange, kvStateId, kvStateServerAddress);
} | 3.26 |
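The registration lifecycle implemented above could be driven as in the following sketch; the IDs, key-group range, state name, and server address are illustrative values, and the key-group resolution method is noted as an assumption.

```java
// Hedged sketch of register -> lookup -> unregister on an existing registry instance.
JobVertexID vertexId = new JobVertexID();
KeyGroupRange range = new KeyGroupRange(0, 127);

registry.notifyKvStateRegistered(
        vertexId, range, "average-state", new KvStateID(),
        new InetSocketAddress("10.0.0.1", 9069));

KvStateLocation location = registry.getKvStateLocation("average-state");
// location.getKvStateServerAddress(keyGroupIndex) -- assumed API for resolving a key group

registry.notifyKvStateUnregistered(vertexId, range, "average-state");
```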
flink_KvStateLocationRegistry_getKvStateLocation_rdh | /**
* Returns the {@link KvStateLocation} for the registered KvState instance or <code>null</code>
* if no location information is available.
*
* @param registrationName
* Name under which the KvState instance is registered.
* @return Location information or <code>null</code>.
*/
public KvStateLocation getKvStateLocation(String registrationName) {
return lookupTable.get(registrationName);
} | 3.26 |
flink_VarBinaryType_ofEmptyLiteral_rdh | /**
* The SQL standard defines that character string literals are allowed to be zero-length strings
* (i.e., to contain no characters) even though it is not permitted to declare a type of length
* zero. For consistent behavior, the same logic applies to binary strings. This also has
* implications for variable-length binary strings during type inference because any fixed-length
* binary string should be convertible to a variable-length one.
*
* <p>This method enables this special kind of binary string.
*
* <p>Zero-length binary strings have no serializable string representation.
*/
public static VarBinaryType ofEmptyLiteral() {
return new VarBinaryType(EMPTY_LITERAL_LENGTH, false);
} | 3.26 |
flink_EventsGenerator_nextInvalid_rdh | /**
* Creates an event for an illegal state transition of one of the internal state machines. If
* the generator has not yet started any state machines (for example, because no call to {@link #next(int, int)} was made yet), this will return null.
*
* @return An event for an illegal state transition, or null, if not possible.
*/
@Nullable
public Event nextInvalid() {
final Iterator<Entry<Integer, State>> iter = states.entrySet().iterator();
if (iter.hasNext()) {
final Entry<Integer, State> entry = iter.next();
State currentState = entry.getValue();
int address = entry.getKey();
iter.remove();
EventType event = currentState.randomInvalidTransition(rnd);
return new Event(event, address);
} else {
return null;
}
} | 3.26 |
flink_EventsGenerator_next_rdh | // ------------------------------------------------------------------------
/**
* Creates a new random event. This method randomly picks either one of its currently running
* state machines, or starts a new state machine for a random IP address.
*
* <p>With {@link #errorProb} probability, the generated event will be from an illegal state
* transition of one of the currently running state machines.
*
* @param minIp
* The lower bound for the range from which a new IP address may be picked.
* @param maxIp
* The upper bound for the range from which a new IP address may be picked.
* @return A next random event.
*/
public Event next(int minIp, int maxIp) {
final double p = rnd.nextDouble();
if ((p * 1000) >= states.size()) {
// create a new state machine
final int nextIP = rnd.nextInt(maxIp - minIp) + minIp;
if (!states.containsKey(nextIP)) {
EventTypeAndState eventAndState = State.Initial.randomTransition(rnd);
states.put(nextIP, eventAndState.state);
return new Event(eventAndState.eventType, nextIP);
} else {
// collision on IP address, try again
return next(minIp, maxIp);
}
} else {
// pick an existing state machine
// skip over some elements in the linked map, then take the next
// update it, and insert it at the end
int numToSkip = Math.min(20, rnd.nextInt(states.size()));
Iterator<Entry<Integer, State>> iter = states.entrySet().iterator();
for (int i = numToSkip; i > 0; --i) {
iter.next();
}
Entry<Integer, State> entry = iter.next();
State currentState = entry.getValue();
int address = entry.getKey();
iter.remove();
if (p < errorProb) {
EventType event = currentState.randomInvalidTransition(rnd);
return new Event(event, address);
} else {
EventTypeAndState eventAndState = currentState.randomTransition(rnd);
if (!eventAndState.state.isTerminal()) {
// reinsert
states.put(address, eventAndState.state);
}
return new Event(eventAndState.eventType, address);
}
}
} | 3.26 |
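A driver loop for the generator above might look like the following sketch; the error-probability constructor, the IP range, and the `emit` sink are assumptions for illustration.

```java
// Hedged sketch: generate a stream of state-machine events.
EventsGenerator generator = new EventsGenerator(0.01); // assumed ctor taking errorProb

for (int i = 0; i < 1_000; i++) {
    Event event = generator.next(1, 1_000); // advance or start a machine for an IP in [1, 1000)
    emit(event);                            // hypothetical sink
}

Event invalid = generator.nextInvalid();    // may be null if no machine is currently running
```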
flink_TypeSerializerSchemaCompatibility_getReconfiguredSerializer_rdh | /**
* Gets the reconfigured serializer. This throws an exception if {@link #isCompatibleWithReconfiguredSerializer()} is {@code false}.
*/
public TypeSerializer<T> getReconfiguredSerializer() {
Preconditions.checkState(isCompatibleWithReconfiguredSerializer(), "It is only possible to get a reconfigured serializer if the compatibility type is %s, but the type is %s", Type.COMPATIBLE_WITH_RECONFIGURED_SERIALIZER, resultType);
return reconfiguredNewSerializer;
} | 3.26 |
flink_TypeSerializerSchemaCompatibility_isIncompatible_rdh | /**
* Returns whether or not the type of the compatibility is {@link Type#INCOMPATIBLE}.
*
* @return whether or not the type of the compatibility is {@link Type#INCOMPATIBLE}.
*/
public boolean isIncompatible() {
return resultType == Type.INCOMPATIBLE;
} | 3.26 |
flink_TypeSerializerSchemaCompatibility_isCompatibleAfterMigration_rdh | /**
* Returns whether or not the type of the compatibility is {@link Type#COMPATIBLE_AFTER_MIGRATION}.
*
* @return whether or not the type of the compatibility is {@link Type#COMPATIBLE_AFTER_MIGRATION}.
*/
public boolean isCompatibleAfterMigration() {
return resultType == Type.COMPATIBLE_AFTER_MIGRATION;
} | 3.26 |
flink_TypeSerializerSchemaCompatibility_incompatible_rdh | /**
* Returns a result that indicates there is no possible way for the new serializer to be
* usable. This normally indicates that there is no common Java class between what the
* previous bytes can be deserialized into and what can be written by the new serializer.
*
* <p>In this case, there is no possible way for the new serializer to continue to be used, even
* with migration. Recovery of the Flink job will fail.
*
* @return a result that indicates incompatibility between the new and previous serializer.
*/
public static <T> TypeSerializerSchemaCompatibility<T> incompatible() {
return new TypeSerializerSchemaCompatibility<>(Type.INCOMPATIBLE, null);
} | 3.26 |
flink_TypeSerializerSchemaCompatibility_isCompatibleWithReconfiguredSerializer_rdh | /**
* Returns whether or not the type of the compatibility is {@link Type#COMPATIBLE_WITH_RECONFIGURED_SERIALIZER}.
*
* @return whether or not the type of the compatibility is {@link Type#COMPATIBLE_WITH_RECONFIGURED_SERIALIZER}.
*/
public boolean isCompatibleWithReconfiguredSerializer() {
return resultType == Type.COMPATIBLE_WITH_RECONFIGURED_SERIALIZER;
} | 3.26 |
flink_TypeSerializerSchemaCompatibility_isCompatibleAsIs_rdh | /**
* Returns whether or not the type of the compatibility is {@link Type#COMPATIBLE_AS_IS}.
*
* @return whether or not the type of the compatibility is {@link Type#COMPATIBLE_AS_IS}.
*/
public boolean isCompatibleAsIs() {
return resultType == Type.COMPATIBLE_AS_IS;
} | 3.26 |
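The query methods above are typically consumed by branching on the result during state restore; in this sketch the snapshot/serializer names and the `resolveSchemaCompatibility` call are assumptions about the surrounding API.

```java
// Hedged restore-path sketch.
TypeSerializerSchemaCompatibility<MyType> compat =
        previousSerializerSnapshot.resolveSchemaCompatibility(newSerializer); // assumed call

if (compat.isCompatibleAsIs()) {
    // keep using newSerializer directly
} else if (compat.isCompatibleWithReconfiguredSerializer()) {
    TypeSerializer<MyType> reconfigured = compat.getReconfiguredSerializer();
    // continue with the reconfigured serializer
} else if (compat.isCompatibleAfterMigration()) {
    // read state with the previous serializer, then rewrite it with newSerializer
} else if (compat.isIncompatible()) {
    throw new IllegalStateException("Serializer schema is incompatible; restore cannot proceed.");
}
```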
flink_ByteValueComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_RestoredCheckpointStats_getExternalPath_rdh | /**
* Returns the external path if this checkpoint was persisted externally.
*
* @return External path of this checkpoint or <code>null</code>.
*/
@Nullable
public String getExternalPath() {
return externalPath;
} | 3.26 |
flink_RestoredCheckpointStats_getRestoreTimestamp_rdh | /**
* Returns the timestamp when the checkpoint was restored.
*
* @return Timestamp when the checkpoint was restored.
*/
public long getRestoreTimestamp() {
return restoreTimestamp;
} | 3.26 |
flink_RestoredCheckpointStats_getCheckpointId_rdh | /**
* Returns the ID of this checkpoint.
*
* @return ID of this checkpoint.
*/
public long getCheckpointId() {
return checkpointId;
} | 3.26 |
flink_RestoredCheckpointStats_getProperties_rdh | /**
* Returns the properties of the restored checkpoint.
*
* @return Properties of the restored checkpoint.
*/
public CheckpointProperties getProperties() {
return props;
} | 3.26 |
flink_HiveParserTypeCheckCtx_setOuterRR_rdh | /**
*
* @param outerRR
* the outerRR to set
*/
public void setOuterRR(HiveParserRowResolver outerRR) {
this.outerRR = outerRR;
} | 3.26 |
flink_HiveParserTypeCheckCtx_getOuterRR_rdh | /**
*
* @return the outerRR
*/
public HiveParserRowResolver getOuterRR() {
return outerRR;
} | 3.26 |
flink_HiveParserTypeCheckCtx_setAllowStatefulFunctions_rdh | /**
*
* @param allowStatefulFunctions
* whether to allow stateful UDF invocations
*/
public void setAllowStatefulFunctions(boolean allowStatefulFunctions) {
this.allowStatefulFunctions = allowStatefulFunctions;
} | 3.26 |
flink_HiveParserTypeCheckCtx_getError_rdh | /**
*
* @return the error
*/
public String getError() {
return error;
} | 3.26 |
flink_HiveParserTypeCheckCtx_getAllowStatefulFunctions_rdh | /**
*
* @return whether to allow stateful UDF invocations
*/
public boolean getAllowStatefulFunctions() {
return allowStatefulFunctions;
} | 3.26 |
flink_HiveParserTypeCheckCtx_getUnparseTranslator_rdh | /**
*
* @return the unparseTranslator
*/
public HiveParserUnparseTranslator getUnparseTranslator() {
return unparseTranslator;
} | 3.26 |
flink_HiveParserTypeCheckCtx_getSubqueryToRelNode_rdh | /**
*
* @return the subqueryToRelNode
*/
public Map<HiveParserASTNode, RelNode> getSubqueryToRelNode() {
return subqueryToRelNode;
} | 3.26 |
flink_HiveParserTypeCheckCtx_m0_rdh | /**
*
* @param unparseTranslator
* the unparseTranslator to set
*/
public void m0(HiveParserUnparseTranslator unparseTranslator) {
this.unparseTranslator = unparseTranslator;
} | 3.26 |
flink_HiveParserTypeCheckCtx_setInputRR_rdh | /**
*
* @param inputRR
* the inputRR to set
*/
public void setInputRR(HiveParserRowResolver inputRR) {
this.inputRR = inputRR;
} | 3.26 |
flink_HiveParserTypeCheckCtx_setSubqueryToRelNode_rdh | /**
*
* @param subqueryToRelNode
* the subqueryToRelNode to set
*/
public void setSubqueryToRelNode(Map<HiveParserASTNode, RelNode> subqueryToRelNode) {
this.subqueryToRelNode = subqueryToRelNode;
} | 3.26 |
flink_HiveParserTypeCheckCtx_getInputRR_rdh | /**
*
* @return the inputRR
*/
public HiveParserRowResolver getInputRR() {
return inputRR;
} | 3.26 |
flink_HiveParserTypeCheckCtx_setError_rdh | /**
*
* @param error
* the error to set
*/
public void setError(String error, HiveParserASTNode errorSrcNode) {
if (LOG.isDebugEnabled()) {
// Log the callstack from which the error has been set.
LOG.debug((("Setting error: [" + error) + "] from ") + (errorSrcNode == null ? "null" : errorSrcNode.toStringTree()), new Exception());
}
this.error = error;
this.errorSrcNode = errorSrcNode;
} | 3.26 |
flink_InternalOperatorIOMetricGroup_reuseOutputMetricsForTask_rdh | /**
* Causes the containing task to use this operator's output record counter.
*/
public void reuseOutputMetricsForTask() {
TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup();
taskIO.reuseRecordsOutputCounter(this.numRecordsOut);
} | 3.26 |
flink_InternalOperatorIOMetricGroup_reuseInputMetricsForTask_rdh | /**
* Causes the containing task to use this operator's input record counter.
*/
public void reuseInputMetricsForTask() {
TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup();
taskIO.reuseRecordsInputCounter(this.numRecordsIn);
} | 3.26 |
flink_SingleInputOperator_accept_rdh | // --------------------------------------------------------------------------------------------
/**
* Accepts the visitor and applies it to this instance. The visitor's pre-visit method is called
* and, if returning <tt>true</tt>, the visitor is recursively applied on the single input.
* After the recursion returned, the post-visit method is called.
*
* @param visitor
* The visitor.
* @see org.apache.flink.util.Visitable#accept(org.apache.flink.util.Visitor)
*/
@Override
public void accept(Visitor<Operator<?>> visitor) {
if (visitor.preVisit(this)) {
this.input.accept(visitor);
for (Operator<?> c : this.broadcastInputs.values()) {
c.accept(visitor);
}
visitor.postVisit(this);
}
} | 3.26 |
flink_SingleInputOperator_setInput_rdh | /**
* Sets the given operator as the input to this operator.
*
* @param input
* The operator to use as the input.
*/
public void setInput(Operator<IN> input) {
this.input = input;
}
/**
* Sets the input to the union of the given operators.
*
* @param input
* The operator(s) that form the input.
* @deprecated This method will be removed in future versions. Use the {@link Union} | 3.26 |
flink_SingleInputOperator_clearInputs_rdh | /**
* Removes all inputs.
*/
public void clearInputs() {
this.input = null;
} | 3.26 |
flink_SingleInputOperator_getSemanticProperties_rdh | // --------------------------------------------------------------------------------------------
public SingleInputSemanticProperties getSemanticProperties() {
return this.semanticProperties;
} | 3.26 |
flink_SingleInputOperator_getInput_rdh | /**
* Returns the input operator or data source, or null, if none is set.
*
* @return This operator's input.
*/
public Operator<IN> getInput() {
return this.input;
} | 3.26 |
flink_SingleInputOperator_getOperatorInfo_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the information about the operators input/output types.
*/
@Override
@SuppressWarnings("unchecked")
public UnaryOperatorInformation<IN, OUT> getOperatorInfo() {
return ((UnaryOperatorInformation<IN, OUT>) (this.operatorInfo));
} | 3.26 |
flink_SingleInputOperator_getNumberOfInputs_rdh | // --------------------------------------------------------------------------------------------
@Override
public final int getNumberOfInputs() {
return 1;
} | 3.26 |
flink_BatchExecLegacySink_validateType_rdh | /**
* Validates that the class represented by the given data type is static and globally accessible.
*
* @param dataType
* type to check
* @throws TableException
* if type does not meet these criteria
*/
private void validateType(DataType dataType) {
Class<?> clazz = dataType.getConversionClass();
if (clazz == null) {
clazz = ClassLogicalTypeConverter.getDefaultExternalClassForType(dataType.getLogicalType());
}
if (((clazz.isMemberClass() && (!Modifier.isStatic(clazz.getModifiers()))) || (!Modifier.isPublic(clazz.getModifiers()))) || (clazz.getCanonicalName() == null)) {
throw new TableException(String.format("Class '%s' described in type information '%s' must be static and globally accessible.", clazz, dataType));
}
} | 3.26 |
flink_DataTypeFactoryImpl_createSerializerExecutionConfig_rdh | // --------------------------------------------------------------------------------------------
/**
* Creates a lazy {@link ExecutionConfig} that contains options for {@link TypeSerializer}s with
* information from existing {@link ExecutionConfig} (if available) enriched with table {@link ReadableConfig}.
*/
private static Supplier<ExecutionConfig> createSerializerExecutionConfig(ClassLoader classLoader, ReadableConfig config, ExecutionConfig executionConfig) {
return () -> {
final ExecutionConfig newExecutionConfig = new ExecutionConfig();
if (executionConfig != null) {
if (executionConfig.isForceKryoEnabled()) {
newExecutionConfig.enableForceKryo();
}
if (executionConfig.isForceAvroEnabled()) {
newExecutionConfig.enableForceAvro();
}
executionConfig.getDefaultKryoSerializers().forEach((c, s) -> newExecutionConfig.addDefaultKryoSerializer(c, s.getSerializer()));
executionConfig.getDefaultKryoSerializerClasses().forEach(newExecutionConfig::addDefaultKryoSerializer);
executionConfig.getRegisteredKryoTypes().forEach(newExecutionConfig::registerKryoType);
executionConfig.getRegisteredTypesWithKryoSerializerClasses().forEach(newExecutionConfig::registerTypeWithKryoSerializer);
executionConfig.getRegisteredTypesWithKryoSerializers().forEach((c, s) -> newExecutionConfig.registerTypeWithKryoSerializer(c, s.getSerializer()));
}
newExecutionConfig.configure(config, classLoader);
return newExecutionConfig;
};
} | 3.26 |
flink_DefaultCheckpointPlanCalculator_collectTaskRunningStatus_rdh | /**
* Collects the task running status for each job vertex.
*
* @return The task running status for each job vertex.
*/
@VisibleForTesting
Map<JobVertexID, BitSet> collectTaskRunningStatus() {
Map<JobVertexID, BitSet> runningStatusByVertex = new HashMap<>();
for (ExecutionJobVertex vertex : jobVerticesInTopologyOrder) {
BitSet runningTasks = new BitSet(vertex.getTaskVertices().length);
for (int i = 0; i < vertex.getTaskVertices().length; ++i) {
if (!vertex.getTaskVertices()[i].getCurrentExecutionAttempt().isFinished()) {
runningTasks.set(i);
}
}
runningStatusByVertex.put(vertex.getJobVertexId(), runningTasks);
}
return runningStatusByVertex;
} | 3.26 |
flink_DefaultCheckpointPlanCalculator_calculateAfterTasksFinished_rdh | /**
* Calculates the checkpoint plan after some tasks have finished. We iterate the job graph to
* find the tasks that are still running but have no running precedent tasks.
*
* @return The plan of this checkpoint.
*/
private CheckpointPlan calculateAfterTasksFinished() {
// First collect the task running status into BitSet so that we could
// do JobVertex level judgement for some vertices and avoid time-consuming
// access to volatile isFinished flag of Execution.
Map<JobVertexID, BitSet> taskRunningStatusByVertex = collectTaskRunningStatus();
List<Execution> tasksToTrigger = new ArrayList<>();
List<Execution> tasksToWaitFor = new ArrayList<>();
List<ExecutionVertex> tasksToCommitTo = new ArrayList<>();
List<Execution> finishedTasks = new ArrayList<>();
List<ExecutionJobVertex> fullyFinishedJobVertex = new ArrayList<>();
for (ExecutionJobVertex jobVertex : jobVerticesInTopologyOrder) {
BitSet taskRunningStatus = taskRunningStatusByVertex.get(jobVertex.getJobVertexId());
if (taskRunningStatus.cardinality() == 0) {
fullyFinishedJobVertex.add(jobVertex);
for (ExecutionVertex task : jobVertex.getTaskVertices()) {
finishedTasks.add(task.getCurrentExecutionAttempt());
}
continue;
}
List<JobEdge> prevJobEdges = jobVertex.getJobVertex().getInputs();
// this is an optimization: we determine at the JobVertex level if some tasks can even
// be eligible for being in the "triggerTo" set.
boolean someTasksMustBeTriggered = someTasksMustBeTriggered(taskRunningStatusByVertex, prevJobEdges);
for (int i = 0; i < jobVertex.getTaskVertices().length; ++i) {
ExecutionVertex task = jobVertex.getTaskVertices()[i];
if (taskRunningStatus.get(task.getParallelSubtaskIndex())) {
tasksToWaitFor.add(task.getCurrentExecutionAttempt());
tasksToCommitTo.add(task);
if (someTasksMustBeTriggered) {
boolean hasRunningPrecedentTasks = hasRunningPrecedentTasks(task, prevJobEdges, taskRunningStatusByVertex);
if (!hasRunningPrecedentTasks) {
tasksToTrigger.add(task.getCurrentExecutionAttempt());
}
}
} else {
finishedTasks.add(task.getCurrentExecutionAttempt());
}
}
}
return new DefaultCheckpointPlan(Collections.unmodifiableList(tasksToTrigger), Collections.unmodifiableList(tasksToWaitFor), Collections.unmodifiableList(tasksToCommitTo), Collections.unmodifiableList(finishedTasks), Collections.unmodifiableList(fullyFinishedJobVertex), allowCheckpointsAfterTasksFinished);
} | 3.26 |
flink_DefaultCheckpointPlanCalculator_checkTasksStarted_rdh | /**
* Checks if all tasks to trigger are already in the RUNNING state. This method should be
* called from JobMaster main thread executor.
*
* @throws CheckpointException
* if some tasks to trigger have not turned into RUNNING yet.
*/
private void checkTasksStarted(List<Execution> toTrigger) throws CheckpointException {
for (Execution execution : toTrigger) {
if (execution.getState() != ExecutionState.RUNNING) {
throw new CheckpointException(String.format("Checkpoint triggering task %s of job %s is not being executed at the moment. " + "Aborting checkpoint.", execution.getVertex().getTaskNameWithSubtaskIndex(), jobId), CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
}
}
} | 3.26 |
flink_DefaultCheckpointPlanCalculator_calculateWithAllTasksRunning_rdh | /**
* Computes the checkpoint plan when all tasks are running. It simply marks all the source
* tasks as tasks to trigger and all tasks as tasks to wait for and commit to.
*
* @return The plan of this checkpoint.
*/
private CheckpointPlan calculateWithAllTasksRunning() {
List<Execution> executionsToTrigger = sourceTasks.stream().map(ExecutionVertex::getCurrentExecutionAttempt).collect(Collectors.toList());
List<Execution> tasksToWaitFor = createTaskToWaitFor(allTasks);
return new DefaultCheckpointPlan(Collections.unmodifiableList(executionsToTrigger), Collections.unmodifiableList(tasksToWaitFor), Collections.unmodifiableList(allTasks), Collections.emptyList(), Collections.emptyList(), allowCheckpointsAfterTasksFinished);
} | 3.26 |
flink_DefaultCheckpointPlanCalculator_checkAllTasksInitiated_rdh | /**
* Checks if all tasks are attached with the current Execution already. This method should be
* called from JobMaster main thread executor.
*
* @throws CheckpointException
* if some tasks do not have attached Execution.
*/
private void checkAllTasksInitiated() throws CheckpointException {
for (ExecutionVertex task : allTasks) {
if (task.getCurrentExecutionAttempt() == null) {
throw new CheckpointException(String.format("task %s of job %s is not being executed at the moment. Aborting checkpoint.", task.getTaskNameWithSubtaskIndex(), jobId), CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
}
}
} | 3.26 |
flink_DefaultCheckpointPlanCalculator_hasActiveUpstreamVertex_rdh | /**
* Every task must have active upstream tasks if
*
* <ol>
* <li>ALL_TO_ALL connection and some predecessors are still running.
* <li>POINTWISE connection and all predecessors are still running.
* </ol>
*
* @param distribution
* The distribution pattern between the upstream vertex and the current
* vertex.
* @param upstreamRunningTasks
* The running tasks of the upstream vertex.
* @return Whether every task of the current vertex is connected to some active predecessors.
*/
private boolean hasActiveUpstreamVertex(DistributionPattern distribution, BitSet upstreamRunningTasks) {
return ((distribution == DistributionPattern.ALL_TO_ALL) && (upstreamRunningTasks.cardinality() > 0)) || ((distribution == DistributionPattern.POINTWISE) && (upstreamRunningTasks.cardinality() == upstreamRunningTasks.size()));
} | 3.26 |
flink_BoundedFIFOQueue_size_rdh | /**
* Returns the number of currently stored elements.
*
* @return The number of currently stored elements.
*/
public int size() {
return this.elements.size();
} | 3.26 |
flink_BoundedFIFOQueue_iterator_rdh | /**
* Returns the {@code BoundedFIFOQueue}'s {@link Iterator}.
*
* @return The queue's {@code Iterator}.
*/
@Override public Iterator<T> iterator() {
return elements.iterator();
} | 3.26 |
flink_RocksDBNativeMetricMonitor_setProperty_rdh | /**
* Updates the value of metricView if the reference is still valid.
*/
private void setProperty(RocksDBNativePropertyMetricView metricView) {
if (metricView.isClosed()) {
return;
}
try {
synchronized(lock) {
if (rocksDB != null) {
long value = rocksDB.getLongProperty(metricView.handle, metricView.property);
metricView.setValue(value);
}
}
} catch (RocksDBException e) {
metricView.close();
LOG.warn("Failed to read native metric {} from RocksDB.", metricView.property, e);
}
} | 3.26 |
flink_RocksDBNativeMetricMonitor_registerStatistics_rdh | /**
* Register gauges to pull native metrics for the database.
*/
private void registerStatistics() {
if (statistics != null) {
for (TickerType tickerType : options.getMonitorTickerTypes()) {
metricGroup.gauge(String.format("rocksdb.%s", tickerType.name().toLowerCase()), new RocksDBNativeStatisticsMetricView(tickerType));
}
}
} | 3.26 |
flink_RocksDBNativeMetricMonitor_registerColumnFamily_rdh | /**
* Register gauges to pull native metrics for the column family.
*
* @param columnFamilyName
* group name for the new gauges
* @param handle
* native handle to the column family
*/
void registerColumnFamily(String columnFamilyName, ColumnFamilyHandle handle) {
boolean columnFamilyAsVariable = options.isColumnFamilyAsVariable();
MetricGroup group = (columnFamilyAsVariable) ? metricGroup.addGroup(COLUMN_FAMILY_KEY, columnFamilyName) : metricGroup.addGroup(columnFamilyName);
for (String property : options.getProperties()) {
RocksDBNativePropertyMetricView gauge = new RocksDBNativePropertyMetricView(handle, property);
group.gauge(property, gauge);
}
} | 3.26 |
flink_RocksDBResourceContainer_getReadOptions_rdh | /**
* Gets the RocksDB {@link ReadOptions} to be used for read operations.
*/
public ReadOptions getReadOptions() {
ReadOptions opt = new ReadOptions();
handlesToClose.add(opt);
// add user-defined options factory, if specified
if (f0 != null) {
opt = f0.createReadOptions(opt, handlesToClose);
}
return opt;
} | 3.26 |
flink_RocksDBResourceContainer_createBaseCommonDBOptions_rdh | /**
* Create a {@link DBOptions} for RocksDB, including some common settings.
*/
DBOptions createBaseCommonDBOptions() {
return new DBOptions().setUseFsync(false).setStatsDumpPeriodSec(0);
} | 3.26 |
flink_RocksDBResourceContainer_internalGetOption_rdh | /**
* Gets a value for the given option from the pre-defined and configurable option settings.
* The priority relationship is as follows.
*
* <p>Configured value > pre-defined value > default value.
*
* @param option
* the wanted option
* @param <T>
* the value type
* @return the final value for the option according to the priority above.
*/
@Nullable
private <T> T internalGetOption(ConfigOption<T> option) {
return configuration.getOptional(option).orElseGet(() -> predefinedOptions.getValue(option));
} | 3.26 |
flink_RocksDBResourceContainer_getWriteOptions_rdh | /**
* Gets the RocksDB {@link WriteOptions} to be used for write operations.
*/
public WriteOptions getWriteOptions() {
// Disable WAL by default
WriteOptions opt = new WriteOptions().setDisableWAL(true);
handlesToClose.add(opt);
// add user-defined options factory, if specified
if (f0 != null) {
opt = f0.createWriteOptions(opt, handlesToClose);
}
return opt;
} | 3.26 |
flink_RocksDBResourceContainer_getColumnOptions_rdh | /**
* Gets the RocksDB {@link ColumnFamilyOptions} to be used for all RocksDB instances.
*/
public ColumnFamilyOptions getColumnOptions() {
// initial options from common profile
ColumnFamilyOptions opt = createBaseCommonColumnOptions();
handlesToClose.add(opt);
// load configurable options on top of pre-defined profile
setColumnFamilyOptionsFromConfigurableOptions(opt, handlesToClose);
// add user-defined options, if specified
if (f0 != null) {
opt = f0.createColumnOptions(opt, handlesToClose);
}
// if sharedResources is non-null, use the block cache from it and
// set necessary options for performance consideration with memory control
if (sharedResources != null) {
final RocksDBSharedResources rocksResources = sharedResources.getResourceHandle();
final Cache blockCache = rocksResources.getCache();
TableFormatConfig tableFormatConfig = opt.tableFormatConfig();
BlockBasedTableConfig blockBasedTableConfig;
if (tableFormatConfig == null) {
blockBasedTableConfig = new BlockBasedTableConfig();
} else {
Preconditions.checkArgument(tableFormatConfig instanceof BlockBasedTableConfig, "We currently only support BlockBasedTableConfig When bounding total memory.");
blockBasedTableConfig = ((BlockBasedTableConfig) (tableFormatConfig));
}
if (rocksResources.isUsingPartitionedIndexFilters() && overwriteFilterIfExist(blockBasedTableConfig)) {
blockBasedTableConfig.setIndexType(IndexType.kTwoLevelIndexSearch);
blockBasedTableConfig.setPartitionFilters(true);
blockBasedTableConfig.setPinTopLevelIndexAndFilter(true);
}
blockBasedTableConfig.setBlockCache(blockCache);
blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true);
blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true);
opt.setTableFormatConfig(blockBasedTableConfig);
}
return opt;
} | 3.26 |
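The user-defined options factory referenced as `f0` in the snippets above exposes the `create*Options` hooks invoked here; a sketch of such a factory is shown below, assuming the public interface name `RocksDBOptionsFactory` and standard RocksDB setters, with illustrative tuning values.

```java
// Hedged sketch of a custom options factory plugged into the resource container.
public class CustomOptionsFactory implements RocksDBOptionsFactory {

    @Override
    public DBOptions createDBOptions(DBOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
        return currentOptions.setMaxBackgroundJobs(4);
    }

    @Override
    public ColumnFamilyOptions createColumnOptions(
            ColumnFamilyOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
        BloomFilter filter = new BloomFilter(10, false);
        handlesToClose.add(filter); // let the container close the native handle
        return currentOptions.setTableFormatConfig(
                new BlockBasedTableConfig().setFilterPolicy(filter));
    }
}
```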
flink_RocksDBResourceContainer_resolveFileLocation_rdh | /**
* Verify log file location.
*
* @param logFilePath
* Path to log file
* @return File or null if not a valid log file
*/
private File resolveFileLocation(String logFilePath) {
File logFile = new File(logFilePath);
return logFile.exists() && logFile.canRead() ? logFile : null;
} | 3.26 |
flink_RocksDBResourceContainer_createBaseCommonColumnOptions_rdh | /**
* Create a {@link ColumnFamilyOptions} for RocksDB, including some common settings.
*/
ColumnFamilyOptions createBaseCommonColumnOptions() {
return new ColumnFamilyOptions();
} | 3.26 |
flink_RocksDBResourceContainer_getDbOptions_rdh | /**
* Gets the RocksDB {@link DBOptions} to be used for RocksDB instances.
*/
public DBOptions getDbOptions() {
// initial options from common profile
DBOptions opt = createBaseCommonDBOptions();
handlesToClose.add(opt);
// load configurable options on top of pre-defined profile
setDBOptionsFromConfigurableOptions(opt);
// add user-defined options factory, if specified
if (f0 != null) {
opt = f0.createDBOptions(opt, handlesToClose);
}
// add necessary default options
opt = opt.setCreateIfMissing(true);
// if sharedResources is non-null, use the write buffer manager from it.
if (sharedResources != null) {
opt.setWriteBufferManager(sharedResources.getResourceHandle().getWriteBufferManager());
}
if (enableStatistics) {
Statistics statistics = new Statistics();
opt.setStatistics(statistics);
handlesToClose.add(statistics);
}
return opt;
} | 3.26 |
flink_RocksDBResourceContainer_relocateDefaultDbLogDir_rdh | /**
* Relocates the default log directory of RocksDB with the Flink log directory. Finds the Flink
* log directory using log.file Java property that is set during startup.
*
* @param dbOptions
* The RocksDB {@link DBOptions}.
*/
private void relocateDefaultDbLogDir(DBOptions dbOptions) {
String logFilePath = System.getProperty("log.file");
if (logFilePath != null) {
File logFile = resolveFileLocation(logFilePath);
if ((logFile != null) && (resolveFileLocation(logFile.getParent()) != null)) {
dbOptions.setDbLogDir(logFile.getParent());
}
}
} | 3.26 |
flink_RocksDBResourceContainer_overwriteFilterIfExist_rdh | /**
* Overwrites the configured {@link Filter} if partitioned filters are enabled. Partitioned
* filters only work with full bloom filters, not block-based ones.
*/
private boolean overwriteFilterIfExist(BlockBasedTableConfig blockBasedTableConfig) {
if (blockBasedTableConfig.filterPolicy() != null) {
// TODO Can get filter's config in the future RocksDB version, and build new filter use
// existing config.
BloomFilter newFilter = new BloomFilter(10, false);
LOG.info("Existing filter has been overwritten to full filters since partitioned index filters is enabled.");
blockBasedTableConfig.setFilterPolicy(newFilter);
handlesToClose.add(newFilter);
}
return true;
} | 3.26 |
flink_PekkoRpcService_connect_rdh | // this method does not mutate state and is thus thread-safe
@Override
public <F extends Serializable, C extends FencedRpcGateway<F>> CompletableFuture<C> connect(String address, F fencingToken, Class<C> clazz) {
return connectInternal(address, clazz, (ActorRef actorRef) -> {
Tuple2<String, String> addressHostname = extractAddressHostname(actorRef);
return new FencedPekkoInvocationHandler<>(addressHostname.f0, addressHostname.f1, actorRef, configuration.getTimeout(), configuration.getMaximumFramesize(), configuration.isForceRpcInvocationSerialization(), null, () -> fencingToken, captureAskCallstacks, flinkClassLoader);
});
} | 3.26 |
flink_PekkoRpcService_extractAddressHostname_rdh | // ---------------------------------------------------------------------------------------
// Private helper methods
// ---------------------------------------------------------------------------------------
private Tuple2<String, String> extractAddressHostname(ActorRef actorRef) {
final String actorAddress = PekkoUtils.getRpcURL(actorSystem, actorRef);
final String hostname;
Option<String> host = actorRef.path().address().host();
if (host.isEmpty()) {
hostname = "localhost";
} else {
hostname = host.get();
}
return Tuple2.of(actorAddress, hostname);
} | 3.26 |
flink_TaskStateSnapshot_getOutputRescalingDescriptor_rdh | /**
* Returns the output channel mapping for rescaling with in-flight data or {@link InflightDataRescalingDescriptor#NO_RESCALE}.
*/
public InflightDataRescalingDescriptor getOutputRescalingDescriptor() {
return getMapping(OperatorSubtaskState::getOutputRescalingDescriptor);
} | 3.26 |
flink_TaskStateSnapshot_getMapping_rdh | /**
* Returns the only valid mapping as ensured by {@link StateAssignmentOperation}.
*/
private InflightDataRescalingDescriptor getMapping(Function<OperatorSubtaskState, InflightDataRescalingDescriptor> mappingExtractor) {
        return Iterators.getOnlyElement(
                subtaskStatesByOperatorID.values().stream()
                        .map(mappingExtractor)
                        .filter(mapping -> !mapping.equals(NO_RESCALE))
                        .iterator(),
                NO_RESCALE);
} | 3.26 |
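The contract relied on here is that at most one operator of the task carries a rescaling descriptor different from NO_RESCALE; Guava's Iterators.getOnlyElement(iterator, default) returns the default for an empty iterator and throws IllegalArgumentException if more than one element remains. A small illustration of the same pattern on plain strings (unshaded Guava is used for simplicity; Flink itself uses a shaded copy):

```java
import java.util.Arrays;
import java.util.List;
import com.google.common.collect.Iterators;

public class OnlyElementSketch {
    private static final String NO_RESCALE = "NO_RESCALE";

    static String onlyNonDefault(List<String> mappings) {
        // Returns NO_RESCALE when every entry is the default, the single non-default entry when
        // exactly one exists, and throws IllegalArgumentException when there is more than one.
        return Iterators.getOnlyElement(
                mappings.stream().filter(m -> !m.equals(NO_RESCALE)).iterator(), NO_RESCALE);
    }

    public static void main(String[] args) {
        System.out.println(onlyNonDefault(Arrays.asList(NO_RESCALE, NO_RESCALE)));  // NO_RESCALE
        System.out.println(onlyNonDefault(Arrays.asList(NO_RESCALE, "mapping-A"))); // mapping-A
    }
}
```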
flink_TaskStateSnapshot_isTaskFinished_rdh | /**
* Returns whether all the operators of the task have called finished methods.
*/
public boolean isTaskFinished() {
return isTaskFinished;
} | 3.26 |
flink_TaskStateSnapshot_m0_rdh | /**
* Returns whether all the operators of the task are already finished on restoring.
*/
    public boolean m0() {
return isTaskDeployedAsFinished;
} | 3.26 |
flink_TaskStateSnapshot_putSubtaskStateByOperatorID_rdh | /**
* Maps the given operator id to the given subtask state. Returns the subtask state of a
* previous mapping, if such a mapping existed or null otherwise.
*/
    public OperatorSubtaskState putSubtaskStateByOperatorID(
            @Nonnull OperatorID operatorID, @Nonnull OperatorSubtaskState state) {
return subtaskStatesByOperatorID.put(operatorID, Preconditions.checkNotNull(state));
} | 3.26 |
flink_TaskStateSnapshot_getInputRescalingDescriptor_rdh | /**
* Returns the input channel mapping for rescaling with in-flight data or {@link InflightDataRescalingDescriptor#NO_RESCALE}.
*/
    public InflightDataRescalingDescriptor getInputRescalingDescriptor() {
return getMapping(OperatorSubtaskState::getInputRescalingDescriptor);
} | 3.26 |
flink_TaskStateSnapshot_hasState_rdh | /**
* Returns true if at least one {@link OperatorSubtaskState} in subtaskStatesByOperatorID has
* state.
*/
public boolean hasState() {
for (OperatorSubtaskState operatorSubtaskState : subtaskStatesByOperatorID.values()) {
if ((operatorSubtaskState != null) && operatorSubtaskState.hasState()) {
return true;
}
}
        return isTaskDeployedAsFinished;
    } | 3.26
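A hedged usage sketch tying the TaskStateSnapshot methods together; the no-arg constructor and OperatorSubtaskState.builder() are assumed to be available as in recent Flink versions. Note that a snapshot whose task was deployed as finished reports hasState() == true even without any operator state.

```java
import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
import org.apache.flink.runtime.checkpoint.TaskStateSnapshot;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class TaskStateSnapshotSketch {
    public static void main(String[] args) {
        TaskStateSnapshot snapshot = new TaskStateSnapshot();
        OperatorID operatorId = new OperatorID();

        // An empty OperatorSubtaskState carries no state, so the snapshot still reports none.
        snapshot.putSubtaskStateByOperatorID(operatorId, OperatorSubtaskState.builder().build());
        System.out.println(snapshot.hasState());                              // false
        System.out.println(snapshot.getSubtaskStateByOperatorID(operatorId)); // the empty state
    }
}
```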
flink_TaskStateSnapshot_getSubtaskStateByOperatorID_rdh | /**
* Returns the subtask state for the given operator id (or null if not contained).
*/
@Nullable
public OperatorSubtaskState getSubtaskStateByOperatorID(OperatorID operatorID) {
return subtaskStatesByOperatorID.get(operatorID);
} | 3.26 |
flink_TaskStateSnapshot_getSubtaskStateMappings_rdh | /**
* Returns the set of all mappings from operator id to the corresponding subtask state.
*/
public Set<Map.Entry<OperatorID, OperatorSubtaskState>> getSubtaskStateMappings() {
return subtaskStatesByOperatorID.entrySet();
} | 3.26 |
flink_FinalizeOnMaster_finalizeGlobal_rdh | /**
* The method is invoked on the master (JobManager) after all (parallel) instances of an
* OutputFormat finished.
*
 * @param context
 * The context providing finalization information.
* @throws IOException
* The finalization may throw exceptions, which may cause the job to abort.
*/
default void finalizeGlobal(FinalizationContext context) throws IOException {
finalizeGlobal(context.getParallelism());
} | 3.26 |
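A hedged sketch of an implementation: the JobManager invokes finalizeGlobal(context) once after all parallel instances of the OutputFormat have finished, which makes it a natural place to commit or publish results. Only getParallelism() is taken from the context; the marker-file idea and the output path are illustrative assumptions, and in practice the implementing class would be the OutputFormat itself.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.flink.api.common.io.FinalizeOnMaster;

// Illustrative only: a finalization hook that writes a _SUCCESS marker once all
// parallel subtasks have finished.
public class SuccessMarkerFinalizer implements FinalizeOnMaster {

    private final String outputDirectory = "/tmp/job-output"; // assumed path

    @Override
    public void finalizeGlobal(FinalizationContext context) throws IOException {
        Path marker = Paths.get(outputDirectory, "_SUCCESS");
        String content = "finished with parallelism " + context.getParallelism();
        Files.write(marker, content.getBytes());
    }

    // Deprecated variant; the default method shown above would otherwise delegate to it
    // with the parallelism extracted from the context.
    @Override
    public void finalizeGlobal(int parallelism) throws IOException {
        // no-op
    }
}
```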
flink_RocksDBIncrementalRestoreOperation_restoreBaseDBFromLocalState_rdh | /**
* Restores RocksDB instance from local state.
*/
private void restoreBaseDBFromLocalState(IncrementalLocalKeyedStateHandle localKeyedStateHandle) throws Exception {
KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(localKeyedStateHandle.getMetaDataStateHandle());
List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots();
Path restoreSourcePath = localKeyedStateHandle.getDirectoryStateHandle().getDirectory();
logger.debug("Restoring keyed backend uid in operator {} from incremental snapshot to {}.", operatorIdentifier, backendUID);
this.rocksHandle.openDB(createColumnFamilyDescriptors(stateMetaInfoSnapshots, true), stateMetaInfoSnapshots, restoreSourcePath);
} | 3.26 |
flink_RocksDBIncrementalRestoreOperation_restoreWithoutRescaling_rdh | /**
* Recovery from a single remote incremental state without rescaling.
*/
@SuppressWarnings("unchecked")
private void restoreWithoutRescaling(KeyedStateHandle keyedStateHandle) throws Exception {
logger.info("Starting to restore from state handle: {} without rescaling.", keyedStateHandle);
if (keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle) {
IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle = ((IncrementalRemoteKeyedStateHandle) (keyedStateHandle));
restorePreviousIncrementalFilesStatus(incrementalRemoteKeyedStateHandle);
restoreBaseDBFromRemoteState(incrementalRemoteKeyedStateHandle);
} else if (keyedStateHandle instanceof IncrementalLocalKeyedStateHandle) {
IncrementalLocalKeyedStateHandle incrementalLocalKeyedStateHandle = ((IncrementalLocalKeyedStateHandle) (keyedStateHandle));
restorePreviousIncrementalFilesStatus(incrementalLocalKeyedStateHandle);
restoreBaseDBFromLocalState(incrementalLocalKeyedStateHandle);
} else {
throw unexpectedStateHandleException(new Class[]{ IncrementalRemoteKeyedStateHandle.class, IncrementalLocalKeyedStateHandle.class }, keyedStateHandle.getClass());
        }
        logger.info("Finished restoring from state handle: {} without rescaling.", keyedStateHandle);
} | 3.26 |
flink_RocksDBIncrementalRestoreOperation_restoreWithRescaling_rdh | /**
 * Recovery from multiple incremental states with rescaling. For rescaling, this method creates a
 * temporary RocksDB instance for each key-group shard. All contents from the temporary instance
 * are copied into the real restore instance and then the temporary instance is discarded.
*/
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {
Preconditions.checkArgument((restoreStateHandles != null) && (!restoreStateHandles.isEmpty()));
final List<StateHandleDownloadSpec> allDownloadSpecs = new ArrayList<>();
final List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles = new ArrayList<>(restoreStateHandles.size());
final Path absolutInstanceBasePath = instanceBasePath.getAbsoluteFile().toPath();
// Prepare and collect all the download request to pull remote state to a local directory
for (KeyedStateHandle stateHandle : restoreStateHandles) {
if (stateHandle instanceof IncrementalRemoteKeyedStateHandle) {
StateHandleDownloadSpec downloadRequest = new StateHandleDownloadSpec(((IncrementalRemoteKeyedStateHandle) (stateHandle)), absolutInstanceBasePath.resolve(UUID.randomUUID().toString()));
allDownloadSpecs.add(downloadRequest);
} else if (stateHandle instanceof IncrementalLocalKeyedStateHandle) {
localKeyedStateHandles.add(((IncrementalLocalKeyedStateHandle) (stateHandle)));
} else {
throw unexpectedStateHandleException(IncrementalRemoteKeyedStateHandle.class, stateHandle.getClass());
}
}
allDownloadSpecs.stream().map(StateHandleDownloadSpec::createLocalStateHandleForDownloadedState).forEach(localKeyedStateHandles::add);
// Choose the best state handle for the initial DB
final IncrementalLocalKeyedStateHandle selectedInitialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(localKeyedStateHandles, keyGroupRange, overlapFractionThreshold);
Preconditions.checkNotNull(selectedInitialHandle);
// Remove the selected handle from the list so that we don't restore it twice.
localKeyedStateHandles.remove(selectedInitialHandle);
try {
// Process all state downloads
transferRemoteStateToLocalDirectory(allDownloadSpecs);
// Init the base DB instance with the initial state
initBaseDBForRescaling(selectedInitialHandle);
// Transfer remaining key-groups from temporary instance into base DB
byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
CompositeKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);
byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
CompositeKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);
// Insert all remaining state through creating temporary RocksDB instances
            for (IncrementalLocalKeyedStateHandle stateHandle : localKeyedStateHandles) {
                logger.info("Starting to restore from state handle: {} with rescaling.", stateHandle);
                try (RestoredDBInstance tmpRestoreDBInfo = restoreTempDBInstanceFromLocalState(stateHandle);
                        RocksDBWriteBatchWrapper writeBatchWrapper =
                                new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize)) {
                    List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
                    List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;
                    // iterating only the requested descriptors automatically skips the default column family handle
                    for (int descIdx = 0; descIdx < tmpColumnFamilyDescriptors.size(); ++descIdx) {
                        ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(descIdx);
                        ColumnFamilyHandle targetColumnFamilyHandle =
                                this.rocksHandle.getOrRegisterStateColumnFamilyHandle(
                                                null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(descIdx))
                                        .columnFamilyHandle;
                        try (RocksIteratorWrapper iterator =
                                RocksDBOperationUtils.getRocksIterator(
                                        tmpRestoreDBInfo.db, tmpColumnFamilyHandle, tmpRestoreDBInfo.readOptions)) {
iterator.seek(startKeyGroupPrefixBytes);
while (iterator.isValid()) {
if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
} else {
                                    // Since the iterator will visit the records according to the
                                    // sorted order, we can just break here.
break;
}
iterator.next();
}
                        } // releases native iterator resources
                    }
                    logger.info("Finished restoring from state handle: {} with rescaling.", stateHandle);
                }
            }
} finally {
// Cleanup all download directories
allDownloadSpecs.stream().map(StateHandleDownloadSpec::getDownloadDestination).forEach(this::cleanUpPathQuietly);
}
} | 3.26 |
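The core trick of the rescaling path is that RocksDB keys are prefixed with the serialized key-group id, so the key groups owned by this subtask form one contiguous key range: the copy loop seeks to the serialized start key group and copies entries until a key is no longer lexicographically before the serialized exclusive stop prefix. Below is a self-contained sketch of that range check; the unsigned lexicographic comparison stands in for RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes, and the 2-byte prefix length is an assumption.

```java
import java.nio.ByteBuffer;

public class KeyGroupRangeSketch {

    // Serializes a key-group id into a fixed-size big-endian prefix (2 bytes assumed here),
    // mirroring what CompositeKeySerializationUtils.serializeKeyGroup does for small ranges.
    static byte[] keyGroupPrefix(int keyGroup, int prefixBytes) {
        byte[] prefix = new byte[prefixBytes];
        for (int i = prefixBytes - 1; i >= 0; i--) {
            prefix[i] = (byte) keyGroup;
            keyGroup >>>= 8;
        }
        return prefix;
    }

    // Unsigned lexicographic "key < stopPrefix" check, the stop condition of the copy loop.
    static boolean beforePrefix(byte[] key, byte[] stopPrefix) {
        int len = Math.min(key.length, stopPrefix.length);
        for (int i = 0; i < len; i++) {
            int a = key[i] & 0xFF;
            int b = stopPrefix[i] & 0xFF;
            if (a != b) {
                return a < b;
            }
        }
        return key.length < stopPrefix.length;
    }

    public static void main(String[] args) {
        byte[] start = keyGroupPrefix(32, 2);               // first owned key group
        byte[] stopExclusive = keyGroupPrefix(64 + 1, 2);   // end key group + 1
        byte[] someKey = ByteBuffer.allocate(6).putShort((short) 40).putInt(42).array();
        System.out.println(beforePrefix(someKey, stopExclusive)); // true: key group 40 is in range
        System.out.println(beforePrefix(someKey, start));         // false: not before the start
    }
}
```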