name | code_snippet | score
---|---|---|
flink_RocksDBIncrementalRestoreOperation_readMetaData_rdh | /**
* Reads Flink's state meta data file from the state handle.
*/
private KeyedBackendSerializationProxy<K> readMetaData(StreamStateHandle metaStateHandle) throws Exception {
InputStream inputStream = null;
try {
inputStream = metaStateHandle.openInputStream();
cancelStreamRegistry.registerCloseable(inputStream);
DataInputView in = new DataInputViewStreamWrapper(inputStream);
return readMetaData(in);
} finally {
if (cancelStreamRegistry.unregisterCloseable(inputStream)) {
inputStream.close();
}
}
} | 3.26 |
flink_RocksDBIncrementalRestoreOperation_restore_rdh | /**
* Root method that branches for different implementations of {@link KeyedStateHandle}.
*/
@Override
public RocksDBRestoreResult restore() throws Exception {
if ((restoreStateHandles == null) || restoreStateHandles.isEmpty()) {
return null;
}
final KeyedStateHandle theFirstStateHandle = restoreStateHandles.iterator().next();
boolean isRescaling = (restoreStateHandles.size() > 1) || (!Objects.equals(theFirstStateHandle.getKeyGroupRange(), keyGroupRange));
if (isRescaling) {
restoreWithRescaling(restoreStateHandles);
} else {
restoreWithoutRescaling(theFirstStateHandle);
}
return new RocksDBRestoreResult(this.rocksHandle.getDb(), this.rocksHandle.getDefaultColumnFamilyHandle(), this.rocksHandle.getNativeMetricMonitor(), lastCompletedCheckpointId, backendUID, restoredSstFiles);
} | 3.26 |
flink_RocksDBIncrementalRestoreOperation_createColumnFamilyDescriptors_rdh | /**
* This method recreates and registers all {@link ColumnFamilyDescriptor} from Flink's state
* meta data snapshot.
*/
private List<ColumnFamilyDescriptor> createColumnFamilyDescriptors(List<StateMetaInfoSnapshot> stateMetaInfoSnapshots, boolean registerTtlCompactFilter) {
List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(stateMetaInfoSnapshots.size());
for (StateMetaInfoSnapshot metaInfoSnapshot : stateMetaInfoSnapshots) {
RegisteredStateMetaInfoBase metaInfoBase = RegisteredStateMetaInfoBase.fromMetaInfoSnapshot(metaInfoSnapshot);
ColumnFamilyDescriptor columnFamilyDescriptor = RocksDBOperationUtils.createColumnFamilyDescriptor(metaInfoBase, this.rocksHandle.getColumnFamilyOptionsFactory(), registerTtlCompactFilter ? this.rocksHandle.getTtlCompactFiltersManager() : null, this.rocksHandle.getWriteBufferManagerCapacity());
columnFamilyDescriptors.add(columnFamilyDescriptor);
}
return columnFamilyDescriptors;
} | 3.26 |
flink_SubtaskState_getManagedOperatorState_rdh | // --------------------------------------------------------------------------------------------
public ChainedStateHandle<OperatorStateHandle> getManagedOperatorState() {
return managedOperatorState;
} | 3.26 |
flink_SubtaskState_equals_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
SubtaskState that = ((SubtaskState) (o));
if (stateSize != that.stateSize) {
return false;
}
if (managedOperatorState != null ? !managedOperatorState.equals(that.managedOperatorState) : that.managedOperatorState != null) {
return false;
}
if (rawOperatorState != null ? !rawOperatorState.equals(that.rawOperatorState) : that.rawOperatorState != null) {
return false;
}
if (managedKeyedState != null ? !managedKeyedState.equals(that.managedKeyedState) : that.managedKeyedState != null) {
return false;
}
return rawKeyedState != null ? rawKeyedState.equals(that.rawKeyedState) : that.rawKeyedState == null;
} | 3.26 |
flink_Signature_of_rdh | /**
* Creates an immutable instance of {@link Signature}.
*/
public static Signature of(List<Argument> arguments) {
return new Signature(arguments);
} | 3.26 |
flink_AggregatorRegistry_registerAggregator_rdh | // --------------------------------------------------------------------------------------------
public void registerAggregator(String name, Aggregator<?> aggregator) {
if ((name == null) || (aggregator == null)) {
throw new IllegalArgumentException("Name and aggregator must not be null");
}
if (this.registry.containsKey(name)) {
throw new RuntimeException("An aggregator is already registered under the given name.");
}
this.registry.put(name, aggregator);
} | 3.26 |
flink_PackagedProgram_deleteExtractedLibraries_rdh | /**
* Deletes all temporary files created for contained packaged libraries.
*/
private void deleteExtractedLibraries() {
deleteExtractedLibraries(this.extractedTempLibraries);
this.extractedTempLibraries.clear();
} | 3.26 |
flink_PackagedProgram_getJobJarAndDependencies_rdh | /**
* Returns all provided libraries needed to run the program.
*/
public static List<URL> getJobJarAndDependencies(File jarFile, @Nullable String entryPointClassName) throws ProgramInvocationException {
URL jarFileUrl = loadJarFile(jarFile);
List<File> extractedTempLibraries = (jarFileUrl == null) ? Collections.emptyList() : extractContainedLibraries(jarFileUrl);
List<URL> libs = new ArrayList<URL>(extractedTempLibraries.size() + 1);
if (jarFileUrl != null) {
libs.add(jarFileUrl);
}
for (File tmpLib : extractedTempLibraries) {
try {
libs.add(tmpLib.getAbsoluteFile().toURI().toURL());
} catch (MalformedURLException e) {
throw new RuntimeException("URL is invalid. This should not happen.", e);
}
}
if (isPython(entryPointClassName)) {
libs.add(PackagedProgramUtils.getPythonJar());
}
return libs;
} | 3.26 |
flink_PackagedProgram_invokeInteractiveModeForExecution_rdh | /**
* This method assumes that the context environment is prepared, or the execution will be a
* local execution by default.
*/
public void invokeInteractiveModeForExecution() throws ProgramInvocationException {
FlinkSecurityManager.monitorUserSystemExitForCurrentThread();
try {
callMainMethod(mainClass, args);
} finally {
FlinkSecurityManager.unmonitorUserSystemExitForCurrentThread();
}
} | 3.26 |
flink_PackagedProgram_extractContainedLibraries_rdh | /**
* Takes all JAR files that are contained in this program's JAR file and extracts them to the
* system's temp directory.
*
* @return The file names of the extracted temporary files.
* @throws ProgramInvocationException
* Thrown, if the extraction process failed.
*/
public static List<File> extractContainedLibraries(URL jarFile) throws ProgramInvocationException {
try (final JarFile jar = new JarFile(new File(jarFile.toURI()))) {
final List<JarEntry> containedJarFileEntries = getContainedJarEntries(jar);
if (containedJarFileEntries.isEmpty()) {
return Collections.emptyList();
}
final List<File> extractedTempLibraries = new ArrayList<>(containedJarFileEntries.size());
boolean extractionIncomplete = true;
try {
final Random rnd = new Random();
final byte[] buffer = new byte[4096];
for (final JarEntry entry : containedJarFileEntries) {
// '/' as in case of zip, jar
// java.util.zip.ZipEntry#isDirectory always looks only for '/' not for
// File.separator
final String name = entry.getName().replace('/', '_');
final File tempFile = copyLibToTempFile(name, rnd, jar, entry, buffer);
extractedTempLibraries.add(tempFile);
}
extractionIncomplete = false;
} finally {
if (extractionIncomplete) {
deleteExtractedLibraries(extractedTempLibraries);
}
}
return extractedTempLibraries;
} catch (Throwable t) {
throw new ProgramInvocationException("Unknown I/O error while extracting contained jar files.", t);
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_peek_rdh | /**
* Get the first element from the queue without removing it.
*
* @return the first element in the queue, or null if the queue is empty.
*/
public T peek() {
lock.lock();
try {
return queue.peek();
} finally {
lock.unlock();
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_getAvailableFuture_rdh | // ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
@SuppressWarnings("unchecked")
private static CompletableFuture<Void> getAvailableFuture() {
// this is a way to obtain the AvailabilityProvider.AVAILABLE future until we decide to
// move the class from the runtime module to the core module
try {
final Class<?> clazz = Class.forName("org.apache.flink.runtime.io.AvailabilityProvider");
final Field field = clazz.getDeclaredField("AVAILABLE");
return ((CompletableFuture<Void>) (field.get(null)));
} catch (Throwable t) {
return CompletableFuture.completedFuture(null);
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_put_rdh | // ------------------------------------------------------------------------
// Blocking Queue Logic
// ------------------------------------------------------------------------
/**
* Put an element into the queue. The thread blocks if the queue is full.
*
* @param threadIndex
* the index of the thread.
* @param element
* the element to put.
* @return true if the element has been successfully put into the queue, false otherwise.
* @throws InterruptedException
* when the thread is interrupted.
*/
public boolean put(int threadIndex, T element) throws InterruptedException {
if (element == null) {
throw new NullPointerException();
}
lock.lockInterruptibly();
try {
while (queue.size() >= capacity) {
if (getAndResetWakeUpFlag(threadIndex)) {
return false;
}
waitOnPut(threadIndex);
}
enqueue(element);
return true;
} finally {
lock.unlock();
}
} | 3.26 |
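A minimal usage sketch for the put/wake-up contract described above. The queue instance, the record value, and the producer index 0 are illustrative assumptions, not part of the snippet, and the code is assumed to run in a method that may throw InterruptedException.
// hedged sketch: a fetcher thread (index 0 is an assumption) enqueuing one record
boolean enqueued = queue.put(0, record);
if (!enqueued) {
    // put() returned false because another thread called wakeUpPuttingThread(0)
    // while this thread was blocked on a full queue; stop or retry fetching here
}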
flink_FutureCompletingBlockingQueue_wakeUpPuttingThread_rdh | /**
* Gracefully wakes up the thread with the given {@code threadIndex} if it is blocked in adding
* an element to the queue. If the thread is blocked in {@link #put(int, Object)} it will
* immediately return from the method with a return value of false.
*
* <p>If this method is called, the next time the thread with the given index is about to be
* blocked in adding an element, it may immediately wake up and return.
*
* @param threadIndex
* The number identifying the thread.
*/
public void wakeUpPuttingThread(int threadIndex) {
lock.lock();
try {
maybeCreateCondition(threadIndex);
ConditionAndFlag caf = putConditionAndFlags[threadIndex];
if (caf != null) {
caf.setWakeUp(true);
caf.condition().signal();
}
} finally {
lock.unlock();
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_size_rdh | /**
* Gets the size of the queue.
*/
public int size() {
lock.lock();
try {
return queue.size();
} finally {
lock.unlock();
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_remainingCapacity_rdh | /**
* Checks the remaining capacity in the queue. That is the difference between the maximum
* capacity and the current number of elements in the queue.
*/
public int remainingCapacity() {
lock.lock();
try {
return capacity - queue.size();
} finally {
lock.unlock();
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_getAvailabilityFuture_rdh | // ------------------------------------------------------------------------
// Future / Notification logic
// ------------------------------------------------------------------------
/**
* Returns the availability future. If the queue is non-empty, then this future will already be
* complete. Otherwise the obtained future is guaranteed to get completed the next time the
* queue becomes non-empty, or a notification happens via {@link #notifyAvailable()}.
*
* <p>Note that a completed future is no guarantee that the next call to {@link #poll()} will
* return a non-null element. If there are concurrent consumers, another consumer may have taken
* the available element. Or there was no element in the first place, because the future was
* completed through a call to {@link #notifyAvailable()}.
*
* <p>For that reason, it is important to call this method (to obtain a new future) every time
* again after {@link #poll()} returned null and you want to wait for data.
*/
public CompletableFuture<Void> getAvailabilityFuture() {
return currentFuture;
} | 3.26 |
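A sketch of the consumer pattern the javadoc above prescribes: re-obtain the availability future after every empty poll(). The queue variable and the process() call are assumptions for illustration, and the fragment is assumed to run in a method that may throw Exception.
// hedged sketch: consumer that waits on the availability future between polls
T element;
while ((element = queue.poll()) == null) {
    // a new future must be fetched after each null poll, as described above
    queue.getAvailabilityFuture().get();
}
process(element);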
flink_FutureCompletingBlockingQueue_m1_rdh | /**
* Checks whether the queue is empty.
*/
public boolean m1() {
lock.lock();
try {
return queue.isEmpty();
} finally {
lock.unlock();
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_poll_rdh | /**
* Get and remove the first element from the queue. Null is returned if the queue is empty. If
* this makes the queue empty (takes the last element) or finds the queue already empty, then
* this resets the availability notifications. The next call to {@link #getAvailabilityFuture()}
* will then return a non-complete future that completes only the next time that the queue
* becomes non-empty or the {@link #notifyAvailable()} method is called.
*
* @return the first element from the queue, or null if the queue is empty.
*/
public T poll() {
lock.lock();
try {
if (queue.size() == 0) {
m0();
return null;
}
return dequeue();
} finally {
lock.unlock();
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_moveToAvailable_rdh | /**
* Internal utility to make sure that the current future is completed (until reset).
*/
@GuardedBy("lock")
private void moveToAvailable() {
final CompletableFuture<Void> current = currentFuture;
if (current != AVAILABLE) {
currentFuture = AVAILABLE;
current.complete(null);
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_enqueue_rdh | // --------------- private helpers -------------------------
@GuardedBy("lock")
private void enqueue(T element) {
final int sizeBefore = queue.size();
queue.add(element);
if (sizeBefore == 0) {
moveToAvailable();
}
if ((sizeBefore < (capacity - 1)) && (!notFull.isEmpty())) {
signalNextPutter();
}
} | 3.26 |
flink_FutureCompletingBlockingQueue_take_rdh | /**
* <b>Warning:</b> This is a dangerous method and should only be used for testing convenience. A
* method that blocks until availability does not go together well with the concept of
* asynchronous notifications and non-blocking polling.
*
* <p>Get and remove the first element from the queue. The call blocks if the queue is empty.
* The problem with this method is that it may loop internally until an element is available and
* that way eagerly reset the availability future. If a consumer thread is blocked in taking an
* element, it will receive availability notifications from {@link #notifyAvailable()} and
* immediately reset them by calling {@link #poll()} and finding the queue empty.
*
* @return the first element in the queue.
* @throws InterruptedException
* when the thread is interrupted.
*/
@VisibleForTesting
public T take() throws InterruptedException {
T next;
while ((next = poll()) == null) {
// use the future to wait for availability to avoid busy waiting
try {
getAvailabilityFuture().get();
} catch (ExecutionException | CompletionException e) {
// this should never happen, but we propagate just in case
throw new FlinkRuntimeException("exception in queue future completion", e);
}
}
return next;
} | 3.26 |
flink_FactoryUtil_createCatalogStoreFactoryHelper_rdh | /**
* Creates a utility that helps validating options for a {@link CatalogStoreFactory}.
*
* <p>Note: This utility checks for left-over options in the final step.
*/
public static CatalogStoreFactoryHelper createCatalogStoreFactoryHelper(CatalogStoreFactory factory, CatalogStoreFactory.Context context) {
return new CatalogStoreFactoryHelper(factory, context);
} | 3.26 |
flink_FactoryUtil_createModuleFactoryHelper_rdh | /**
* Creates a utility that helps validating options for a {@link ModuleFactory}.
*
* <p>Note: This utility checks for left-over options in the final step.
*/
public static ModuleFactoryHelper createModuleFactoryHelper(ModuleFactory factory, ModuleFactory.Context context) {
return new ModuleFactoryHelper(factory, context);
} | 3.26 |
flink_FactoryUtil_validateFactoryOptions_rdh | /**
* Validates the required and optional {@link ConfigOption}s of a factory.
*
* <p>Note: It does not check for left-over options.
*/
public static void validateFactoryOptions(Factory factory, ReadableConfig options) {
validateFactoryOptions(factory.requiredOptions(), factory.optionalOptions(), options);
} | 3.26 |
flink_FactoryUtil_discoverEncodingFormat_rdh | /**
* Discovers a {@link EncodingFormat} of the given type using the given option as factory
* identifier.
*/
public <I, F extends EncodingFormatFactory<I>> EncodingFormat<I> discoverEncodingFormat(Class<F> formatFactoryClass, ConfigOption<String> formatOption) {
return discoverOptionalEncodingFormat(formatFactoryClass, formatOption).orElseThrow(() -> new ValidationException(String.format("Could not find required sink format '%s'.", formatOption.key())));
} | 3.26 |
flink_FactoryUtil_discoverOptionalEncodingFormat_rdh | /**
* Discovers a {@link EncodingFormat} of the given type using the given option (if present)
* as factory identifier.
*/
public <I, F extends EncodingFormatFactory<I>> Optional<EncodingFormat<I>> discoverOptionalEncodingFormat(Class<F> formatFactoryClass, ConfigOption<String> formatOption) {
return discoverOptionalFormatFactory(formatFactoryClass, formatOption).map(formatFactory -> {
String formatPrefix = formatPrefix(formatFactory, formatOption);
try {
return formatFactory.createEncodingFormat(context, createFormatOptions(formatPrefix, formatFactory));
} catch (Throwable t) {
throw new ValidationException(String.format("Error creating sink format '%s' in option space '%s'.", formatFactory.factoryIdentifier(), formatPrefix), t);
}
});
} | 3.26 |
flink_FactoryUtil_forwardOptions_rdh | // ----------------------------------------------------------------------------------------
/**
* Forwards the options declared in {@link DynamicTableFactory#forwardOptions()} and
* possibly {@link FormatFactory#forwardOptions()} from {@link DynamicTableFactory.Context#getEnrichmentOptions()} to the final options, if present.
*/
@SuppressWarnings({ "unchecked" })
private void forwardOptions() {
for (ConfigOption<?> option : factory.forwardOptions()) {
enrichingOptions.getOptional(option).ifPresent(o -> allOptions.set(((ConfigOption<? extends Object>) (option)), o));
}
} | 3.26 |
flink_FactoryUtil_discoverFactory_rdh | /**
* Discovers a factory using the given factory base class and identifier.
*
* <p>This method is meant for cases where {@link #createTableFactoryHelper(DynamicTableFactory,
* DynamicTableFactory.Context)}, {@link #createTableSource(Catalog, ObjectIdentifier,
* ResolvedCatalogTable, ReadableConfig, ClassLoader, boolean)}, and {@link #createTableSink(Catalog, ObjectIdentifier, ResolvedCatalogTable, ReadableConfig,
* ClassLoader, boolean)} are not applicable.
*/
@SuppressWarnings("unchecked")
public static <T extends Factory> T discoverFactory(ClassLoader classLoader, Class<T> factoryClass, String factoryIdentifier) {
final List<Factory> factories = discoverFactories(classLoader);
final List<Factory> foundFactories = factories.stream().filter(f -> factoryClass.isAssignableFrom(f.getClass())).collect(Collectors.toList());
if (foundFactories.isEmpty()) {
throw new ValidationException(String.format("Could not find any factories that implement '%s' in the classpath.", factoryClass.getName()));
}
final List<Factory> matchingFactories = foundFactories.stream().filter(f -> f.factoryIdentifier().equals(factoryIdentifier)).collect(Collectors.toList());
if (matchingFactories.isEmpty()) {
throw new ValidationException(String.format(("Could not find any factory for identifier '%s' that implements '%s' in the classpath.\n\n" + "Available factory identifiers are:\n\n") + "%s", factoryIdentifier, factoryClass.getName(), foundFactories.stream().map(Factory::factoryIdentifier).filter(identifier -> !DEFAULT_IDENTIFIER.equals(identifier)).distinct().sorted().collect(Collectors.joining("\n"))));
}
if (matchingFactories.size() > 1) {
throw new ValidationException(String.format(("Multiple factories for identifier '%s' that implement '%s' found in the classpath.\n\n" + "Ambiguous factory classes are:\n\n") + "%s", factoryIdentifier, factoryClass.getName(), matchingFactories.stream().map(f -> f.getClass().getName()).sorted().collect(Collectors.joining("\n"))));
}
return ((T) (matchingFactories.get(0)));
} | 3.26 |
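A small usage sketch of the discoverFactory method above. The choice of DynamicTableSourceFactory and the 'kafka' identifier are illustrative assumptions; nothing here guarantees such a factory is on the classpath.
// hedged sketch: look up a DynamicTableSourceFactory registered under the identifier "kafka"
DynamicTableSourceFactory factory =
    FactoryUtil.discoverFactory(
        Thread.currentThread().getContextClassLoader(),
        DynamicTableSourceFactory.class,
        "kafka");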
flink_FactoryUtil_m1_rdh | /**
* Validates the options of the factory. It checks for unconsumed option keys.
*/
public void m1() {
validateFactoryOptions(factory, allOptions);
validateUnconsumedKeys(factory.factoryIdentifier(), allOptions.keySet(), consumedOptionKeys, deprecatedOptionKeys);
validateWatermarkOptions(factory.factoryIdentifier(), allOptions);
} | 3.26 |
flink_FactoryUtil_createDynamicTableSink_rdh | /**
* Creates a {@link DynamicTableSink} from a {@link CatalogTable}.
*
* <p>If {@param preferredFactory} is passed, the table sink is created from that factory.
* Otherwise, an attempt is made to discover a matching factory using Java SPI (see {@link Factory} for details).
*/
public static DynamicTableSink createDynamicTableSink(@Nullable
DynamicTableSinkFactory preferredFactory, ObjectIdentifier objectIdentifier, ResolvedCatalogTable catalogTable, Map<String, String> enrichmentOptions, ReadableConfig configuration, ClassLoader classLoader, boolean isTemporary) {
final DefaultDynamicTableContext context = new DefaultDynamicTableContext(objectIdentifier, catalogTable, enrichmentOptions, configuration, classLoader, isTemporary);
try {
final DynamicTableSinkFactory factory = (preferredFactory != null) ? preferredFactory : discoverTableFactory(DynamicTableSinkFactory.class, context);
return factory.createDynamicTableSink(context);
} catch (Throwable t) {
throw new ValidationException(String.format(("Unable to create a sink for writing table '%s'.\n\n" + "Table options are:\n\n") + "%s", objectIdentifier.asSummaryString(), catalogTable.getOptions().entrySet().stream().map(e -> stringifyOption(e.getKey(), e.getValue())).sorted().collect(Collectors.joining("\n"))), t);
}
}
/**
*
* @deprecated Use {@link #createDynamicTableSink(DynamicTableSinkFactory, ObjectIdentifier,
ResolvedCatalogTable, Map, ReadableConfig, ClassLoader, boolean)} | 3.26 |
flink_FactoryUtil_checkFormatIdentifierMatchesWithEnrichingOptions_rdh | /**
* This function assumes that the format config is used if and only if the original
* configuration contains the format config option. It will fail if there is a mismatch of
* the identifier between the format in the plan table map and the one in enriching table
* map.
*/
private void checkFormatIdentifierMatchesWithEnrichingOptions(ConfigOption<String> formatOption, String identifierFromPlan) {
Optional<String> identifierFromEnrichingOptions = enrichingOptions.getOptional(formatOption);
if (!identifierFromEnrichingOptions.isPresent()) {
return;
}
if (identifierFromPlan == null) {
throw new ValidationException(String.format(("The persisted plan has no format option '%s' specified, while the catalog table has it with value '%s'. " + "This is invalid, as either only the persisted plan table defines the format, ") + "or both the persisted plan table and the catalog table defines the same format.", formatOption, identifierFromEnrichingOptions.get()));
}
if (!Objects.equals(identifierFromPlan, identifierFromEnrichingOptions.get())) {
throw new ValidationException(String.format("Both persisted plan table and catalog table define the format option '%s', " + "but they mismatch: '%s' != '%s'.",
formatOption, identifierFromPlan, identifierFromEnrichingOptions.get()));
}
} | 3.26 |
flink_FactoryUtil_createTableFactoryHelper_rdh | /**
* Creates a utility that helps in discovering formats, merging options with {@link DynamicTableFactory.Context#getEnrichmentOptions()} and validating them all for a {@link DynamicTableFactory}.
*
* <p>The following example sketches the usage:
*
* <pre>{@code // in createDynamicTableSource()
* helper = FactoryUtil.createTableFactoryHelper(this, context);
*
* keyFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, KEY_FORMAT);
* valueFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, VALUE_FORMAT);
*
* helper.validate();
*
* ... // construct connector with discovered formats}</pre>
*
* <p>Note: The format option parameter of {@link TableFactoryHelper#discoverEncodingFormat(Class, ConfigOption)} and {@link TableFactoryHelper#discoverDecodingFormat(Class, ConfigOption)} must be {@link #FORMAT} or
* end with {@link #FORMAT_SUFFIX}. The discovery logic will replace 'format' with the factory
* identifier value as the format prefix. For example, assuming the identifier is 'json', if the
* format option key is 'format', then the format prefix is 'json.'. If the format option key is
* 'value.format', then the format prefix is 'value.json.'. The format prefix is used to project
* the options for the format factory.
*
* <p>Note: When created, this utility merges the options from {@link DynamicTableFactory.Context#getEnrichmentOptions()} using {@link DynamicTableFactory#forwardOptions()}. When invoking {@link TableFactoryHelper#validate()},
* this utility checks for left-over options in the final step.
*/
public static TableFactoryHelper createTableFactoryHelper(DynamicTableFactory factory, DynamicTableFactory.Context context) {
return new TableFactoryHelper(factory, context);
} | 3.26 |
flink_FactoryUtil_discoverOptionalDecodingFormat_rdh | /**
* Discovers a {@link DecodingFormat} of the given type using the given option (if present)
* as factory identifier.
*/
public <I, F extends DecodingFormatFactory<I>> Optional<DecodingFormat<I>> discoverOptionalDecodingFormat(Class<F> formatFactoryClass, ConfigOption<String> formatOption) {
return discoverOptionalFormatFactory(formatFactoryClass, formatOption).map(formatFactory -> {
String formatPrefix = formatPrefix(formatFactory, formatOption);
try {
return formatFactory.createDecodingFormat(context, createFormatOptions(formatPrefix, formatFactory));
} catch (Throwable t) {
throw new <t>ValidationException(String.format("Error creating scan format '%s' in option space '%s'.", formatFactory.factoryIdentifier(), formatPrefix));
}
});
} | 3.26 |
flink_FactoryUtil_validateWatermarkOptions_rdh | // --------------------------------------------------------------------------------------------
/**
* Validate watermark options from table options.
*
* @param factoryIdentifier
* identifier of table
* @param conf
* table options
*/
public static void validateWatermarkOptions(String factoryIdentifier, ReadableConfig conf) {
Optional<String> errMsgOptional = checkWatermarkOptions(conf);
if (errMsgOptional.isPresent()) {
throw new ValidationException(String.format("Error configuring watermark for '%s', %s", factoryIdentifier, errMsgOptional.get()));
}
} | 3.26 |
flink_FactoryUtil_createCatalog_rdh | /**
* Attempts to discover an appropriate catalog factory and creates an instance of the catalog.
*
* <p>This first uses the legacy {@link TableFactory} stack to discover a matching {@link CatalogFactory}. If none is found, it falls back to the new stack using {@link Factory}
* instead.
*/
public static Catalog createCatalog(String catalogName, Map<String, String> options, ReadableConfig configuration, ClassLoader classLoader) {
// Use the legacy mechanism first for compatibility
try {
final CatalogFactory legacyFactory = TableFactoryService.find(CatalogFactory.class, options, classLoader);
return legacyFactory.createCatalog(catalogName, options);
} catch (NoMatchingTableFactoryException e) {
// No matching legacy factory found, try using the new stack
final DefaultCatalogContext discoveryContext = new DefaultCatalogContext(catalogName, options, configuration, classLoader);
try {
final CatalogFactory factory = getCatalogFactory(discoveryContext);
// The type option is only used for discovery, we don't actually want to forward it
// to the catalog factory itself.
final Map<String, String> factoryOptions = options.entrySet().stream().filter(entry -> !CommonCatalogOptions.CATALOG_TYPE.key().equals(entry.getKey())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
final DefaultCatalogContext context = new DefaultCatalogContext(catalogName, factoryOptions, configuration, classLoader);
return factory.createCatalog(context);
} catch (Throwable t) {
throw new ValidationException(String.format("Unable to create catalog '%s'.%n%nCatalog options are:%n%s", catalogName, options.entrySet().stream().map(optionEntry -> stringifyOption(optionEntry.getKey(), optionEntry.getValue())).sorted().collect(Collectors.joining("\n"))), t);
}
}
} | 3.26 |
flink_FactoryUtil_createModule_rdh | /**
* Discovers a matching module factory and creates an instance of it.
*
* <p>This first uses the legacy {@link TableFactory} stack to discover a matching {@link ModuleFactory}. If none is found, it falls back to the new stack using {@link Factory}
* instead.
*/
public static Module createModule(String moduleName, Map<String, String> options, ReadableConfig configuration, ClassLoader classLoader) {
if (options.containsKey(MODULE_TYPE.key())) {
throw new ValidationException(String.format("Option '%s' = '%s' is not supported since module name " + "is used to find module", MODULE_TYPE.key(), options.get(MODULE_TYPE.key())));
}
try {
final Map<String, String> optionsWithType = new HashMap<>(options);
optionsWithType.put(MODULE_TYPE.key(), moduleName);
final ModuleFactory legacyFactory = TableFactoryService.find(ModuleFactory.class, optionsWithType, classLoader);
return legacyFactory.createModule(optionsWithType);
} catch (NoMatchingTableFactoryException e) {
final DefaultModuleContext discoveryContext = new DefaultModuleContext(options, configuration, classLoader);
try {
final ModuleFactory factory = discoverFactory(((ModuleFactory.Context) (discoveryContext)).getClassLoader(), ModuleFactory.class, moduleName);
final DefaultModuleContext context = new DefaultModuleContext(options, configuration, classLoader);
return factory.createModule(context);
} catch (Throwable t) {
throw new ValidationException(String.format("Unable to create module '%s'.%n%nModule options are:%n%s", moduleName, options.entrySet().stream().map(optionEntry -> stringifyOption(optionEntry.getKey(), optionEntry.getValue())).sorted().collect(Collectors.joining("\n"))), t);
}
}
} | 3.26 |
flink_FactoryUtil_validateUnconsumedKeys_rdh | /**
* Validates unconsumed option keys.
*/
public static void validateUnconsumedKeys(String factoryIdentifier, Set<String> allOptionKeys, Set<String> consumedOptionKeys) {
validateUnconsumedKeys(factoryIdentifier, allOptionKeys, consumedOptionKeys, Collections.emptySet());
} | 3.26 |
flink_FactoryUtil_validateExcept_rdh | /**
* Validates the options of the factory. It checks for unconsumed option keys while ignoring
* the options with given prefixes.
*
* <p>The option keys that have given prefix {@code prefixToSkip} would just be skipped for
* validation.
*
* @param prefixesToSkip
* Set of option key prefixes to skip validation
*/
public void validateExcept(String... prefixesToSkip) {
Preconditions.checkArgument(prefixesToSkip.length > 0, "Prefixes to skip can not be empty.");
final List<String> prefixesList = Arrays.asList(prefixesToSkip);
consumedOptionKeys.addAll(allOptions.keySet().stream().filter(key -> prefixesList.stream().anyMatch(key::startsWith)).collect(Collectors.toSet()));
m1();
} | 3.26 |
flink_FactoryUtil_getOptions_rdh | /**
* Returns all options currently being consumed by the factory. This method returns the
* options already merged with {@link DynamicTableFactory.Context#getEnrichmentOptions()},
* using {@link DynamicTableFactory#forwardOptions()} as reference of mergeable options.
*/
@Override
public ReadableConfig getOptions() {
return super.getOptions();
} | 3.26 |
flink_FactoryUtil_getFormatPrefix_rdh | /**
* Returns the required option prefix for options of the given format.
*/
public static String getFormatPrefix(ConfigOption<String> formatOption, String formatIdentifier) {
final String formatOptionKey = formatOption.key();
if (formatOptionKey.equals(FORMAT.key())) {
return formatIdentifier + ".";
} else if (formatOptionKey.endsWith(FORMAT_SUFFIX)) {
// extract the key prefix, e.g. extract 'key' from 'key.format'
String keyPrefix = formatOptionKey.substring(0, formatOptionKey.length() - FORMAT_SUFFIX.length());
return ((keyPrefix + ".") + formatIdentifier) + ".";
} else {
throw new ValidationException((("Format identifier key should be 'format' or suffix with '.format', " + "don't support format identifier key '") + formatOptionKey) + "'.");
}
} | 3.26 |
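A short worked illustration of the prefix rule implemented above. FORMAT is the option referenced in the method (key 'format'); VALUE_FORMAT is a hypothetical ConfigOption&lt;String&gt; assumed to have the key 'value.format'.
// hedged sketch of the two branches above
String p1 = FactoryUtil.getFormatPrefix(FORMAT, "json");        // "json."
// VALUE_FORMAT is an assumption: a ConfigOption<String> whose key is "value.format"
String p2 = FactoryUtil.getFormatPrefix(VALUE_FORMAT, "json");  // "value.json."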
flink_FactoryUtil_discoverDecodingFormat_rdh | /**
* Discovers a {@link DecodingFormat} of the given type using the given option as factory
* identifier.
*/
public <I, F extends DecodingFormatFactory<I>> DecodingFormat<I> discoverDecodingFormat(Class<F> formatFactoryClass, ConfigOption<String> formatOption) {
return discoverOptionalDecodingFormat(formatFactoryClass, formatOption).orElseThrow(() -> new ValidationException(String.format("Could not find required scan format '%s'.", formatOption.key())));
} | 3.26 |
flink_FactoryUtil_getDynamicTableFactory_rdh | // --------------------------------------------------------------------------------------------
// Helper methods
// --------------------------------------------------------------------------------------------
private static <T extends DynamicTableFactory> T getDynamicTableFactory(Class<T> factoryClass, @Nullable Catalog catalog, DynamicTableFactory.Context context) {
return getDynamicTableFactory(factoryClass, catalog).orElseGet(() -> discoverTableFactory(factoryClass, context));
} | 3.26 |
flink_FactoryUtil_createDynamicTableSource_rdh | /**
* Creates a {@link DynamicTableSource} from a {@link CatalogTable}.
*
* <p>If {@param preferredFactory} is passed, the table source is created from that factory.
* Otherwise, an attempt is made to discover a matching factory using Java SPI (see {@link Factory} for details).
*/
public static DynamicTableSource createDynamicTableSource(@Nullable DynamicTableSourceFactory preferredFactory, ObjectIdentifier objectIdentifier, ResolvedCatalogTable catalogTable, Map<String, String> enrichmentOptions, ReadableConfig configuration, ClassLoader classLoader, boolean isTemporary) {
final DefaultDynamicTableContext context = new DefaultDynamicTableContext(objectIdentifier, catalogTable, enrichmentOptions, configuration, classLoader, isTemporary);
try {
final DynamicTableSourceFactory factory = (preferredFactory != null) ? preferredFactory : discoverTableFactory(DynamicTableSourceFactory.class, context);
return factory.createDynamicTableSource(context);
} catch (Throwable t) {
throw new ValidationException(String.format(("Unable to create a source for reading table '%s'.\n\n" + "Table options are:\n\n") + "%s", objectIdentifier.asSummaryString(), catalogTable.getOptions().entrySet().stream().map(e -> stringifyOption(e.getKey(), e.getValue())).sorted().collect(Collectors.joining("\n"))), t);
}
}
/**
*
* @deprecated Use {@link #createDynamicTableSource(DynamicTableSourceFactory, ObjectIdentifier,
ResolvedCatalogTable, Map, ReadableConfig, ClassLoader, boolean)} | 3.26 |
flink_FactoryUtil_checkWatermarkOptions_rdh | /**
* Check watermark-related options and return error messages.
*
* @param conf
* table options
* @return Optional of error messages
*/
public static Optional<String> checkWatermarkOptions(ReadableConfig conf) {
// try to validate watermark options by parsing it
f1.forEach(option -> readOption(conf, option));
// check watermark alignment options
Optional<String> groupOptional = conf.getOptional(WATERMARK_ALIGNMENT_GROUP);
Optional<Duration> maxDriftOptional = conf.getOptional(WATERMARK_ALIGNMENT_MAX_DRIFT);
Optional<Duration> updateIntervalOptional = conf.getOptional(WATERMARK_ALIGNMENT_UPDATE_INTERVAL);
if (((groupOptional.isPresent() || maxDriftOptional.isPresent()) || updateIntervalOptional.isPresent()) && ((!groupOptional.isPresent()) || (!maxDriftOptional.isPresent()))) {
String errMsg = String.format("'%s' and '%s' must be set when configuring watermark alignment", WATERMARK_ALIGNMENT_GROUP.key(), WATERMARK_ALIGNMENT_MAX_DRIFT.key());
return Optional.of(errMsg);
}
return Optional.empty();
} | 3.26 |
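A sketch of how the alignment check above plays out, using only the option constants referenced in the method; the Configuration instance is assumed to implement ReadableConfig and the group name is illustrative.
// hedged sketch: max-drift without a group fails the check above
Configuration conf = new Configuration();
conf.set(WATERMARK_ALIGNMENT_MAX_DRIFT, Duration.ofMinutes(1));
Optional<String> err = FactoryUtil.checkWatermarkOptions(conf);  // error message present

// setting the group as well satisfies the "group and max-drift must both be set" rule
conf.setString(WATERMARK_ALIGNMENT_GROUP.key(), "alignment-group-1");
err = FactoryUtil.checkWatermarkOptions(conf);                   // Optional.empty()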
flink_FactoryUtil_createCatalogFactoryHelper_rdh | /**
* Creates a utility that helps validating options for a {@link CatalogFactory}.
*
* <p>Note: This utility checks for left-over options in the final step.
*/
public static CatalogFactoryHelper createCatalogFactoryHelper(CatalogFactory factory, CatalogFactory.Context context) {
return new CatalogFactoryHelper(factory, context);
} | 3.26 |
flink_SharingPhysicalSlotRequestBulk_clearPendingRequests_rdh | /**
* Clear the pending requests.
*
* <p>The method can be used to make the bulk fulfilled and stop the fulfillability check in
* {@link PhysicalSlotRequestBulkChecker}.
*/
void clearPendingRequests() {
pendingRequests.clear();
} | 3.26 |
flink_SharingPhysicalSlotRequestBulk_markFulfilled_rdh | /**
* Moves a pending request to fulfilled.
*
* @param group
* {@link ExecutionSlotSharingGroup} of the pending request
* @param allocationId
* {@link AllocationID} of the fulfilled request
*/
void markFulfilled(ExecutionSlotSharingGroup group, AllocationID allocationId) {
pendingRequests.remove(group);
fulfilledRequests.put(group, allocationId);
} | 3.26 |
flink_TimeEvictor_hasTimestamp_rdh | /**
* Returns true if the first element in the Iterable of {@link TimestampedValue} has a
* timestamp.
*/
private boolean hasTimestamp(Iterable<TimestampedValue<Object>> elements) {
Iterator<TimestampedValue<Object>> it = elements.iterator();
if (it.hasNext()) {
return it.next().hasTimestamp();
}
return false;
} | 3.26 |
flink_TimeEvictor_m0_rdh | /**
*
* @param elements
* The elements currently in the pane.
* @return The maximum value of timestamp among the elements.
*/
private long m0(Iterable<TimestampedValue<Object>> elements) {
long currentTime = Long.MIN_VALUE;
for (Iterator<TimestampedValue<Object>> iterator = elements.iterator(); iterator.hasNext();) {
TimestampedValue<Object> record = iterator.next();
currentTime = Math.max(currentTime, record.getTimestamp());
}
return currentTime;
} | 3.26 |
flink_TimeEvictor_of_rdh | /**
* Creates a {@code TimeEvictor} that keeps the given number of elements. Eviction is done
* before/after the window function based on the value of doEvictAfter.
*
* @param windowSize
* The amount of time for which to keep elements.
* @param doEvictAfter
* Whether eviction is done after window function.
*/
public static <W extends Window> TimeEvictor<W> of(Time windowSize, boolean doEvictAfter) {
return new TimeEvictor<>(windowSize.toMilliseconds(), doEvictAfter);
} | 3.26 |
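A usage sketch for the factory method above, assuming a keyed, windowed DataStream pipeline; the stream variable, the window size, and MyProcessWindowFunction are illustrative assumptions.
// hedged sketch: keep only the last 10 seconds of elements, evicting before the window function
stream.keyBy(value -> value.getKey())
      .window(TumblingEventTimeWindows.of(Time.seconds(30)))
      .evictor(TimeEvictor.of(Time.seconds(10), false))
      .process(new MyProcessWindowFunction());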
flink_TaskManagerLocation_getNodeId_rdh | /**
* Returns the ID of the node on which the task manager is located.
*
* @return The ID of the node on which the task manager is located.
*/
public String getNodeId() {
return nodeId;
} | 3.26 |
flink_TaskManagerLocation_getFQDNHostname_rdh | /**
* Returns the fully-qualified domain name of the TaskManager provided by {@link #hostNameSupplier}.
*
* @return The fully-qualified domain name of the TaskManager.
*/
public String getFQDNHostname() {
return hostNameSupplier.getFqdnHostName();
} | 3.26 |
flink_TaskManagerLocation_getHostName_rdh | /**
* Returns the textual representation of the TaskManager's IP address as host name.
*
* @return The textual representation of the TaskManager's IP address.
*/
@Override
public String getHostName() {
return inetAddress.getHostAddress();
} | 3.26 |
flink_TaskManagerLocation_getResourceID_rdh | // ------------------------------------------------------------------------
// Getters
// ------------------------------------------------------------------------
/**
* Gets the ID of the resource in which the TaskManager is started. The format of this depends
* on how the TaskManager is started:
*
* <ul>
* <li>If the TaskManager is started via YARN, this is the YARN container ID.
* <li>If the TaskManager is started in standalone mode, or via a MiniCluster, this is a
* random ID.
* <li>Other deployment modes can set the resource ID in other ways.
* </ul>
*
* @return The ID of the resource in which the TaskManager is started
*/
public ResourceID getResourceID() {
return resourceID;
} | 3.26 |
flink_TaskManagerLocation_getFqdnHostName_rdh | /**
* Returns the textual representation of the TaskManager's IP address as FQDN host name.
*
* @return The textual representation of the TaskManager's IP address.
*/
@Override
public String getFqdnHostName() {
return inetAddress.getHostAddress();
} | 3.26 |
flink_IntValueComparator_m0_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean m0() {
return false;
} | 3.26 |
flink_NonClosingCheckpointOutputStream_acquireLease_rdh | /**
* Returns a {@link org.apache.flink.util.ResourceGuard.Lease} that prevents closing this
* stream. To allow the system to close this stream, each of the acquired leases need to call
* {@link Lease#close()}, on their acquired leases.
*/
public final Lease acquireLease() throws IOException {
return resourceGuard.acquireResource();
} | 3.26 |
flink_NonClosingCheckpointOutputStream_getDelegate_rdh | /**
* This method should not be public so as to not expose internals to user code.
*/
CheckpointStateOutputStream getDelegate() {
return delegate;
} | 3.26 |
flink_ParquetColumnarRowInputFormat_createPartitionedFormat_rdh | /**
* Create a partitioned {@link ParquetColumnarRowInputFormat}, the partition columns can be
* generated by {@link Path}.
*/
public static <SplitT extends FileSourceSplit> ParquetColumnarRowInputFormat<SplitT> createPartitionedFormat(Configuration hadoopConfig, RowType producedRowType, TypeInformation<RowData> producedTypeInfo, List<String> partitionKeys, PartitionFieldExtractor<SplitT> extractor, int batchSize, boolean isUtcTimestamp, boolean isCaseSensitive) {
// TODO FLINK-25113 all this partition keys code should be pruned from the parquet format,
// because now FileSystemTableSource uses FileInfoExtractorBulkFormat for reading partition
// keys.
RowType projectedRowType = new RowType(producedRowType.getFields().stream().filter(field -> !partitionKeys.contains(field.getName())).collect(Collectors.toList()));
List<String> projectedNames = projectedRowType.getFieldNames();
ColumnBatchFactory<SplitT> factory = (SplitT split, ColumnVector[] parquetVectors) -> {
// create and initialize the row batch
ColumnVector[] vectors = new ColumnVector[producedRowType.getFieldCount()];
for (int i = 0; i < vectors.length; i++) {
RowType.RowField field = producedRowType.getFields().get(i);
vectors[i] = (partitionKeys.contains(field.getName())) ? createVectorFromConstant(field.getType(), extractor.extract(split, field.getName(), field.getType()), batchSize) : parquetVectors[projectedNames.indexOf(field.getName())];
}
return new VectorizedColumnBatch(vectors);
};
return new ParquetColumnarRowInputFormat<>(hadoopConfig, projectedRowType, producedTypeInfo, factory, batchSize, isUtcTimestamp, isCaseSensitive);
} | 3.26 |
flink_FsCheckpointStreamFactory_flush_rdh | /**
* Flush buffers to file if their size is above {@link #localStateThreshold}.
*/
@Override
public void flush() throws IOException {
if ((outStream != null) || (pos > localStateThreshold)) {
flushToFile();
}
} | 3.26 |
flink_FsCheckpointStreamFactory_isClosed_rdh | /**
* Checks whether the stream is closed.
*
* @return True if the stream was closed, false if it is still open.
*/
public boolean isClosed() {
return closed;
} | 3.26 |
flink_FsCheckpointStreamFactory_toString_rdh | // ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "File Stream Factory @ " + checkpointDirectory;
} | 3.26 |
flink_FsCheckpointStreamFactory_createCheckpointStateOutputStream_rdh | // ------------------------------------------------------------------------
@Override
public FsCheckpointStateOutputStream createCheckpointStateOutputStream(CheckpointedStateScope scope) throws IOException {
Path target = getTargetPath(scope);
int bufferSize = Math.max(writeBufferSize, fileStateThreshold);
// Whether the file system dynamically injects entropy into the file paths.
final boolean entropyInjecting = EntropyInjector.isEntropyInjecting(filesystem, target);
final boolean absolutePath = entropyInjecting || (scope == CheckpointedStateScope.SHARED);
return new FsCheckpointStateOutputStream(target, filesystem, bufferSize, fileStateThreshold, !absolutePath);
} | 3.26 |
flink_FsCheckpointStreamFactory_close_rdh | /**
* If the stream is only closed, we remove the produced file (cleanup through the auto close
* feature, for example). This method throws no exception if the deletion fails, but only
* logs the error.
*/
@Override
public void close() {
if (!closed) {
closed = true;
// make sure write requests need to go to 'flushToFile()' where they recognized
// that the stream is closed
pos = f1.length;
if (outStream != null) {
try {
outStream.close();
} catch (Throwable throwable) {
LOG.warn("Could not close the state stream for {}.", statePath, throwable);
} finally {
try {
fs.delete(statePath, false);
} catch (Exception e) {
LOG.warn("Cannot delete closed and discarded state stream for {}.", statePath, e);
}
}
}
}
} | 3.26 |
flink_CatalogTable_m0_rdh | /**
* Serializes this instance into a map of string-based properties.
*
* <p>Compared to the pure table options in {@link #getOptions()}, the map includes schema,
* partitioning, and other characteristics in a serialized form.
*
* @deprecated Only a {@link ResolvedCatalogTable} is serializable to properties.
*/
@Deprecated
default Map<String, String> m0() {
return Collections.emptyMap();
} | 3.26 |
flink_CatalogTable_getSnapshot_rdh | /**
* Return the snapshot specified for the table. Return Optional.empty() if not specified.
*/
default Optional<Long> getSnapshot() {
return Optional.empty();
} | 3.26 |
flink_CatalogTable_of_rdh | /**
* Creates an instance of {@link CatalogTable} with a specific snapshot.
*
* @param schema
* unresolved schema
* @param comment
* optional comment
* @param partitionKeys
* list of partition keys or an empty list if not partitioned
* @param options
* options to configure the connector
* @param snapshot
* table snapshot of the table
*/
static CatalogTable of(Schema schema, @Nullable String comment, List<String> partitionKeys, Map<String, String> options, @Nullable Long snapshot) {
return new DefaultCatalogTable(schema, comment, partitionKeys, options, snapshot);
} | 3.26 |
flink_CatalogTable_fromProperties_rdh | /**
* Creates an instance of {@link CatalogTable} from a map of string properties that were
* previously created with {@link ResolvedCatalogTable#toProperties()}.
*
* <p>Note that the serialization and deserialization of catalog tables are not symmetric. The
* framework will resolve functions and perform other validation tasks. A catalog implementation
* must not deal with this during a read operation.
*
* @param properties
* serialized version of a {@link CatalogTable} that includes schema,
* partition keys, and connector options
*/
static CatalogTable fromProperties(Map<String, String> properties) {
return CatalogPropertiesUtil.deserializeCatalogTable(properties);
} | 3.26 |
flink_TaskManagerServicesConfiguration_fromConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Parsing of Flink configuration
// --------------------------------------------------------------------------------------------
/**
* Utility method to extract TaskManager config parameters from the configuration and to sanity
* check them.
*
* @param configuration
* The configuration.
* @param resourceID
* resource ID of the task manager
* @param externalAddress
* identifying the IP address under which the TaskManager will be
* accessible
* @param localCommunicationOnly
* True if only local communication is possible. Use only in cases
* where only one task manager runs.
* @param taskExecutorResourceSpec
* resource specification of the TaskManager to start
* @param workingDirectory
* working directory of the TaskManager
* @return configuration of task manager services used to create them
*/
public static TaskManagerServicesConfiguration fromConfiguration(Configuration configuration, ResourceID resourceID, String externalAddress, boolean localCommunicationOnly, TaskExecutorResourceSpec taskExecutorResourceSpec, WorkingDirectory workingDirectory) throws Exception {
String[] localStateRootDirs = ConfigurationUtils.parseLocalStateDirectories(configuration);
final Reference<File[]> localStateDirs;
if (localStateRootDirs.length == 0) {
localStateDirs = Reference.borrowed(new File[]{ workingDirectory.getLocalStateDirectory() });
} else {
File[] createdLocalStateDirs = new File[localStateRootDirs.length];
final String localStateDirectoryName = LOCAL_STATE_SUB_DIRECTORY_ROOT + resourceID;
for (int i = 0; i < localStateRootDirs.length; i++) {
createdLocalStateDirs[i] = new File(localStateRootDirs[i], localStateDirectoryName);
}
localStateDirs = Reference.owned(createdLocalStateDirs);
}
boolean localRecoveryMode = configuration.getBoolean(CheckpointingOptions.LOCAL_RECOVERY);
final QueryableStateConfiguration queryableStateConfig = QueryableStateConfiguration.fromConfiguration(configuration);
long timerServiceShutdownTimeout = configuration.get(AkkaOptions.ASK_TIMEOUT_DURATION).toMillis();
final RetryingRegistrationConfiguration retryingRegistrationConfiguration = RetryingRegistrationConfiguration.fromConfiguration(configuration);
final int externalDataPort = configuration.getInteger(NettyShuffleEnvironmentOptions.DATA_PORT);
String bindAddr = configuration.getString(TaskManagerOptions.BIND_HOST, NetUtils.getWildcardIPAddress());
InetAddress bindAddress = InetAddress.getByName(bindAddr);
final String classLoaderResolveOrder = configuration.getString(CoreOptions.CLASSLOADER_RESOLVE_ORDER);
final String[] alwaysParentFirstLoaderPatterns = CoreOptions.getParentFirstLoaderPatterns(configuration);
final int numIoThreads = ClusterEntrypointUtils.getPoolSize(configuration);
final String[] tmpDirs = ConfigurationUtils.parseTempDirectories(configuration);
// If TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID is not set, use the external address
// as the node id.
final String nodeId = configuration.getOptional(TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID).orElse(externalAddress);
return new TaskManagerServicesConfiguration(configuration, resourceID, externalAddress, bindAddress, externalDataPort, localCommunicationOnly, tmpDirs, localStateDirs, localRecoveryMode, queryableStateConfig, ConfigurationParserUtils.getSlot(configuration), ConfigurationParserUtils.getPageSize(configuration), taskExecutorResourceSpec, timerServiceShutdownTimeout, retryingRegistrationConfiguration, ConfigurationUtils.getSystemResourceMetricsProbingInterval(configuration), FlinkUserCodeClassLoaders.ResolveOrder.fromString(classLoaderResolveOrder), alwaysParentFirstLoaderPatterns, numIoThreads, nodeId);
} | 3.26 |
flink_TaskManagerServicesConfiguration_getConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Getter/Setter
// --------------------------------------------------------------------------------------------
public Configuration getConfiguration() {
return configuration;
} | 3.26 |
flink_EnumTypeInfo_toString_rdh | // ------------------------------------------------------------------------
// Standard utils
// ------------------------------------------------------------------------
@Override
public String toString() {
return ("EnumTypeInfo<" + typeClass.getName()) + ">";
} | 3.26 |
flink_CrossDriver_setup_rdh | // ------------------------------------------------------------------------
@Override
public void setup(TaskContext<CrossFunction<T1, T2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
} | 3.26 |
flink_SourceOutputWithWatermarks_collect_rdh | // ------------------------------------------------------------------------
// SourceOutput Methods
//
// Note that the two methods below are final, as a partial enforcement
// of the performance design goal mentioned in the class-level comment.
// ------------------------------------------------------------------------
@Override
public final void collect(T record) {
collect(record, TimestampAssigner.NO_TIMESTAMP);
} | 3.26 |
flink_SourceOutputWithWatermarks_createWithSeparateOutputs_rdh | // ------------------------------------------------------------------------
// Factories
// ------------------------------------------------------------------------
/**
* Creates a new SourceOutputWithWatermarks that emits records to the given DataOutput and
* watermarks to the different WatermarkOutputs.
*/
public static <E> SourceOutputWithWatermarks<E> createWithSeparateOutputs(PushingAsyncDataInput.DataOutput<E> recordsOutput, WatermarkOutput onEventWatermarkOutput, WatermarkOutput periodicWatermarkOutput, TimestampAssigner<E> timestampAssigner, WatermarkGenerator<E> watermarkGenerator) {
return new SourceOutputWithWatermarks<>(recordsOutput, onEventWatermarkOutput, periodicWatermarkOutput, timestampAssigner, watermarkGenerator);
} | 3.26 |
flink_EndOfSuperstepEvent_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
return 41;
} | 3.26 |
flink_CheckpointConfig_setCheckpointIdOfIgnoredInFlightData_rdh | /**
* Setup the checkpoint id for which the in-flight data will be ignored for all operators in
* case of the recovery from this checkpoint.
*
* @param checkpointIdOfIgnoredInFlightData
* Checkpoint id for which in-flight data should be
* ignored.
* @see #getCheckpointIdOfIgnoredInFlightData()
*/
@PublicEvolving
public void setCheckpointIdOfIgnoredInFlightData(long checkpointIdOfIgnoredInFlightData) {
configuration.set(ExecutionCheckpointingOptions.CHECKPOINT_ID_OF_IGNORED_IN_FLIGHT_DATA, checkpointIdOfIgnoredInFlightData);
} | 3.26 |
flink_CheckpointConfig_getAlignedCheckpointTimeout_rdh | /**
*
* @return value of alignment timeout, as configured via {@link #setAlignedCheckpointTimeout(Duration)} or {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT}.
*/
@PublicEvolving
public Duration getAlignedCheckpointTimeout() {
return configuration.get(ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT);
} | 3.26 |
flink_CheckpointConfig_configure_rdh | /**
* Sets all relevant options contained in the {@link ReadableConfig} such as e.g. {@link ExecutionCheckpointingOptions#CHECKPOINTING_MODE}.
*
* <p>It will change the value of a setting only if a corresponding option was set in the {@code configuration}. If a key is not present, the current value of a field will remain untouched.
*
* @param configuration
* a configuration to read the values from
*/
public void configure(ReadableConfig configuration) {
configuration.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_MODE).ifPresent(this::setCheckpointingMode);
configuration.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL).ifPresent(i -> this.setCheckpointInterval(i.toMillis()));
configuration.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG).ifPresent(i -> this.setCheckpointIntervalDuringBacklog(i.toMillis()));
configuration.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_TIMEOUT).ifPresent(t -> this.setCheckpointTimeout(t.toMillis()));
configuration.getOptional(ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS).ifPresent(this::setMaxConcurrentCheckpoints);
configuration.getOptional(ExecutionCheckpointingOptions.MIN_PAUSE_BETWEEN_CHECKPOINTS).ifPresent(m -> this.setMinPauseBetweenCheckpoints(m.toMillis()));
configuration.getOptional(ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER).ifPresent(this::setTolerableCheckpointFailureNumber);
configuration.getOptional(ExecutionCheckpointingOptions.EXTERNALIZED_CHECKPOINT).ifPresent(this::setExternalizedCheckpointCleanup);
configuration.getOptional(ExecutionCheckpointingOptions.ENABLE_UNALIGNED).ifPresent(this::enableUnalignedCheckpoints);
configuration.getOptional(ExecutionCheckpointingOptions.CHECKPOINT_ID_OF_IGNORED_IN_FLIGHT_DATA).ifPresent(this::setCheckpointIdOfIgnoredInFlightData);
configuration.getOptional(ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT).ifPresent(this::setAlignedCheckpointTimeout);
configuration.getOptional(ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE).ifPresent(this::setMaxSubtasksPerChannelStateFile);
configuration.getOptional(ExecutionCheckpointingOptions.FORCE_UNALIGNED).ifPresent(this::setForceUnalignedCheckpoints);
configuration.getOptional(CheckpointingOptions.CHECKPOINTS_DIRECTORY).ifPresent(this::setCheckpointStorage);
}
/**
*
* @return A copy of internal {@link #configuration}. Note it is missing all options that are
stored as plain java fields in {@link CheckpointConfig}, for example {@link #storage} | 3.26 |
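A sketch of driving the configure(ReadableConfig) method shown a few lines above from a Configuration object; the env variable is assumed to be a StreamExecutionEnvironment and the chosen values are illustrative.
// hedged sketch: only the options present in the Configuration are applied, the rest stay untouched
Configuration conf = new Configuration();
conf.set(ExecutionCheckpointingOptions.CHECKPOINTING_MODE, CheckpointingMode.EXACTLY_ONCE);
conf.set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(30));
env.getCheckpointConfig().configure(conf);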
flink_CheckpointConfig_getTolerableCheckpointFailureNumber_rdh | /**
* Get the defined number of consecutive checkpoint failures that will be tolerated, before the
* whole job is failed over.
*
* <p>If the {@link ExecutionCheckpointingOptions#TOLERABLE_FAILURE_NUMBER} has not been
* configured, this method returns 0, which means the checkpoint failure manager will not
* tolerate any declined checkpoint failure.
*/
public int getTolerableCheckpointFailureNumber() {
return configuration.getOptional(ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER).orElse(0);
} | 3.26 |
flink_CheckpointConfig_setFailOnCheckpointingErrors_rdh | /**
* Sets the expected behaviour for tasks in case they encounter an error while
* checkpointing. If this is set to true, which is equivalent to setting
* tolerableCheckpointFailureNumber to zero, the job manager fails the whole job as soon as it
* receives a declined checkpoint message. If this is set to false, which is equivalent to setting
* tolerableCheckpointFailureNumber to the maximum integer value (i.e. unlimited), the job manager
* never fails the whole job, no matter how many declined checkpoints it receives.
*
* <p>{@link #setTolerableCheckpointFailureNumber(int)} would always overrule this deprecated
* method if they have conflicts.
*
* @deprecated Use {@link #setTolerableCheckpointFailureNumber(int)}.
*/
@Deprecated
public void setFailOnCheckpointingErrors(boolean failOnCheckpointingErrors) {
if (configuration.getOptional(ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER).isPresent()) {
LOG.warn("Since ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER has been configured as {}, deprecated "
        + "#setFailOnCheckpointingErrors(boolean) method would not take any effect and please use "
        + "#setTolerableCheckpointFailureNumber(int) method to "
        + "determine your expected behaviour when checkpoint errors on task side.",
        getTolerableCheckpointFailureNumber());
return;
}
if (failOnCheckpointingErrors) {
setTolerableCheckpointFailureNumber(0);
} else {
setTolerableCheckpointFailureNumber(UNLIMITED_TOLERABLE_FAILURE_NUMBER);
}
} | 3.26 |
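The equivalence spelled out in the Javadoc above can be summarized in a short, hedged fragment (env is an already created StreamExecutionEnvironment). Note that, per the guard at the top of the method, setFailOnCheckpointingErrors becomes a no-op once the tolerable failure number has been configured, so pick one style rather than mixing them.

CheckpointConfig config = env.getCheckpointConfig();

// Alternative 1: fail the job on the first declined checkpoint.
// Deprecated form:           config.setFailOnCheckpointingErrors(true);
// Equivalent preferred form:
config.setTolerableCheckpointFailureNumber(0);

// Alternative 2: never fail the job on declined checkpoints.
// Deprecated form:           config.setFailOnCheckpointingErrors(false);
// Equivalent preferred form (maximum integer value, i.e. unlimited):
// config.setTolerableCheckpointFailureNumber(Integer.MAX_VALUE);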
flink_CheckpointConfig_getMinPauseBetweenCheckpoints_rdh | /**
* Gets the minimal pause between checkpointing attempts. This setting defines how soon the
* checkpoint coordinator may trigger another checkpoint after it becomes possible to trigger
* another checkpoint with respect to the maximum number of concurrent checkpoints (see {@link #getMaxConcurrentCheckpoints()}).
*
* @return The minimal pause before the next checkpoint is triggered.
*/
public long getMinPauseBetweenCheckpoints() {
return configuration.get(ExecutionCheckpointingOptions.MIN_PAUSE_BETWEEN_CHECKPOINTS).toMillis();
} | 3.26 |
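A small fragment illustrating how the minimal pause interacts with the base interval and the concurrency limit; this is a sketch only and the numbers are illustrative.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(60_000); // base interval: trigger roughly every 60s

CheckpointConfig config = env.getCheckpointConfig();
// At most one checkpoint in flight at any time ...
config.setMaxConcurrentCheckpoints(1);
// ... and once a checkpoint completes, wait at least 30s before the next one is
// triggered, even if the 60s interval has already elapsed in the meantime.
config.setMinPauseBetweenCheckpoints(30_000);

long pause = config.getMinPauseBetweenCheckpoints(); // 30000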
flink_CheckpointConfig_setCheckpointingMode_rdh | /**
* Sets the checkpointing mode (exactly-once vs. at-least-once).
*
* @param checkpointingMode
* The checkpointing mode.
*/
public void setCheckpointingMode(CheckpointingMode checkpointingMode) {
configuration.set(ExecutionCheckpointingOptions.CHECKPOINTING_MODE, checkpointingMode);
}
/**
* Gets the interval in which checkpoints are periodically scheduled.
*
* <p>This setting defines the base interval. Checkpoint triggering may be delayed by the
* settings {@link #getMaxConcurrentCheckpoints()} and {@link #getMinPauseBetweenCheckpoints()} | 3.26 |
flink_CheckpointConfig_setForceCheckpointing_rdh | /**
* Checks whether checkpointing is forced, despite currently non-checkpointable iteration
* feedback.
*
* @param forceCheckpointing
* The flag to force checkpointing.
* @deprecated This will be removed once iterations properly participate in checkpointing.
*/
@Deprecated
@PublicEvolving
public void setForceCheckpointing(boolean forceCheckpointing) {
configuration.set(ExecutionCheckpointingOptions.FORCE_CHECKPOINTING, forceCheckpointing);
} | 3.26 |
flink_CheckpointConfig_setAlignedCheckpointTimeout_rdh | /**
* Only relevant if {@link ExecutionCheckpointingOptions#ENABLE_UNALIGNED} is enabled.
*
* <p>If {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT} has a value equal to
* <code>0</code>, checkpoints will always start unaligned.
*
* <p>If {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT} has a value greater than
* <code>0</code>, checkpoints will start aligned. If, during checkpointing, the checkpoint start
* delay exceeds this {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT},
* alignment times out and the checkpoint starts working as an unaligned checkpoint.
*/
@PublicEvolving
public void setAlignedCheckpointTimeout(Duration alignedCheckpointTimeout) {
configuration.set(ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT, alignedCheckpointTimeout);
} | 3.26 |
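The two regimes described above (a timeout of zero versus a positive timeout) look like this in a sketch; env is an existing StreamExecutionEnvironment, java.time.Duration is assumed to be imported, and the 30-second value is illustrative.

CheckpointConfig config = env.getCheckpointConfig();
// The timeout is only relevant once unaligned checkpoints are enabled.
config.enableUnalignedCheckpoints();

// Variant A: every checkpoint starts unaligned right away.
config.setAlignedCheckpointTimeout(Duration.ZERO);

// Variant B: start aligned, fall back to unaligned once the checkpoint start
// delay exceeds 30 seconds.
config.setAlignedCheckpointTimeout(Duration.ofSeconds(30));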
flink_CheckpointConfig_setAlignmentTimeout_rdh | /**
* Only relevant if {@link #isUnalignedCheckpointsEnabled} is enabled.
*
* <p>If {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT} has value equal to
* <code>0</code>, checkpoints will always start unaligned.
*
* <p>If {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT} has a value greater than
* <code>0</code>, checkpoints will start aligned. If, during checkpointing, the checkpoint start
* delay exceeds this {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT},
* alignment times out and the checkpoint starts working as an unaligned checkpoint.
*
* @deprecated Use {@link #setAlignedCheckpointTimeout(Duration)} instead.
*/
@Deprecated
@PublicEvolving
public void setAlignmentTimeout(Duration alignmentTimeout) {
setAlignedCheckpointTimeout(alignmentTimeout);
}
/**
*
* @return value of alignment timeout, as configured via {@link #setAlignmentTimeout(Duration)}
or {@link ExecutionCheckpointingOptions#ALIGNMENT_TIMEOUT} | 3.26 |
flink_CheckpointConfig_enableUnalignedCheckpoints_rdh | /**
* Enables unaligned checkpoints, which greatly reduce checkpointing times under backpressure.
*
* <p>Unaligned checkpoints contain data stored in buffers as part of the checkpoint state,
* which allows checkpoint barriers to overtake these buffers. Thus, the checkpoint duration
* becomes independent of the current throughput as checkpoint barriers are effectively not
* embedded into the stream of data anymore.
*
* <p>Unaligned checkpoints can only be enabled if {@link ExecutionCheckpointingOptions#CHECKPOINTING_MODE} is {@link CheckpointingMode#EXACTLY_ONCE}.
*/
@PublicEvolving
public void enableUnalignedCheckpoints() {
enableUnalignedCheckpoints(true);
} | 3.26 |
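Since unaligned checkpoints are only valid with exactly-once checkpointing, a minimal enabling sketch (standard StreamExecutionEnvironment and CheckpointingMode entry points assumed) is:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

// Unaligned checkpoints require CheckpointingMode.EXACTLY_ONCE.
env.enableCheckpointing(10_000, CheckpointingMode.EXACTLY_ONCE);
env.getCheckpointConfig().enableUnalignedCheckpoints();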
flink_CheckpointConfig_getCheckpointIntervalDuringBacklog_rdh | /**
* Gets the interval in which checkpoints are periodically scheduled during backlog.
*
* <p>This setting defines the base interval. Checkpoint triggering may be delayed by the
* settings {@link #getMaxConcurrentCheckpoints()} and {@link #getMinPauseBetweenCheckpoints()}.
*
* <p>If not explicitly configured, the checkpoint interval during backlog will be the same as that
* in the normal situation (see {@link #getCheckpointInterval()}). If the return value is zero, it
* means that checkpoints would be disabled during backlog.
*
* @return The checkpoint interval, in milliseconds.
*/
public long getCheckpointIntervalDuringBacklog() {
long intervalDuringBacklog = configuration.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG).map(Duration::toMillis).orElseGet(this::getCheckpointInterval);
if (intervalDuringBacklog < MINIMAL_CHECKPOINT_TIME) {
intervalDuringBacklog = CheckpointCoordinatorConfiguration.DISABLED_CHECKPOINT_INTERVAL;
}
long checkpointInterval = getCheckpointInterval();
if (checkpointInterval < MINIMAL_CHECKPOINT_TIME) {
checkpointInterval = CheckpointCoordinatorConfiguration.DISABLED_CHECKPOINT_INTERVAL;
}
if (intervalDuringBacklog < checkpointInterval) {
throw new IllegalArgumentException("Checkpoint interval during backlog must " + "be larger than or equal to that in normal situation.");
}
return intervalDuringBacklog;
} | 3.26 |
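A fragment sketching the constraint enforced by the getter above: the backlog interval must be at least as large as the normal interval, otherwise the getter throws. The values are illustrative and env is an existing StreamExecutionEnvironment.

CheckpointConfig config = env.getCheckpointConfig();

config.setCheckpointInterval(60_000);               // normal operation: every 60s
config.setCheckpointIntervalDuringBacklog(600_000); // while processing backlog: every 10min

long backlogInterval = config.getCheckpointIntervalDuringBacklog(); // 600000

// A backlog interval smaller than the normal interval is rejected by the getter:
// config.setCheckpointIntervalDuringBacklog(30_000);
// config.getCheckpointIntervalDuringBacklog(); // throws IllegalArgumentException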
flink_CheckpointConfig_setCheckpointTimeout_rdh | /**
* Sets the maximum time that a checkpoint may take before being discarded.
*
* @param checkpointTimeout
* The checkpoint timeout, in milliseconds.
*/
public void setCheckpointTimeout(long checkpointTimeout) {
if (checkpointTimeout < MINIMAL_CHECKPOINT_TIME) {
throw new IllegalArgumentException(String.format("Checkpoint timeout must be larger than or equal to %s ms", MINIMAL_CHECKPOINT_TIME));
}
configuration.set(ExecutionCheckpointingOptions.CHECKPOINTING_TIMEOUT, Duration.ofMillis(checkpointTimeout));
} | 3.26 |
flink_CheckpointConfig_getMaxSubtasksPerChannelStateFile_rdh | /**
*
* @return the number of subtasks to share the same channel state file, as configured via {@link #setMaxSubtasksPerChannelStateFile(int)} or {@link ExecutionCheckpointingOptions#UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE}.
*/
@PublicEvolving
public int getMaxSubtasksPerChannelStateFile() {
return configuration.get(ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE);
} | 3.26 |
flink_CheckpointConfig_disableCheckpointing_rdh | // ------------------------------------------------------------------------
/**
* Disables checkpointing.
*/
public void disableCheckpointing() {
configuration.removeConfig(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL);
} | 3.26 |
flink_CheckpointConfig_setForceUnalignedCheckpoints_rdh | /**
* Checks whether unaligned checkpoints are forced, despite currently non-checkpointable
* iteration feedback or custom partitioners.
*
* @param forceUnalignedCheckpoints
* The flag to force unaligned checkpoints.
*/
@PublicEvolving
public void setForceUnalignedCheckpoints(boolean forceUnalignedCheckpoints) {
configuration.set(ExecutionCheckpointingOptions.FORCE_UNALIGNED, forceUnalignedCheckpoints);
}
/**
* This determines the behaviour when checkpoint errors occur. If this returns true, which is
* equivalent to tolerableCheckpointFailureNumber being zero, the job manager fails the whole
* job as soon as it receives a declined checkpoint message. If this returns false, which is equivalent
* to tolerableCheckpointFailureNumber being the maximum integer value (i.e. unlimited), the job
* manager never fails the whole job, no matter how many declined checkpoints it receives.
*
* @deprecated Use {@link #getTolerableCheckpointFailureNumber()} | 3.26 |
flink_CheckpointConfig_isForceCheckpointing_rdh | /**
* Checks whether checkpointing is forced, despite currently non-checkpointable iteration
* feedback.
*
* @return True, if checkpointing is forced, false otherwise.
* @deprecated This will be removed once iterations properly participate in checkpointing.
*/
@Deprecated
@PublicEvolving
public boolean isForceCheckpointing() {
return configuration.get(ExecutionCheckpointingOptions.FORCE_CHECKPOINTING);
} | 3.26 |
flink_CheckpointConfig_m1_rdh | /**
* CheckpointStorage defines how {@link StateBackend}s checkpoint their state for fault
* tolerance in streaming applications. Various implementations store their checkpoints in
* different fashions and have different requirements and availability guarantees.
*
* <p>For example, {@link org.apache.flink.runtime.state.storage.JobManagerCheckpointStorage
* JobManagerCheckpointStorage} stores checkpoints in the memory of the JobManager. It is
* lightweight and without additional dependencies but is not highly available and only supports
* small state sizes. This checkpoint storage policy is convenient for local testing and
* development.
*
* <p>{@link org.apache.flink.runtime.state.storage.FileSystemCheckpointStorage
* FileSystemCheckpointStorage} stores checkpoints in a filesystem. For systems like HDFS, NFS
* Drives, S3, and GCS, this storage policy supports large state sizes, on the order of many
* terabytes, while providing a highly available foundation for stateful applications. This
* checkpoint storage policy is recommended for most production deployments.
*
* @param storage
* The checkpoint storage policy.
*/
@PublicEvolving
public void m1(CheckpointStorage storage) {
Preconditions.checkNotNull(storage, "Checkpoint storage must not be null");
this.storage = storage;
} | 3.26 |
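A hedged usage sketch for the setter above (named m1 in this snippet; it appears to correspond to CheckpointConfig#setCheckpointStorage(CheckpointStorage) in the public API, which is the name used below). The HDFS path is a placeholder, and env is an existing StreamExecutionEnvironment.

import org.apache.flink.runtime.state.storage.FileSystemCheckpointStorage;
import org.apache.flink.runtime.state.storage.JobManagerCheckpointStorage;

CheckpointConfig config = env.getCheckpointConfig();

// Lightweight, JobManager-heap based storage for local testing and small state:
config.setCheckpointStorage(new JobManagerCheckpointStorage());

// Durable, highly available filesystem storage for production deployments:
config.setCheckpointStorage(new FileSystemCheckpointStorage("hdfs:///flink/checkpoints"));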
flink_CheckpointConfig_isExternalizedCheckpointsEnabled_rdh | /**
* Returns whether checkpoints should be persisted externally.
*
* @return <code>true</code> if checkpoints should be externalized.
*/
@PublicEvolving
public boolean isExternalizedCheckpointsEnabled() {
return getExternalizedCheckpointCleanup() != ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS;
} | 3.26 |
flink_CheckpointConfig_isForceUnalignedCheckpoints_rdh | /**
* Checks whether unaligned checkpoints are forced, despite iteration feedback.
*
* @return True, if unaligned checkpoints are forced, false otherwise.
*/
@PublicEvolving
public boolean isForceUnalignedCheckpoints() {
return configuration.get(ExecutionCheckpointingOptions.FORCE_UNALIGNED);
} | 3.26 |
flink_CheckpointConfig_setCheckpointIntervalDuringBacklog_rdh | /**
* Sets the interval in which checkpoints are periodically scheduled during backlog.
*
* <p>This setting defines the base interval. Checkpoint triggering may be delayed by the
* settings {@link #setMaxConcurrentCheckpoints(int)} and {@link #setMinPauseBetweenCheckpoints(long)}.
*
* <p>If not explicitly configured, the checkpoint interval during backlog will be the same as that
* in the normal situation (see {@link #setCheckpointInterval(long)}). If configured to zero,
* checkpoints would be disabled during backlog.
*
* @param checkpointInterval
* The checkpoint interval, in milliseconds.
*/
public void setCheckpointIntervalDuringBacklog(long checkpointInterval) {
if ((checkpointInterval != 0) && (checkpointInterval < MINIMAL_CHECKPOINT_TIME)) {
throw new IllegalArgumentException(String.format("Checkpoint interval must be zero or larger than or equal to %s ms", MINIMAL_CHECKPOINT_TIME));
}
configuration.set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL_DURING_BACKLOG, Duration.ofMillis(checkpointInterval));
} | 3.26 |
flink_CheckpointConfig_enableApproximateLocalRecovery_rdh | /**
* Enables the approximate local recovery mode.
*
* <p>In this recovery mode, when a task fails, the entire downstream of the failed task (including
* the failed task itself) restarts.
*
* <p>Notice that 1. Approximate recovery may lead to data loss. The data that would move
* the failed task from the state of the last completed checkpoint to the state at the time the task
* fails is lost. 2. In the next version, we will support restarting only the set of failed
* tasks. In this version, we only support downstream restarts when a task fails. 3. It is
* only an internal feature for now.
*
* @param enabled
* Flag to indicate whether approximate local recovery is enabled.
*/
@Experimental
public void enableApproximateLocalRecovery(boolean enabled) {
configuration.set(ExecutionCheckpointingOptions.APPROXIMATE_LOCAL_RECOVERY, enabled);
} | 3.26 |
flink_CheckpointConfig_setMaxSubtasksPerChannelStateFile_rdh | /**
* The number of subtasks to share the same channel state file. If {@link ExecutionCheckpointingOptions#UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE} has value equal
* to <code>1</code>, each subtask will create a new channel state file.
*/
@PublicEvolving
public void setMaxSubtasksPerChannelStateFile(int maxSubtasksPerChannelStateFile) {
configuration.set(ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE, maxSubtasksPerChannelStateFile);
} | 3.26 |
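A short fragment for the channel-state file-sharing knob above; the value 5 is illustrative and env is an existing StreamExecutionEnvironment.

CheckpointConfig config = env.getCheckpointConfig();
config.enableUnalignedCheckpoints(); // the option only matters for unaligned checkpoints

// Let up to 5 subtasks write their channel state into one shared file;
// a value of 1 gives every subtask its own file.
config.setMaxSubtasksPerChannelStateFile(5);

int shared = config.getMaxSubtasksPerChannelStateFile(); // 5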