name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_EvictingWindowOperator_getEvictor_rdh | // ------------------------------------------------------------------------
// Getters for testing
// ------------------------------------------------------------------------
@VisibleForTesting
public Evictor<? super IN, ? super W> getEvictor() {
return evictor;
} | 3.26 |
flink_RecordMapperWrapperRecordIterator_wrapReader_rdh | /**
* Wrap a {@link BulkFormat.Reader} applying a {@link RecordMapper} on the returned iterator.
*
* @param <I>
* Input type
* @param <O>
* Mapped output type
*/
public static <I, O> BulkFormat.Reader<O> wrapReader(BulkFormat.Reader<I> wrappedReader, RecordMapper<I, O> recordMapper) {
return new BulkFormat.Reader<O>() {
@Nullable
@Override
public BulkFormat.RecordIterator<O> readBatch() throws IOException {
BulkFormat.RecordIterator<I> iterator = wrappedReader.readBatch();
if (iterator == null) {
return null;
}
return new RecordMapperWrapperRecordIterator<>(iterator, recordMapper);
}
@Override
public void close() throws IOException {
wrappedReader.close();
}
};
} | 3.26 |
flink_PipelinedResultPartition_checkResultPartitionType_rdh | // miscellaneous utils
// ------------------------------------------------------------------------
private static ResultPartitionType checkResultPartitionType(ResultPartitionType type) {
checkArgument(((type == ResultPartitionType.PIPELINED) || (type == ResultPartitionType.PIPELINED_BOUNDED)) || (type == ResultPartitionType.PIPELINED_APPROXIMATE));
return type;
} | 3.26 |
flink_SessionEnvironment_equals_rdh | // -------------------------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof SessionEnvironment)) {
return false;
}
SessionEnvironment v0 = ((SessionEnvironment) (o));
return ((((Objects.equals(sessionName, v0.sessionName) && Objects.equals(version, v0.version)) && Objects.equals(registeredCatalogCreators, v0.registeredCatalogCreators)) && Objects.equals(registeredModuleCreators, v0.registeredModuleCreators)) && Objects.equals(defaultCatalog, v0.defaultCatalog)) && Objects.equals(sessionConfig, v0.sessionConfig);
} | 3.26 |
flink_SessionEnvironment_newBuilder_rdh | // -------------------------------------------------------------------------------------------
// Builder
// -------------------------------------------------------------------------------------------
public static Builder newBuilder() {
return new Builder();
} | 3.26 |
flink_SessionEnvironment_getSessionName_rdh | // -------------------------------------------------------------------------------------------
// Getter
// -------------------------------------------------------------------------------------------
public Optional<String> getSessionName() {
return Optional.ofNullable(sessionName);
} | 3.26 |
flink_TtlIncrementalCleanup_setTtlState_rdh | /**
* As TTL state wrapper depends on this class through access callback, it has to be set here
* after its construction is done.
*/
public void setTtlState(@Nonnull AbstractTtlState<K, N, ?, S, ?> ttlState) {
this.ttlState = ttlState;
} | 3.26 |
flink_SourceReader_pauseOrResumeSplits_rdh | /**
* Pauses or resumes reading of individual source splits.
*
* <p>Note that no other methods can be called in parallel, so updating subscriptions can be
* done atomically. This method is simply providing connectors with more expressive APIs the
* opportunity to update all subscriptions at once.
*
* <p>This is currently used to align the watermarks of splits, if watermark alignment is used
* and the source reads from more than one split.
*
 * <p>The default implementation throws an {@link UnsupportedOperationException} and will be
 * removed in future releases. To remain compatible with future releases, it is recommended to
 * override this method with a proper implementation.
*
* @param splitsToPause
* the splits to pause
* @param splitsToResume
* the splits to resume
*/
@PublicEvolving
default void pauseOrResumeSplits(Collection<String> splitsToPause, Collection<String> splitsToResume) {
throw new UnsupportedOperationException(((((("This source reader does not support pausing or resuming splits which can lead to unaligned splits.\n" + "Unaligned splits are splits where the output watermarks of the splits have diverged more than the allowed limit.\n") + "It is highly discouraged to use unaligned source splits, as this leads to unpredictable\n") + "watermark alignment if there is more than a single split per reader. It is recommended to implement pausing splits\n") + "for this source. At your own risk, you can allow unaligned source splits by setting the\n") + "configuration parameter `pipeline.watermark-alignment.allow-unaligned-source-splits' to true.\n") + "Beware that this configuration parameter will be dropped in a future Flink release.");
} | 3.26 |
flink_SourceReader_notifyCheckpointComplete_rdh | /**
* We have an empty default implementation here because most source readers do not have to
* implement the method.
*
* @see CheckpointListener#notifyCheckpointComplete(long)
*/
@Override
default void notifyCheckpointComplete(long checkpointId) throws Exception {
} | 3.26 |
flink_SourceReader_handleSourceEvents_rdh | /**
* Handle a custom source event sent by the {@link SplitEnumerator}. This method is called when
* the enumerator sends an event via {@link SplitEnumeratorContext#sendEventToSourceReader(int,
* SourceEvent)}.
*
* <p>This method has a default implementation that does nothing, because most sources do not
* require any custom events.
*
* @param sourceEvent
* the event sent by the {@link SplitEnumerator}.
*/
default void handleSourceEvents(SourceEvent sourceEvent) {
} | 3.26 |
flink_HiveParserDMLHelper_isTypeConversionNeeded_rdh | // to check whether it's needed to do type conversion
private static boolean isTypeConversionNeeded(RelNode queryRelNode, List<RelDataType> targetCalcTypes) {
List<RelDataTypeField> fields = queryRelNode.getRowType().getFieldList();
Preconditions.checkState(fields.size() == targetCalcTypes.size());
for (int i = 0; i < fields.size(); i++) {
if (fields.get(i).getType().getSqlTypeName()
!= targetCalcTypes.get(i).getSqlTypeName()) {
return true;
}
}
return false;
} | 3.26 |
flink_TestingReaderOutput_getEmittedRecords_rdh | // ------------------------------------------------------------------------
public ArrayList<E> getEmittedRecords() {
return emittedRecords;
} | 3.26 |
flink_MutableByteArrayInputStream_setBuffer_rdh | /**
* Set buffer that can be read via the InputStream interface and reset the input stream. This
* has the same effect as creating a new ByteArrayInputStream with a new buffer.
*
* @param buf
* the new buffer to read.
*/
public void setBuffer(byte[] buf) {
this.buf = buf;
this.pos = 0;
this.count = buf.length;
} | 3.26 |
flink_TSetClientInfoResp_findByThriftId_rdh | /**
 * Find the _Fields constant that matches fieldId, or null if it is not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1 :
// STATUS
return STATUS;
default :
return null;
}
} | 3.26 |
flink_TSetClientInfoResp_isSet_rdh | /**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case STATUS :
return isSetStatus();
}
throw new IllegalStateException();
} | 3.26 |
flink_TSetClientInfoResp_m0_rdh | /**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields m0(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException(("Field " + fieldId) + " doesn't exist!");
return fields;
} | 3.26 |
flink_TSetClientInfoResp_findByName_rdh | /**
 * Find the _Fields constant that matches name, or null if it is not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
} | 3.26 |
flink_TSetClientInfoResp_isSetStatus_rdh | /**
* Returns true if field status is set (has been assigned a value) and false otherwise
 */
public boolean isSetStatus() {
return this.status != null;
} | 3.26 |
flink_RpcInvocation_convertRpcToString_rdh | /**
 * Converts an rpc call into its string representation.
*
* @param declaringClassName
* declaringClassName declares the specified rpc
* @param methodName
* methodName of the rpc
* @param parameterTypes
* parameterTypes of the rpc
* @return string representation of the rpc
*/
static String convertRpcToString(String declaringClassName, String methodName, Class<?>[] parameterTypes) {
final StringBuilder paramTypeStringBuilder = new StringBuilder(parameterTypes.length * 5);
if (parameterTypes.length > 0) {
paramTypeStringBuilder.append(parameterTypes[0].getSimpleName());
for (int i = 1; i < parameterTypes.length; i++) {
paramTypeStringBuilder.append(", ").append(parameterTypes[i].getSimpleName());
}
}
return ((((declaringClassName + '.') + methodName) + '(') + paramTypeStringBuilder) + ')';
} | 3.26 |
flink_CatalogManager_getCatalogOrError_rdh | /**
* Gets a catalog by name.
*
* @param catalogName
* name of the catalog to retrieve
* @return the requested catalog
* @throws CatalogNotExistException
* if the catalog does not exist
*/
public Catalog getCatalogOrError(String catalogName) throws CatalogNotExistException {
return getCatalog(catalogName).orElseThrow(() -> new CatalogNotExistException(catalogName));
} | 3.26 |
flink_CatalogManager_getBuiltInDatabaseName_rdh | /**
* Gets the built-in database name in the built-in catalog. The built-in database is used for
* storing all non-serializable transient meta-objects.
*
* @return the built-in database name
*/
public String getBuiltInDatabaseName() {
// The default database of the built-in catalog is also the built-in database.
return getCatalogOrThrowException(getBuiltInCatalogName()).getDefaultDatabase();
} | 3.26 |
flink_CatalogManager_resolveCatalogBaseTable_rdh | /**
* Resolves a {@link CatalogBaseTable} to a validated {@link ResolvedCatalogBaseTable}.
*/
public ResolvedCatalogBaseTable<?> resolveCatalogBaseTable(CatalogBaseTable baseTable) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (baseTable instanceof CatalogTable) {
return resolveCatalogTable(((CatalogTable) (baseTable)));
} else if (baseTable instanceof CatalogView) {
return m7(((CatalogView) (baseTable)));
}
throw new IllegalArgumentException("Unknown kind of catalog base table: " + baseTable.getClass());
} | 3.26 |
flink_CatalogManager_getBuiltInCatalogName_rdh | /**
* Gets the built-in catalog name. The built-in catalog is used for storing all non-serializable
* transient meta-objects.
*
* @return the built-in catalog name
*/
public String getBuiltInCatalogName() {
return builtInCatalogName;
} | 3.26 |
flink_CatalogManager_listViews_rdh | /**
 * Returns an array of names of all views (both temporary and permanent) registered in the
* namespace of the given catalog and database.
*
* @return names of registered views
*/
public Set<String> listViews(String catalogName, String databaseName) {
Catalog catalog = getCatalogOrThrowException(catalogName);
if (catalog == null) {
throw new ValidationException(String.format("Catalog %s does not exist", catalogName));
}
try {
return Stream.concat(catalog.listViews(databaseName).stream(), listTemporaryViewsInternal(catalogName, databaseName).map(e -> e.getKey().getObjectName())).collect(Collectors.toSet());
} catch (DatabaseNotExistException e) {
throw new ValidationException(String.format("Database %s does not exist", databaseName), e);
}
} | 3.26 |
flink_CatalogManager_dropView_rdh | /**
* Drops a view in a given fully qualified path.
*
* @param objectIdentifier
* The fully qualified path of the view to drop.
* @param ignoreIfNotExists
* If false exception will be thrown if the view to drop does not
* exist.
*/
public void dropView(ObjectIdentifier objectIdentifier, boolean ignoreIfNotExists) {
dropTableInternal(objectIdentifier, ignoreIfNotExists, false);
} | 3.26 |
flink_CatalogManager_m7_rdh | /**
* Resolves a {@link CatalogView} to a validated {@link ResolvedCatalogView}.
*/
public ResolvedCatalogView m7(CatalogView view) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (view instanceof ResolvedCatalogView) {
return ((ResolvedCatalogView) (view));
}
final ResolvedSchema resolvedSchema = view.getUnresolvedSchema().resolve(schemaResolver);
return new ResolvedCatalogView(view, resolvedSchema);
} | 3.26 |
flink_CatalogManager_setCurrentCatalog_rdh | /**
* Sets the current catalog name that will be used when resolving table path.
*
* @param catalogName
* catalog name to set as current catalog
* @throws CatalogNotExistException
* thrown if the catalog doesn't exist
* @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
 */
public void setCurrentCatalog(@Nullable String catalogName) throws CatalogNotExistException {
if (catalogName == null) {
this.currentCatalogName = null;
this.currentDatabaseName = null;
return;
}
checkArgument(!StringUtils.isNullOrWhitespaceOnly(catalogName), "Catalog name cannot be empty.");
Catalog potentialCurrentCatalog = getCatalog(catalogName).orElseThrow(() -> new CatalogException(format("A catalog with name [%s] does not exist.", catalogName)));
if (!catalogName.equals(currentCatalogName)) {
currentCatalogName = catalogName;
currentDatabaseName = potentialCurrentCatalog.getDefaultDatabase();
LOG.info("Set the current default catalog as [{}] and the current default database as [{}].", currentCatalogName, currentDatabaseName);
}
} | 3.26 |
flink_CatalogManager_listTemporaryTables_rdh | /**
* Returns an array of names of temporary tables registered in the namespace of the current
* catalog and database.
*
* @return names of registered temporary tables
*/
public Set<String> listTemporaryTables() {
return listTemporaryTablesInternal(getCurrentCatalog(), getCurrentDatabase()).map(e -> e.getKey().getObjectName()).collect(Collectors.toSet());
} | 3.26 |
flink_CatalogManager_schemaExists_rdh | /**
 * Checks whether there is a database with the given name in a given catalog, or whether there
 * is a temporary object registered within the given catalog and database.
 *
 * <p><b>NOTE:</b> It is primarily used for interacting with Calcite's schema.
*
* @param catalogName
* filter for the catalog part of the schema
* @param databaseName
* filter for the database part of the schema
* @return true if a subschema exists
*/
public boolean schemaExists(String catalogName, String databaseName) {
return temporaryDatabaseExists(catalogName, databaseName) || permanentDatabaseExists(catalogName, databaseName);
} | 3.26 |
flink_CatalogManager_m2_rdh | /**
* Retrieves a fully qualified table with a specific time. If the path is not yet fully
* qualified, use {@link #qualifyIdentifier(UnresolvedIdentifier)} first.
*
* @param objectIdentifier
* full path of the table to retrieve
* @param timestamp
* Timestamp of the table snapshot, which is milliseconds since 1970-01-01
* 00:00:00 UTC
* @return table at a specific time that the path points to.
*/
public Optional<ContextResolvedTable> m2(ObjectIdentifier objectIdentifier, long timestamp) {
CatalogBaseTable v13 = temporaryTables.get(objectIdentifier);
if (v13 != null) {
final ResolvedCatalogBaseTable<?> resolvedTable = resolveCatalogBaseTable(v13);
return Optional.of(ContextResolvedTable.temporary(objectIdentifier, resolvedTable));
} else {
return m3(objectIdentifier, timestamp);
}
} | 3.26 |
flink_CatalogManager_getTable_rdh | /**
* Retrieves a fully qualified table. If the path is not yet fully qualified use {@link #qualifyIdentifier(UnresolvedIdentifier)} first.
*
* @param objectIdentifier
* full path of the table to retrieve
* @return table that the path points to.
*/
public Optional<ContextResolvedTable> getTable(ObjectIdentifier objectIdentifier) {
CatalogBaseTable temporaryTable = temporaryTables.get(objectIdentifier);
if (temporaryTable != null) {
final ResolvedCatalogBaseTable<?> resolvedTable = resolveCatalogBaseTable(temporaryTable);
return Optional.of(ContextResolvedTable.temporary(objectIdentifier, resolvedTable));
} else {
return m3(objectIdentifier, null);
}
} | 3.26 |
flink_CatalogManager_listCatalogs_rdh | /**
* Retrieves the set of names of all registered catalogs, including all initialized catalogs and
* all catalogs stored in the {@link CatalogStore}.
*
* @return a set of names of registered catalogs
*/
public Set<String> listCatalogs() {
return Collections.unmodifiableSet(Stream.concat(catalogs.keySet().stream(), catalogStoreHolder.catalogStore().listCatalogs().stream()).collect(Collectors.toSet()));
} | 3.26 |
flink_CatalogManager_getPartition_rdh | /**
* Retrieves a partition with a fully qualified table path and partition spec. If the path is
 * not yet fully qualified, use {@link #qualifyIdentifier(UnresolvedIdentifier)} first.
*
* @param tableIdentifier
* full path of the table to retrieve
* @param partitionSpec
* full partition spec
* @return partition in the table.
*/
public Optional<CatalogPartition> getPartition(ObjectIdentifier tableIdentifier, CatalogPartitionSpec partitionSpec) {
Optional<Catalog> catalogOptional = getCatalog(tableIdentifier.getCatalogName());
if (catalogOptional.isPresent()) {
try {
return Optional.of(catalogOptional.get().getPartition(tableIdentifier.toObjectPath(), partitionSpec));
} catch (PartitionNotExistException ignored) {
}
}
return Optional.empty();
} | 3.26 |
flink_CatalogManager_listTemporaryViews_rdh | /**
* Returns an array of names of temporary views registered in the namespace of the current
* catalog and database.
*
* @return names of registered temporary views
*/
public Set<String> listTemporaryViews() {
return listTemporaryViewsInternal(getCurrentCatalog(), getCurrentDatabase()).map(e -> e.getKey().getObjectName()).collect(Collectors.toSet());
} | 3.26 |
flink_CatalogManager_getTableOrError_rdh | /**
* Like {@link #getTable(ObjectIdentifier)}, but throws an error when the table is not available
* in any of the catalogs.
*/
public ContextResolvedTable getTableOrError(ObjectIdentifier objectIdentifier) {
return getTable(objectIdentifier).orElseThrow(() -> new TableException(String.format("Cannot find table '%s' in any of the catalogs %s, nor as a temporary table.", objectIdentifier, listCatalogs())));
} | 3.26 |
flink_CatalogManager_getCatalogBaseTable_rdh | /**
* Retrieves a fully qualified table. If the path is not yet fully qualified use {@link #qualifyIdentifier(UnresolvedIdentifier)} first.
*
* @param objectIdentifier
* full path of the table to retrieve
* @return resolved table that the path points to or empty if it does not exist.
*/
@Override
public Optional<ResolvedCatalogBaseTable<?>> getCatalogBaseTable(ObjectIdentifier objectIdentifier) {
ContextResolvedTable resolvedTable = getTable(objectIdentifier).orElse(null);
return resolvedTable == null ? Optional.empty() : Optional.of(resolvedTable.getResolvedTable());
} | 3.26 |
flink_CatalogManager_resolveCatalogTable_rdh | /**
* Resolves a {@link CatalogTable} to a validated {@link ResolvedCatalogTable}.
*/
public ResolvedCatalogTable resolveCatalogTable(CatalogTable table) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (table instanceof ResolvedCatalogTable) {
return ((ResolvedCatalogTable) (table));
}
final ResolvedSchema resolvedSchema = table.getUnresolvedSchema().resolve(schemaResolver);
// Validate partition keys are included in physical columns
final List<String> v48 = resolvedSchema.getColumns().stream().filter(Column::isPhysical).map(Column::getName).collect(Collectors.toList());
table.getPartitionKeys().forEach(partitionKey -> {
if (!v48.contains(partitionKey)) {
throw new ValidationException(String.format(("Invalid partition key '%s'. A partition key must " + "reference a physical column in the schema. ") + "Available columns are: %s", partitionKey, v48));
}
});
return new ResolvedCatalogTable(table, resolvedSchema);
} | 3.26 |
flink_CatalogManager_getSchemaResolver_rdh | /**
* Returns a {@link SchemaResolver} for creating {@link ResolvedSchema} from {@link Schema}.
*/
public SchemaResolver getSchemaResolver() {
return schemaResolver;
} | 3.26 |
flink_CatalogManager_createCatalog_rdh | /**
* Creates a catalog under the given name. The catalog name must be unique.
*
* @param catalogName
* the given catalog name under which to create the given catalog
* @param catalogDescriptor
* catalog descriptor for creating catalog
* @throws CatalogException
* If the catalog already exists in the catalog store or initialized
* catalogs, or if an error occurs while creating the catalog or storing the {@link CatalogDescriptor}
*/
public void createCatalog(String catalogName, CatalogDescriptor catalogDescriptor) throws CatalogException {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(catalogName), "Catalog name cannot be null or empty.");
checkNotNull(catalogDescriptor, "Catalog descriptor cannot be null");
if (catalogStoreHolder.catalogStore().contains(catalogName))
{
throw new CatalogException(format("Catalog %s already exists in catalog store.", catalogName));
}
if (catalogs.containsKey(catalogName)) {
throw new CatalogException(format("Catalog %s already exists in initialized catalogs.", catalogName));
}
Catalog catalog = initCatalog(catalogName, catalogDescriptor);
catalog.open();
catalogs.put(catalogName, catalog);
catalogStoreHolder.catalogStore().storeCatalog(catalogName, catalogDescriptor);
} | 3.26 |
flink_CatalogManager_resolveCompactManagedTableOptions_rdh | /**
 * Resolve dynamic options for a compact operation on a Flink managed table.
*
* @param origin
* The resolved managed table with enriched options.
* @param tableIdentifier
* The fully qualified path of the managed table.
* @param partitionSpec
* User-specified unresolved partition spec.
* @return dynamic options which describe the metadata of compaction
*/
public Map<String, String> resolveCompactManagedTableOptions(ResolvedCatalogTable origin, ObjectIdentifier tableIdentifier, CatalogPartitionSpec partitionSpec) {
return managedTableListener.notifyTableCompaction(catalogs.getOrDefault(tableIdentifier.getCatalogName(), null), tableIdentifier,
origin, partitionSpec, false);
} | 3.26 |
flink_CatalogManager_dropTemporaryView_rdh | /**
* Drop a temporary view in a given fully qualified path.
*
* @param objectIdentifier
* The fully qualified path of the view to drop.
* @param ignoreIfNotExists
* If false exception will be thrown if the view to be dropped does not
* exist.
*/
public void dropTemporaryView(ObjectIdentifier objectIdentifier, boolean ignoreIfNotExists) {
m5(objectIdentifier, table -> table instanceof CatalogView, ignoreIfNotExists, false);
} | 3.26 |
flink_CatalogManager_getDataTypeFactory_rdh | /**
* Returns a factory for creating fully resolved data types that can be used for planning.
*/
public DataTypeFactory getDataTypeFactory() {
return typeFactory;
} | 3.26 |
flink_CatalogManager_close_rdh | /**
* Closes the catalog manager and releases its resources.
*
* <p>This method closes all initialized catalogs and the catalog store.
*
* @throws CatalogException
* if an error occurs while closing the catalogs or the catalog store
*/
public void close() throws CatalogException {
// close the initialized catalogs
List<Throwable> errors = new ArrayList<>();
for (Map.Entry<String, Catalog> entry : catalogs.entrySet()) {
String catalogName = entry.getKey();
Catalog catalog = entry.getValue();
try {
catalog.close();
} catch (Throwable e) {
LOG.error(String.format("Failed to close catalog %s: %s", catalogName, e.getMessage()), e);
errors.add(e);
}
}
// close the catalog store holder
try {
catalogStoreHolder.close();
} catch (Throwable e) {
errors.add(e);
LOG.error(String.format("Failed to close catalog store holder: %s", e.getMessage()), e);
}
if (!errors.isEmpty()) {
CatalogException exception = new CatalogException("Failed to close catalog manager");
for (Throwable e : errors) {
exception.addSuppressed(e);
}
throw exception;
}
} | 3.26 |
flink_CatalogManager_alterTable_rdh | /**
* Alters a table in a given fully qualified path with table changes.
*
* @param table
* The table to put in the given path
* @param changes
* The table changes from the original table to the new table.
* @param objectIdentifier
* The fully qualified path where to alter the table.
* @param ignoreIfNotExists
* If false exception will be thrown if the table or database or
* catalog to be altered does not exist.
*/
public void alterTable(CatalogBaseTable table, List<TableChange> changes, ObjectIdentifier objectIdentifier,
boolean ignoreIfNotExists) {
execute((catalog, path) -> {
final CatalogBaseTable resolvedTable = resolveCatalogBaseTable(table);
catalog.alterTable(path, resolvedTable, changes, ignoreIfNotExists);
if (resolvedTable instanceof CatalogTable) {
catalogModificationListeners.forEach(listener -> listener.onEvent(AlterTableEvent.createEvent(CatalogContext.createContext(objectIdentifier.getCatalogName(), catalog), objectIdentifier, resolvedTable, ignoreIfNotExists)));
}
}, objectIdentifier, ignoreIfNotExists, "AlterTable");
} | 3.26 |
flink_CatalogManager_createDatabase_rdh | /**
* Create a database.
*
* @param catalogName
* Name of the catalog for database
* @param databaseName
* Name of the database to be created
* @param database
* The database definition
* @param ignoreIfExists
* Flag to specify behavior when a database with the given name already
* exists: if set to false, throw a DatabaseAlreadyExistException, if set to true, do
* nothing.
* @throws DatabaseAlreadyExistException
* if the given database already exists and ignoreIfExists
* is false
* @throws CatalogException
* in case of any runtime exception
*/
public void createDatabase(String catalogName, String databaseName, CatalogDatabase database, boolean ignoreIfExists) throws DatabaseAlreadyExistException, CatalogException {
Catalog catalog = getCatalogOrThrowException(catalogName);
catalog.createDatabase(databaseName, database, ignoreIfExists);
catalogModificationListeners.forEach(listener -> listener.onEvent(CreateDatabaseEvent.createEvent(CatalogContext.createContext(catalogName, catalog), databaseName, database, ignoreIfExists)));
} | 3.26 |
flink_CatalogManager_getCatalog_rdh | /**
* Gets a {@link Catalog} instance by name.
*
* <p>If the catalog has already been initialized, the initialized instance will be returned
* directly. Otherwise, the {@link CatalogDescriptor} will be obtained from the {@link CatalogStore}, and the catalog instance will be initialized.
*
* @param catalogName
* name of the catalog to retrieve
* @return the requested catalog or empty if it does not exist
*/
public Optional<Catalog> getCatalog(String catalogName) {
// Get catalog from the initialized catalogs.
if (catalogs.containsKey(catalogName)) {
return Optional.of(catalogs.get(catalogName));
}
// Get catalog from the CatalogStore.
Optional<CatalogDescriptor> optionalDescriptor = catalogStoreHolder.catalogStore().getCatalog(catalogName);
return optionalDescriptor.map(descriptor -> {
Catalog catalog = initCatalog(catalogName, descriptor);
catalog.open();
catalogs.put(catalogName, catalog);
return catalog;
});
} | 3.26 |
flink_CatalogManager_listSchemas_rdh | /**
 * Lists all available schemas in the given catalog. It is not equivalent to listing databases
 * within the given catalog, as it also includes the database parts of temporary object
 * identifiers.
*
 * <p><b>NOTE:</b> It is primarily used for interacting with Calcite's schema.
*
* @param catalogName
* filter for the catalog part of the schema
* @return list of schemas with the given prefix
 */
public Set<String> listSchemas(String catalogName) {
return Stream.concat(getCatalog(catalogName).map(Catalog::listDatabases).orElse(Collections.emptyList()).stream(), temporaryTables.keySet().stream().filter(i -> i.getCatalogName().equals(catalogName)).map(ObjectIdentifier::getDatabaseName)).collect(Collectors.toSet());
} | 3.26 |
flink_CatalogManager_dropTemporaryTable_rdh | /**
* Drop a temporary table in a given fully qualified path.
*
* @param objectIdentifier
* The fully qualified path of the table to drop.
* @param ignoreIfNotExists
* If false exception will be thrown if the table to be dropped does
* not exist.
*/
public void dropTemporaryTable(ObjectIdentifier objectIdentifier, boolean ignoreIfNotExists) {
m5(objectIdentifier, table -> table instanceof CatalogTable, ignoreIfNotExists, true);
} | 3.26 |
flink_CatalogManager_alterDatabase_rdh | /**
* Modify an existing database.
*
* @param catalogName
* Name of the catalog for database
* @param databaseName
 * Name of the database to be modified
* @param newDatabase
* The new database definition
* @param ignoreIfNotExists
* Flag to specify behavior when the given database does not exist: if
* set to false, throw an exception, if set to true, do nothing.
* @throws DatabaseNotExistException
* if the given database does not exist
* @throws CatalogException
* in case of any runtime exception
*/
public void alterDatabase(String catalogName, String databaseName, CatalogDatabase newDatabase, boolean ignoreIfNotExists) throws DatabaseNotExistException, CatalogException {
Catalog catalog = getCatalogOrError(catalogName);
catalog.alterDatabase(databaseName, newDatabase, ignoreIfNotExists);
catalogModificationListeners.forEach(listener -> listener.onEvent(AlterDatabaseEvent.createEvent(CatalogContext.createContext(catalogName, catalog), databaseName, newDatabase, ignoreIfNotExists)));
} | 3.26 |
flink_CatalogManager_getCurrentCatalog_rdh | /**
* Gets the current catalog that will be used when resolving table path.
*
* @return the current catalog
* @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
*/
public String getCurrentCatalog() {
return currentCatalogName;
} | 3.26 |
flink_CatalogManager_dropTable_rdh | /**
* Drops a table in a given fully qualified path.
*
* @param objectIdentifier
* The fully qualified path of the table to drop.
* @param ignoreIfNotExists
* If false exception will be thrown if the table to drop does not
* exist.
*/
public void dropTable(ObjectIdentifier objectIdentifier, boolean ignoreIfNotExists) {
dropTableInternal(objectIdentifier, ignoreIfNotExists, true);
} | 3.26 |
flink_CatalogManager_dropDatabase_rdh | /**
* Drop a database.
*
* @param catalogName
* Name of the catalog for database.
* @param databaseName
* Name of the database to be dropped.
* @param ignoreIfNotExists
* Flag to specify behavior when the database does not exist: if set to
* false, throw an exception, if set to true, do nothing.
* @param cascade
* Flag to specify behavior when the database contains table or function: if set
* to true, delete all tables and functions in the database and then delete the database, if
* set to false, throw an exception.
* @throws DatabaseNotExistException
* if the given database does not exist
* @throws DatabaseNotEmptyException
 * if the given database is not empty and cascade is false
* @throws CatalogException
* in case of any runtime exception
*/
public void dropDatabase(String catalogName, String databaseName, boolean ignoreIfNotExists, boolean cascade) throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException {
if (Objects.equals(currentCatalogName, catalogName) && Objects.equals(currentDatabaseName, databaseName)) {
throw new ValidationException("Cannot drop a database which is currently in use.");
}
Catalog catalog = getCatalogOrError(catalogName);
catalog.dropDatabase(databaseName, ignoreIfNotExists, cascade);
catalogModificationListeners.forEach(listener -> listener.onEvent(DropDatabaseEvent.createEvent(CatalogContext.createContext(catalogName, catalog), databaseName, ignoreIfNotExists, cascade)));
} | 3.26 |
flink_CatalogManager_listTables_rdh | /**
* Returns an array of names of all tables (tables and views, both temporary and permanent)
* registered in the namespace of the given catalog and database.
*
* @return names of all registered tables
*/
public Set<String> listTables(String catalogName, String databaseName) {
Catalog catalog = getCatalogOrThrowException(catalogName);
if (catalog == null) {
throw new ValidationException(String.format("Catalog %s does not exist", catalogName));
}
try {
return Stream.concat(catalog.listTables(databaseName).stream(), listTemporaryTablesInternal(catalogName, databaseName).map(e ->
e.getKey().getObjectName())).collect(Collectors.toSet());
} catch (DatabaseNotExistException e) {
throw new ValidationException(String.format("Database %s does not exist", databaseName), e);
}
} | 3.26 |
flink_CatalogManager_getCurrentDatabase_rdh | /**
* Gets the current database name that will be used when resolving table path.
*
* @return the current database
* @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
*/
public String getCurrentDatabase() {
return currentDatabaseName;
} | 3.26 |
flink_CatalogManager_m1_rdh | /**
* Initializes a {@link SchemaResolver} for {@link Schema} resolution.
*
* <p>Currently, the resolver cannot be passed in the constructor because of a chicken-and-egg
* problem between {@link Planner} and {@link CatalogManager}.
*
* @see TableEnvironmentImpl#create(EnvironmentSettings)
*/
public void m1(boolean isStreamingMode, ExpressionResolverBuilder expressionResolverBuilder) {
this.schemaResolver = new DefaultSchemaResolver(isStreamingMode, typeFactory, expressionResolverBuilder);
} | 3.26 |
flink_CatalogManager_setCurrentDatabase_rdh | /**
* Sets the current database name that will be used when resolving a table path. The database
* has to exist in the current catalog.
*
* @param databaseName
* database name to set as current database name
* @throws CatalogException
* thrown if the database doesn't exist in the current catalog
* @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
* @see CatalogManager#setCurrentCatalog(String)
*/
public void setCurrentDatabase(@Nullable String databaseName) {
if (databaseName == null) {
this.currentDatabaseName = null;
return;
}
checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName), "The database name cannot be empty.");
if (currentCatalogName == null) {
throw new CatalogException("Current catalog has not been set.");
}
if (!getCatalogOrThrowException(currentCatalogName).databaseExists(databaseName)) {
throw new CatalogException(format("A database with name [%s] does not exist in the catalog: [%s].", databaseName, currentCatalogName));
}
if (!databaseName.equals(currentDatabaseName)) {
currentDatabaseName = databaseName;
LOG.info("Set the current default database as [{}] in the current default catalog [{}].", currentDatabaseName, currentCatalogName);
}
} | 3.26 |
flink_CatalogManager_unregisterCatalog_rdh | /**
 * Unregisters a catalog under the given name. The catalog must already exist.
*
* <p>If the catalog is in the initialized catalogs, it will be removed from the initialized
* catalogs. If the catalog is stored in the {@link CatalogStore}, it will be removed from the
* CatalogStore.
*
* @param catalogName
* name under which to unregister the given catalog.
* @param ignoreIfNotExists
 * If false an exception will be thrown if the catalog to be
 * unregistered does not exist.
* @throws CatalogException
* If the catalog does not exist in the initialized catalogs and not in
* the {@link CatalogStore}, or if the remove operation failed.
*/
public void unregisterCatalog(String catalogName, boolean ignoreIfNotExists) {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(catalogName), "Catalog name cannot be null or empty.");
if (catalogs.containsKey(catalogName) || catalogStoreHolder.catalogStore().contains(catalogName))
{
if (catalogName.equals(currentCatalogName)) {
throw new CatalogException("Cannot drop a catalog which is currently in use.");
}
if (catalogs.containsKey(catalogName)) {
Catalog catalog = catalogs.remove(catalogName);
catalog.close();
}
if (catalogStoreHolder.catalogStore().contains(catalogName)) {
catalogStoreHolder.catalogStore().removeCatalog(catalogName, ignoreIfNotExists);
}
} else if (!ignoreIfNotExists) {
throw new CatalogException(format("Catalog %s does not exist.", catalogName));
}
} | 3.26 |
flink_CatalogManager_qualifyIdentifier_rdh | /**
* Returns the full name of the given table path, this name may be padded with current
* catalog/database name based on the {@code identifier's} length.
*
* @param identifier
* an unresolved identifier
* @return a fully qualified object identifier
*/
public ObjectIdentifier qualifyIdentifier(UnresolvedIdentifier identifier) {
return ObjectIdentifier.of(identifier.getCatalogName().orElseGet(() -> {
final String currentCatalog = getCurrentCatalog();
if (StringUtils.isNullOrWhitespaceOnly(currentCatalog)) {
throw new ValidationException(((("A current catalog has not been set. Please use a" + " fully qualified identifier (such as") + " 'my_catalog.my_database.my_table') or") + " set a current catalog using") + " 'USE CATALOG my_catalog'.");
}
return currentCatalog;
}), identifier.getDatabaseName().orElseGet(() -> {
final String currentDatabase = getCurrentDatabase();
if (StringUtils.isNullOrWhitespaceOnly(currentDatabase)) {
throw new ValidationException((((("A current database has not been set. Please use a" + " fully qualified identifier (such as") + " 'my_database.my_table' or") + " 'my_catalog.my_database.my_table') or") + " set a current database using") + " 'USE my_database'.");
}
return currentDatabase;
}), identifier.getObjectName());
} | 3.26 |
flink_FallbackKey_createFallbackKey_rdh | // -------------------------
// Factory methods
// -------------------------
static FallbackKey createFallbackKey(String key) {
return new FallbackKey(key, false);
} | 3.26 |
flink_FallbackKey_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if ((o != null) && (o.getClass() == FallbackKey.class)) {
FallbackKey that = ((FallbackKey) (o));
return this.key.equals(that.key) && (this.isDeprecated == that.isDeprecated);
} else {
return false;
}
} | 3.26 |
flink_ThroughputCalculator_calculateThroughput_rdh | /**
*
* @return Calculated throughput based on the collected data for the last period.
*/
public long calculateThroughput() {
if (measurementStartTime != NOT_TRACKED) {
long absoluteTimeMillis = clock.absoluteTimeMillis();
currentMeasurementTime += absoluteTimeMillis - measurementStartTime;
measurementStartTime = absoluteTimeMillis;
}
long throughput = calculateThroughput(currentAccumulatedDataSize, currentMeasurementTime);
currentAccumulatedDataSize = currentMeasurementTime = 0;
return throughput;
} | 3.26 |
flink_ThroughputCalculator_resumeMeasurement_rdh | /**
 * Mark when the time should be included in the throughput calculation.
*/
public void resumeMeasurement() {
if (measurementStartTime == NOT_TRACKED) {
measurementStartTime = clock.absoluteTimeMillis();
}
} | 3.26 |
flink_ThroughputCalculator_m0_rdh | /**
* Mark when the time should not be taken into account.
*/
public void m0() {
if (measurementStartTime != NOT_TRACKED) {
currentMeasurementTime += clock.absoluteTimeMillis() - measurementStartTime;
}
measurementStartTime = NOT_TRACKED;
} | 3.26 |
flink_ModuleFactory_createModule_rdh | /**
* Creates and configures a {@link Module}.
*/
default Module createModule(Context context) {
throw new ModuleException("Module factories must implement createModule(Context).");
} | 3.26 |
flink_ModuleFactory_requiredContext_rdh | /**
*
* @deprecated Implement the {@link Factory} based stack instead.
*/
@Deprecated
default Map<String, String> requiredContext() {
// Default implementation for modules implementing the new {@link Factory} stack instead.
return null;
} | 3.26 |
flink_ModuleFactory_supportedProperties_rdh | /**
*
* @deprecated Implement the {@link Factory} based stack instead.
*/
@Deprecated
default List<String> supportedProperties() {
// Default implementation for modules implementing the new {@link Factory} stack instead.
return null;
} | 3.26 |
flink_BulkIterationNode_acceptForStepFunction_rdh | // --------------------------------------------------------------------------------------------
// Iteration Specific Traversals
// --------------------------------------------------------------------------------------------
public void acceptForStepFunction(Visitor<OptimizerNode> visitor) {
this.singleRoot.accept(visitor);
} | 3.26 |
flink_BulkIterationNode_setNextPartialSolution_rdh | /**
* Sets the nextPartialSolution for this BulkIterationNode.
*
* @param nextPartialSolution
* The nextPartialSolution to set.
*/
public void setNextPartialSolution(OptimizerNode nextPartialSolution, OptimizerNode terminationCriterion) {
// check if the root of the step function has the same parallelism as the iteration
// or if the step function has any operator at all
if (((nextPartialSolution.getParallelism() != getParallelism()) || (nextPartialSolution == partialSolution)) || (nextPartialSolution instanceof BinaryUnionNode)) {
// add a no-op to the root to express the re-partitioning
NoOpNode noop = new NoOpNode();
noop.setParallelism(getParallelism());
DagConnection noOpConn = new DagConnection(nextPartialSolution, noop, ExecutionMode.PIPELINED);
noop.setIncomingConnection(noOpConn);
nextPartialSolution.addOutgoingConnection(noOpConn);
nextPartialSolution = noop;
}
this.nextPartialSolution = nextPartialSolution;
this.terminationCriterion = terminationCriterion;
if (terminationCriterion == null) {
this.singleRoot = nextPartialSolution;
this.rootConnection = new DagConnection(nextPartialSolution, ExecutionMode.PIPELINED);
} else {
// we have a termination criterion
SingleRootJoiner singleRootJoiner = new SingleRootJoiner();
this.rootConnection = new DagConnection(nextPartialSolution, singleRootJoiner, ExecutionMode.PIPELINED);
this.f0 = new DagConnection(terminationCriterion, singleRootJoiner, ExecutionMode.PIPELINED);
singleRootJoiner.setInputs(this.rootConnection, this.f0);
this.singleRoot = singleRootJoiner;
// add connection to terminationCriterion for interesting properties visitor
terminationCriterion.addOutgoingConnection(f0);
}
nextPartialSolution.addOutgoingConnection(rootConnection);
} | 3.26 |
flink_BulkIterationNode_getOperatorName_rdh | // --------------------------------------------------------------------------------------------
@Override
public String getOperatorName() {
return "Bulk Iteration";
} | 3.26 |
flink_BulkIterationNode_getIterationContract_rdh | // --------------------------------------------------------------------------------------------
public BulkIterationBase<?> getIterationContract() {
return ((BulkIterationBase<?>) (getOperator()));
} | 3.26 |
flink_BulkIterationNode_setPartialSolution_rdh | /**
* Sets the partialSolution for this BulkIterationNode.
*
* @param partialSolution
* The partialSolution to set.
*/
public void setPartialSolution(BulkPartialSolutionNode partialSolution)
{
this.partialSolution = partialSolution;
} | 3.26 |
flink_BulkIterationNode_getPossibleProperties_rdh | // --------------------------------------------------------------------------------------------
// Properties and Optimization
// --------------------------------------------------------------------------------------------
protected List<OperatorDescriptorSingle> getPossibleProperties() {
return Collections.<OperatorDescriptorSingle>singletonList(new NoOpDescriptor());
} | 3.26 |
flink_JobExecutionResult_getAccumulatorResult_rdh | /**
* Gets the accumulator with the given name. Returns {@code null}, if no accumulator with that
* name was produced.
*
* @param accumulatorName
* The name of the accumulator.
* @param <T>
* The generic type of the accumulator value.
* @return The value of the accumulator with the given name.
*/
@SuppressWarnings("unchecked")
public <T> T getAccumulatorResult(String accumulatorName) {
OptionalFailure<Object> result = this.accumulatorResults.get(accumulatorName);
if (result != null) {
return ((T) (result.getUnchecked()));
} else {
return null;
}
} | 3.26 |
flink_JobExecutionResult_getIntCounterResult_rdh | /**
* Gets the accumulator with the given name as an integer.
*
* @param accumulatorName
* Name of the counter
* @return Result of the counter, or null if the counter does not exist
* @throws java.lang.ClassCastException
* Thrown, if the accumulator was not aggregating a {@link java.lang.Integer}
* @deprecated Will be removed in future versions. Use {@link #getAccumulatorResult} instead.
*/
@Deprecated
@PublicEvolving
public Integer getIntCounterResult(String accumulatorName) {
Object result = this.accumulatorResults.get(accumulatorName).getUnchecked();
if (result == null) {
return null;
}
if (!(result instanceof Integer)) {
throw new ClassCastException((("Requested result of the accumulator '" + accumulatorName) + "' should be Integer but has type ") + result.getClass());
}
return ((Integer) (result));
} | 3.26 |
flink_JobExecutionResult_fromJobSubmissionResult_rdh | /**
* Returns a dummy object for wrapping a JobSubmissionResult.
*
* @param result
* The SubmissionResult
* @return a JobExecutionResult
* @deprecated Will be removed in future versions.
*/
@Deprecated
public static JobExecutionResult fromJobSubmissionResult(JobSubmissionResult result) {
return new JobExecutionResult(result.getJobID(), -1, null);
} | 3.26 |
flink_JobExecutionResult_m0_rdh | /**
* Gets all accumulators produced by the job. The map contains the accumulators as mappings from
* the accumulator name to the accumulator value.
*
* @return A map containing all accumulators produced by the job.
*/
public Map<String, Object> m0() {
return accumulatorResults.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getUnchecked()));
} | 3.26 |
flink_JobExecutionResult_getNetRuntime_rdh | /**
* Gets the net execution time of the job, i.e., the execution time in the parallel system,
* without the pre-flight steps like the optimizer in a desired time unit.
*
* @param desiredUnit
* the unit of the <tt>NetRuntime</tt>
* @return The net execution time in the desired unit.
*/
public long getNetRuntime(TimeUnit desiredUnit) {
return desiredUnit.convert(getNetRuntime(), TimeUnit.MILLISECONDS);
} | 3.26 |
flink_LogicalTypeChecks_hasWellDefinedString_rdh | /**
* Checks whether the given {@link LogicalType} has a well-defined string representation when
* calling {@link Object#toString()} on the internal data structure. The string representation
* would be similar in SQL or in a programming language.
*
* <p>Note: This method might not be necessary anymore, once we have implemented a utility that
* can convert any internal data structure to a well-defined string representation.
*/
public static boolean hasWellDefinedString(LogicalType logicalType) {
if (logicalType instanceof DistinctType) {
return hasWellDefinedString(((DistinctType) (logicalType)).getSourceType());
}
switch (logicalType.getTypeRoot()) {
case CHAR :
case VARCHAR :
case BOOLEAN :
case TINYINT :
case SMALLINT :
case INTEGER :
case BIGINT :
case FLOAT :
case DOUBLE :
return true;
default :
return false;
}
} | 3.26 |
flink_LogicalTypeChecks_getPrecision_rdh | /**
* Returns the precision of all types that define a precision implicitly or explicitly.
*/
public static int getPrecision(LogicalType logicalType) {
return logicalType.accept(PRECISION_EXTRACTOR);
} | 3.26 |
flink_LogicalTypeChecks_getFieldCount_rdh | /**
* Returns the field count of row and structured types. Other types return 1.
*/
public static int getFieldCount(LogicalType logicalType) {
return logicalType.accept(FIELD_COUNT_EXTRACTOR);
} | 3.26 |
flink_LogicalTypeChecks_getFieldTypes_rdh | /**
* Returns the field types of row and structured types.
*/
public static List<LogicalType> getFieldTypes(LogicalType logicalType) {
if (logicalType instanceof DistinctType) {
return getFieldTypes(((DistinctType) (logicalType)).getSourceType());
}
return logicalType.getChildren();
} | 3.26 |
flink_LogicalTypeChecks_hasScale_rdh | /**
* Checks the scale of all types that define a scale implicitly or explicitly.
*/
public static boolean hasScale(LogicalType logicalType, int scale) {
return getScale(logicalType) == scale;
} | 3.26 |
flink_LogicalTypeChecks_hasLegacyTypes_rdh | /**
* Checks whether a (possibly nested) logical type contains {@link LegacyTypeInformationType} or
* {@link TypeInformationRawType}.
*/
public static boolean hasLegacyTypes(LogicalType logicalType) {
return hasNested(logicalType, t -> t instanceof LegacyTypeInformationType);
} | 3.26 |
flink_LogicalTypeChecks_isCompositeType_rdh | /**
* Checks if the given type is a composite type.
*
* <p>Use {@link #getFieldCount(LogicalType)}, {@link #getFieldNames(LogicalType)}, {@link #getFieldTypes(LogicalType)} for unified handling of composite types.
*
* @param logicalType
* Logical data type to check
* @return True if the type is composite type.
*/
public static boolean isCompositeType(LogicalType logicalType) {
if (logicalType instanceof DistinctType) {
return isCompositeType(((DistinctType) (logicalType)).getSourceType());
}
LogicalTypeRoot typeRoot = logicalType.getTypeRoot();
return (typeRoot == STRUCTURED_TYPE) || (typeRoot == ROW);
} | 3.26 |
flink_LogicalTypeChecks_m1_rdh | /**
* Returns the field names of row and structured types.
*/
public static List<String> m1(LogicalType logicalType) {
return logicalType.accept(FIELD_NAMES_EXTRACTOR);
} | 3.26 |
flink_LogicalTypeChecks_hasNested_rdh | /**
* Checks whether a (possibly nested) logical type fulfills the given predicate.
*/
public static boolean hasNested(LogicalType logicalType, Predicate<LogicalType> predicate) {
final NestedTypeSearcher typeSearcher = new NestedTypeSearcher(predicate);
return logicalType.accept(typeSearcher).isPresent();
} | 3.26 |
flink_LogicalTypeChecks_hasPrecision_rdh | /**
* Checks the precision of a type that defines a precision implicitly or explicitly.
*/
public static boolean hasPrecision(LogicalType logicalType, int precision) {
return getPrecision(logicalType) == precision;
} | 3.26 |
flink_LogicalTypeChecks_getScale_rdh | /**
* Returns the scale of all types that define a scale implicitly or explicitly.
*/
public static int getScale(LogicalType logicalType) {
return logicalType.accept(SCALE_EXTRACTOR);
} | 3.26 |
flink_TestingSourceSettings_getCheckpointingMode_rdh | /**
* Checkpointing mode required for the source.
*/
public CheckpointingMode getCheckpointingMode() {
return checkpointingMode;
} | 3.26 |
flink_TestingSourceSettings_getBoundedness_rdh | /**
* Boundedness of the source.
*/
public Boundedness getBoundedness() {
return boundedness;
} | 3.26 |
flink_Tuple2_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1> Tuple2<T0, T1> of(T0 f0, T1 f1) {
return new Tuple2<>(f0, f1);
} | 3.26 |
flink_Tuple2_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
*/
public void setFields(T0 f0, T1 f1) {
this.f0 = f0;
this.f1 = f1;
} | 3.26 |
flink_Tuple2_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
 */
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple2)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple2 tuple = ((Tuple2) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple2_toString_rdh | // -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1), where the individual
* fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString()
{
return ((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ")";
} | 3.26 |
flink_StreamingJobGraphGenerator_setChaining_rdh | /**
* Sets up task chains from the source {@link StreamNode} instances.
*
* <p>This will recursively create all {@link JobVertex} instances.
*/
private void setChaining(Map<Integer, byte[]> hashes, List<Map<Integer, byte[]>> legacyHashes) {
// we separate out the sources that run as inputs to another operator (chained inputs)
// from the sources that need to run as the main (head) operator.
final Map<Integer, OperatorChainInfo> chainEntryPoints = buildChainedInputsAndGetHeadInputs(hashes, legacyHashes);
final Collection<OperatorChainInfo> initialEntryPoints = chainEntryPoints.entrySet().stream().sorted(Comparator.comparing(Map.Entry::getKey)).map(Map.Entry::getValue).collect(Collectors.toList());
// iterate over a copy of the values, because this map gets concurrently modified
for (OperatorChainInfo info : initialEntryPoints) {
// operators start at position 1 because 0 is for chained source inputs
createChain(info.getStartNodeId(), 1,
info, chainEntryPoints);
}
} | 3.26 |
flink_StreamingJobGraphGenerator_setVertexParallelismsForDynamicGraphIfNecessary_rdh | /**
* This method is used to reset or set job vertices' parallelism for dynamic graph:
*
* <p>1. Reset parallelism for job vertices whose parallelism is not configured.
*
* <p>2. Set parallelism and maxParallelism for job vertices in forward group, to ensure the
* parallelism and maxParallelism of vertices in the same forward group to be the same; set the
* parallelism at early stage if possible, to avoid invalid partition reuse.
 */
private void setVertexParallelismsForDynamicGraphIfNecessary() {
// Note that the jobVertices are reverse topological order
final List<JobVertex> topologicalOrderVertices = IterableUtils.toStream(jobVertices.values()).collect(Collectors.toList());
Collections.reverse(topologicalOrderVertices);
// reset parallelism for job vertices whose parallelism is not configured
jobVertices.forEach((startNodeId, jobVertex) -> {
final OperatorChainInfo chainInfo = chainInfos.get(startNodeId);
if ((!jobVertex.isParallelismConfigured()) && streamGraph.isAutoParallelismEnabled()) {
jobVertex.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT);
chainInfo.getAllChainedNodes().forEach(n -> n.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT, false));
}});
final Map<JobVertex, Set<JobVertex>> forwardProducersByJobVertex = new HashMap<>();
jobVertices.forEach((startNodeId, jobVertex) -> {
Set<JobVertex> forwardConsumers = chainInfos.get(startNodeId).getTransitiveOutEdges().stream().filter(edge -> edge.getPartitioner() instanceof ForwardPartitioner).map(StreamEdge::getTargetId).map(jobVertices::get).collect(Collectors.toSet());
for (JobVertex forwardConsumer : forwardConsumers) {
forwardProducersByJobVertex.compute(forwardConsumer, (ignored, producers) -> {
if (producers == null) {
producers = new HashSet<>();
}
producers.add(jobVertex);
return producers;
});
}
});
// compute forward groups
final Map<JobVertexID, ForwardGroup> v71 = ForwardGroupComputeUtil.computeForwardGroups(topologicalOrderVertices, jobVertex -> forwardProducersByJobVertex.getOrDefault(jobVertex, Collections.emptySet()));
jobVertices.forEach((startNodeId, jobVertex) -> {
ForwardGroup forwardGroup = v71.get(jobVertex.getID());
// set parallelism for vertices in forward group
if ((forwardGroup != null) && forwardGroup.isParallelismDecided()) {
jobVertex.setParallelism(forwardGroup.getParallelism());
jobVertex.setParallelismConfigured(true);
chainInfos.get(startNodeId).getAllChainedNodes().forEach(streamNode -> streamNode.setParallelism(forwardGroup.getParallelism(), true));
}
// set max parallelism for vertices in forward group
if ((forwardGroup != null) && forwardGroup.isMaxParallelismDecided()) {
jobVertex.setMaxParallelism(forwardGroup.getMaxParallelism());
chainInfos.get(startNodeId).getAllChainedNodes().forEach(streamNode -> streamNode.setMaxParallelism(forwardGroup.getMaxParallelism()));
}
});
} | 3.26 |
flink_StreamingJobGraphGenerator_m5_rdh | /**
* Maps a vertex to its region slot sharing group. If {@link StreamGraph#isAllVerticesInSameSlotSharingGroupByDefault()} returns true, all regions will be
* in the same slot sharing group.
 */
private Map<JobVertexID, SlotSharingGroup> m5() {
final Map<JobVertexID, SlotSharingGroup> vertexRegionSlotSharingGroups = new HashMap<>();
final SlotSharingGroup defaultSlotSharingGroup = new SlotSharingGroup();
streamGraph.getSlotSharingGroupResource(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP).ifPresent(defaultSlotSharingGroup::setResourceProfile);
final boolean allRegionsInSameSlotSharingGroup = streamGraph.isAllVerticesInSameSlotSharingGroupByDefault();
final Iterable<DefaultLogicalPipelinedRegion> regions = DefaultLogicalTopology.fromJobGraph(jobGraph).getAllPipelinedRegions();
for (DefaultLogicalPipelinedRegion region : regions) {
final SlotSharingGroup regionSlotSharingGroup;
if (allRegionsInSameSlotSharingGroup) {
regionSlotSharingGroup = defaultSlotSharingGroup;
} else {
regionSlotSharingGroup = new SlotSharingGroup();
streamGraph.getSlotSharingGroupResource(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP).ifPresent(regionSlotSharingGroup::setResourceProfile);
}
for (LogicalVertex vertex : region.getVertices()) {
vertexRegionSlotSharingGroups.put(vertex.getId(), regionSlotSharingGroup);
}}
return vertexRegionSlotSharingGroups;
} | 3.26 |
flink_StreamingJobGraphGenerator_getHeadOperator_rdh | /**
* Backtraces the head of an operator chain.
*/
private static StreamOperatorFactory<?> getHeadOperator(StreamNode upStreamVertex, StreamGraph streamGraph) {
if ((upStreamVertex.getInEdges().size() == 1) && isChainable(upStreamVertex.getInEdges().get(0), streamGraph)) {
return getHeadOperator(streamGraph.getSourceVertex(upStreamVertex.getInEdges().get(0)), streamGraph);
}
return upStreamVertex.getOperatorFactory();
} | 3.26 |
flink_SortBuffer_append_rdh | /**
* No partial record will be written to this {@link SortBasedDataBuffer}, which means that
* either all data of target record will be written or nothing will be written.
*/
@Override
public boolean append(ByteBuffer source, int targetChannel, Buffer.DataType dataType) throws IOException {
checkArgument(source.hasRemaining(), "Cannot append empty data.");
checkState(!isFinished, "Sort buffer is already finished.");
checkState(!isReleased, "Sort buffer is already released.");
int totalBytes = source.remaining();
// return true directly if it can not allocate enough buffers for the given record
if (!allocateBuffersForRecord(totalBytes)) {
return true;
}
// write the index entry and record or event data
writeIndex(targetChannel, totalBytes, dataType);
writeRecord(source);
++numTotalRecords;
numTotalBytes += totalBytes;
return false;
} | 3.26 |
flink_TemporaryClassLoaderContext_of_rdh | /**
* Sets the context class loader to the given ClassLoader and returns a resource that sets it
* back to the current context ClassLoader when the resource is closed.
*
* <pre>{@code try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(classloader)) {
* // code that needs the context class loader
* }}</pre>
*/
public static TemporaryClassLoaderContext of(ClassLoader cl) {
final Thread t = Thread.currentThread();
final ClassLoader original = t.getContextClassLoader();
t.setContextClassLoader(cl);
return new TemporaryClassLoaderContext(t, original);
} | 3.26 |
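The final snippet's Javadoc documents a try-with-resources usage pattern for TemporaryClassLoaderContext. The short sketch below is illustrative only and not part of the dataset: the class name TemporaryClassLoaderContextExample, the plugin class loader stand-in, and the printed line are assumptions; only TemporaryClassLoaderContext.of and its restore-on-close behavior come from the snippet above.

```java
import org.apache.flink.util.TemporaryClassLoaderContext;

public class TemporaryClassLoaderContextExample {
    public static void main(String[] args) {
        // Stand-in for a plugin/user class loader; in real code this would come from elsewhere.
        ClassLoader pluginClassLoader = TemporaryClassLoaderContextExample.class.getClassLoader();

        // of(...) swaps the thread's context class loader and returns a resource
        // that restores the previous one when the try block exits.
        try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(pluginClassLoader)) {
            // Code here observes pluginClassLoader as the thread's context class loader.
            System.out.println(Thread.currentThread().getContextClassLoader());
        }
        // The original context class loader has been restored at this point.
    }
}
```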