name | code_snippet | score
---|---|---|
flink_OptimizerNode_getMinimalMemoryAcrossAllSubTasks_rdh | /**
* Gets the amount of memory that all subtasks of this task have jointly available.
*
* @return The total amount of memory across all subtasks.
*/
public long getMinimalMemoryAcrossAllSubTasks() {
return this.minimalMemoryPerSubTask == (-1) ? -1 : this.minimalMemoryPerSubTask * this.parallelism;
} | 3.26 |
flink_OptimizerNode_addOutgoingConnection_rdh | /**
* Adds a new outgoing connection to this node.
*
* @param connection
* The connection to add.
*/
public void addOutgoingConnection(DagConnection connection) {
if (this.outgoingConnections == null) {
this.outgoingConnections = new ArrayList<DagConnection>();
} else if (this.outgoingConnections.size() == 64) {
throw new CompilerException("Cannot currently handle nodes with more than 64 outputs.");
}
this.outgoingConnections.add(connection);
} | 3.26 |
flink_OptimizerNode_m3_rdh | /**
* The node IDs are assigned in graph-traversal order (pre-order), hence, each list is sorted by
* ID in ascending order and all consecutive lists start with IDs in ascending order.
*
* @param markJoinedBranchesAsPipelineBreaking
 * True, if the branches re-joined at this node should be marked as breaking the pipeline.
*/
protected final boolean m3(List<UnclosedBranchDescriptor> child1open, List<UnclosedBranchDescriptor> child2open, List<UnclosedBranchDescriptor> result, boolean markJoinedBranchesAsPipelineBreaking) {
// remove branches which have already been closed
removeClosedBranches(child1open);
removeClosedBranches(child2open);
result.clear();
// check how many open branches we have. the cases:
// 1) if both are null or empty, the result is null
// 2) if one side is null (or empty), the result is the other side.
// 3) both are set, then we need to merge.
if ((child1open == null) || child1open.isEmpty()) {
if ((child2open != null) && (!child2open.isEmpty())) {
result.addAll(child2open);
}
return false;
}
if ((child2open == null) || child2open.isEmpty()) {
result.addAll(child1open);
return false;
}
int index1 = child1open.size() - 1;
int index2 = child2open.size() - 1;
boolean didCloseABranch = false;
// as both lists (child1open and child2open) are sorted in ascending ID order
// we can do a merge-join-like loop which preserves the order in the result list
// and eliminates duplicates
while ((index1 >= 0) || (index2 >= 0)) {
int id1 = -1;
int id2 = (index2 >= 0) ? child2open.get(index2).getBranchingNode().getId() : -1;
while ((index1 >= 0) && ((id1 = child1open.get(index1).getBranchingNode().getId()) > id2)) {
result.add(child1open.get(index1));
index1--;
}
while ((index2 >= 0) && ((id2 = child2open.get(index2).getBranchingNode().getId()) > id1)) {
result.add(child2open.get(index2));
index2--;
}
// match: they share a common branching child
if (id1 == id2) {
didCloseABranch = true;
// if this is the latest common child, remember it
OptimizerNode currBanchingNode = child1open.get(index1).getBranchingNode();
long v76 = child1open.get(index1).getJoinedPathsVector();
long vector2 = child2open.get(index2).getJoinedPathsVector();
// check if this is the same descriptor, (meaning that it contains the same paths)
// if it is the same, add it only once, otherwise process the join of the paths
if (v76 == vector2) {
result.add(child1open.get(index1));
} else {
// we merge (re-join) a branch
// mark the branch as a point where we break the pipeline
if (markJoinedBranchesAsPipelineBreaking) {
currBanchingNode.m1();
}
if (this.hereJoinedBranches == null) {
this.hereJoinedBranches = new ArrayList<OptimizerNode>(2);
}
this.hereJoinedBranches.add(currBanchingNode);
// see, if this node closes the branch
long joinedInputs = v76 | vector2;
// this is 2^size - 1, which is all bits set at positions 0..size-1
long allInputs = (0x1L << currBanchingNode.getOutgoingConnections().size()) - 1;
if (joinedInputs == allInputs) {
// closed - we can remove it from the stack
addClosedBranch(currBanchingNode);
} else {
// not quite closed
result.add(new UnclosedBranchDescriptor(currBanchingNode, joinedInputs));
}
}
index1--;
index2--;
}
}
// merged. now we need to reverse the list, because we added the elements in reverse order
Collections.reverse(result);
return didCloseABranch;
} | 3.26 |
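The loop above is essentially a merge-join over two branch lists sorted by node ID, walked from the back so that branches visible from both children are emitted only once. A self-contained sketch of that pattern on plain integer IDs (hypothetical names, non-negative IDs assumed, not the Flink API):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortedIdMerge {
    /**
     * Merges two ascending lists of non-negative IDs into one ascending list without
     * duplicates, walking both lists from the back, mirroring the merge-join loop above.
     */
    static List<Integer> mergeAscendingWithoutDuplicates(List<Integer> a, List<Integer> b) {
        List<Integer> result = new ArrayList<>();
        int i = a.size() - 1;
        int j = b.size() - 1;
        while (i >= 0 || j >= 0) {
            int idA = i >= 0 ? a.get(i) : -1;
            int idB = j >= 0 ? b.get(j) : -1;
            if (idA > idB) {
                result.add(idA);
                i--;
            } else if (idB > idA) {
                result.add(idB);
                j--;
            } else { // equal: emit once, advance both sides
                result.add(idA);
                i--;
                j--;
            }
        }
        Collections.reverse(result); // elements were appended in descending order
        return result;
    }

    public static void main(String[] args) {
        System.out.println(mergeAscendingWithoutDuplicates(List.of(1, 3, 5), List.of(2, 3, 6)));
        // prints [1, 2, 3, 5, 6]
    }
}
```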
flink_OptimizerNode_getPredecessors_rdh | // ------------------------------------------------------------------------
// Getters / Setters
// ------------------------------------------------------------------------
@Override
public Iterable<OptimizerNode> getPredecessors() {
List<OptimizerNode> allPredecessors = new ArrayList<OptimizerNode>();
for (DagConnection dagConnection : getIncomingConnections()) {
allPredecessors.add(dagConnection.getSource());
}
for (DagConnection conn : getBroadcastConnections()) {
allPredecessors.add(conn.getSource());
}
return allPredecessors;
} | 3.26 |
flink_OptimizerNode_addBroadcastConnection_rdh | /**
* Adds the broadcast connection identified by the given {@code name} to this node.
*
* @param broadcastConnection
* The connection to add.
*/
public void addBroadcastConnection(String name, DagConnection broadcastConnection) {
this.broadcastConnectionNames.add(name);
this.broadcastConnections.add(broadcastConnection);
} | 3.26 |
flink_OptimizerNode_getBroadcastConnections_rdh | /**
* Return the list of inputs associated with broadcast variables for this node.
*/
public List<DagConnection> getBroadcastConnections() {
return this.broadcastConnections;
} | 3.26 |
flink_OptimizerNode_isBranching_rdh | /**
* Checks whether this node has branching output. A node's output is branched, if it has more
* than one output connection.
*
* @return True, if the node's output branches. False otherwise.
*/
public boolean isBranching() {
return (getOutgoingConnections() != null) && (getOutgoingConnections().size() > 1);
} | 3.26 |
flink_SqlResourceType_symbol_rdh | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.26 |
flink_Catalog_getTable_rdh | /**
* Returns a {@link CatalogTable} or {@link CatalogView} at a specific time identified by the
* given {@link ObjectPath}. The framework will resolve the metadata objects when necessary.
*
* @param tablePath
* Path of the table or view
* @param timestamp
* Timestamp of the table snapshot, which is milliseconds since 1970-01-01
* 00:00:00 UTC
* @return The requested table or view
* @throws TableNotExistException
* if the target does not exist
* @throws CatalogException
* in case of any runtime exception
*/
default CatalogBaseTable getTable(ObjectPath tablePath, long timestamp) throws TableNotExistException, CatalogException {
throw new UnsupportedOperationException(String.format("getTable(ObjectPath, long) is not implemented for %s.", this.getClass()));
} | 3.26 |
flink_Catalog_bulkGetPartitionColumnStatistics_rdh | /**
* Get a list of column statistics for given partitions.
*
* @param tablePath
* path of the table
* @param partitionSpecs
* partition specs of partitions that will be used to filter out all other
* unrelated statistics, i.e. the statistics fetch will be limited within the given
* partitions
* @return list of column statistics for given partitions
* @throws PartitionNotExistException
* if one partition does not exist
* @throws CatalogException
* in case of any runtime exception
*/
default List<CatalogColumnStatistics> bulkGetPartitionColumnStatistics(ObjectPath tablePath, List<CatalogPartitionSpec> partitionSpecs) throws PartitionNotExistException, CatalogException {
checkNotNull(partitionSpecs, "partitionSpecs cannot be null");
List<CatalogColumnStatistics> result = new ArrayList<>(partitionSpecs.size());
for (CatalogPartitionSpec partitionSpec : partitionSpecs) {
result.add(m4(tablePath, partitionSpec));
}
return result;
} | 3.26 |
flink_Catalog_bulkGetPartitionStatistics_rdh | /**
* Get a list of statistics of given partitions.
*
* @param tablePath
* path of the table
* @param partitionSpecs
* partition specs of partitions that will be used to filter out all other
* unrelated statistics, i.e. the statistics fetch will be limited within the given
* partitions
* @return list of statistics of given partitions
* @throws PartitionNotExistException
* if one partition does not exist
* @throws CatalogException
* in case of any runtime exception
*/
default List<CatalogTableStatistics> bulkGetPartitionStatistics(ObjectPath tablePath, List<CatalogPartitionSpec> partitionSpecs) throws PartitionNotExistException, CatalogException {
checkNotNull(partitionSpecs, "partitionSpecs cannot be null");
List<CatalogTableStatistics> result = new ArrayList<>(partitionSpecs.size());
for (CatalogPartitionSpec partitionSpec : partitionSpecs) {
result.add(getPartitionStatistics(tablePath, partitionSpec));
}
return result;
} | 3.26 |
flink_Catalog_dropDatabase_rdh | /**
* Drop a database.
*
* @param name
* Name of the database to be dropped.
* @param ignoreIfNotExists
* Flag to specify behavior when the database does not exist: if set to
* false, throw an exception, if set to true, do nothing.
* @throws DatabaseNotExistException
* if the given database does not exist
* @throws CatalogException
* in case of any runtime exception
*/
default void dropDatabase(String name, boolean ignoreIfNotExists) throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException {
dropDatabase(name, ignoreIfNotExists, false);
} | 3.26 |
flink_Catalog_m0_rdh | /**
* Returns a factory for creating instances from catalog objects.
*
* <p>This method enables bypassing the discovery process. Implementers can directly pass
* internal catalog-specific objects to their own factory. For example, a custom {@link CatalogTable} can be processed by a custom {@link DynamicTableFactory}.
*
* <p>Because all factories are interfaces, the returned {@link Factory} instance can implement
* multiple supported extension points. An {@code instanceof} check is performed by the caller
* that checks whether a required factory is implemented; otherwise the discovery process is
* used.
*/
default Optional<Factory> m0() {
return Optional.empty();
}
/**
* Get an optional {@link TableFactory} instance that's responsible for generating table-related
* instances stored in this catalog, instances such as source/sink.
*
* @return an optional TableFactory instance
* @deprecated Use {@link #getFactory()} | 3.26 |
flink_Catalog_m2_rdh | /**
* Modifies an existing table or view. Note that the new and old {@link CatalogBaseTable} must
* be of the same kind. For example, this doesn't allow altering a regular table to partitioned
* table, or altering a view to a table, and vice versa.
*
* <p>The framework will make sure to call this method with fully validated {@link ResolvedCatalogTable} or {@link ResolvedCatalogView}. Those instances are easy to serialize
* for a durable catalog implementation.
*
* @param tablePath
* path of the table or view to be modified
* @param newTable
* the new table definition
* @param tableChanges
* change to describe the modification between the newTable and the original
* table.
* @param ignoreIfNotExists
* flag to specify behavior when the table or view does not exist: if
* set to false, throw an exception, if set to true, do nothing.
* @throws TableNotExistException
* if the table does not exist
* @throws CatalogException
* in case of any runtime exception
*/
default void m2(ObjectPath tablePath, CatalogBaseTable newTable, List<TableChange> tableChanges, boolean ignoreIfNotExists) throws TableNotExistException, CatalogException {
alterTable(tablePath, newTable, ignoreIfNotExists);
} | 3.26 |
flink_Catalog_listProcedures_rdh | /**
 * List the names of all procedures in the given database. An empty list is returned if the
 * database contains no procedures.
*
* @param dbName
* name of the database.
* @return a list of the names of the procedures in this database
* @throws DatabaseNotExistException
* if the database does not exist
* @throws CatalogException
* in case of any runtime exception
*/
default List<String> listProcedures(String dbName) throws DatabaseNotExistException, CatalogException {
throw new UnsupportedOperationException(String.format("listProcedures is not implemented for %s.", this.getClass()));
} | 3.26 |
flink_Catalog_getProcedure_rdh | /**
* Get the procedure. The procedure name should be handled in a case-insensitive way.
*
* @param procedurePath
* path of the procedure
* @return the requested procedure
* @throws ProcedureNotExistException
* if the procedure does not exist in the catalog
* @throws CatalogException
* in case of any runtime exception
*/
default Procedure getProcedure(ObjectPath procedurePath) throws ProcedureNotExistException, CatalogException {
throw new UnsupportedOperationException(String.format("getProcedure is not implemented for %s.", this.getClass()));
} | 3.26 |
flink_ByteParser_parseField_rdh | /**
* Static utility to parse a field of type byte from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws NumberFormatException
 * Thrown when the value cannot be parsed because the text
 * does not represent a valid number.
*/
public static final byte parseField(byte[] bytes, int startPos, int length, char delimiter) {
long val = 0;
boolean neg = false;
if (bytes[startPos] == delimiter) {
throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] == '-') {
neg = true;
startPos++;
length--;
if ((length == 0) || (bytes[startPos] == delimiter)) {
throw new NumberFormatException("Orphaned minus sign."); }
}
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
return ((byte) (neg ? -val : val));
}
if ((bytes[startPos] < 48) || (bytes[startPos] > 57)) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
if ((val > Byte.MAX_VALUE) && ((!neg) || (val > (-Byte.MIN_VALUE)))) {
throw new NumberFormatException("Value overflow/underflow");
}
}
return ((byte) (neg ? -val : val));
} | 3.26 |
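A possible usage of the parser above, assuming the enclosing class is `ByteParser` in Flink's usual `org.apache.flink.types.parser` package (the input literal is purely illustrative):

```java
import java.nio.charset.StandardCharsets;
import org.apache.flink.types.parser.ByteParser;

public class ByteParserDemo {
    public static void main(String[] args) {
        byte[] line = "-42|next".getBytes(StandardCharsets.US_ASCII);
        // Parse the first field, which is terminated by the '|' delimiter.
        byte value = ByteParser.parseField(line, 0, line.length, '|');
        System.out.println(value); // -42; a NumberFormatException is thrown for empty
                                   // fields, non-digit characters, or overflow/underflow
    }
}
```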
flink_AcknowledgeCheckpoint_getSubtaskState_rdh | // properties
// ------------------------------------------------------------------------
public TaskStateSnapshot getSubtaskState() {
return subtaskState;
} | 3.26 |
flink_AcknowledgeCheckpoint_equals_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof AcknowledgeCheckpoint)) {
return false;
}
if (!super.equals(o)) {
return false;
}
AcknowledgeCheckpoint that = ((AcknowledgeCheckpoint) (o));
return subtaskState != null ? subtaskState.equals(that.subtaskState) : that.subtaskState == null;
} | 3.26 |
flink_ZooKeeperUtils_generateZookeeperPath_rdh | /**
* Creates a ZooKeeper path of the form "/a/b/.../z".
*/
public static String generateZookeeperPath(String... paths) {
return Arrays.stream(paths).map(ZooKeeperUtils::trimSlashes).filter(s -> !s.isEmpty()).collect(Collectors.joining("/", "/", ""));
} | 3.26 |
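A possible usage of `generateZookeeperPath` above, assuming `ZooKeeperUtils` lives in its usual `org.apache.flink.runtime.util` package; the path segments are illustrative. Per the stream pipeline shown, slashes are trimmed from each part, empty parts are dropped, and the result starts with a single '/':

```java
import org.apache.flink.runtime.util.ZooKeeperUtils;

public class ZkPathDemo {
    public static void main(String[] args) {
        // Leading/trailing slashes are trimmed per part, empty parts are dropped,
        // and the joined result always starts with a single '/'.
        System.out.println(ZooKeeperUtils.generateZookeeperPath("flink", "/cluster_one/", "", "leader"));
        // prints /flink/cluster_one/leader
    }
}
```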
flink_ZooKeeperUtils_createFileSystemStateStorage_rdh | /**
* Creates a {@link FileSystemStateStorageHelper} instance.
*
* @param configuration
* {@link Configuration} object
* @param prefix
* Prefix for the created files
* @param <T>
* Type of the state objects
* @return {@link FileSystemStateStorageHelper} instance
* @throws IOException
* if file system state storage cannot be created
*/
public static <T extends Serializable> FileSystemStateStorageHelper<T> createFileSystemStateStorage(Configuration configuration, String prefix) throws IOException {
return new FileSystemStateStorageHelper<>(HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(configuration), prefix);
} | 3.26 |
flink_ZooKeeperUtils_createZooKeeperStateHandleStore_rdh | /**
* Creates an instance of {@link ZooKeeperStateHandleStore}.
*
* @param client
* ZK client
* @param path
* Path to use for the client namespace
* @param stateStorage
* RetrievableStateStorageHelper that persist the actual state and whose
* returned state handle is then written to ZooKeeper
* @param <T>
* Type of state
* @return {@link ZooKeeperStateHandleStore} instance
* @throws Exception
* ZK errors
*/
public static <T extends Serializable> ZooKeeperStateHandleStore<T> createZooKeeperStateHandleStore(final CuratorFramework client, final String path, final RetrievableStateStorageHelper<T> stateStorage) throws Exception {
return new ZooKeeperStateHandleStore<>(useNamespaceAndEnsurePath(client, path), stateStorage);
} | 3.26 |
flink_ZooKeeperUtils_createCompletedCheckpoints_rdh | /**
* Creates a {@link DefaultCompletedCheckpointStore} instance with {@link ZooKeeperStateHandleStore}.
*
* @param client
* The {@link CuratorFramework} ZooKeeper client to use
* @param configuration
* {@link Configuration} object
* @param maxNumberOfCheckpointsToRetain
* The maximum number of checkpoints to retain
* @param executor
* to run ZooKeeper callbacks
* @param restoreMode
* the mode in which the job is being restored
* @return {@link DefaultCompletedCheckpointStore} instance
* @throws Exception
* if the completed checkpoint store cannot be created
*/
public static CompletedCheckpointStore createCompletedCheckpoints(CuratorFramework client, Configuration configuration, int maxNumberOfCheckpointsToRetain, SharedStateRegistryFactory sharedStateRegistryFactory, Executor ioExecutor, Executor executor, RestoreMode restoreMode) throws Exception {
checkNotNull(configuration, "Configuration");
RetrievableStateStorageHelper<CompletedCheckpoint> stateStorage = createFileSystemStateStorage(configuration, HA_STORAGE_COMPLETED_CHECKPOINT);
final ZooKeeperStateHandleStore<CompletedCheckpoint> completedCheckpointStateHandleStore = createZooKeeperStateHandleStore(client, getCheckpointsPath(), stateStorage);
Collection<CompletedCheckpoint> completedCheckpoints = DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints(completedCheckpointStateHandleStore, ZooKeeperCheckpointStoreUtil.INSTANCE);
final CompletedCheckpointStore zooKeeperCompletedCheckpointStore = new DefaultCompletedCheckpointStore<>(maxNumberOfCheckpointsToRetain, completedCheckpointStateHandleStore, ZooKeeperCheckpointStoreUtil.INSTANCE, completedCheckpoints, sharedStateRegistryFactory.create(ioExecutor, completedCheckpoints, restoreMode), executor);
LOG.info("Initialized {} in '{}' with {}.", DefaultCompletedCheckpointStore.class.getSimpleName(), completedCheckpointStateHandleStore, getCheckpointsPath());
return zooKeeperCompletedCheckpointStore;
} | 3.26 |
flink_ZooKeeperUtils_isZooKeeperRecoveryMode_rdh | /**
* Returns whether {@link HighAvailabilityMode#ZOOKEEPER} is configured.
*/
public static boolean isZooKeeperRecoveryMode(Configuration flinkConf) {
return HighAvailabilityMode.fromConfig(flinkConf).equals(HighAvailabilityMode.ZOOKEEPER);
} | 3.26 |
flink_ZooKeeperUtils_createJobGraphs_rdh | /**
* Creates a {@link DefaultJobGraphStore} instance with {@link ZooKeeperStateHandleStore},
* {@link ZooKeeperJobGraphStoreWatcher} and {@link ZooKeeperJobGraphStoreUtil}.
*
* @param client
* The {@link CuratorFramework} ZooKeeper client to use
* @param configuration
* {@link Configuration} object
* @return {@link DefaultJobGraphStore} instance
* @throws Exception
* if the submitted job graph store cannot be created
*/
public static JobGraphStore createJobGraphs(CuratorFramework client, Configuration configuration) throws Exception {
checkNotNull(configuration, "Configuration");
RetrievableStateStorageHelper<JobGraph> stateStorage = createFileSystemStateStorage(configuration, HA_STORAGE_SUBMITTED_JOBGRAPH_PREFIX);
// ZooKeeper submitted jobs root dir
String zooKeeperJobsPath = configuration.getString(HighAvailabilityOptions.HA_ZOOKEEPER_JOBGRAPHS_PATH);
// Ensure that the job graphs path exists
client.newNamespaceAwareEnsurePath(zooKeeperJobsPath).ensure(client.getZookeeperClient());
// All operations will have the path as root
CuratorFramework facade = client.usingNamespace(client.getNamespace() + zooKeeperJobsPath);
final String zooKeeperFullJobsPath = client.getNamespace() + zooKeeperJobsPath;
final ZooKeeperStateHandleStore<JobGraph> zooKeeperStateHandleStore = new ZooKeeperStateHandleStore<>(facade, stateStorage);
final PathChildrenCache pathCache = new PathChildrenCache(facade, "/", false);
return new DefaultJobGraphStore<>(zooKeeperStateHandleStore, new ZooKeeperJobGraphStoreWatcher(pathCache), ZooKeeperJobGraphStoreUtil.INSTANCE);
} | 3.26 |
flink_ZooKeeperUtils_useNamespaceAndEnsurePath_rdh | /**
* Returns a facade of the client that uses the specified namespace, and ensures that all nodes
* in the path exist.
*
* @param client
* ZK client
* @param path
* the new namespace
* @return ZK Client that uses the new namespace
* @throws Exception
* ZK errors
*/
public static CuratorFramework useNamespaceAndEnsurePath(final CuratorFramework client, final String path) throws Exception {
checkNotNull(client, "client must not be null");
checkNotNull(path, "path must not be null");
// Ensure that the checkpoints path exists
client.newNamespaceAwareEnsurePath(path).ensure(client.getZookeeperClient());
// All operations will have the path as root
final String newNamespace = generateZookeeperPath(client.getNamespace(), path);
// Curator prepends a '/' manually and throws an Exception if the
// namespace starts with a '/'.
return client.usingNamespace(trimStartingSlash(newNamespace));
} | 3.26 |
flink_ZooKeeperUtils_getZooKeeperEnsemble_rdh | /**
* Returns the configured ZooKeeper quorum (and removes whitespace, because ZooKeeper does not
* tolerate it).
*/
public static String getZooKeeperEnsemble(Configuration flinkConf) throws IllegalConfigurationException {
String zkQuorum = flinkConf.getValue(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM);
if ((zkQuorum == null) || StringUtils.isBlank(zkQuorum)) {
throw new IllegalConfigurationException("No ZooKeeper quorum specified in config.");
}
// Remove all whitespace
zkQuorum = zkQuorum.replaceAll("\\s+", "");
return zkQuorum;
} | 3.26 |
flink_ZooKeeperUtils_startCuratorFramework_rdh | /**
* Starts a {@link CuratorFramework} instance and connects it to the given ZooKeeper quorum from
* a builder.
*
* @param builder
* {@link CuratorFrameworkFactory.Builder} A builder for curatorFramework.
* @param fatalErrorHandler
* {@link FatalErrorHandler} fatalErrorHandler to handle unexpected
* errors of {@link CuratorFramework}
* @return {@link CuratorFrameworkWithUnhandledErrorListener} instance
*/
@VisibleForTesting
public static CuratorFrameworkWithUnhandledErrorListener startCuratorFramework(CuratorFrameworkFactory.Builder builder, FatalErrorHandler fatalErrorHandler) {
CuratorFramework cf = builder.build();
UnhandledErrorListener unhandledErrorListener = (message, throwable) -> {
LOG.error("Unhandled error in curator framework, error message: {}", message, throwable);
// The exception thrown in UnhandledErrorListener will be caught by
// CuratorFramework. So we mostly trigger exit process or interact with main
// thread to inform the failure in FatalErrorHandler.
fatalErrorHandler.onFatalError(throwable);
};
cf.getUnhandledErrorListenable().addListener(unhandledErrorListener);
cf.start();
return new CuratorFrameworkWithUnhandledErrorListener(cf, unhandledErrorListener);
} | 3.26 |
flink_ZooKeeperUtils_getDefaultAcl_rdh | /**
* Secure {@link ACLProvider} implementation.
*/
public static class SecureAclProvider implements ACLProvider {
@Override
public List<ACL> getDefaultAcl() {
return Ids.CREATOR_ALL_ACL;
} | 3.26 |
flink_ZooKeeperUtils_createLeaderRetrievalDriverFactory_rdh | /**
* Creates a {@link LeaderRetrievalDriverFactory} implemented by ZooKeeper.
*
* @param client
* The {@link CuratorFramework} ZooKeeper client to use
* @param path
* The path for the leader zNode
* @param configuration
* configuration for further config options
* @return {@link LeaderRetrievalDriverFactory} instance.
*/
public static ZooKeeperLeaderRetrievalDriverFactory createLeaderRetrievalDriverFactory(final CuratorFramework client, final String path, final Configuration configuration) {
final ZooKeeperLeaderRetrievalDriver.LeaderInformationClearancePolicy leaderInformationClearancePolicy;
if (configuration.get(HighAvailabilityOptions.ZOOKEEPER_TOLERATE_SUSPENDED_CONNECTIONS)) {
leaderInformationClearancePolicy = LeaderInformationClearancePolicy.ON_LOST_CONNECTION;
} else {
leaderInformationClearancePolicy = LeaderInformationClearancePolicy.ON_SUSPENDED_CONNECTION;
}
return new ZooKeeperLeaderRetrievalDriverFactory(client, path, leaderInformationClearancePolicy);
} | 3.26 |
flink_ZooKeeperUtils_fromConfig_rdh | /**
* Return the configured {@link ZkClientACLMode}.
*
* @param config
* The config to parse
* @return Configured ACL mode or the default defined by {@link HighAvailabilityOptions#ZOOKEEPER_CLIENT_ACL} if not configured.
*/
public static ZkClientACLMode fromConfig(Configuration config) {
String aclMode = config.getString(HighAvailabilityOptions.ZOOKEEPER_CLIENT_ACL);
if ((aclMode == null) || aclMode.equalsIgnoreCase(OPEN.name())) {
return OPEN;
} else if (aclMode.equalsIgnoreCase(CREATOR.name())) {
return CREATOR;
} else {
String v44 = ("Unsupported ACL option: [" + aclMode) + "] provided";
LOG.error(v44);
throw new IllegalConfigurationException(v44);
}
} | 3.26 |
flink_ZooKeeperUtils_createLeaderRetrievalService_rdh | /**
* Creates a {@link DefaultLeaderRetrievalService} instance with {@link ZooKeeperLeaderRetrievalDriver}.
*
* @param client
* The {@link CuratorFramework} ZooKeeper client to use
* @return {@link DefaultLeaderRetrievalService} instance.
*/
public static DefaultLeaderRetrievalService createLeaderRetrievalService(final CuratorFramework client) {
return createLeaderRetrievalService(client, "", new Configuration());
}
/**
* Creates a {@link DefaultLeaderRetrievalService} instance with {@link ZooKeeperLeaderRetrievalDriver}.
*
* @param client
* The {@link CuratorFramework} ZooKeeper client to use
* @param path
* The path for the leader retrieval
* @param configuration
* configuration for further config options
* @return {@link DefaultLeaderRetrievalService} | 3.26 |
flink_ZooKeeperUtils_createCheckpointIDCounter_rdh | /**
* Creates a {@link ZooKeeperCheckpointIDCounter} instance.
*
* @param client
* The {@link CuratorFramework} ZooKeeper client to use
* @return {@link ZooKeeperCheckpointIDCounter} instance
*/
public static ZooKeeperCheckpointIDCounter createCheckpointIDCounter(CuratorFramework client) {
return new ZooKeeperCheckpointIDCounter(client, new DefaultLastStateConnectionStateListener());
} | 3.26 |
flink_ZooKeeperUtils_getPathForJob_rdh | /**
* Returns the JobID as a String (with leading slash).
*/
public static String getPathForJob(JobID jobId) {
checkNotNull(jobId, "Job ID");
return String.format("/%s", jobId);
} | 3.26 |
flink_ZooKeeperUtils_createTreeCache_rdh | /**
* Creates a {@link TreeCache} that only observes a specific node.
*
* @param client
* ZK client
* @param pathToNode
* full path of the node to observe
* @param nodeChangeCallback
* callback to run if the node has changed
* @return tree cache
*/
public static TreeCache createTreeCache(final CuratorFramework client, final String pathToNode, final RunnableWithException nodeChangeCallback) {
final TreeCache cache = createTreeCache(client, pathToNode, ZooKeeperUtils.treeCacheSelectorForPath(pathToNode));
cache.getListenable().addListener(createTreeCacheListener(nodeChangeCallback));
return cache;
} | 3.26 |
flink_ZooKeeperUtils_treeCacheSelectorForPath_rdh | /**
* Returns a {@link TreeCacheSelector} that only accepts a specific node.
*
* @param fullPath
* node to accept
* @return tree cache selector
*/
private static TreeCacheSelector treeCacheSelectorForPath(String fullPath) {
return new TreeCacheSelector() {
@Override
public boolean traverseChildren(String childPath) {
return false;
}
@Override
public boolean m0(String childPath) {
return fullPath.equals(childPath);
}
};
} | 3.26 |
flink_ZooKeeperUtils_splitZooKeeperPath_rdh | /**
* Splits the given ZooKeeper path into its parts.
*
* @param path
* path to split
 * @return split path
*/
public static String[] splitZooKeeperPath(String path) {
return path.split("/");
} | 3.26 |
flink_AccumulatorHelper_toResultMap_rdh | /**
* Transform the Map with accumulators into a Map containing only the results.
*/
public static Map<String, OptionalFailure<Object>> toResultMap(Map<String, Accumulator<?, ?>> accumulators) {
Map<String, OptionalFailure<Object>> resultMap = new HashMap<>();
for (Map.Entry<String, Accumulator<?, ?>> entry : accumulators.entrySet()) {
resultMap.put(entry.getKey(), wrapUnchecked(entry.getKey(), () -> entry.getValue().getLocalValue()));
}
return resultMap;
}
flink_AccumulatorHelper_deserializeAccumulators_rdh | /**
* Takes the serialized accumulator results and tries to deserialize them using the provided
* class loader.
*
* @param serializedAccumulators
* The serialized accumulator results.
* @param loader
* The class loader to use.
* @return The deserialized accumulator results.
*/
public static Map<String, OptionalFailure<Object>> deserializeAccumulators(Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators, ClassLoader loader) throws IOException, ClassNotFoundException {
if ((serializedAccumulators == null) || serializedAccumulators.isEmpty()) {
return Collections.emptyMap();
}
Map<String, OptionalFailure<Object>> accumulators = CollectionUtil.newHashMapWithExpectedSize(serializedAccumulators.size());
for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> v12 : serializedAccumulators.entrySet()) {
OptionalFailure<Object> value = null;
if (v12.getValue() != null) {
value = v12.getValue().deserializeValue(loader);
}
accumulators.put(v12.getKey(), value);
}
return accumulators;
} | 3.26 |
flink_AccumulatorHelper_deserializeAndUnwrapAccumulators_rdh | /**
* Takes the serialized accumulator results and tries to deserialize them using the provided
* class loader, and then try to unwrap the value unchecked.
*
* @param serializedAccumulators
* The serialized accumulator results.
* @param loader
* The class loader to use.
* @return The deserialized and unwrapped accumulator results.
*/
public static Map<String, Object> deserializeAndUnwrapAccumulators(Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators, ClassLoader loader) throws IOException, ClassNotFoundException {
Map<String, OptionalFailure<Object>> v14 = deserializeAccumulators(serializedAccumulators, loader);
if (v14.isEmpty()) {
return Collections.emptyMap();
}
Map<String, Object> accumulators = CollectionUtil.newHashMapWithExpectedSize(serializedAccumulators.size());
for (Map.Entry<String, OptionalFailure<Object>> entry : v14.entrySet()) {
accumulators.put(entry.getKey(), entry.getValue().getUnchecked());
}
return accumulators;
} | 3.26 |
flink_AccumulatorHelper_compareAccumulatorTypes_rdh | /**
* Compare both classes and throw {@link UnsupportedOperationException} if they differ.
*/
@SuppressWarnings("rawtypes")
public static void compareAccumulatorTypes(Object name, Class<? extends Accumulator> first, Class<? extends Accumulator> second) throws UnsupportedOperationException {
if ((first == null) || (second == null)) {
throw new NullPointerException();
}
if (first != second) {
if (!first.getName().equals(second.getName())) {
throw new UnsupportedOperationException((((("The accumulator object '" + name) + "' was created with two different types: ") + first.getName()) + " and ") + second.getName());
} else {
// damn, name is the same, but different classloaders
throw new UnsupportedOperationException((((((((((("The accumulator object '" + name) + "' was created with two different classes: ") + first) + " and ") + second) + " Both have the same type (") + first.getName()) + ") but different classloaders: ") + first.getClassLoader()) + " and ") + second.getClassLoader());
}
}
} | 3.26 |
flink_AccumulatorHelper_mergeSingle_rdh | /**
* Workaround method for type safety.
*/
private static <V, R extends Serializable> Accumulator<V, R> mergeSingle(Accumulator<?, ?> target, Accumulator<?, ?> toMerge) {
@SuppressWarnings("unchecked")
Accumulator<V, R> typedTarget = ((Accumulator<V, R>) (target));
@SuppressWarnings("unchecked")
Accumulator<V, R> typedToMerge = ((Accumulator<V, R>) (toMerge));
typedTarget.merge(typedToMerge);
return typedTarget;
}
flink_AccumulatorHelper_mergeInto_rdh | /**
* Merge two collections of accumulators. The second will be merged into the first.
*
* @param target
* The collection of accumulators that will be updated
* @param toMerge
* The collection of accumulators that will be merged into the other
*/
public static void mergeInto(Map<String, OptionalFailure<Accumulator<?, ?>>> target, Map<String, Accumulator<?, ?>> toMerge) {
for (Map.Entry<String, Accumulator<?, ?>> otherEntry : toMerge.entrySet()) {
OptionalFailure<Accumulator<?, ?>> ownAccumulator = target.get(otherEntry.getKey());
if (ownAccumulator == null) {
// Create initial counter (copy!)
target.put(otherEntry.getKey(), wrapUnchecked(otherEntry.getKey(), () -> otherEntry.getValue().clone()));
} else if (ownAccumulator.isFailure()) {
continue;
} else {
Accumulator<?, ?> accumulator = ownAccumulator.getUnchecked();
// Both should have the same type
compareAccumulatorTypes(otherEntry.getKey(), accumulator.getClass(), otherEntry.getValue().getClass());
// Merge target counter with other counter
target.put(otherEntry.getKey(), wrapUnchecked(otherEntry.getKey(), () -> mergeSingle(accumulator, otherEntry.getValue().clone())));
}
}
} | 3.26 |
flink_TwoInputStreamTask_getCanEmitBatchOfRecords_rdh | // This is needed for StreamMultipleInputProcessor#processInput to preserve the existing
// behavior of choosing an input every time a record is emitted. This behavior is good for
// fairness between input consumption. But it can reduce throughput due to added control
// flow cost on the per-record code path.
@Override
public CanEmitBatchOfRecordsChecker getCanEmitBatchOfRecords() {
return () -> false;
} | 3.26 |
flink_SlotManagerUtils_generateDefaultSlotResourceProfile_rdh | /**
 * This must be consistent with {@link org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils#generateDefaultSlotResourceProfile}.
*/
public static ResourceProfile generateDefaultSlotResourceProfile(WorkerResourceSpec workerResourceSpec, int numSlotsPerWorker) {
final ResourceProfile.Builder resourceProfileBuilder = ResourceProfile.newBuilder().setCpuCores(workerResourceSpec.getCpuCores().divide(numSlotsPerWorker)).setTaskHeapMemory(workerResourceSpec.getTaskHeapSize().divide(numSlotsPerWorker)).setTaskOffHeapMemory(workerResourceSpec.getTaskOffHeapSize().divide(numSlotsPerWorker)).setManagedMemory(workerResourceSpec.getManagedMemSize().divide(numSlotsPerWorker)).setNetworkMemory(workerResourceSpec.getNetworkMemSize().divide(numSlotsPerWorker));
workerResourceSpec.getExtendedResources().forEach((name, resource) -> resourceProfileBuilder.setExtendedResource(resource.divide(numSlotsPerWorker)));
return resourceProfileBuilder.build();
}
/**
 * This must be consistent with {@link org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils#generateDefaultSlotResourceProfile} | 3.26 |
flink_ResourceInformationReflector_getAllResourceInfos_rdh | /**
* Get the name and value of all resources from the {@link Resource}.
*/
@VisibleForTesting
Map<String, Long> getAllResourceInfos(Object resource) {
if (!isYarnResourceTypesAvailable) {
return Collections.emptyMap();
}
final Map<String, Long> externalResources = new HashMap<>();
final Object[] externalResourcesInfo;
try {
externalResourcesInfo = ((Object[]) (resourceGetResourcesMethod.invoke(resource)));
for (int i = 0; i < externalResourcesInfo.length; i++) {
final String name = ((String) (resourceInformationGetNameMethod.invoke(externalResourcesInfo[i])));
final long value = ((long) (resourceInformationGetValueMethod.invoke(externalResourcesInfo[i])));
externalResources.put(name, value);
}
} catch (Exception e) {
LOG.warn("Could not obtain the external resources supported by the given Resource.", e);
return Collections.emptyMap();
}
return externalResources;
} | 3.26 |
flink_ResourceInformationReflector_setResourceInformationUnSafe_rdh | /**
* Same as {@link #setResourceInformation(Resource, String, long)} but allows to pass objects
* that are not of type {@link Resource}.
*/
@VisibleForTesting
void setResourceInformationUnSafe(Object resource, String resourceName, long amount) {
if (!isYarnResourceTypesAvailable) {
LOG.info("Will not request extended resource {} because the used YARN version does not support it.", resourceName);return;
}
try {
resourceSetResourceInformationMethod.invoke(resource, resourceName, resourceInformationNewInstanceMethod.invoke(null, resourceName, amount));
} catch (Exception e) {
LOG.warn("Error in setting the external resource {}. Will not request this resource from YARN.", resourceName, e);
}
} | 3.26 |
flink_ResourceInformationReflector_getExternalResources_rdh | /**
* Get the name and value of external resources from the {@link Resource}.
*/
Map<String, Long> getExternalResources(Resource resource) {
return getExternalResourcesUnSafe(resource);
} | 3.26 |
flink_ResourceInformationReflector_setResourceInformation_rdh | /**
* Add the given resourceName and value to the {@link Resource}.
*/
void setResourceInformation(Resource resource, String resourceName, long amount) {
setResourceInformationUnSafe(resource, resourceName, amount);
} | 3.26 |
flink_ResourceInformationReflector_getExternalResourcesUnSafe_rdh | /**
* Same as {@link #getExternalResources(Resource)} but allows to pass objects that are not of
* type {@link Resource}.
*/
@VisibleForTesting
Map<String, Long> getExternalResourcesUnSafe(Object resource) {
if (!isYarnResourceTypesAvailable) {
return Collections.emptyMap();
}
final Map<String, Long> externalResources = new HashMap<>();
final Object[] externalResourcesInfo;
try {
externalResourcesInfo = ((Object[]) (resourceGetResourcesMethod.invoke(resource)));
// The first two elements are cpu and mem.
for (int i = 2; i < externalResourcesInfo.length; i++) {
final String name = ((String) (resourceInformationGetNameMethod.invoke(externalResourcesInfo[i])));
final long v12 = ((long) (resourceInformationGetValueMethod.invoke(externalResourcesInfo[i])));
externalResources.put(name, v12);
}
} catch (Exception e) {
LOG.warn("Could not obtain the external resources supported by the given Resource.", e);
return Collections.emptyMap();
}
return externalResources;
} | 3.26 |
flink_SorterInputGateway_m0_rdh | /**
* Signals the end of input. Will flush all buffers and notify later stages.
*/
public void m0() {
if ((currentBuffer != null) && (!currentBuffer.getBuffer().isEmpty())) {
this.dispatcher.send(SortStage.SORT, currentBuffer);
}
// add the sentinel to notify the receivers that the work is done
// send the EOF marker
final CircularElement<E> EOF_MARKER = CircularElement.endMarker();
this.dispatcher.send(SortStage.SORT, EOF_MARKER);
LOG.debug("Reading thread done.");} | 3.26 |
flink_SorterInputGateway_writeRecord_rdh | /**
* Writes the given record for sorting.
*/
public void writeRecord(E record) throws IOException, InterruptedException {
if (currentBuffer == null) {
this.currentBuffer = this.dispatcher.take(SortStage.READ);
if (!currentBuffer.getBuffer().isEmpty()) {
throw new IOException("New buffer is not empty.");
}
}
InMemorySorter<E> sorter = currentBuffer.getBuffer();
long occupancyPreWrite = sorter.getOccupancy();
if (!sorter.write(record)) {
long recordSize = sorter.getCapacity() - occupancyPreWrite;
signalSpillingIfNecessary(recordSize);
boolean isLarge = occupancyPreWrite == 0;
if (isLarge) {
// did not fit in a fresh buffer, must be large...
writeLarge(record, sorter);
this.currentBuffer.getBuffer().reset();
} else {
this.dispatcher.send(SortStage.SORT, currentBuffer);
this.currentBuffer = null;
writeRecord(record);
}
} else {
long recordSize = sorter.getOccupancy() - occupancyPreWrite;
signalSpillingIfNecessary(recordSize);
}
} | 3.26 |
flink_ChecksumUtils_convertChecksumToString_rdh | /**
* Converts an int crc32 checksum to the string format used by Google storage, which is the
* base64 string for the int in big-endian format.
*
* @param checksum
* The int checksum
* @return The string checksum
*/
public static String convertChecksumToString(int checksum) {
ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
buffer.order(ByteOrder.BIG_ENDIAN);
buffer.putInt(checksum);
return BASE64_ENCODER.encodeToString(buffer.array());
} | 3.26 |
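A standalone sketch reproducing the same transformation as `convertChecksumToString` with only JDK classes, assuming the `BASE64_ENCODER` in the snippet is `java.util.Base64`'s standard encoder (class and method names here are hypothetical):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Base64;

public class ChecksumStringDemo {
    // Standalone equivalent of convertChecksumToString above:
    // the int is written in big-endian byte order and Base64-encoded.
    static String toBase64BigEndian(int checksum) {
        ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
        buffer.order(ByteOrder.BIG_ENDIAN);
        buffer.putInt(checksum);
        return Base64.getEncoder().encodeToString(buffer.array());
    }

    public static void main(String[] args) {
        System.out.println(toBase64BigEndian(0));          // AAAAAA==
        System.out.println(toBase64BigEndian(0x01020304)); // AQIDBA==
    }
}
```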
flink_TypeExtractor_createTypeInfo_rdh | // --------------------------------------------------------------------------------------------
// Create type information
// --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> createTypeInfo(Class<T> type) {
return ((TypeInformation<T>) (createTypeInfo(((Type) (type)))));
} | 3.26 |
flink_TypeExtractor_createTypeInfoFromFactory_rdh | /**
* Creates type information using a given factory.
*/
@SuppressWarnings("unchecked")
private <IN1, IN2, OUT> TypeInformation<OUT> createTypeInfoFromFactory(Type t, TypeInformation<IN1> in1Type, TypeInformation<IN2> in2Type, List<Type> factoryHierarchy, TypeInfoFactory<? super OUT> factory, Type factoryDefiningType) {
// infer possible type parameters from input
final Map<String, TypeInformation<?>> genericParams;
if (factoryDefiningType instanceof ParameterizedType) {
genericParams = new HashMap<>();
final ParameterizedType paramDefiningType = ((ParameterizedType) (factoryDefiningType));
final Type[] args = typeToClass(paramDefiningType).getTypeParameters();
final TypeInformation<?>[] subtypeInfo = createSubTypesInfo(t, paramDefiningType, factoryHierarchy, in1Type, in2Type, true);
assert subtypeInfo != null;
for (int i = 0; i < subtypeInfo.length; i++) {
genericParams.put(args[i].toString(), subtypeInfo[i]);
}
} else {
genericParams = Collections.emptyMap();
}
final TypeInformation<OUT> v74 = ((TypeInformation<OUT>) (factory.createTypeInfo(t, genericParams)));
if (v74 == null) {
throw new InvalidTypesException("TypeInfoFactory returned invalid TypeInformation 'null'");
}
return v74;
} | 3.26 |
flink_TypeExtractor_getParameterType_rdh | // --------------------------------------------------------------------------------------------
// Extract type parameters
// --------------------------------------------------------------------------------------------
@PublicEvolving
public static Type getParameterType(Class<?> baseClass, Class<?> clazz, int pos) {
return getParameterType(baseClass, null, clazz, pos);
}
flink_TypeExtractor_getAllDeclaredFields_rdh | /**
 * Recursively determines all declared fields. This is required because class.getFields() does
 * not return fields defined in parent classes.
*
* @param clazz
* class to be analyzed
* @param ignoreDuplicates
* if true, in case of duplicate field names only the lowest one in a
* hierarchy will be returned; throws an exception otherwise
* @return list of fields
*/
@PublicEvolving
public static List<Field> getAllDeclaredFields(Class<?> clazz, boolean ignoreDuplicates) {
List<Field> result = new ArrayList<>();
while (clazz != null) {
Field[] fields = clazz.getDeclaredFields();
for (Field field : fields) {
if (Modifier.isTransient(field.getModifiers()) || Modifier.isStatic(field.getModifiers())) {
continue;// we have no use for transient or static fields
}
if (hasFieldWithSameName(field.getName(), result)) {
if (ignoreDuplicates) {
continue;
} else {
throw new InvalidTypesException((((("The field " + field) + " is already contained in the hierarchy of the ") + clazz) + ".") + "Please use unique field names through your classes hierarchy");
}
}
result.add(field);
}
clazz = clazz.getSuperclass();
}
return result;
} | 3.26 |
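A possible usage of `getAllDeclaredFields` above, assuming `TypeExtractor` lives in its usual `org.apache.flink.api.java.typeutils` package; the example classes are purely illustrative:

```java
import java.lang.reflect.Field;
import java.util.List;
import org.apache.flink.api.java.typeutils.TypeExtractor;

public class DeclaredFieldsDemo {
    static class Base { int id; }
    static class Child extends Base { String name; }

    public static void main(String[] args) {
        // Collects non-static, non-transient fields from Child and all of its superclasses;
        // with ignoreDuplicates = true, a field name shadowed in a subclass is reported only
        // once (the lowest one in the hierarchy), instead of raising an exception.
        List<Field> fields = TypeExtractor.getAllDeclaredFields(Child.class, true);
        for (Field f : fields) {
            System.out.println(f.getDeclaringClass().getSimpleName() + "." + f.getName());
        }
    }
}
```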
flink_TypeExtractor_getMapReturnTypes_rdh | // --------------------------------------------------------------------------------------------
// Function specific methods
// --------------------------------------------------------------------------------------------
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
} | 3.26 |
flink_TypeExtractor_isRecord_rdh | /**
* Determine whether the given class is a valid Java record.
*
* @param clazz
* class to check
* @return True if the class is a Java record
*/
@PublicEvolving
public static boolean isRecord(Class<?> clazz) {
return clazz.getSuperclass().getName().equals("java.lang.Record") && ((clazz.getModifiers() & Modifier.FINAL) != 0);
} | 3.26 |
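A possible check using `isRecord` above (same package assumption as before; requires a JDK with record support):

```java
import org.apache.flink.api.java.typeutils.TypeExtractor;

public class RecordCheckDemo {
    // A record's direct superclass is java.lang.Record and record classes are
    // implicitly final, which is exactly what isRecord verifies above.
    record Point(int x, int y) {}

    public static void main(String[] args) {
        System.out.println(TypeExtractor.isRecord(Point.class));  // true
        System.out.println(TypeExtractor.isRecord(String.class)); // false
    }
}
```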
flink_TypeExtractor_getTypeInfoFactory_rdh | /**
* Returns the type information factory for an annotated field.
*/
@Internal
@SuppressWarnings("unchecked")
public static <OUT> TypeInfoFactory<OUT> getTypeInfoFactory(Field field) {
if ((!isClassType(field.getType())) || (!field.isAnnotationPresent(TypeInfo.class))) {
return null;
}
Class<?> factoryClass = field.getAnnotation(TypeInfo.class).value();
// check for valid factory class
if (!TypeInfoFactory.class.isAssignableFrom(factoryClass)) {
throw new InvalidTypesException("TypeInfo annotation does not specify a valid TypeInfoFactory.");
}
return ((TypeInfoFactory<OUT>) (InstantiationUtil.instantiate(factoryClass)));
} | 3.26 |
flink_TypeExtractor_validateIfWritable_rdh | // visible for testing
static void validateIfWritable(TypeInformation<?> typeInfo, Type type) {
try {
// try to load the writable type info
Class<?> v167 = Class.forName(HADOOP_WRITABLE_TYPEINFO_CLASS, false, typeInfo.getClass().getClassLoader());
if (v167.isAssignableFrom(typeInfo.getClass())) {
// this is actually a writable type info
// check if the type is a writable
if (!((type instanceof Class) && isHadoopWritable(((Class<?>) (type))))) {
throw new InvalidTypesException(HADOOP_WRITABLE_CLASS + " type expected.");
}
// check writable type contents
Class<?> clazz = ((Class<?>) (type));
if (typeInfo.getTypeClass() != clazz) {
throw new InvalidTypesException(((("Writable type '" + typeInfo.getTypeClass().getCanonicalName()) + "' expected but was '") + clazz.getCanonicalName())
+ "'.");
}
}
} catch (ClassNotFoundException e) {
// class not present at all, so cannot be that type info
// ignore
}
} | 3.26 |
flink_TypeExtractor_isValidPojoField_rdh | /**
* Checks if the given field is a valid pojo field: - it is public OR - there are getter and
* setter methods for the field.
*
* @param f
* field to check
* @param clazz
* class of field
* @param typeHierarchy
* type hierarchy for materializing generic types
*/
private boolean isValidPojoField(Field f, Class<?> clazz, List<Type> typeHierarchy) {
if (Modifier.isPublic(f.getModifiers())) {
return true;
} else {
boolean hasGetter = false;
boolean hasSetter = false;
final String fieldNameLow = f.getName().toLowerCase().replaceAll("_", "");
Type fieldType = f.getGenericType();
Class<?> fieldTypeWrapper = ClassUtils.primitiveToWrapper(f.getType());
TypeVariable<?> fieldTypeGeneric = null;
if (fieldType instanceof TypeVariable) {
fieldTypeGeneric = ((TypeVariable<?>) (fieldType));
fieldType = materializeTypeVariable(typeHierarchy, ((TypeVariable<?>) (fieldType)));
}
for (Method m : clazz.getMethods()) {
final String methodNameLow = (m.getName().endsWith("_$eq")) ? m.getName().toLowerCase().replaceAll("_", "").replaceFirst("\\$eq$", "_\\$eq") : m.getName().toLowerCase().replaceAll("_", "");
// check for getter
// The name should be "get<FieldName>" or "<fieldName>" (for scala) or
// "is<fieldName>" for boolean fields.
if ((((methodNameLow.equals("get" + fieldNameLow) || methodNameLow.equals("is" + fieldNameLow)) || methodNameLow.equals(fieldNameLow)) && // no arguments for the getter
(m.getParameterCount() == 0))
&& // return type is same as field type (or the generic variant of it)
((m.getGenericReturnType().equals(fieldType) || m.getReturnType().equals(fieldTypeWrapper)) || m.getGenericReturnType().equals(fieldTypeGeneric))) {
hasGetter = true;
}
// check for setters (<FieldName>_$eq for scala)
if ((((methodNameLow.equals("set" + fieldNameLow) || methodNameLow.equals(fieldNameLow + "_$eq")) && (m.getParameterCount() == 1)) && // one parameter of the field's type
((m.getGenericParameterTypes()[0].equals(fieldType) || m.getParameterTypes()[0].equals(fieldTypeWrapper)) || m.getGenericParameterTypes()[0].equals(fieldTypeGeneric))) && // return type is void (or the class self).
(m.getReturnType().equals(Void.TYPE) || m.getReturnType().equals(clazz))) {
hasSetter = true;
}
}
if (hasGetter && hasSetter) {
return true;
} else {
if ((!hasGetter) && (clazz != Row.class)) {
LOG.info((clazz + " does not contain a getter for field ") + f.getName());
}
if ((!hasSetter) && (clazz != Row.class)) {
LOG.info((clazz + " does not contain a setter for field ") + f.getName());
}
return false;
}
}
} | 3.26 |
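A minimal sketch of a POJO field layout that satisfies the rules checked above: a field passes if it is public, or if a matching getter/setter pair exists (the boolean is-getter and Scala `_$eq` setter variants handled in the code are omitted here; the class itself is illustrative):

```java
public class WordCount {
    public long count;    // valid: public field, no accessors needed

    private String word;  // valid: non-public, but a getter/setter pair exists

    public String getWord() {
        return word;
    }

    public void setWord(String word) {
        this.word = word;
    }
}
```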
flink_TypeExtractor_privateCreateTypeInfo_rdh | // for LambdaFunctions
private <IN1, IN2, OUT> TypeInformation<OUT> privateCreateTypeInfo(Type returnType, TypeInformation<IN1> in1Type, TypeInformation<IN2> in2Type) {
List<Type> typeHierarchy = new ArrayList<>();
// get info from hierarchy
return createTypeInfoWithTypeHierarchy(typeHierarchy, returnType, in1Type, in2Type);
} | 3.26 |
flink_TypeExtractor_getForClass_rdh | /**
* Creates type information from a given Class such as Integer, String[] or POJOs.
*
* <p>This method does not support ParameterizedTypes such as Tuples or complex type
* hierarchies. In most cases {@link TypeExtractor#createTypeInfo(Type)} is the recommended
* method for type extraction (a Class is a child of Type).
*
* @param clazz
* a Class to create TypeInformation for
* @return TypeInformation that describes the passed Class
*/
public static <X> TypeInformation<X> getForClass(Class<X> clazz) {
final List<Type> typeHierarchy = new ArrayList<>();
typeHierarchy.add(clazz);
return new TypeExtractor().privateGetForClass(clazz, typeHierarchy);
} | 3.26 |
flink_TypeExtractor_getClosestFactory_rdh | /**
* Traverses the type hierarchy up until a type information factory can be found.
*
* @param typeHierarchy
* hierarchy to be filled while traversing up
* @param t
* type for which a factory needs to be found
* @return closest type information factory or null if there is no factory in the type hierarchy
*/
private static <OUT> TypeInfoFactory<? super OUT> getClosestFactory(List<Type> typeHierarchy, Type t) {
TypeInfoFactory<OUT> factory = null;
while (((factory == null) && isClassType(t)) && (!typeToClass(t).equals(Object.class))) {
typeHierarchy.add(t);
factory = getTypeInfoFactory(t);
t = typeToClass(t).getGenericSuperclass();
if (t == null) {
break;
}
}
return factory;
} | 3.26 |
flink_TypeExtractor_isHadoopWritable_rdh | // ------------------------------------------------------------------------
// Utilities to handle Hadoop's 'Writable' type via reflection
// ------------------------------------------------------------------------
// visible for testing
static boolean isHadoopWritable(Class<?> typeClass) {
// check if this is directly the writable interface
if (typeClass.getName().equals(HADOOP_WRITABLE_CLASS)) {
return false;
}
final HashSet<Class<?>> alreadySeen = new HashSet<>();
alreadySeen.add(typeClass);
return hasHadoopWritableInterface(typeClass, alreadySeen);
} | 3.26 |
flink_TypeExtractor_getBinaryOperatorReturnType_rdh | /**
* Returns the binary operator's return type.
*
* <p>This method can extract a type in 4 different ways:
*
* <p>1. By using the generics of the base class like MyFunction<X, Y, Z, IN, OUT>. This is what
* outputTypeArgumentIndex (in this example "4") is good for.
*
* <p>2. By using input type inference SubMyFunction<T, String, String, String, T>. This is what
* inputTypeArgumentIndex (in this example "0") and inType is good for.
*
* <p>3. By using the static method that a compiler generates for Java lambdas. This is what
* lambdaOutputTypeArgumentIndices is good for. Given that MyFunction has the following single
* abstract method:
*
* <pre>
* <code>
* void apply(IN value, Collector<OUT> value)
* </code>
* </pre>
*
* <p>Lambda type indices allow the extraction of a type from lambdas. To extract the output
* type <b>OUT</b> from the function one should pass {@code new int[] {1, 0}}. "1" for selecting
* the parameter and 0 for the first generic in this type. Use {@code TypeExtractor.NO_INDEX}
* for selecting the return type of the lambda for extraction or if the class cannot be a lambda
* because it is not a single abstract method interface.
*
* <p>4. By using interfaces such as {@link TypeInfoFactory} or {@link ResultTypeQueryable}.
*
* <p>See also comments in the header of this class.
*
* @param function
* Function to extract the return type from
* @param baseClass
* Base class of the function
* @param input1TypeArgumentIndex
* Index of first input generic type in the class specification
* (ignored if in1Type is null)
* @param input2TypeArgumentIndex
* Index of second input generic type in the class specification
* (ignored if in2Type is null)
* @param outputTypeArgumentIndex
* Index of output generic type in the class specification
* @param lambdaOutputTypeArgumentIndices
* Table of indices of the type argument specifying the
* output type. See example.
* @param in1Type
* Type of the left side input elements (In case of an iterable, it is the
* element type)
* @param in2Type
* Type of the right side input elements (In case of an iterable, it is the
* element type)
* @param functionName
* Function name
* @param allowMissing
* Can the type information be missing (this generates a MissingTypeInfo for
* postponing an exception)
* @param <IN1>
* Left side input type
* @param <IN2>
* Right side input type
* @param <OUT>
* Output type
* @return TypeInformation of the return type of the function
*/
@SuppressWarnings("unchecked")
@PublicEvolving
public static <IN1, IN2, OUT> TypeInformation<OUT> getBinaryOperatorReturnType(Function function, Class<?> baseClass, int input1TypeArgumentIndex, int input2TypeArgumentIndex, int outputTypeArgumentIndex, int[] lambdaOutputTypeArgumentIndices, TypeInformation<IN1> in1Type, TypeInformation<IN2> in2Type, String functionName, boolean allowMissing) {
Preconditions.checkArgument((in1Type == null) || (input1TypeArgumentIndex >= 0), "Input 1 type argument index was not provided");
Preconditions.checkArgument((in2Type == null) || (input2TypeArgumentIndex >= 0), "Input 2 type argument index was not provided");
Preconditions.checkArgument(outputTypeArgumentIndex >= 0, "Output type argument index was not provided");
Preconditions.checkArgument(lambdaOutputTypeArgumentIndices != null, "Indices for output type arguments within lambda not provided");
// explicit result type has highest precedence
if (function instanceof ResultTypeQueryable) {
return ((ResultTypeQueryable<OUT>) (function)).getProducedType();
}
// perform extraction
try {
final LambdaExecutable exec;
try {
exec = checkAndExtractLambda(function);
} catch (TypeExtractionException e) {
throw new InvalidTypesException("Internal error occurred.", e);
}
if (exec != null) {
final Method sam = TypeExtractionUtils.getSingleAbstractMethod(baseClass);
final int baseParametersLen = sam.getParameterCount();
// parameters must be accessed from behind, since JVM can add additional parameters
// e.g. when using local variables inside lambda function
final int paramLen = exec.getParameterTypes().length;
final Type output;
if (lambdaOutputTypeArgumentIndices.length > 0) {
output = TypeExtractionUtils.extractTypeFromLambda(baseClass, exec, lambdaOutputTypeArgumentIndices, paramLen, baseParametersLen);
} else {
output = exec.getReturnType();
TypeExtractionUtils.validateLambdaType(baseClass, output);
}
return new TypeExtractor().privateCreateTypeInfo(output, in1Type, in2Type);
} else {
if (in1Type != null) {
validateInputType(baseClass, function.getClass(), input1TypeArgumentIndex, in1Type);
}
if (in2Type != null) {
validateInputType(baseClass, function.getClass(), input2TypeArgumentIndex, in2Type);
}
return new TypeExtractor().privateCreateTypeInfo(baseClass, function.getClass(), outputTypeArgumentIndex, in1Type, in2Type);
}
} catch (InvalidTypesException e) {
if (allowMissing) {
return ((TypeInformation<OUT>) (new MissingTypeInfo(functionName != null ? functionName : function.toString(), e)));
} else {
throw e;
}
}
} | 3.26 |
flink_TypeExtractor_createTypeInfoFromInput_rdh | /**
* Finds the type information to a type variable.
*
 * <p>It solves the following:
*
* <p>Return the type information for "returnTypeVar" given that "inType" has type information
* "inTypeInfo". Thus "inType" must contain "returnTypeVar" in a "inputTypeHierarchy", otherwise
* null is returned.
*/
private <IN1> TypeInformation<?> createTypeInfoFromInput(TypeVariable<?> returnTypeVar, List<Type> inputTypeHierarchy, Type inType, TypeInformation<IN1> inTypeInfo) {
        TypeInformation<?> info = null;
        // use a factory to find corresponding type information to type variable
final List<Type> factoryHierarchy = new ArrayList<>(inputTypeHierarchy);
final TypeInfoFactory<?> factory = getClosestFactory(factoryHierarchy, inType);
if (factory != null) {
// the type that defines the factory is last in factory hierarchy
final Type factoryDefiningType = factoryHierarchy.get(factoryHierarchy.size() - 1);
// defining type has generics, the factory need to be asked for a mapping of subtypes to
// type information
if (factoryDefiningType instanceof ParameterizedType) {
final Type[] typeParams = typeToClass(factoryDefiningType).getTypeParameters();
final Type[] actualParams = ((ParameterizedType) (factoryDefiningType)).getActualTypeArguments();
// go thru all elements and search for type variables
for (int i = 0; i < actualParams.length; i++) {
final Map<String, TypeInformation<?>> componentInfo = inTypeInfo.getGenericParameters();
final String typeParamName = typeParams[i].toString();
if ((!componentInfo.containsKey(typeParamName)) || (componentInfo.get(typeParamName) == null)) {
throw new InvalidTypesException(((((("TypeInformation '" + inTypeInfo.getClass().getSimpleName()) + "' does not supply a mapping of TypeVariable '") + typeParamName) + "' to corresponding TypeInformation. ") + "Input type inference can only produce a result with this information. ") + "Please implement method 'TypeInformation.getGenericParameters()' for this.");
}
info = createTypeInfoFromInput(returnTypeVar, factoryHierarchy, actualParams[i], componentInfo.get(typeParamName));
if (info != null) {
break;
}
}
}
} else if (sameTypeVars(inType, returnTypeVar)) {
return inTypeInfo;
} else if (inType instanceof TypeVariable) {
Type resolvedInType = materializeTypeVariable(inputTypeHierarchy, ((TypeVariable<?>) (inType)));
if (resolvedInType != inType) {
info = createTypeInfoFromInput(returnTypeVar, inputTypeHierarchy, resolvedInType, inTypeInfo);
}
        } else if (inType instanceof GenericArrayType) {
TypeInformation<?> componentInfo = null;
if (inTypeInfo instanceof BasicArrayTypeInfo) {
componentInfo = ((BasicArrayTypeInfo<?, ?>) (inTypeInfo)).getComponentInfo();
} else if (inTypeInfo instanceof PrimitiveArrayTypeInfo) {
componentInfo = BasicTypeInfo.getInfoFor(inTypeInfo.getTypeClass().getComponentType());
} else if (inTypeInfo instanceof ObjectArrayTypeInfo) {
componentInfo = ((ObjectArrayTypeInfo<?, ?>) (inTypeInfo)).getComponentInfo();
}
info = createTypeInfoFromInput(returnTypeVar, inputTypeHierarchy, ((GenericArrayType) (inType)).getGenericComponentType(), componentInfo);
} else if (((inTypeInfo instanceof TupleTypeInfo) && isClassType(inType)) && Tuple.class.isAssignableFrom(typeToClass(inType))) {
ParameterizedType tupleBaseClass;
// get tuple from possible tuple subclass
            while (!(isClassType(inType) && typeToClass(inType).getSuperclass().equals(Tuple.class))) {
                inputTypeHierarchy.add(inType);
                inType = typeToClass(inType).getGenericSuperclass();
            }
            inputTypeHierarchy.add(inType);
            // we can assume to be parameterized since we already did input validation
            tupleBaseClass = ((ParameterizedType) (inType));
Type[] tupleElements = tupleBaseClass.getActualTypeArguments();
// go thru all tuple elements and search for type variables
for (int i = 0; i < tupleElements.length; i++) {
info = createTypeInfoFromInput(returnTypeVar, inputTypeHierarchy, tupleElements[i], ((TupleTypeInfo<?>) (inTypeInfo)).getTypeAt(i));
if (info != null) {
break;
}
}
        } else if ((inTypeInfo instanceof PojoTypeInfo) && isClassType(inType)) {
// build the entire type hierarchy for the pojo
getTypeHierarchy(inputTypeHierarchy, inType, Object.class);
// determine a field containing the type variable
            List<Field> fields = getAllDeclaredFields(typeToClass(inType), false);
            for (Field field : fields) {
Type fieldType = field.getGenericType();
if ((fieldType instanceof TypeVariable) && sameTypeVars(returnTypeVar, materializeTypeVariable(inputTypeHierarchy, ((TypeVariable<?>) (fieldType))))) {
return getTypeOfPojoField(inTypeInfo, field);
                } else if ((fieldType instanceof ParameterizedType) || (fieldType instanceof GenericArrayType)) {
List<Type> typeHierarchyWithFieldType = new ArrayList<>(inputTypeHierarchy);
typeHierarchyWithFieldType.add(fieldType);
TypeInformation<?> foundInfo = createTypeInfoFromInput(returnTypeVar, typeHierarchyWithFieldType, fieldType, getTypeOfPojoField(inTypeInfo, field));
if (foundInfo != null) {
return foundInfo;
}
}
}
}
return info;
} | 3.26 |
flink_TypeExtractor_validateInputType_rdh | // --------------------------------------------------------------------------------------------
// Validate input
// --------------------------------------------------------------------------------------------
    private static void validateInputType(Class<?> baseClass, Class<?> clazz, int inputParamPos, TypeInformation<?> inTypeInfo) {
List<Type> typeHierarchy = new ArrayList<>();
// try to get generic parameter
Type inType;
try {
inType = getParameterType(baseClass, typeHierarchy, clazz, inputParamPos);
} catch (InvalidTypesException e) {
            return; // skip input validation, e.g. for raw types
}
try {
validateInfo(typeHierarchy, inType, inTypeInfo);
} catch (InvalidTypesException e) {
throw new InvalidTypesException("Input mismatch: " + e.getMessage(), e);
}
} | 3.26 |
flink_TypeExtractor_countTypeInHierarchy_rdh | /**
*
* @return number of items with equal type or same raw type
*/
private static int countTypeInHierarchy(List<Type> typeHierarchy, Type type) {
        int count = 0;
        for (Type t : typeHierarchy) {
            if (((t == type) || (isClassType(type) && (t == typeToClass(type)))) || (isClassType(t) && (typeToClass(t) == type))) {
count++;
}
}
return count;
} | 3.26 |
flink_Schema_fromColumns_rdh | /**
* Adopts all columns from the given list.
*/
public Builder fromColumns(List<UnresolvedColumn> unresolvedColumns) {
columns.addAll(unresolvedColumns);
return this;
} | 3.26 |
flink_Schema_columnByMetadata_rdh | /**
* Declares a metadata column that is appended to this schema.
*
* <p>Metadata columns allow to access connector and/or format specific fields for every row
* of a table. For example, a metadata column can be used to read and write the timestamp
* from and to Kafka records for time-based operations. The connector and format
* documentation lists the available metadata fields for every component.
*
* <p>Every metadata field is identified by a string-based key and has a documented data
* type. The metadata key can be omitted if the column name should be used as the
* identifying metadata key. For convenience, the runtime will perform an explicit cast if
* the data type of the column differs from the data type of the metadata field. Of course,
* this requires that the two data types are compatible.
*
* <p>By default, a metadata column can be used for both reading and writing. However, in
* many cases an external system provides more read-only metadata fields than writable
* fields. Therefore, it is possible to exclude metadata columns from persisting by setting
* the {@code isVirtual} flag to {@code true}.
*
* @param columnName
* column name
* @param dataType
* data type of the column
* @param metadataKey
* identifying metadata key, if null the column name will be used as
* metadata key
* @param isVirtual
* whether the column should be persisted or not
*/
        public Builder columnByMetadata(String columnName, AbstractDataType<?> dataType, @Nullable String metadataKey, boolean isVirtual) {
            Preconditions.checkNotNull(columnName, "Column name must not be null.");
            Preconditions.checkNotNull(dataType, "Data type must not be null.");
columns.add(new UnresolvedMetadataColumn(columnName, dataType, metadataKey, isVirtual));
return this;
}
/**
* Declares a metadata column that is appended to this schema.
*
* <p>See {@link #columnByMetadata(String, AbstractDataType, String, boolean)} | 3.26 |
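
A minimal usage sketch of the builder method above, assuming the standard Table API entry points (Schema.newBuilder(), DataTypes); the column and metadata key names are illustrative, not taken from the snippet.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class MetadataColumnExample {
    public static void main(String[] args) {
        Schema schema = Schema.newBuilder()
                // regular physical column
                .column("user_id", DataTypes.BIGINT())
                // readable and writable metadata column, identified by the key "timestamp"
                .columnByMetadata("event_time", DataTypes.TIMESTAMP_LTZ(3), "timestamp", false)
                // read-only (virtual) metadata column; the column name doubles as the metadata key
                .columnByMetadata("partition", DataTypes.INT(), null, true)
                .build();
        System.out.println(schema);
    }
}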
flink_Schema_fromRowDataType_rdh | /**
* Adopts all fields of the given row as physical columns of the schema.
*/
public Builder fromRowDataType(DataType dataType) {
Preconditions.checkNotNull(dataType, "Data type must not be null.");
Preconditions.checkArgument(dataType.getLogicalType().is(LogicalTypeRoot.ROW), "Data type of ROW expected.");
final List<DataType> fieldDataTypes = dataType.getChildren();
final List<String> fieldNames = ((RowType) (dataType.getLogicalType())).getFieldNames();
IntStream.range(0, fieldDataTypes.size()).forEach(i -> column(fieldNames.get(i), fieldDataTypes.get(i)));
            return this;
        } | 3.26
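
A small sketch of how the method above might be used to derive physical columns from a ROW data type; the field names are illustrative.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.types.DataType;

public class FromRowDataTypeExample {
    public static void main(String[] args) {
        // a ROW type whose fields become the physical columns "id" and "name"
        DataType rowType = DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.INT()),
                DataTypes.FIELD("name", DataTypes.STRING()));
        Schema schema = Schema.newBuilder()
                .fromRowDataType(rowType)
                .build();
        System.out.println(schema);
    }
}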
flink_Schema_build_rdh | /**
* Returns an instance of an unresolved {@link Schema}.
*/
public Schema build() {
return new Schema(columns, watermarkSpecs, primaryKey);
} | 3.26 |
flink_Schema_fromSchema_rdh | /**
* Adopts all members from the given unresolved schema.
*/
public Builder fromSchema(Schema unresolvedSchema) {
            columns.addAll(unresolvedSchema.columns);
            watermarkSpecs.addAll(unresolvedSchema.watermarkSpecs);
if (unresolvedSchema.primaryKey != null) {
primaryKeyNamed(unresolvedSchema.primaryKey.getConstraintName(), unresolvedSchema.primaryKey.getColumnNames());
}
return this;
} | 3.26 |
flink_Schema_resolve_rdh | /**
* Resolves the given {@link Schema} to a validated {@link ResolvedSchema}.
*/
public ResolvedSchema resolve(SchemaResolver resolver) {
return resolver.resolve(this);
} | 3.26 |
flink_Schema_addResolvedColumns_rdh | // ----------------------------------------------------------------------------------------
private void addResolvedColumns(List<Column> columns) {
columns.forEach(c -> {
if (c instanceof PhysicalColumn) {
                final PhysicalColumn physicalColumn = ((PhysicalColumn) (c));
                column(physicalColumn.getName(), physicalColumn.getDataType());
            } else if (c instanceof ComputedColumn) {
                final ComputedColumn computedColumn = ((ComputedColumn) (c));
                columnByExpression(computedColumn.getName(), computedColumn.getExpression());
            } else if (c instanceof MetadataColumn) {
final MetadataColumn metadataColumn = ((MetadataColumn) (c));
columnByMetadata(metadataColumn.getName(), metadataColumn.getDataType(), metadataColumn.getMetadataKey().orElse(null), metadataColumn.isVirtual());
}
});
} | 3.26 |
flink_Schema_derived_rdh | /**
* Convenience method for stating explicitly that a schema is empty and should be fully derived
* by the framework.
*
* <p>The semantics are equivalent to calling {@code Schema.newBuilder().build()}.
*
* <p>Note that derivation depends on the context. Usually, the method that accepts a {@link Schema} instance will mention whether schema derivation is supported or not.
*/
public static Schema derived() {
return EMPTY;
} | 3.26 |
flink_Schema_fromFields_rdh | /**
* Adopts the given field names and field data types as physical columns of the schema.
*/
public Builder fromFields(List<String> fieldNames, List<? extends AbstractDataType<?>> fieldDataTypes) {
Preconditions.checkNotNull(fieldNames, "Field names must not be null.");
Preconditions.checkNotNull(fieldDataTypes, "Field data types must not be null.");
Preconditions.checkArgument(fieldNames.size() == fieldDataTypes.size(), "Field names and field data types must have the same length.");
IntStream.range(0, fieldNames.size()).forEach(i -> column(fieldNames.get(i), fieldDataTypes.get(i)));
return this;
} | 3.26 |
flink_Schema_columnByExpression_rdh | /**
* Declares a computed column that is appended to this schema.
*
* <p>See {@link #columnByExpression(String, Expression)} for a detailed explanation.
*
* <p>This method uses a SQL expression that can be easily persisted in a durable catalog.
*
* <p>Example: {@code .columnByExpression("ts", "CAST(json_obj.ts AS TIMESTAMP(3))")}
*
* @param columnName
* column name
* @param sqlExpression
* computation of the column using SQL
     */
    public Builder columnByExpression(String columnName, String sqlExpression) {
return columnByExpression(columnName, new SqlCallExpression(sqlExpression));
} | 3.26 |
flink_Schema_fromResolvedSchema_rdh | /**
* Adopts all members from the given resolved schema.
*/
public Builder fromResolvedSchema(ResolvedSchema resolvedSchema) {
addResolvedColumns(resolvedSchema.getColumns());
addResolvedWatermarkSpec(resolvedSchema.getWatermarkSpecs());
resolvedSchema.getPrimaryKey().ifPresent(this::addResolvedConstraint);
return this;
} | 3.26 |
flink_Schema_column_rdh | /**
* Declares a physical column that is appended to this schema.
*
* <p>See {@link #column(String, AbstractDataType)} for a detailed explanation.
*
* <p>This method uses a type string that can be easily persisted in a durable catalog.
*
* @param columnName
* column name
* @param serializableTypeString
* data type of the column as a serializable string
* @see LogicalType#asSerializableString()
*/
public Builder column(String columnName, String serializableTypeString) {
return column(columnName, DataTypes.of(serializableTypeString));
    } | 3.26
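
A brief sketch of declaring columns with serializable type strings, as in the catalog-friendly variant above; the column names are illustrative, and the type strings are parsed via DataTypes.of(...).

import org.apache.flink.table.api.Schema;

public class SerializableTypeStringExample {
    public static void main(String[] args) {
        // type strings such as "INT NOT NULL" follow LogicalType#asSerializableString()
        Schema schema = Schema.newBuilder()
                .column("id", "INT NOT NULL")
                .column("name", "STRING")
                .build();
        System.out.println(schema);
    }
}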
flink_Schema_watermark_rdh | /**
* Declares that the given column should serve as an event-time (i.e. rowtime) attribute and
* specifies a corresponding watermark strategy as an expression.
*
* <p>See {@link #watermark(String, Expression)} for a detailed explanation.
*
* <p>This method uses a SQL expression that can be easily persisted in a durable catalog.
*
* <p>Example: {@code .watermark("ts", "ts - INTERVAL '5' SECOND")}
*/
public Builder watermark(String columnName, String sqlExpression) {
return watermark(columnName, new SqlCallExpression(sqlExpression));
} | 3.26 |
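
Putting the SQL-based builder methods together, a hedged sketch of a schema with an event-time attribute and watermark; the column names and the five-second out-of-orderness bound are illustrative.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class WatermarkExample {
    public static void main(String[] args) {
        Schema schema = Schema.newBuilder()
                .column("order_id", DataTypes.BIGINT())
                .column("ts", DataTypes.TIMESTAMP(3))
                // declares "ts" as the rowtime attribute with a 5 second watermark delay
                .watermark("ts", "ts - INTERVAL '5' SECOND")
                .build();
        System.out.println(schema);
    }
}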
flink_Schema_withComment_rdh | /**
* Apply comment to the previous column.
*/
        public Builder withComment(@Nullable String comment) {
if (columns.size() > 0) {
columns.set(columns.size() - 1, columns.get(columns.size() - 1).withComment(comment));
} else {
throw new IllegalArgumentException("Method 'withComment(...)' must be called after a column definition, " + "but there is no preceding column defined.");
            }
            return this;
} | 3.26 |
flink_Schema_newBuilder_rdh | /**
* Builder for configuring and creating instances of {@link Schema}.
*/
public static Schema.Builder newBuilder() {
return new Builder();
} | 3.26 |
flink_Schema_primaryKeyNamed_rdh | /**
     * Declares a primary key constraint for a set of given columns. A primary key uniquely
     * identifies a row in a table. None of the columns in a primary key can be nullable. The
     * primary key is informational only. It will not be enforced. It can be used for
     * optimizations. It is the data owner's responsibility to ensure uniqueness of the data.
*
* @param constraintName
* name for the primary key, can be used to reference the constraint
* @param columnNames
* columns that form a unique primary key
*/
public Builder primaryKeyNamed(String constraintName, List<String> columnNames) {
Preconditions.checkState(primaryKey == null, "Multiple primary keys are not supported.");
Preconditions.checkNotNull(constraintName, "Primary key constraint name must not be null.");
Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(constraintName), "Primary key constraint name must not be empty.");
            Preconditions.checkArgument((columnNames != null) && (columnNames.size() > 0), "Primary key constraint must be defined for at least a single column.");
primaryKey = new UnresolvedPrimaryKey(constraintName, columnNames);
return this;
} | 3.26 |
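
A short sketch of the named primary key declaration above; note that the constrained columns must be declared NOT NULL and that the constraint is informational only. The constraint and column names are illustrative.

import java.util.Arrays;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class PrimaryKeyExample {
    public static void main(String[] args) {
        Schema schema = Schema.newBuilder()
                .column("user_id", DataTypes.BIGINT().notNull())
                .column("user_name", DataTypes.STRING())
                // named constraint over a single non-nullable column
                .primaryKeyNamed("pk_user", Arrays.asList("user_id"))
                .build();
        System.out.println(schema);
    }
}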
flink_LineBreakElement_linebreak_rdh | /**
* Creates a line break in the description.
*/
public static LineBreakElement linebreak() {
return new LineBreakElement();
} | 3.26 |
flink_StreamGraphGenerator_determineSlotSharingGroup_rdh | /**
* Determines the slot sharing group for an operation based on the slot sharing group set by the
* user and the slot sharing groups of the inputs.
*
* <p>If the user specifies a group name, this is taken as is. If nothing is specified and the
* input operations all have the same group name then this name is taken. Otherwise the default
* group is chosen.
*
* @param specifiedGroup
* The group specified by the user.
* @param inputIds
* The IDs of the input operations.
*/
private String determineSlotSharingGroup(String specifiedGroup, Collection<Integer> inputIds) {
if (specifiedGroup != null) {
return specifiedGroup;
} else {
String inputGroup = null;
for (int id : inputIds) {
                String inputGroupCandidate = streamGraph.getSlotSharingGroup(id);
                if (inputGroup == null) {
                    inputGroup = inputGroupCandidate;
} else if (!inputGroup.equals(inputGroupCandidate)) {
return DEFAULT_SLOT_SHARING_GROUP;
}
}
return inputGroup == null ? DEFAULT_SLOT_SHARING_GROUP : inputGroup;
        }
    } | 3.26
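
The user-facing side of this resolution is the slotSharingGroup(...) setter on DataStream operators; the sketch below is illustrative and assumes the standard DataStream API. Operators without an explicit group fall back to the logic above: they inherit the common group of their inputs, otherwise the default group.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SlotSharingGroupExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Integer> source = env.fromElements(1, 2, 3);

        // explicitly specified group: taken as is by determineSlotSharingGroup(...)
        DataStream<Integer> doubled = source
                .map((MapFunction<Integer, Integer>) v -> v * 2)
                .slotSharingGroup("pipeline-a");

        // no group specified: inherits "pipeline-a" because its only input uses that group
        doubled.map((MapFunction<Integer, Integer>) v -> v + 1)
                .print();

        env.execute("slot sharing group sketch");
    }
}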
flink_StreamGraphGenerator_transformFeedback_rdh | /**
* Transforms a {@code FeedbackTransformation}.
*
* <p>This will recursively transform the input and the feedback edges. We return the
* concatenation of the input IDs and the feedback IDs so that downstream operations can be
* wired to both.
*
* <p>This is responsible for creating the IterationSource and IterationSink which are used to
* feed back the elements.
*/
    private <T> Collection<Integer> transformFeedback(FeedbackTransformation<T> iterate) {
if (shouldExecuteInBatchMode) {
throw new UnsupportedOperationException((((("Iterations are not supported in BATCH" + " execution mode. If you want to execute such a pipeline, please set the ") + "'") + ExecutionOptions.RUNTIME_MODE.key()) + "'=") + RuntimeExecutionMode.STREAMING.name());
}
if (iterate.getFeedbackEdges().size() <= 0) {
throw new IllegalStateException(("Iteration " + iterate) + " does not have any feedback edges.");
}
        List<Transformation<?>> inputs = iterate.getInputs();
        checkState(inputs.size() == 1);
        Transformation<?> input = inputs.get(0);
List<Integer> resultIds = new ArrayList<>();
// first transform the input stream(s) and store the result IDs
Collection<Integer> inputIds = transform(input);
resultIds.addAll(inputIds);
// the recursive transform might have already transformed this
if (alreadyTransformed.containsKey(iterate)) {
return alreadyTransformed.get(iterate);
}
// create the fake iteration source/sink pair
Tuple2<StreamNode, StreamNode> itSourceAndSink = streamGraph.createIterationSourceAndSink(iterate.getId(), getNewIterationNodeId(), getNewIterationNodeId(), iterate.getWaitTime(), iterate.getParallelism(), iterate.getMaxParallelism(), iterate.getMinResources(), iterate.getPreferredResources());
StreamNode itSource = itSourceAndSink.f0;
StreamNode itSink = itSourceAndSink.f1;
// We set the proper serializers for the sink/source
streamGraph.setSerializers(itSource.getId(), null, null, iterate.getOutputType().createSerializer(executionConfig));
streamGraph.setSerializers(itSink.getId(), iterate.getOutputType().createSerializer(executionConfig), null, null);
// also add the feedback source ID to the result IDs, so that downstream operators will
// add both as input
resultIds.add(itSource.getId());
        // add the iterate to the already-seen set with the result IDs, so that we can transform
// the feedback edges and let them stop when encountering the iterate node
alreadyTransformed.put(iterate, resultIds);
// so that we can determine the slot sharing group from all feedback edges
        List<Integer> allFeedbackIds = new ArrayList<>();
for (Transformation<T> feedbackEdge : iterate.getFeedbackEdges()) {
Collection<Integer> feedbackIds = transform(feedbackEdge);
allFeedbackIds.addAll(feedbackIds);
for (Integer feedbackId : feedbackIds) {
streamGraph.addEdge(feedbackId, itSink.getId(), 0);
}
}
String slotSharingGroup = determineSlotSharingGroup(null, allFeedbackIds);
// slot sharing group of iteration node must exist
if (slotSharingGroup == null) {
slotSharingGroup = "SlotSharingGroup-" + iterate.getId();
}
        itSink.setSlotSharingGroup(slotSharingGroup);
        itSource.setSlotSharingGroup(slotSharingGroup);
return resultIds;
} | 3.26 |
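
At the API level, the FeedbackTransformation handled above is created by DataStream#iterate(); below is a hedged sketch (streaming mode only, as the check above enforces) with illustrative logic that feeds values back into the loop until they exceed a threshold. Parallelism is pinned to 1 so the feedback edge matches the iteration head.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.IterativeStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class IterationExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // keep parallelism uniform so the feedback stream matches the iteration head
        env.setParallelism(1);

        DataStream<Long> input = env.fromElements(1L, 2L, 3L);

        // creates the FeedbackTransformation that transformFeedback(...) later expands
        // into the fake iteration source/sink pair
        IterativeStream<Long> iteration = input.iterate();

        DataStream<Long> incremented = iteration.map((MapFunction<Long, Long>) v -> v + 1);

        // values <= 100 are fed back (feedback edge); values > 100 leave the loop
        DataStream<Long> feedback = incremented.filter(v -> v <= 100);
        iteration.closeWith(feedback);

        incremented.filter(v -> v > 100).print();

        env.execute("iteration sketch");
    }
}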
flink_StreamGraphGenerator_setSlotSharingGroupResource_rdh | /**
* Specify fine-grained resource requirements for slot sharing groups.
*
* <p>Note that a slot sharing group hints the scheduler that the grouped operators CAN be
     * deployed into a shared slot. There's no guarantee that the scheduler always deploys the
     * grouped operators together. In cases where grouped operators are deployed into separate slots, the
* slot resources will be derived from the specified group requirements.
*/
public StreamGraphGenerator setSlotSharingGroupResource(Map<String, ResourceProfile> slotSharingGroupResources) {
slotSharingGroupResources.forEach((name, profile) -> {
            if (!profile.equals(ResourceProfile.UNKNOWN)) {
this.slotSharingGroupResources.put(name, profile);
}
});
return this;
} | 3.26 |
flink_StreamGraphGenerator_transformCoFeedback_rdh | /**
* Transforms a {@code CoFeedbackTransformation}.
*
 * <p>This will only transform feedback edges; the result of this transform will be wired to the
* second input of a Co-Transform. The original input is wired directly to the first input of
* the downstream Co-Transform.
*
* <p>This is responsible for creating the IterationSource and IterationSink which are used to
* feed back the elements.
*/
private <F> Collection<Integer> transformCoFeedback(CoFeedbackTransformation<F> coIterate) {
if (shouldExecuteInBatchMode) {
throw new UnsupportedOperationException((((("Iterations are not supported in BATCH" + " execution mode. If you want to execute such a pipeline, please set the ") + "'") + ExecutionOptions.RUNTIME_MODE.key()) + "'=") + RuntimeExecutionMode.STREAMING.name());}
// For Co-Iteration we don't need to transform the input and wire the input to the
// head operator by returning the input IDs, the input is directly wired to the left
// input of the co-operation. This transform only needs to return the ids of the feedback
// edges, since they need to be wired to the second input of the co-operation.
// create the fake iteration source/sink pair
Tuple2<StreamNode, StreamNode> itSourceAndSink = streamGraph.createIterationSourceAndSink(coIterate.getId(), getNewIterationNodeId(), getNewIterationNodeId(), coIterate.getWaitTime(), coIterate.getParallelism(), coIterate.getMaxParallelism(), coIterate.getMinResources(), coIterate.getPreferredResources());
StreamNode itSource = itSourceAndSink.f0;
        StreamNode itSink = itSourceAndSink.f1;
// We set the proper serializers for the sink/source
streamGraph.setSerializers(itSource.getId(), null, null, coIterate.getOutputType().createSerializer(executionConfig));
streamGraph.setSerializers(itSink.getId(), coIterate.getOutputType().createSerializer(executionConfig), null, null);
Collection<Integer> resultIds = Collections.singleton(itSource.getId());
        // add the iterate to the already-seen set with the result IDs, so that we can transform
// the feedback edges and let them stop when encountering the iterate node
alreadyTransformed.put(coIterate, resultIds);
// so that we can determine the slot sharing group from all feedback edges
List<Integer> allFeedbackIds = new ArrayList<>();
for (Transformation<F> feedbackEdge : coIterate.getFeedbackEdges()) {
Collection<Integer> feedbackIds = transform(feedbackEdge);
allFeedbackIds.addAll(feedbackIds);
for (Integer feedbackId : feedbackIds) {
streamGraph.addEdge(feedbackId, itSink.getId(), 0);
}
}
String slotSharingGroup = determineSlotSharingGroup(null, allFeedbackIds);
itSink.setSlotSharingGroup(slotSharingGroup);
itSource.setSlotSharingGroup(slotSharingGroup);
return Collections.singleton(itSource.getId());
} | 3.26 |
flink_FileSystemTableFactory_validateTimeZone_rdh | /**
* Similar logic as for {@link TableConfig}.
*/
private void validateTimeZone(String zone) {
boolean isValid;
try {
// We enforce a zone string that is compatible with both java.util.TimeZone and
// java.time.ZoneId to avoid bugs.
// In general, advertising either TZDB ID, GMT+xx:xx, or UTC is the best we can do.
isValid = TimeZone.getTimeZone(zone).toZoneId().equals(ZoneId.of(zone));
} catch (Exception e) {
isValid = false;
}
if (!isValid) {
throw new ValidationException(String.format((("Invalid time zone for '%s'. The value should be a Time Zone Database (TZDB) ID " + "such as 'America/Los_Angeles' to include daylight saving time. Fixed ") + "offsets are supported using 'GMT-03:00' or 'GMT+03:00'. Or use 'UTC' ") + "without time zone and daylight saving time.", FileSystemConnectorOptions.SINK_PARTITION_COMMIT_WATERMARK_TIME_ZONE.key()));
}
} | 3.26 |
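
A self-contained sketch of why the combined TimeZone/ZoneId check above catches bad zone strings: java.util.TimeZone silently falls back to GMT for unknown IDs, while java.time.ZoneId throws for them, so the equality test only holds for zone strings both APIs agree on. The example zone IDs are illustrative.

import java.time.ZoneId;
import java.util.TimeZone;

public class TimeZoneCheckSketch {

    static boolean isValidZone(String zone) {
        try {
            // TimeZone.getTimeZone(...) never throws; it returns GMT for unknown IDs,
            // so comparing against ZoneId.of(...) exposes the silent fallback.
            return TimeZone.getTimeZone(zone).toZoneId().equals(ZoneId.of(zone));
        } catch (Exception e) {
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(isValidZone("America/Los_Angeles")); // true
        System.out.println(isValidZone("GMT+03:00"));           // true
        System.out.println(isValidZone("UTC"));                 // true
        System.out.println(isValidZone("Mars/Olympus_Mons"));   // false: not a known TZDB ID
    }
}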
flink_FileSystemTableFactory_formatFactoryExists_rdh | /**
* Returns true if the format factory can be found using the given factory base class and
* identifier.
*/
private boolean formatFactoryExists(Context context, Class<?> factoryClass) {
Configuration options = Configuration.fromMap(context.getCatalogTable().getOptions());
String identifier = options.get(FactoryUtil.FORMAT);
if (identifier == null) {
throw new ValidationException(String.format("Table options do not contain an option key '%s' for discovering a format.", FactoryUtil.FORMAT.key()));
}
final List<Factory> factories = new LinkedList<>();ServiceLoader.load(Factory.class, context.getClassLoader()).iterator().forEachRemaining(factories::add);
final List<Factory> foundFactories = factories.stream().filter(f -> factoryClass.isAssignableFrom(f.getClass())).collect(Collectors.toList());
final List<Factory> matchingFactories = foundFactories.stream().filter(f -> f.factoryIdentifier().equals(identifier)).collect(Collectors.toList());
return !matchingFactories.isEmpty();
} | 3.26 |
flink_PartitionTimeCommitPredicate_watermarkHasPassedWithDelay_rdh | /**
 * Returns whether the watermark has passed the partition time; if true, it is time to commit
 * the partition.
*/
    private boolean watermarkHasPassedWithDelay(long watermark, LocalDateTime partitionTime, long commitDelay) {
        // here we don't convert the long watermark to a TIMESTAMP for comparison,
        // but instead convert the partition timestamp to epoch millis to avoid Daylight Saving Time issues
long epochPartTime = partitionTime.atZone(watermarkTimeZone).toInstant().toEpochMilli();
return watermark > (epochPartTime + commitDelay);
} | 3.26 |
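
A standalone sketch of the comparison above with an illustrative watermark value: the partition's LocalDateTime is converted to epoch millis in the configured zone, and the commit fires once the watermark exceeds that instant plus the delay. The zone, partition time, and delay are assumptions for illustration.

import java.time.LocalDateTime;
import java.time.ZoneId;

public class PartitionCommitSketch {

    static boolean watermarkHasPassedWithDelay(
            long watermark, LocalDateTime partitionTime, long commitDelay, ZoneId watermarkTimeZone) {
        // convert the partition time to epoch millis in the watermark's time zone,
        // instead of converting the watermark to a local timestamp (avoids DST pitfalls)
        long epochPartTime = partitionTime.atZone(watermarkTimeZone).toInstant().toEpochMilli();
        return watermark > (epochPartTime + commitDelay);
    }

    public static void main(String[] args) {
        ZoneId zone = ZoneId.of("America/Los_Angeles");
        LocalDateTime partition = LocalDateTime.of(2023, 3, 12, 1, 0); // hypothetical partition "2023-03-12 01"
        long delay = 60 * 60 * 1000; // one hour commit delay

        long watermark = partition.atZone(zone).toInstant().toEpochMilli() + delay + 1;
        System.out.println(watermarkHasPassedWithDelay(watermark, partition, delay, zone)); // true
    }
}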
flink_SubtaskGatewayImpl_tryCloseGateway_rdh | /**
* Closes the gateway. All events sent through this gateway are blocked until the gateway is
* re-opened. If the gateway is already closed, this does nothing.
*
* @return True if the gateway is closed, false if the checkpointId is incorrect.
*/
boolean tryCloseGateway(long checkpointId) {
checkRunsInMainThread();
if (currentMarkedCheckpointIds.contains(checkpointId)) {
blockedEventsMap.putIfAbsent(checkpointId, new LinkedList<>());
return true;
}
return false;
} | 3.26 |
flink_SubtaskGatewayImpl_markForCheckpoint_rdh | /**
* Marks the gateway for the next checkpoint. This remembers the checkpoint ID and will only
* allow closing the gateway for this specific checkpoint.
*
* <p>This is the gateway's mechanism to detect situations where multiple coordinator
* checkpoints would be attempted overlapping, which is currently not supported (the gateway
* doesn't keep a list of events blocked per checkpoint). It also helps to identify situations
* where the checkpoint was aborted even before the gateway was closed (by finding out that the
* {@code currentCheckpointId} was already reset to {@code NO_CHECKPOINT}.
*/
void markForCheckpoint(long checkpointId) {
checkRunsInMainThread();
        if (checkpointId > latestAttemptedCheckpointId) {
currentMarkedCheckpointIds.add(checkpointId);
latestAttemptedCheckpointId = checkpointId;
} else {
throw new IllegalStateException(String.format("Regressing checkpoint IDs. Previous checkpointId = %d, new checkpointId = %d", latestAttemptedCheckpointId, checkpointId));
}
} | 3.26 |
flink_SubtaskGatewayImpl_openGatewayAndUnmarkAllCheckpoint_rdh | /**
* Opens the gateway, releasing all buffered events.
*/
void openGatewayAndUnmarkAllCheckpoint() {
checkRunsInMainThread();
for (List<BlockedEvent> blockedEvents : blockedEventsMap.values()) {
for (BlockedEvent blockedEvent : blockedEvents) {
callSendAction(blockedEvent.sendAction, blockedEvent.future);
}
}
blockedEventsMap.clear();
currentMarkedCheckpointIds.clear();
} | 3.26 |
flink_OptionalUtils_stream_rdh | /**
* Converts the given {@link Optional} into a {@link Stream}.
*
* <p>This is akin to {@code Optional#stream} available in JDK9+.
*/
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
public static <T> Stream<T> stream(Optional<T> opt) {
return opt.map(Stream::of).orElseGet(Stream::empty);
} | 3.26 |
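
A standalone sketch of the same Optional-to-Stream bridge using only the JDK; on JDK 9+ Optional#stream does this directly, and the local helper below mirrors the one above. The values are illustrative.

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class OptionalStreamSketch {

    // JDK 8 compatible equivalent of Optional#stream (available since JDK 9)
    static <T> Stream<T> stream(Optional<T> opt) {
        return opt.map(Stream::of).orElseGet(Stream::empty);
    }

    public static void main(String[] args) {
        List<Optional<String>> values =
                Arrays.asList(Optional.of("a"), Optional.empty(), Optional.of("b"));

        // flatten to the present values only: [a, b]
        List<String> present = values.stream()
                .flatMap(OptionalStreamSketch::stream)
                .collect(Collectors.toList());
        System.out.println(present);
    }
}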
flink_OptionalUtils_firstPresent_rdh | /**
* Returns the first {@link Optional} which is present.
*/
@SafeVarargs
public static <T> Optional<T> firstPresent(Optional<T>... opts) {
        for (Optional<T> opt : opts) {
            if (opt.isPresent()) {
return opt;
}
}
return Optional.empty();
} | 3.26 |
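
A quick usage sketch of the helper above with plain JDK Optionals (the helper is reproduced locally so the example is self-contained, and the values are illustrative): the first present argument wins, and an empty Optional is returned if none is present.

import java.util.Optional;

public class FirstPresentSketch {

    @SafeVarargs
    static <T> Optional<T> firstPresent(Optional<T>... opts) {
        for (Optional<T> opt : opts) {
            if (opt.isPresent()) {
                return opt;
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Optional<String> fromConfig = Optional.empty();        // e.g. not configured
        Optional<String> fromEnvironment = Optional.of("env"); // e.g. environment variable set
        Optional<String> fallback = Optional.of("default");

        // picks "env", the first present value
        System.out.println(firstPresent(fromConfig, fromEnvironment, fallback));
    }
}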
flink_AbstractBlockResettableIterator_writeNextRecord_rdh | // --------------------------------------------------------------------------------------------
protected boolean writeNextRecord(T record) throws IOException {
try {
this.serializer.serialize(record, this.collectingView);
this.numRecordsInBuffer++;
return true;
} catch (EOFException eofex) {
            return false;
        }
} | 3.26 |