name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
flink_ExecNodeContext_withId_rdh
|
/**
* Set the unique ID of the node, so that the {@link ExecNodeContext}, together with the type
* related {@link #name} and {@link #version}, stores all the necessary info to uniquely
* reconstruct the {@link ExecNode}, and avoid storing the {@link #id} independently as a field
* in {@link ExecNodeBase}.
*/
public ExecNodeContext withId(int id) {
return new ExecNodeContext(id, this.name, this.version);
}
| 3.26 |
flink_ExecNodeContext_getId_rdh
|
/**
* The unique identifier for each ExecNode in the JSON plan.
*/
int getId() {
return checkNotNull(id);
}
| 3.26 |
flink_ExecNodeContext_getName_rdh
|
/**
* The type identifying an ExecNode in the JSON plan. See {@link ExecNodeMetadata#name()}.
*/
public String getName() {
return name;
}
/**
* The version of the ExecNode in the JSON plan. See {@link ExecNodeMetadata#version()}
| 3.26 |
flink_ExecNodeContext_resetIdCounter_rdh
|
/**
* Reset the id counter to 0.
*/
@VisibleForTesting
public static void resetIdCounter() {
idCounter.set(0);
}
| 3.26 |
flink_LocalTimeComparator_compareSerializedLocalTime_rdh
|
// --------------------------------------------------------------------------------------------
// Static Helpers for Date Comparison
// --------------------------------------------------------------------------------------------
public static int compareSerializedLocalTime(DataInputView firstSource, DataInputView secondSource, boolean ascendingComparison) throws IOException {
int cmp = firstSource.readByte() - secondSource.readByte();
if (cmp == 0) {
cmp = firstSource.readByte() - secondSource.readByte();
if (cmp == 0) {
cmp = firstSource.readByte() - secondSource.readByte();
if (cmp == 0) {
cmp = firstSource.readInt() - secondSource.readInt();
}
}
}
return ascendingComparison ? cmp : -cmp;
}
| 3.26 |
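For context on the comparator above: a minimal, self-contained sketch of the byte layout it appears to assume for a serialized LocalTime (hour, minute and second as single bytes, then nano-of-second as an int). It uses plain java.io streams instead of Flink's DataInputView/DataOutputView, so the layout is an assumption for illustration, not necessarily Flink's exact serializer.

import java.io.*;
import java.time.LocalTime;

public class LocalTimeCompareSketch {
    // Write the assumed layout: hour (byte), minute (byte), second (byte), nanos (int).
    static byte[] write(LocalTime t) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeByte(t.getHour());
        out.writeByte(t.getMinute());
        out.writeByte(t.getSecond());
        out.writeInt(t.getNano());
        return bytes.toByteArray();
    }

    // Compare two serialized values field by field, mirroring the method above.
    static int compareSerialized(byte[] a, byte[] b, boolean ascending) throws IOException {
        DataInputStream first = new DataInputStream(new ByteArrayInputStream(a));
        DataInputStream second = new DataInputStream(new ByteArrayInputStream(b));
        int cmp = first.readByte() - second.readByte();           // hour
        if (cmp == 0) cmp = first.readByte() - second.readByte(); // minute
        if (cmp == 0) cmp = first.readByte() - second.readByte(); // second
        if (cmp == 0) cmp = first.readInt() - second.readInt();   // nano-of-second
        return ascending ? cmp : -cmp;
    }

    public static void main(String[] args) throws IOException {
        byte[] a = write(LocalTime.of(10, 15, 30));
        byte[] b = write(LocalTime.of(10, 15, 31));
        System.out.println(compareSerialized(a, b, true)); // negative: a < b
    }
}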
flink_FlinkSemiAntiJoinProjectTransposeRule_onMatch_rdh
|
// ~ Methods ----------------------------------------------------------------
public void onMatch(RelOptRuleCall call) {
LogicalJoin join = call.rel(0);
LogicalProject project = call.rel(1);
// convert the semi/anti join condition to reflect the LHS with the project
// pulled up
RexNode newCondition = adjustCondition(project, join);
Join newJoin = LogicalJoin.create(project.getInput(), join.getRight(), join.getHints(), newCondition, join.getVariablesSet(), join.getJoinType());
// Create the new projection. Note that the projection expressions
// are the same as the original because they only reference the LHS
// of the semi/anti join and the semi/anti join only projects out the LHS
final RelBuilder relBuilder = call.builder();
relBuilder.push(newJoin);
relBuilder.project(project.getProjects(), project.getRowType().getFieldNames());
call.transformTo(relBuilder.build());
}
| 3.26 |
flink_FlinkSemiAntiJoinProjectTransposeRule_adjustCondition_rdh
|
/**
* Pulls the project above the semi/anti join and returns the resulting semi/anti join
* condition. As a result, the semi/anti join condition should be modified such that references
* to the LHS of a semi/anti join should now reference the children of the project that's on the
* LHS.
*
* @param project
* LogicalProject on the LHS of the semi/anti join
* @param join
* the semi/anti join
* @return the modified semi/anti join condition
*/
private RexNode adjustCondition(LogicalProject project, Join join) {
// create two RexPrograms -- the bottom one representing a
// concatenation of the project and the RHS of the semi/anti join and the
// top one representing the semi/anti join condition
RexBuilder rexBuilder = project.getCluster().getRexBuilder();
RelDataTypeFactory typeFactory = rexBuilder.getTypeFactory();
RelNode rightChild = join.getRight();
// for the bottom RexProgram, the input is a concatenation of the
// child of the project and the RHS of the semi/anti join
RelDataType bottomInputRowType = SqlValidatorUtil.deriveJoinRowType(project.getInput().getRowType(), rightChild.getRowType(), JoinRelType.INNER, typeFactory, null, join.getSystemFieldList());
RexProgramBuilder bottomProgramBuilder = new RexProgramBuilder(bottomInputRowType, rexBuilder);
// add the project expressions, then add input references for the RHS
// of the semi/anti join
for (Pair<RexNode, String> pair : project.getNamedProjects()) {
bottomProgramBuilder.addProject(pair.left, pair.right);
}
int nLeftFields = project.getInput().getRowType().getFieldCount();
List<RelDataTypeField> rightFields = rightChild.getRowType().getFieldList();
int nRightFields = rightFields.size();
for (int i = 0; i < nRightFields; i++) {
final RelDataTypeField field = rightFields.get(i);
RexNode inputRef = rexBuilder.makeInputRef(field.getType(), i + nLeftFields);
bottomProgramBuilder.addProject(inputRef, field.getName());
}
RexProgram bottomProgram = bottomProgramBuilder.getProgram();
// input rowtype into the top program is the concatenation of the
// project and the RHS of the semi/anti join
RelDataType topInputRowType = SqlValidatorUtil.deriveJoinRowType(project.getRowType(), rightChild.getRowType(), JoinRelType.INNER, typeFactory, null, join.getSystemFieldList());
RexProgramBuilder topProgramBuilder = new RexProgramBuilder(topInputRowType, rexBuilder);
topProgramBuilder.addIdentity();
topProgramBuilder.addCondition(join.getCondition());
RexProgram topProgram = topProgramBuilder.getProgram();
// merge the programs and expand out the local references to form
// the new semi/anti join condition; it now references a concatenation of
// the project's child and the RHS of the semi/anti join
RexProgram mergedProgram = RexProgramBuilder.mergePrograms(topProgram, bottomProgram, rexBuilder);
return mergedProgram.expandLocalRef(mergedProgram.getCondition());
}
| 3.26 |
flink_LongSumAggregator_aggregate_rdh
|
/**
* Adds the given value to the current aggregate.
*
* @param value
* The value to add to the aggregate.
*/
public void aggregate(long value) {
sum += value;
}
| 3.26 |
flink_GenericInMemoryCatalog_dropTable_rdh
|
// ------ tables and views ------
@Override
public void dropTable(ObjectPath tablePath, boolean ignoreIfNotExists) throws TableNotExistException {
checkNotNull(tablePath);
if (tableExists(tablePath)) {
tables.remove(tablePath);
tableStats.remove(tablePath);
tableColumnStats.remove(tablePath);
partitions.remove(tablePath);
partitionStats.remove(tablePath);
partitionColumnStats.remove(tablePath);
} else if (!ignoreIfNotExists) {
throw new TableNotExistException(getName(), tablePath);
}
}
| 3.26 |
flink_GenericInMemoryCatalog_createDatabase_rdh
|
// ------ databases ------
@Override
public void createDatabase(String databaseName, CatalogDatabase db, boolean ignoreIfExists) throws DatabaseAlreadyExistException {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName));
checkNotNull(db);
if (databaseExists(databaseName)) {
if (!ignoreIfExists) {
throw new DatabaseAlreadyExistException(getName(), databaseName);
}
} else {
databases.put(databaseName, db.copy());
}
}
| 3.26 |
flink_GenericInMemoryCatalog_isPartitionedTable_rdh
|
/**
* Check if the given table is a partitioned table. Note that "false" is returned if the table
* doesn't exist.
*/
private boolean isPartitionedTable(ObjectPath tablePath) {
CatalogBaseTable table = null;
try {
table = getTable(tablePath);
} catch (TableNotExistException e) {
return false;
}
return (table instanceof CatalogTable) && ((CatalogTable) (table)).isPartitioned();
}
| 3.26 |
flink_GenericInMemoryCatalog_createFunction_rdh
|
// ------ functions ------
@Override
public void createFunction(ObjectPath path, CatalogFunction function, boolean ignoreIfExists) throws FunctionAlreadyExistException, DatabaseNotExistException {
checkNotNull(path);
checkNotNull(function);
ObjectPath functionPath = normalize(path);
if (!databaseExists(functionPath.getDatabaseName())) {
throw new DatabaseNotExistException(getName(), functionPath.getDatabaseName());
}
if (functionExists(functionPath)) {
if (!ignoreIfExists) {
throw new FunctionAlreadyExistException(getName(), functionPath);
}
} else {
functions.put(functionPath, function.copy());
}
}
| 3.26 |
flink_GenericInMemoryCatalog_isFullPartitionSpec_rdh
|
/**
* Check if the given partitionSpec is full partition spec for the given table.
*/
private boolean isFullPartitionSpec(ObjectPath tablePath, CatalogPartitionSpec partitionSpec) throws TableNotExistException {
CatalogBaseTable baseTable = getTable(tablePath);
if (!(baseTable instanceof CatalogTable)) {
return false;
}
CatalogTable v16 = ((CatalogTable) (baseTable));
List<String> partitionKeys = v16.getPartitionKeys();
Map<String, String> spec = partitionSpec.getPartitionSpec();
// The size of partition spec should not exceed the size of partition keys
return (partitionKeys.size() == spec.size()) && spec.keySet().containsAll(partitionKeys);
}
| 3.26 |
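A minimal sketch of the same "full partition spec" check above, using plain collections in place of Flink's catalog classes; the partition key names and values are made up for illustration.

import java.util.*;

public class FullPartitionSpecSketch {
    // Full means: same number of entries and every partition key is present in the spec.
    static boolean isFullSpec(List<String> partitionKeys, Map<String, String> spec) {
        return partitionKeys.size() == spec.size() && spec.keySet().containsAll(partitionKeys);
    }

    public static void main(String[] args) {
        List<String> keys = Arrays.asList("dt", "hr");
        System.out.println(isFullSpec(keys, Map.of("dt", "2024-01-01", "hr", "08"))); // true
        System.out.println(isFullSpec(keys, Map.of("dt", "2024-01-01")));             // false (partial spec)
    }
}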
flink_GenericInMemoryCatalog_getTableStatistics_rdh
|
// ------ statistics ------
@Override
public CatalogTableStatistics getTableStatistics(ObjectPath tablePath) throws TableNotExistException {
checkNotNull(tablePath);
if (!tableExists(tablePath)) {
throw new TableNotExistException(getName(), tablePath);
}
if (!isPartitionedTable(tablePath)) {
CatalogTableStatistics result = tableStats.get(tablePath);
return result != null ? result.copy() : CatalogTableStatistics.UNKNOWN;
} else {
return CatalogTableStatistics.UNKNOWN;
}
}
| 3.26 |
flink_GenericInMemoryCatalog_createTable_rdh
|
// ------ tables ------
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists) throws TableAlreadyExistException, DatabaseNotExistException {
checkNotNull(tablePath);
checkNotNull(table);
if (!databaseExists(tablePath.getDatabaseName())) {
throw new DatabaseNotExistException(getName(), tablePath.getDatabaseName());
}
if (tableExists(tablePath)) {
if (!ignoreIfExists) {
throw new TableAlreadyExistException(getName(), tablePath);
}
} else {
tables.put(tablePath, table.copy());
if (isPartitionedTable(tablePath)) {
partitions.put(tablePath, new LinkedHashMap<>());
partitionStats.put(tablePath, new LinkedHashMap<>());
partitionColumnStats.put(tablePath, new LinkedHashMap<>());
}
}
}
| 3.26 |
flink_GenericInMemoryCatalog_createPartition_rdh
|
// ------ partitions ------
@Override
public void createPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, CatalogPartition partition, boolean ignoreIfExists) throws TableNotExistException, TableNotPartitionedException, PartitionSpecInvalidException, PartitionAlreadyExistsException, CatalogException {
checkNotNull(tablePath);
checkNotNull(partitionSpec);
checkNotNull(partition);
ensureTableExists(tablePath);
ensurePartitionedTable(tablePath);
ensureFullPartitionSpec(tablePath, partitionSpec);
if (partitionExists(tablePath, partitionSpec)) {
if (!ignoreIfExists) {
throw new PartitionAlreadyExistsException(getName(), tablePath, partitionSpec);
}
}
partitions.get(tablePath).put(partitionSpec, partition.copy());
}
| 3.26 |
flink_ReduceCombineDriver_setup_rdh
|
// ------------------------------------------------------------------------
@Override
public void setup(TaskContext<ReduceFunction<T>, T> context) {
taskContext = context;
running = true;
}
| 3.26 |
flink_HiveTableSource_toHiveTablePartition_rdh
|
/**
* Convert partition to HiveTablePartition.
*/
public HiveTablePartition toHiveTablePartition(Partition partition) {
return HivePartitionUtils.toHiveTablePartition(partitionKeys, tableProps, partition);
}
| 3.26 |
flink_CheckpointStatistics_generateCheckpointStatistics_rdh
|
// -------------------------------------------------------------------------
// Static factory methods
// -------------------------------------------------------------------------
public static CheckpointStatistics generateCheckpointStatistics(AbstractCheckpointStats checkpointStats, boolean includeTaskCheckpointStatistics) {
Preconditions.checkNotNull(checkpointStats);
Map<JobVertexID, TaskCheckpointStatistics> checkpointStatisticsPerTask;
if (includeTaskCheckpointStatistics) {
Collection<TaskStateStats> taskStateStats = checkpointStats.getAllTaskStateStats();
checkpointStatisticsPerTask = CollectionUtil.newHashMapWithExpectedSize(taskStateStats.size());
for (TaskStateStats taskStateStat : taskStateStats) {checkpointStatisticsPerTask.put(taskStateStat.getJobVertexId(), new TaskCheckpointStatistics(checkpointStats.getCheckpointId(), checkpointStats.getStatus(), taskStateStat.getLatestAckTimestamp(), taskStateStat.getCheckpointedSize(), taskStateStat.getStateSize(), taskStateStat.getEndToEndDuration(checkpointStats.getTriggerTimestamp()), 0, taskStateStat.getProcessedDataStats(), taskStateStat.getPersistedDataStats(), taskStateStat.getNumberOfSubtasks(), taskStateStat.getNumberOfAcknowledgedSubtasks()));
}
} else {
checkpointStatisticsPerTask = Collections.emptyMap();
}
String savepointFormat = null;
SnapshotType snapshotType = checkpointStats.getProperties().getCheckpointType();
if (snapshotType instanceof SavepointType) {
savepointFormat = ((SavepointType) (snapshotType)).getFormatType().name();
}
if (checkpointStats instanceof CompletedCheckpointStats) {
final CompletedCheckpointStats completedCheckpointStats = ((CompletedCheckpointStats) (checkpointStats));
return new CheckpointStatistics.CompletedCheckpointStatistics(completedCheckpointStats.getCheckpointId(), completedCheckpointStats.getStatus(), snapshotType.isSavepoint(), savepointFormat, completedCheckpointStats.getTriggerTimestamp(), completedCheckpointStats.getLatestAckTimestamp(), completedCheckpointStats.getCheckpointedSize(), completedCheckpointStats.getStateSize(), completedCheckpointStats.getEndToEndDuration(), 0, completedCheckpointStats.getProcessedData(), completedCheckpointStats.getPersistedData(), completedCheckpointStats.getNumberOfSubtasks(), completedCheckpointStats.getNumberOfAcknowledgedSubtasks(), RestAPICheckpointType.valueOf(completedCheckpointStats.getProperties().getCheckpointType(), completedCheckpointStats.isUnalignedCheckpoint()), checkpointStatisticsPerTask, completedCheckpointStats.getExternalPath(), completedCheckpointStats.isDiscarded());
} else if (checkpointStats instanceof FailedCheckpointStats) {
final FailedCheckpointStats failedCheckpointStats = ((FailedCheckpointStats) (checkpointStats));
return new CheckpointStatistics.FailedCheckpointStatistics(failedCheckpointStats.getCheckpointId(), failedCheckpointStats.getStatus(), failedCheckpointStats.getProperties().isSavepoint(), savepointFormat, failedCheckpointStats.getTriggerTimestamp(), failedCheckpointStats.getLatestAckTimestamp(), failedCheckpointStats.getCheckpointedSize(), failedCheckpointStats.getStateSize(), failedCheckpointStats.getEndToEndDuration(), 0, failedCheckpointStats.getProcessedData(), failedCheckpointStats.getPersistedData(), failedCheckpointStats.getNumberOfSubtasks(), failedCheckpointStats.getNumberOfAcknowledgedSubtasks(), RestAPICheckpointType.valueOf(failedCheckpointStats.getProperties().getCheckpointType(), failedCheckpointStats.isUnalignedCheckpoint()), checkpointStatisticsPerTask, failedCheckpointStats.getFailureTimestamp(), failedCheckpointStats.getFailureMessage());
} else if (checkpointStats instanceof PendingCheckpointStats) {
final PendingCheckpointStats pendingCheckpointStats = ((PendingCheckpointStats) (checkpointStats));return new CheckpointStatistics.PendingCheckpointStatistics(pendingCheckpointStats.getCheckpointId(), pendingCheckpointStats.getStatus(), pendingCheckpointStats.getProperties().isSavepoint(), savepointFormat, pendingCheckpointStats.getTriggerTimestamp(), pendingCheckpointStats.getLatestAckTimestamp(), pendingCheckpointStats.getCheckpointedSize(), pendingCheckpointStats.getStateSize(), pendingCheckpointStats.getEndToEndDuration(), 0, pendingCheckpointStats.getProcessedData(), pendingCheckpointStats.getPersistedData(), pendingCheckpointStats.getNumberOfSubtasks(),
pendingCheckpointStats.getNumberOfAcknowledgedSubtasks(), RestAPICheckpointType.valueOf(pendingCheckpointStats.getProperties().getCheckpointType(), pendingCheckpointStats.isUnalignedCheckpoint()), checkpointStatisticsPerTask);
} else {
throw new IllegalArgumentException(("Given checkpoint stats object of type " + checkpointStats.getClass().getName()) + " cannot be converted.");
}
}
| 3.26 |
flink_OrcShim_defaultShim_rdh
|
/**
* Default shim for the ORC dependency bundled with Flink; the 2.3.0 shim is used.
*/
static OrcShim<VectorizedRowBatch> defaultShim() {
return new OrcShimV230();
}
| 3.26 |
flink_OrcShim_createShim_rdh
|
/**
* Create shim from hive version.
*/
static OrcShim<VectorizedRowBatch> createShim(String hiveVersion) {
if (hiveVersion.startsWith("2.0")) {
return new OrcShimV200();
} else if (hiveVersion.startsWith("2.1")) {
return new OrcShimV210();
} else if ((hiveVersion.startsWith("2.2") || hiveVersion.startsWith("2.3")) || hiveVersion.startsWith("3.")) {
return new OrcShimV230();
} else {
throw new UnsupportedOperationException("Unsupported hive version for orc shim: " + hiveVersion);
}
}
| 3.26 |
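Hypothetical usage of the factory above (as a fragment; it assumes flink-orc and its Hive/ORC dependencies are on the classpath and the relevant imports are present). The version strings are examples only.

OrcShim<VectorizedRowBatch> shim200 = OrcShim.createShim("2.0.1"); // resolves to OrcShimV200
OrcShim<VectorizedRowBatch> shim230 = OrcShim.createShim("2.3.9"); // resolves to OrcShimV230
OrcShim<VectorizedRowBatch> shim3x  = OrcShim.createShim("3.1.2"); // 3.x also maps to OrcShimV230
// createShim("1.2.1") would throw UnsupportedOperationException (no shim for that version).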
flink_BeamStateRequestHandler_of_rdh
|
/**
* Create a {@link BeamStateRequestHandler}.
*
* @param keyedStateBackend
* if null, {@link BeamStateRequestHandler} will throw an error when
* receiving keyed-state requests.
* @param operatorStateBackend
* if null, {@link BeamStateRequestHandler} will throw an error
* when receiving operator-state requests.
* @param keySerializer
* key serializer for {@link KeyedStateBackend}, must not be null if {@code keyedStateBackend} is not null.
* @param namespaceSerializer
* namespace serializer for {@link KeyedStateBackend}, may be null
* when there's no window logic involved.
* @param config
* state-related configurations
* @return A new {@link BeamStateRequestHandler}
*/
public static BeamStateRequestHandler of(
@Nullable KeyedStateBackend<?> keyedStateBackend,
@Nullable OperatorStateBackend operatorStateBackend,
@Nullable TypeSerializer<?> keySerializer,
@Nullable TypeSerializer<?> namespaceSerializer,
ReadableConfig config) {
BeamStateStore keyedStateStore = BeamStateStore.unsupported();
if (keyedStateBackend != null) {
assert keySerializer != null;
keyedStateStore = new BeamKeyedStateStore(keyedStateBackend, keySerializer, namespaceSerializer);
}
BeamStateStore operatorStateStore = BeamStateStore.unsupported();
if (operatorStateBackend != null) {
operatorStateStore = new BeamOperatorStateStore(operatorStateBackend);
}
BeamStateHandler<ListState<byte[]>> bagStateBeamStateHandler = new BeamBagStateHandler(namespaceSerializer);
BeamStateHandler<MapState<ByteArrayWrapper, byte[]>> mapStateBeamStateHandler = new BeamMapStateHandler(config);
return new BeamStateRequestHandler(keyedStateStore, operatorStateStore, bagStateBeamStateHandler, mapStateBeamStateHandler);
}
| 3.26 |
flink_JobIdsWithStatusOverview_hashCode_rdh
|
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return jobsWithStatus.hashCode();
}
| 3.26 |
flink_JobIdsWithStatusOverview_combine_rdh
|
// ------------------------------------------------------------------------
private static Collection<JobIdWithStatus> combine(Collection<JobIdWithStatus> first, Collection<JobIdWithStatus> second) {
checkNotNull(first);
checkNotNull(second);
ArrayList<JobIdWithStatus> result = new ArrayList<>(first.size() + second.size());
result.addAll(first);
result.addAll(second);
return result;
}
| 3.26 |
flink_HeaderlessChannelWriterOutputView_close_rdh
|
/**
* Closes this OutputView and the underlying writer, and returns the number of bytes in the last
* memory segment.
*/
@Override
public int close() throws IOException {
if (!writer.isClosed()) {
int currentPositionInSegment = getCurrentPositionInSegment();
// write last segment
writer.writeBlock(getCurrentSegment());
clear();
writer.getReturnQueue().clear();
this.writer.close();
return currentPositionInSegment;
}
return -1;
}
| 3.26 |
flink_ResultRetryStrategy_fixedDelayRetry_rdh
|
/**
* Create a fixed-delay retry strategy by given params.
*/
public static ResultRetryStrategy fixedDelayRetry(int maxAttempts, long backoffTimeMillis, Predicate<Collection<RowData>> resultPredicate) {
return new ResultRetryStrategy(new AsyncRetryStrategies.FixedDelayRetryStrategyBuilder(maxAttempts, backoffTimeMillis).ifResult(resultPredicate).build());
}
| 3.26 |
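Hypothetical usage of the factory above (as a fragment; it assumes the Flink table runtime classes and imports used by the snippet): retry an async lookup up to three times with a one-second delay while the returned row collection is still empty.

// Retry when the lookup result is null or empty; stop after 3 attempts or a non-empty result.
ResultRetryStrategy retryStrategy =
        ResultRetryStrategy.fixedDelayRetry(3, 1000L, rows -> rows == null || rows.isEmpty());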
flink_SSLUtils_createRestSSLContext_rdh
|
/**
* Creates an SSL context for clients against the external REST endpoint.
*/
@Nullable
@VisibleForTesting
public static SSLContext createRestSSLContext(Configuration config, boolean clientMode) throws Exception {
ClientAuth clientAuth = (SecurityOptions.isRestSSLAuthenticationEnabled(config)) ? ClientAuth.REQUIRE : ClientAuth.NONE;
JdkSslContext nettySSLContext = ((JdkSslContext) (createRestNettySSLContext(config, clientMode, clientAuth, JDK)));
if (nettySSLContext != null) {
return nettySSLContext.context();
} else {
return null;
}
}
| 3.26 |
flink_SSLUtils_getAndCheckOption_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
private static String getAndCheckOption(Configuration config, ConfigOption<String> primaryOption, ConfigOption<String> fallbackOption) {
String value = config.getString(primaryOption, config.getString(fallbackOption));
if (value != null) {
return value;
} else {
throw new IllegalConfigurationException(((("The config option " + primaryOption.key()) + " or ") +
fallbackOption.key()) + " is missing.");
}
}
| 3.26 |
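A framework-free sketch of the same primary/fallback lookup pattern, with a plain Map standing in for Flink's Configuration; the option keys are illustrative only.

import java.util.Map;

public class FallbackOptionSketch {
    // Prefer the primary key; fall back to the secondary key; fail if neither is set.
    static String getAndCheck(Map<String, String> config, String primaryKey, String fallbackKey) {
        String value = config.getOrDefault(primaryKey, config.get(fallbackKey));
        if (value == null) {
            throw new IllegalArgumentException(
                    "The config option " + primaryKey + " or " + fallbackKey + " is missing.");
        }
        return value;
    }

    public static void main(String[] args) {
        Map<String, String> config = Map.of("security.ssl.internal.keystore", "/path/ks.jks");
        System.out.println(getAndCheck(config,
                "security.ssl.internal.keystore", "security.ssl.keystore"));
    }
}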
flink_SSLUtils_createSSLClientSocketFactory_rdh
|
/**
* Creates a factory for SSL Client Sockets from the given configuration. SSL Client Sockets are
* always part of internal communication.
*/
public static SocketFactory createSSLClientSocketFactory(Configuration config) throws Exception {
SSLContext sslContext = createInternalSSLContext(config, true);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled");
}
return sslContext.getSocketFactory();
}
| 3.26 |
flink_SSLUtils_m0_rdh
|
/**
* Creates a {@link SSLHandlerFactory} to be used by the REST Servers.
*
* @param config
* The application configuration.
*/
public static SSLHandlerFactory m0(final Configuration config) throws Exception {
ClientAuth clientAuth = (SecurityOptions.isRestSSLAuthenticationEnabled(config)) ? ClientAuth.REQUIRE : ClientAuth.NONE;
SslContext sslContext = createRestNettySSLContext(config, false, clientAuth);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for REST endpoints.");
}
return new SSLHandlerFactory(sslContext, -1, -1);
}
| 3.26 |
flink_SSLUtils_createInternalClientSSLEngineFactory_rdh
|
/**
* Creates a SSLEngineFactory to be used by internal communication client endpoints.
*/
public static SSLHandlerFactory createInternalClientSSLEngineFactory(final Configuration config) throws Exception {
SslContext sslContext = m1(config, true);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for internal communication.");
}
return new SSLHandlerFactory(sslContext, config.getInteger(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT), config.getInteger(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
}
| 3.26 |
flink_SSLUtils_createSSLServerSocketFactory_rdh
|
/**
* Creates a factory for SSL Server Sockets from the given configuration. SSL Server Sockets are
* always part of internal communication.
*/
public static ServerSocketFactory createSSLServerSocketFactory(Configuration config) throws Exception {
SSLContext sslContext = createInternalSSLContext(config, false);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled");
}
String[] protocols = getEnabledProtocols(config);
String[] cipherSuites = getEnabledCipherSuites(config);
SSLServerSocketFactory factory = sslContext.getServerSocketFactory();
return new ConfiguringSSLServerSocketFactory(factory, protocols, cipherSuites);
}
| 3.26 |
flink_SSLUtils_createRestNettySSLContext_rdh
|
/**
* Creates an SSL context for the external REST SSL. If mutual authentication is configured the
* client and the server side configuration are identical.
*/
@Nullable
public static SslContext createRestNettySSLContext(Configuration config, boolean clientMode, ClientAuth clientAuth, SslProvider provider) throws Exception {
checkNotNull(config, "config");
if (!SecurityOptions.isRestSSLEnabled(config)) {
return null;
}
String[] sslProtocols = getEnabledProtocols(config);
List<String> ciphers = Arrays.asList(getEnabledCipherSuites(config));
final SslContextBuilder sslContextBuilder;
if (clientMode) {
sslContextBuilder = SslContextBuilder.forClient();
if (clientAuth != ClientAuth.NONE) {
KeyManagerFactory kmf = getKeyManagerFactory(config, false, provider);
sslContextBuilder.keyManager(kmf);
}
} else {
KeyManagerFactory kmf = getKeyManagerFactory(config, false, provider);
sslContextBuilder = SslContextBuilder.forServer(kmf);
}
if (clientMode || (clientAuth != ClientAuth.NONE)) {
Optional<TrustManagerFactory> tmf = getTrustManagerFactory(config, false);
// Use specific ciphers and protocols if SSL is configured with self-signed
// certificates (user-supplied truststore)
tmf.map(tm -> sslContextBuilder.trustManager(tm).protocols(sslProtocols).ciphers(ciphers).clientAuth(clientAuth)); }
return sslContextBuilder.sslProvider(provider).build();
}
| 3.26 |
flink_SSLUtils_m2_rdh
|
/**
* Creates the SSL Context for internal SSL, if internal SSL is configured. For internal SSL,
* the client and server side configuration are identical, because of mutual authentication.
*/
@Nullable
private static SslContext m2(Configuration config, boolean clientMode, SslProvider provider) throws Exception {
checkNotNull(config, "config");if (!SecurityOptions.isInternalSSLEnabled(config)) {
return null;
}
String[] sslProtocols = getEnabledProtocols(config);
List<String> ciphers = Arrays.asList(getEnabledCipherSuites(config));
int sessionCacheSize = config.getInteger(SecurityOptions.SSL_INTERNAL_SESSION_CACHE_SIZE);
int sessionTimeoutMs = config.getInteger(SecurityOptions.SSL_INTERNAL_SESSION_TIMEOUT);
KeyManagerFactory kmf = getKeyManagerFactory(config, true, provider);
ClientAuth clientAuth = ClientAuth.REQUIRE;
final SslContextBuilder sslContextBuilder;
if (clientMode) {
sslContextBuilder = SslContextBuilder.forClient().keyManager(kmf);
} else {
sslContextBuilder = SslContextBuilder.forServer(kmf);
}
Optional<TrustManagerFactory> tmf = getTrustManagerFactory(config, true);
tmf.map(sslContextBuilder::trustManager);
return sslContextBuilder.sslProvider(provider).protocols(sslProtocols).ciphers(ciphers).clientAuth(clientAuth).sessionCacheSize(sessionCacheSize).sessionTimeout(sessionTimeoutMs / 1000).build();
}
| 3.26 |
flink_SSLUtils_createRestClientSSLEngineFactory_rdh
|
/**
* Creates a {@link SSLHandlerFactory} to be used by the REST Clients.
*
* @param config
* The application configuration.
*/
public static SSLHandlerFactory createRestClientSSLEngineFactory(final Configuration config) throws Exception {
ClientAuth clientAuth = (SecurityOptions.isRestSSLAuthenticationEnabled(config)) ? ClientAuth.REQUIRE : ClientAuth.NONE;
SslContext sslContext = createRestNettySSLContext(config, true, clientAuth);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for REST endpoints.");
}
return new SSLHandlerFactory(sslContext, -1, -1);
}
| 3.26 |
flink_SSLUtils_createInternalServerSSLEngineFactory_rdh
|
/**
* Creates a SSLEngineFactory to be used by internal communication server endpoints.
*/
public static SSLHandlerFactory createInternalServerSSLEngineFactory(final Configuration config) throws Exception {
SslContext sslContext = m1(config, false);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for internal communication.");
}
return new SSLHandlerFactory(sslContext, config.getInteger(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT), config.getInteger(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
}
| 3.26 |
flink_SSLUtils_createInternalSSLContext_rdh
|
/**
* Creates the SSL Context for internal SSL, if internal SSL is configured. For internal SSL,
* the client and server side configuration are identical, because of mutual authentication.
*/
@Nullable
private static SSLContext createInternalSSLContext(Configuration config, boolean clientMode) throws Exception {
JdkSslContext nettySSLContext = ((JdkSslContext) (m2(config, clientMode, JDK)));
if (nettySSLContext != null) {
return nettySSLContext.context();
} else {
return null;
}
}
| 3.26 |
flink_HiveJdbcParameterUtils_m0_rdh
|
/**
* Uses the given {@code parameters} to set either the {@code hiveConf} or the session configs,
* depending on which kind each parameter belongs to.
*/
public static void m0(HiveConf hiveConf, Map<String, String> sessionConfigs, Map<String, String> parameters) {
for (Map.Entry<String, String> entry : parameters.entrySet()) {
String key = entry.getKey();
if (key.startsWith(SET_PREFIX)) {
String newKey = key.substring(SET_PREFIX.length());
HiveSetProcessor.setVariable(hiveConf, sessionConfigs, newKey, entry.getValue());
} else if (!key.startsWith(USE_PREFIX)) {
sessionConfigs.put(key, entry.getValue());
}
}
}
| 3.26 |
flink_YarnTaskExecutorRunner_main_rdh
|
// ------------------------------------------------------------------------
// Program entry point
// ------------------------------------------------------------------------
/**
* The entry point for the YARN task executor runner.
*
* @param args
* The command line arguments.
*/
public static void main(String[] args) {
EnvironmentInformation.logEnvironmentInfo(LOG, "YARN TaskExecutor runner", args);
SignalHandler.register(LOG);
JvmShutdownSafeguard.installAsShutdownHook(LOG);
runTaskManagerSecurely(args);
}
| 3.26 |
flink_YarnTaskExecutorRunner_runTaskManagerSecurely_rdh
|
/**
* The instance entry point for the YARN task executor. Obtains user group information and calls
* the main work method {@link TaskManagerRunner#runTaskManager(Configuration, PluginManager)}
* as a privileged action.
*
* @param args
* The command line arguments.
*/
private static void runTaskManagerSecurely(String[] args) {
Configuration configuration = null;
try {
LOG.debug("All environment variables: {}", ENV);
final String currDir = ENV.get(Environment.PWD.key());
LOG.info("Current working Directory: {}", currDir);
configuration = TaskManagerRunner.loadConfiguration(args);
setupAndModifyConfiguration(configuration, currDir, ENV);
} catch (Throwable t) {
LOG.error("YARN TaskManager initialization failed.", t);
System.exit(INIT_ERROR_EXIT_CODE);
}
TaskManagerRunner.runTaskManagerProcessSecurely(Preconditions.checkNotNull(configuration));
}
| 3.26 |
flink_FlinkS3FileSystem_getEntropyInjectionKey_rdh
|
// ------------------------------------------------------------------------
@Nullable
@Override
public String getEntropyInjectionKey() {
return entropyInjectionKey;
}
| 3.26 |
flink_TableConfigUtils_isOperatorDisabled_rdh
|
/**
* Returns whether the given operator type is disabled.
*
* @param tableConfig
* TableConfig object
* @param operatorType
* operator type to check
* @return true if the given operator is disabled.
*/
public static boolean isOperatorDisabled(TableConfig tableConfig, OperatorType operatorType) {
String value = tableConfig.get(TABLE_EXEC_DISABLED_OPERATORS);
if (value == null) {
return false;
}
String[] operators = value.split(",");
Set<OperatorType> operatorSets = new HashSet<>();
for (String operator : operators) {
operator = operator.trim();
if (operator.isEmpty()) {
continue;
}
if (operator.equals("HashJoin")) {
operatorSets.add(OperatorType.BroadcastHashJoin);
operatorSets.add(OperatorType.ShuffleHashJoin);
} else {
operatorSets.add(OperatorType.valueOf(operator));
}
}
return operatorSets.contains(operatorType);
}
| 3.26 |
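A minimal sketch of the operator-list parsing above, with a plain enum standing in for Flink's OperatorType; the config value keeps the same comma-separated format, and "HashJoin" expands to both hash-join variants exactly as in the method.

import java.util.*;

public class DisabledOperatorsSketch {
    enum Op { NestedLoopJoin, ShuffleHashJoin, BroadcastHashJoin, SortMergeJoin, HashAgg, SortAgg }

    static Set<Op> parseDisabled(String value) {
        Set<Op> disabled = EnumSet.noneOf(Op.class);
        if (value == null) return disabled;
        for (String op : value.split(",")) {
            op = op.trim();
            if (op.isEmpty()) continue;
            if (op.equals("HashJoin")) {            // "HashJoin" disables both hash join variants
                disabled.add(Op.BroadcastHashJoin);
                disabled.add(Op.ShuffleHashJoin);
            } else {
                disabled.add(Op.valueOf(op));
            }
        }
        return disabled;
    }

    public static void main(String[] args) {
        // With "NestedLoopJoin, HashJoin", both hash join variants count as disabled.
        System.out.println(parseDisabled("NestedLoopJoin, HashJoin").contains(Op.ShuffleHashJoin)); // true
    }
}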
flink_TableConfigUtils_getCalciteConfig_rdh
|
/**
* Returns {@link CalciteConfig} wraps in the given TableConfig.
*
* @param tableConfig
* TableConfig object
* @return wrapped CalciteConfig.
*/
public static CalciteConfig getCalciteConfig(TableConfig tableConfig) {
return tableConfig.getPlannerConfig().unwrap(CalciteConfig.class).orElse(CalciteConfig$.MODULE$.DEFAULT());
}
| 3.26 |
flink_TableConfigUtils_getMaxIdleStateRetentionTime_rdh
|
/**
* Similar to {@link TableConfig#getMaxIdleStateRetentionTime()}.
*
* @see TableConfig#getMaxIdleStateRetentionTime()
*/
@Deprecated
public static long getMaxIdleStateRetentionTime(ReadableConfig tableConfig) {
return (tableConfig.get(ExecutionConfigOptions.IDLE_STATE_RETENTION).toMillis() * 3) / 2;
}
| 3.26 |
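A short worked example of the factor above, assuming a configured minimum idle-state retention of 10 minutes: the derived maximum is 1.5x that value (integer arithmetic on milliseconds).

import java.time.Duration;

public class MaxIdleRetentionExample {
    public static void main(String[] args) {
        long minRetentionMillis = Duration.ofMinutes(10).toMillis(); // configured minimum retention
        long maxRetentionMillis = (minRetentionMillis * 3) / 2;      // 900000 ms = 15 minutes
        System.out.println(maxRetentionMillis);
    }
}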
flink_TableConfigUtils_m0_rdh
|
/**
* Returns the aggregate phase strategy configuration.
*
* @param tableConfig
* TableConfig object
* @return the aggregate phase strategy
*/
public static AggregatePhaseStrategy m0(ReadableConfig tableConfig) {
String aggPhaseConf = tableConfig.get(TABLE_OPTIMIZER_AGG_PHASE_STRATEGY).trim();
if (aggPhaseConf.isEmpty()) {
return AggregatePhaseStrategy.AUTO;
} else {
return AggregatePhaseStrategy.valueOf(aggPhaseConf);
}
}
| 3.26 |
flink_TableConfigUtils_getLocalTimeZone_rdh
|
/**
* Similar to {@link TableConfig#getLocalTimeZone()} but extracting it from a generic {@link ReadableConfig}.
*
* @see TableConfig#getLocalTimeZone()
*/
public static ZoneId getLocalTimeZone(ReadableConfig tableConfig) {
final String zone = tableConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
if (TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)) {
return ZoneId.systemDefault();
}
validateTimeZone(zone);
return ZoneId.of(zone);
}
| 3.26 |
flink_OperatorCoordinatorHolder_start_rdh
|
// ------------------------------------------------------------------------
// OperatorCoordinator Interface
// ------------------------------------------------------------------------
public void start() throws Exception {
mainThreadExecutor.assertRunningInMainThread();
checkState(context.isInitialized(), "Coordinator Context is not yet initialized");
coordinator.start();
}
| 3.26 |
flink_OperatorCoordinatorHolder_coordinator_rdh
|
// ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public OperatorCoordinator coordinator() {
return coordinator;
}
| 3.26 |
flink_OperatorCoordinatorHolder_abortCurrentTriggering_rdh
|
// ------------------------------------------------------------------------
// Checkpointing Callbacks
// ------------------------------------------------------------------------
@Override
public void abortCurrentTriggering() {
// unfortunately, this method does not run in the scheduler executor, but in the
// checkpoint coordinator time thread.
// we can remove the delegation once the checkpoint coordinator runs fully in the
// scheduler's main thread executor
mainThreadExecutor.execute(() -> subtaskGatewayMap.values().forEach(SubtaskGatewayImpl::openGatewayAndUnmarkLastCheckpointIfAny));
}
| 3.26 |
flink_OperatorCoordinatorHolder_setupAllSubtaskGateways_rdh
|
// ------------------------------------------------------------------------
// miscellaneous helpers
// ------------------------------------------------------------------------
private void setupAllSubtaskGateways() {
for (int i = 0; i < operatorParallelism; i++) {
setupSubtaskGateway(i);
}
}
| 3.26 |
flink_TimeUtils_toDuration_rdh
|
/**
* Translates {@link Time} to {@link Duration}.
*
* @param time
* time to transform into duration
* @return duration equal to the given time
*/
public static Duration
toDuration(Time time) {
return Duration.of(time.getSize(), toChronoUnit(time.getUnit()));
}
| 3.26 |
flink_TimeUtils_formatWithHighestUnit_rdh
|
/**
* Pretty prints the duration as the lowest granularity unit that does not lose precision.
*
* <p>Examples:
*
* <pre>{@code Duration.ofMillis(60000) will be printed as 1 min
* Duration.ofHours(1).plusSeconds(1) will be printed as 3601 s}</pre>
*
* <b>NOTE:</b> It supports only durations that fit into long.
*/
public static String formatWithHighestUnit(Duration duration) {
long nanos = duration.toNanos();
List<TimeUnit> orderedUnits = Arrays.asList(TimeUnit.NANOSECONDS, TimeUnit.f1, TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.f0, TimeUnit.DAYS);
TimeUnit highestIntegerUnit = IntStream.range(0, orderedUnits.size()).sequential().filter(idx -> (nanos % orderedUnits.get(idx).unit.getDuration().toNanos()) != 0).boxed().findFirst().map(idx ->
{
if (idx == 0) {
return orderedUnits.get(0);
} else {
return orderedUnits.get(idx - 1); }
}).orElse(TimeUnit.MILLISECONDS);
return String.format("%d %s", nanos / highestIntegerUnit.unit.getDuration().toNanos(), highestIntegerUnit.getLabels().get(0));
}
| 3.26 |
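A self-contained sketch of the same idea: print a Duration with the largest unit that still divides it evenly, so no precision is lost. It uses java.time.temporal.ChronoUnit instead of Flink's internal TimeUnit wrapper, so the printed unit labels differ from the method above.

import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.List;

public class HighestUnitFormatSketch {
    static String format(Duration duration) {
        long nanos = duration.toNanos();
        List<ChronoUnit> units = Arrays.asList(
                ChronoUnit.DAYS, ChronoUnit.HOURS, ChronoUnit.MINUTES,
                ChronoUnit.SECONDS, ChronoUnit.MILLIS, ChronoUnit.MICROS, ChronoUnit.NANOS);
        for (ChronoUnit unit : units) { // largest unit first
            long unitNanos = unit.getDuration().toNanos();
            if (nanos % unitNanos == 0) {
                return (nanos / unitNanos) + " " + unit; // evenly divisible -> no precision lost
            }
        }
        return nanos + " " + ChronoUnit.NANOS;
    }

    public static void main(String[] args) {
        System.out.println(format(Duration.ofMillis(60000)));           // 1 Minutes
        System.out.println(format(Duration.ofHours(1).plusSeconds(1))); // 3601 Seconds
    }
}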
flink_TimeUtils_m0_rdh
|
/**
*
* @param duration
* to convert to string
* @return duration string in millis
*/
public static String m0(final Duration duration) {
return duration.toMillis() + TimeUnit.MILLISECONDS.labels.get(0); }
| 3.26 |
flink_TimeUtils_parseDuration_rdh
|
/**
* Parse the given string to a java {@link Duration}. The string is in format "{length
* value}{time unit label}", e.g. "123ms", "321 s". If no time unit label is specified, it will
* be considered as milliseconds.
*
* <p>Supported time unit labels are:
*
* <ul>
* <li>DAYS: "d", "day"
* <li>HOURS: "h", "hour"
* <li>MINUTES: "m", "min", "minute"
* <li>SECONDS: "s", "sec", "second"
* <li>MILLISECONDS: "ms", "milli", "millisecond"
* <li>MICROSECONDS: "µs", "micro", "microsecond"
* <li>NANOSECONDS: "ns", "nano", "nanosecond"
* </ul>
*
* @param text
* string to parse.
*/
public static Duration parseDuration(String text) {
checkNotNull(text);
final String trimmed = text.trim();
checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
final int len = trimmed.length();
int pos = 0;
char current;
while (((pos < len) && ((current = trimmed.charAt(pos)) >= '0')) && (current <= '9')) {
pos++;
}
final String number = trimmed.substring(0, pos);
final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);
if (number.isEmpty()) {
throw new NumberFormatException("text does not start with a number");
}
final long value;
try {
value = Long.parseLong(number);// this throws a NumberFormatException on overflow
} catch (NumberFormatException e) {
throw new IllegalArgumentException(("The value '" + number) + "' cannot be represented as a 64 bit number (numeric overflow).");
}
if (unitLabel.isEmpty()) {
return Duration.of(value, ChronoUnit.MILLIS);
}
ChronoUnit unit = LABEL_TO_UNIT_MAP.get(unitLabel);
if (unit != null) {
return Duration.of(value, unit);
} else {
throw new IllegalArgumentException((("Time interval unit label '" + unitLabel) + "' does not match any of the recognized units: ") + TimeUnit.getAllUnits());
}
}
| 3.26 |
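Example inputs and the durations they parse to, written as a fragment that assumes org.apache.flink.util.TimeUtils (flink-core) and java.time.Duration are imported.

Duration a = TimeUtils.parseDuration("123ms");  // PT0.123S
Duration b = TimeUtils.parseDuration("321 s");  // PT5M21S
Duration c = TimeUtils.parseDuration("10");     // PT0.01S (no unit label -> milliseconds)
// TimeUtils.parseDuration("2 weeks") throws IllegalArgumentException (unknown unit label).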
flink_TimeUtils_singular_rdh
|
/**
*
* @param label
* the original label
* @return the singular format of the original label
*/
private static String[] singular(String label) {
return new String[]{ label };
}
| 3.26 |
flink_Order_isOrdered_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Checks whether this enum constant in fact represents an order, that is, whether this property is
* not equal to <tt>Order.NONE</tt>.
*
* @return True, if this enum constant is unequal to <tt>Order.NONE</tt>, false otherwise.
*/
public boolean isOrdered() {
return this != Order.NONE;
}
| 3.26 |
flink_TypeInference_m0_rdh
|
/**
* Sets the list of argument types for specifying a fixed, not overloaded, not vararg input
* signature explicitly.
*
* <p>This information is useful for optional arguments with default value. In particular,
* the number of arguments that need to be filled with a default value and their types is
* important.
*/
public Builder m0(List<DataType> argumentTypes) {
this.typedArguments = Preconditions.checkNotNull(argumentTypes, "List of argument types must not be null.");
return this;
}
| 3.26 |
flink_TypeInference_outputTypeStrategy_rdh
|
/**
* Sets the strategy for inferring the final output data type of a function call.
*
* <p>Required.
*/
public Builder outputTypeStrategy(TypeStrategy outputTypeStrategy) {
this.outputTypeStrategy = Preconditions.checkNotNull(outputTypeStrategy, "Output type strategy must not be null.");
return this;
}
| 3.26 |
flink_TypeInference_inputTypeStrategy_rdh
|
/**
* Sets the strategy for inferring and validating input arguments in a function call.
*
* <p>An {@link InputTypeStrategies#WILDCARD} strategy is assumed by default.
*/
public Builder inputTypeStrategy(InputTypeStrategy inputTypeStrategy) {
this.inputTypeStrategy = Preconditions.checkNotNull(inputTypeStrategy, "Input type strategy must not be null.");
return this;
}
| 3.26 |
flink_TypeInference_typedArguments_rdh
|
/**
*
* @see #typedArguments(List)
*/
public Builder typedArguments(DataType... argumentTypes) {
return m0(Arrays.asList(argumentTypes));}
| 3.26 |
flink_TypeInference_namedArguments_rdh
|
/**
*
* @see #namedArguments(List)
*/
public Builder namedArguments(String... argumentNames) {
return namedArguments(Arrays.asList(argumentNames));
}
| 3.26 |
flink_TypeInference_accumulatorTypeStrategy_rdh
|
/**
* Sets the strategy for inferring the intermediate accumulator data type of a function
* call.
*/
public Builder accumulatorTypeStrategy(TypeStrategy accumulatorTypeStrategy) {
this.f0 = Preconditions.checkNotNull(accumulatorTypeStrategy, "Accumulator type strategy must not be null.");
return this;
}
| 3.26 |
flink_TypeInference_newBuilder_rdh
|
/**
* Builder for configuring and creating instances of {@link TypeInference}.
*/
public static TypeInference.Builder newBuilder() {
return new TypeInference.Builder();
}
| 3.26 |
flink_AfterMatchSkipStrategy_noSkip_rdh
|
/**
* Every possible match will be emitted.
*
* @return the created AfterMatchSkipStrategy
*/
public static NoSkipStrategy noSkip() {
return NoSkipStrategy.INSTANCE;
}
| 3.26 |
flink_AfterMatchSkipStrategy_skipToLast_rdh
|
/**
* Discards every partial match that started before the last event of the emitted match mapped to
* *PatternName*.
*
* @param patternName
* the pattern name to skip to
* @return the created AfterMatchSkipStrategy
*/
public static SkipToLastStrategy skipToLast(String patternName) {
return new SkipToLastStrategy(patternName, false);
}
| 3.26 |
flink_AfterMatchSkipStrategy_skipPastLastEvent_rdh
|
/**
* Discards every partial match that started before the emitted match ended.
*
* @return the created AfterMatchSkipStrategy
*/
public static SkipPastLastStrategy skipPastLastEvent() {
return SkipPastLastStrategy.INSTANCE;
}
| 3.26 |
flink_AfterMatchSkipStrategy_skipToFirst_rdh
|
/**
* Discards every partial match that started before the first event of the emitted match mapped to
* *PatternName*.
*
* @param patternName
* the pattern name to skip to
* @return the created AfterMatchSkipStrategy
*/
public static SkipToFirstStrategy skipToFirst(String patternName) {
return new SkipToFirstStrategy(patternName, false);
}
| 3.26 |
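Hypothetical usage of these skip strategies with Flink CEP (as a fragment; it assumes flink-cep is on the classpath): the strategy is handed over when the pattern sequence is started. "Event" and the pattern names are placeholders.

// Skip to the first event mapped to "middle" after each emitted match.
AfterMatchSkipStrategy skipStrategy = AfterMatchSkipStrategy.skipToFirst("middle");
Pattern<Event, Event> pattern =
        Pattern.<Event>begin("start", skipStrategy)
                .next("middle")
                .followedBy("end");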
flink_AfterMatchSkipStrategy_getPatternName_rdh
|
/**
* Name of pattern that processing will be skipped to.
*/
public Optional<String> getPatternName() {
return Optional.empty();
}
| 3.26 |
flink_Savepoint_load_rdh
|
/**
* Loads an existing savepoint. Useful if you want to query, modify, or extend the state of an
* existing application.
*
* @param env
* The execution environment used to transform the savepoint.
* @param path
* The path to an existing savepoint on disk.
* @param stateBackend
* The state backend of the savepoint.
* @see #load(ExecutionEnvironment, String)
*/
public static ExistingSavepoint load(ExecutionEnvironment env, String path, StateBackend stateBackend) throws IOException {
Preconditions.checkNotNull(stateBackend, "The state backend must not be null");
CheckpointMetadata metadata = SavepointLoader.loadSavepointMetadata(path);
int maxParallelism = metadata.getOperatorStates().stream().map(OperatorState::getMaxParallelism).max(Comparator.naturalOrder()).orElseThrow(() -> new RuntimeException("Savepoint must contain at least one operator state."));
SavepointMetadata savepointMetadata = new SavepointMetadata(maxParallelism, metadata.getMasterStates(), metadata.getOperatorStates());
return new ExistingSavepoint(env, savepointMetadata, stateBackend);
}
| 3.26 |
flink_Savepoint_create_rdh
|
/**
* Creates a new savepoint.
*
* @param stateBackend
* The state backend of the savepoint used for keyed state.
* @param maxParallelism
* The max parallelism of the savepoint.
* @return A new savepoint.
* @see #create(int)
*/
public static NewSavepoint create(StateBackend stateBackend, int maxParallelism) {
Preconditions.checkNotNull(stateBackend, "The state backend must not be null");
Preconditions.checkArgument((maxParallelism > 0) && (maxParallelism <= UPPER_BOUND_MAX_PARALLELISM), (("Maximum parallelism must be between 1 and " + UPPER_BOUND_MAX_PARALLELISM) + ". Found: ") + maxParallelism);
SavepointMetadata metadata = new SavepointMetadata(maxParallelism, Collections.emptyList(), Collections.emptyList());
return new NewSavepoint(metadata, stateBackend);
}
| 3.26 |
flink_AbstractRuntimeUDFContext_getAccumulator_rdh
|
// --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
private <V, A extends Serializable> Accumulator<V, A> getAccumulator(String name, Class<? extends Accumulator<V, A>> accumulatorClass) {
Accumulator<?, ?> accumulator = accumulators.get(name);
if (accumulator != null) {
AccumulatorHelper.compareAccumulatorTypes(name, accumulator.getClass(), accumulatorClass);
} else {
// Create new accumulator
try {
accumulator = accumulatorClass.newInstance();
} catch (Exception e) {
throw new RuntimeException("Cannot create accumulator " +
accumulatorClass.getName());
}
accumulators.put(name, accumulator);
}
return ((Accumulator<V, A>) (accumulator));
}
| 3.26 |
flink_SelectByMaxFunction_reduce_rdh
|
/**
* Reduce implementation, returns bigger tuple or value1 if both tuples are equal. Comparison
* highly depends on the order and amount of fields chosen as indices. All given fields (at
* construction time) are checked in the same order as defined (at construction time). If both
* tuples are equal in one index, the next index is compared. Or if no next index is available
* value1 is returned. The tuple which has a bigger value at one index will be returned.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public T reduce(T value1, T value2) throws Exception {
for (int index = 0; index < fields.length; index++) {
// Save position of compared key
int position = this.fields[index];
// Get both values - both implement comparable
Comparable v3 = value1.getFieldNotNull(position);
Comparable comparable2 = value2.getFieldNotNull(position);
// Compare values
int comp = v3.compareTo(comparable2);
// If comp is bigger than 0, comparable 1 is bigger.
// Return the bigger value.
if (comp > 0) {
return value1;
} else if (comp < 0) {
return value2;
}
}
return value1;
}
| 3.26 |
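A self-contained sketch of the field-wise "select by max" comparison above, using Comparable[] rows in place of Flink tuples; the field indices are checked in the given order, and the first row wins on a full tie.

import java.util.Arrays;

public class SelectByMaxSketch {
    @SuppressWarnings({"unchecked", "rawtypes"})
    static Comparable[] selectMax(Comparable[] row1, Comparable[] row2, int[] fields) {
        for (int position : fields) {
            int comp = row1[position].compareTo(row2[position]);
            if (comp > 0) return row1;   // row1 is bigger at this field
            if (comp < 0) return row2;   // row2 is bigger at this field
            // equal at this field -> fall through to the next field
        }
        return row1; // all compared fields equal -> keep the first row
    }

    public static void main(String[] args) {
        Comparable[] a = {5, "x", 7};
        Comparable[] b = {5, "x", 9};
        // Compare field 0 first, then field 2: equal on 0, b is bigger on 2.
        System.out.println(Arrays.toString(selectMax(a, b, new int[]{0, 2}))); // [5, x, 9]
    }
}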
flink_GlobalWindow_snapshotConfiguration_rdh
|
// ------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<GlobalWindow> snapshotConfiguration() {
return new GlobalWindowSerializerSnapshot();
}
| 3.26 |
flink_WorkerResourceSpec_setExtendedResource_rdh
|
/**
* Add the given extended resource. The old value with the same resource name will be
* replaced if present.
*/
public Builder setExtendedResource(ExternalResource extendedResource) {
this.extendedResources.put(extendedResource.getName(), extendedResource);
return this;
}
| 3.26 |
flink_WorkerResourceSpec_setExtendedResources_rdh
|
/**
* Add the given extended resources. This will discard all the previous added extended
* resources.
*/
public Builder setExtendedResources(Collection<ExternalResource> extendedResources) {
this.extendedResources = extendedResources.stream().collect(Collectors.toMap(ExternalResource::getName, Function.identity()));
return this;
}
| 3.26 |
flink_SessionContext_isStatementSetState_rdh
|
// --------------------------------------------------------------------------------------------
// Begin statement set
// --------------------------------------------------------------------------------------------
public boolean isStatementSetState() {
return isStatementSetState;}
| 3.26 |
flink_SessionContext_create_rdh
|
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
public static SessionContext create(DefaultContext defaultContext, SessionHandle sessionId, SessionEnvironment environment, ExecutorService operationExecutorService)
{
Configuration configuration = initializeConfiguration(defaultContext, environment, sessionId);
final MutableURLClassLoader userClassLoader = FlinkUserCodeClassLoaders.create(defaultContext.getDependencies().toArray(new URL[0]), SessionContext.class.getClassLoader(), configuration);
final ResourceManager resourceManager = new ResourceManager(configuration, userClassLoader);
return new SessionContext(defaultContext, sessionId, environment.getSessionEndpointVersion(), configuration, userClassLoader, initializeSessionState(environment, configuration, resourceManager), new OperationManager(operationExecutorService));
}
| 3.26 |
flink_SessionContext_close_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Close resources, e.g. catalogs.
*/
public void close() {
operationManager.close();
for (String name : sessionState.catalogManager.listCatalogs()) {
try {
sessionState.catalogManager.getCatalog(name).ifPresent(Catalog::close);
} catch (Throwable t) {
LOG.error(String.format("Failed to close catalog %s for the session %s.", name, sessionId), t);
}}
try {
userClassloader.close();
} catch (IOException e) {
LOG.error(String.format("Error while closing class loader for the session %s.", sessionId), e);
}
try {
sessionState.resourceManager.close();
} catch (IOException e) {
LOG.error(String.format("Failed to close the resource manager for the session %s.", sessionId), e);}
}
| 3.26 |
flink_SessionContext_getSessionId_rdh
|
// --------------------------------------------------------------------------------------------
// Getter/Setter
// --------------------------------------------------------------------------------------------
public SessionHandle getSessionId() {
return this.sessionId;
}
| 3.26 |
flink_SessionContext_initializeConfiguration_rdh
|
// ------------------------------------------------------------------------------------------------------------------
// Helpers
// ------------------------------------------------------------------------------------------------------------------
protected static Configuration initializeConfiguration(DefaultContext defaultContext, SessionEnvironment environment, SessionHandle sessionId) {
Configuration configuration = defaultContext.getFlinkConfig().clone();
configuration.addAll(Configuration.fromMap(environment.getSessionConfig()));
// every session configure the specific local resource download directory
Path path = Paths.get(configuration.get(TableConfigOptions.RESOURCES_DOWNLOAD_DIR), String.format("sql-gateway-%s", sessionId));
// override resource download temp directory
configuration.set(TableConfigOptions.RESOURCES_DOWNLOAD_DIR, path.toAbsolutePath().toString());
return configuration;
}
| 3.26 |
flink_SessionContext_createOperationExecutor_rdh
|
// --------------------------------------------------------------------------------------------
// Method to execute commands
// --------------------------------------------------------------------------------------------
public OperationExecutor createOperationExecutor(Configuration executionConfig) {
return new OperationExecutor(this, executionConfig);
}
| 3.26 |
flink_KerberosLoginProvider_doLoginAndReturnUGI_rdh
|
/**
* Does kerberos login and doesn't set current user, just returns a new UGI instance. Must be
* called when isLoginPossible returns true.
*/
public UserGroupInformation doLoginAndReturnUGI() throws IOException {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
if (principal != null) {
LOG.info("Attempting to login to KDC using principal: {} keytab: {}", principal, keytab);
UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
LOG.info("Successfully logged into KDC");
return ugi;
} else if (!HadoopUserUtils.isProxyUser(currentUser)) {
LOG.info("Attempting to load user's ticket cache");final String ccache = System.getenv("KRB5CCNAME");
final String user = Optional.ofNullable(System.getenv("KRB5PRINCIPAL")).orElse(currentUser.getUserName());
UserGroupInformation ugi = UserGroupInformation.getUGIFromTicketCache(ccache, user);
LOG.info("Loaded user's ticket cache successfully");
return ugi;
} else {
throwProxyUserNotSupported();
return currentUser;
}
}
| 3.26 |
flink_KerberosLoginProvider_doLogin_rdh
|
/**
* Does kerberos login and sets current user. Must be called when isLoginPossible returns true.
*/
public void doLogin(boolean supportProxyUser) throws IOException {
if (principal != null) {
LOG.info("Attempting to login to KDC using principal: {} keytab: {}", principal, keytab);
UserGroupInformation.loginUserFromKeytab(principal, keytab);
LOG.info("Successfully logged into KDC");
} else if (!HadoopUserUtils.isProxyUser(UserGroupInformation.getCurrentUser())) {
LOG.info("Attempting to load user's ticket cache");
UserGroupInformation.loginUserFromSubject(null);
LOG.info("Loaded user's ticket cache successfully");
} else if (supportProxyUser) {
LOG.info("Proxy user doesn't need login since it must have credentials already");
} else {
throwProxyUserNotSupported();
}
}
| 3.26 |
flink_DatabaseMetaDataUtils_createSchemasResultSet_rdh
|
/**
* Create result set for schemas. The schema columns are:
*
* <ul>
* <li>TABLE_SCHEM String => schema name
* <li>TABLE_CATALOG String => catalog name (may be null)
* </ul>
*
* <p>The results are ordered by TABLE_CATALOG and TABLE_SCHEM.
*
* @param statement
* The statement for database meta data
* @param catalogs
* The catalog list
* @param catalogSchemas
* The catalog with schema list
* @return a ResultSet object in which each row is a schema description
*/
public static FlinkResultSet createSchemasResultSet(Statement statement, List<String> catalogs, Map<String, List<String>> catalogSchemas) {
List<RowData> schemaWithCatalogList = new ArrayList<>();
List<String> catalogList = new ArrayList<>(catalogs);
catalogList.sort(String::compareTo);
for (String catalog : catalogList) {
List<String> schemas = catalogSchemas.get(catalog);
schemas.sort(String::compareTo);
schemas.forEach(s -> schemaWithCatalogList.add(GenericRowData.of(StringData.fromString(s), StringData.fromString(catalog))));
}
return new FlinkResultSet(statement, new CollectionResultIterator(schemaWithCatalogList.iterator()), ResolvedSchema.of(TABLE_SCHEM_COLUMN, TABLE_CATALOG_COLUMN));
}
| 3.26 |
streampipes_SpServiceDefinitionBuilder_m0_rdh
|
/**
* Include migrations in the service definition.
* <br>
* Please refrain from providing {@link IModelMigrator}s with overlapping version definitions for one application id.
*
* @param migrations
* List of migrations to be registered
* @return {@link SpServiceDefinitionBuilder}
*/
public SpServiceDefinitionBuilder m0(IModelMigrator<?, ?>... migrations) {
this.serviceDefinition.addMigrators(List.of(migrations));
return this;
}
| 3.26 |
streampipes_SpOpcUaConfigExtractor_extractAdapterConfig_rdh
|
/**
* Creates {@link OpcUaAdapterConfig} instance in accordance with the given
* {@link org.apache.streampipes.sdk.extractor.StaticPropertyExtractor}.
*
* @param extractor
* extractor for user inputs
* @return {@link OpcUaAdapterConfig} instance based on information from {@code extractor}
*/
public static OpcUaAdapterConfig extractAdapterConfig(IStaticPropertyExtractor extractor) {
var config = extractSharedConfig(extractor, new OpcUaAdapterConfig());
boolean usePullMode = extractor.selectedAlternativeInternalId(ADAPTER_TYPE.name()).equals(PULL_MODE.name());
if (usePullMode) {
Integer pullIntervalSeconds = extractor.singleValueParameter(PULLING_INTERVAL.name(), Integer.class);
config.setPullIntervalMilliSeconds(pullIntervalSeconds);
}
return config;
}
| 3.26 |
streampipes_AbstractProcessingElementBuilder_naryMappingPropertyWithoutRequirement_rdh
|
/**
* Adds a new {@link org.apache.streampipes.model.staticproperty.MappingPropertyNary}
* to the pipeline element definition which is not linked to a specific input property.
* Use this method if you want to present users a selection (in form of a Checkbox Group)
* of all available input event properties.
*
* @param label
* A human-readable label that is displayed to users in the StreamPipes UI.
* @param propertyScope
* Only input event properties that match the
* {@link org.apache.streampipes.model.schema.PropertyScope} are displayed.
* @return this
*/
public K naryMappingPropertyWithoutRequirement(Label label, PropertyScope propertyScope) {
MappingPropertyNary mp = new MappingPropertyNary(label.getInternalId(), label.getLabel(), label.getDescription());
mp.setPropertyScope(propertyScope.name());
this.staticProperties.add(mp);
return me();
}
/**
*
* @deprecated Use {@link #unaryMappingPropertyWithoutRequirement(Label)} instead.
Use this method if you want to present users a single-value selection of all available input
event properties.
Adds a new {@link org.apache.streampipes.model.staticproperty.MappingPropertyUnary}
| 3.26 |
streampipes_AbstractProcessingElementBuilder_requiredStream_rdh
|
/**
* Set a new stream requirement by adding restrictions on this stream. Use
* {@link StreamRequirementsBuilder} to create requirements for a single stream.
*
* @param streamRequirements
* A bundle of collected {@link CollectedStreamRequirements}
* @return this
*/
public K requiredStream(CollectedStreamRequirements streamRequirements) {
this.streamRequirements.add(streamRequirements.getStreamRequirements());
this.staticProperties.addAll(rewrite(streamRequirements.getMappingProperties(), this.streamRequirements.size()));
return me();
}
| 3.26 |
streampipes_AbstractProcessingElementBuilder_supportedProtocols_rdh
|
/**
* Assigns supported communication/transport protocols that the pipeline element can handle at runtime (e.g.,
* Kafka or JMS).
*
* @param protocol
* An arbitrary number of supported
* {@link org.apache.streampipes.model.grounding.TransportProtocol}s.
* Use {@link org.apache.streampipes.sdk.helpers.SupportedProtocols} to assign protocols
* from some pre-defined ones or create your own by following the developer guide.
* @return this
*/
public K supportedProtocols(TransportProtocol... protocol) {
return supportedProtocols(Arrays.asList(protocol));
}
/**
* Assigns supported communication/transport protocols that the pipeline element can handle at runtime (e.g.,
* Kafka or JMS).
*
* @param protocols
* A list of supported {@link org.apache.streampipes.model.grounding.TransportProtocol}s.
* Use {@link org.apache.streampipes.sdk.helpers.SupportedProtocols}
| 3.26 |
streampipes_AbstractProcessingElementBuilder_setStream2_rdh
|
/**
*
* @deprecated Use {@link #requiredStream(CollectedStreamRequirements)} instead
*/
@Deprecated(since = "0.90.0", forRemoval = true)
public K setStream2() {
f1 = true;
return me();
}
| 3.26 |
streampipes_AbstractProcessingElementBuilder_setStream1_rdh
|
/**
*
* @deprecated Use {@link #requiredStream(CollectedStreamRequirements)} instead
*/
@Deprecated(since = "0.90.0", forRemoval = true)public K setStream1() {
stream1 = true;
return me();}
| 3.26 |
streampipes_AbstractProcessingElementBuilder_supportedFormats_rdh
|
/**
* Assigns supported transport formats that the pipeline element can handle at runtime (e.g.,
* JSON or XML).
*
* @param formats
* A list of supported {@link org.apache.streampipes.model.grounding.TransportFormat}s. Use
* {@link org.apache.streampipes.sdk.helpers.SupportedFormats} to assign formats from some pre-defined
* ones or create your own by following the developer guide.
* @return this
*/
public K supportedFormats(List<TransportFormat> formats) {
this.supportedGrounding.setTransportFormats(formats);
return me();
}
| 3.26 |
streampipes_PipelineApi_delete_rdh
|
/**
* Deletes the pipeline with a given id
*
* @param pipelineId
* The id of the pipeline
*/
@Override
public void delete(String pipelineId) {
delete(getBaseResourcePath().addToPath(pipelineId), Message.class);
}
| 3.26 |
streampipes_PipelineApi_all_rdh
|
/**
* Receives all pipelines owned by the current user
*
* @return (list) {@link org.apache.streampipes.model.pipeline.Pipeline} a list of all pipelines
*/
@Override
public List<Pipeline> all() {
return getAll(getBaseResourcePath());
}
| 3.26 |
streampipes_PipelineApi_start_rdh
|
/**
* Starts a pipeline by given id
*
* @param pipeline
* The pipeline
* @return {@link org.apache.streampipes.model.pipeline.PipelineOperationStatus} the status message after invocation
*/
@Override
public PipelineOperationStatus start(Pipeline pipeline) {
return start(pipeline.getPipelineId());
}
| 3.26 |
streampipes_PipelineApi_stop_rdh
|
/**
* Stops a pipeline by given id
*
* @param pipeline
* The pipeline
* @return {@link org.apache.streampipes.model.pipeline.PipelineOperationStatus} the status message after detach
*/
@Override
public PipelineOperationStatus stop(Pipeline pipeline) {
return stop(pipeline.getPipelineId());
}
/**
* Stops a pipeline by given id
*
* @param pipelineId
* The id of the pipeline
* @return {@link org.apache.streampipes.model.pipeline.PipelineOperationStatus}
| 3.26 |
streampipes_Options_from_rdh
|
/**
* Creates a new list of options by using the provided string values.
*
* @param optionLabel
* An arbitrary number of option labels.
* @return a list of {@link Option}s created from the given labels
*/
public static List<Option> from(String... optionLabel) {
return Arrays.stream(optionLabel).map(Option::new).collect(Collectors.toList());
}
| 3.26 |
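Hypothetical usage of the helper above inside a pipeline element description (as a fragment; it assumes the StreamPipes SDK imports used by the snippet). The labels are examples only.

// Build a selection list from plain string labels; one Option is created per label, in order.
List<Option> colorOptions = Options.from("red", "green", "blue");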
streampipes_TokenizerProcessor_declareModel_rdh
|
// TODO: Maybe change outputStrategy to an array instead of tons of different strings
@Override
public DataProcessorDescription declareModel() {
return ProcessingElementBuilder.create("org.apache.streampipes.processors.textmining.jvm.tokenizer").category(DataProcessorType.ENRICH_TEXT).withAssets(Assets.DOCUMENTATION, Assets.ICON).withLocales(Locales.EN).requiredFile(Labels.withId(BINARY_FILE_KEY)).requiredStream(StreamRequirementsBuilder.create().requiredPropertyWithUnaryMapping(EpRequirements.stringReq(), Labels.withId(DETECTION_FIELD_KEY), PropertyScope.NONE).build()).outputStrategy(OutputStrategies.append(EpProperties.listStringEp(Labels.withId(TOKEN_LIST_FIELD_KEY), TOKEN_LIST_FIELD_KEY, "http://schema.org/ItemList"))).build();
}
| 3.26 |
streampipes_OutputStrategies_append_rdh
|
/**
* Creates a {@link org.apache.streampipes.model.output.AppendOutputStrategy}. Append output strategies add additional
* properties to an input event stream.
*
* @param appendProperties
* An arbitrary number of event properties that are appended to any input stream.
* @return AppendOutputStrategy
*/
public static AppendOutputStrategy append(EventProperty... appendProperties) {
return new AppendOutputStrategy(Arrays.asList(appendProperties));
}
| 3.26 |
streampipes_OutputStrategies_custom_rdh
|
/**
* Creates a {@link org.apache.streampipes.model.output.CustomOutputStrategy}.
*
* @param outputBoth
* If two input streams are expected by a pipeline element, you can use outputBoth to indicate
* whether the properties of both input streams should be available to the pipeline developer for
* selection.
* @return CustomOutputStrategy
*/
public static CustomOutputStrategy custom(boolean outputBoth) {
return new CustomOutputStrategy(outputBoth);
}
| 3.26 |