name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
flink_AsyncCheckpointRunnable_cleanup_rdh | /**
*
* @return discarded full/incremental size (if available).
*/
private Tuple2<Long, Long> cleanup() throws Exception {
LOG.debug("Cleanup AsyncCheckpointRunnable for checkpoint {} of {}.", checkpointMetaData.getCheckpointId(), taskName);
Exception exception = null;
// clean up ongoing operator snapshot results and non partitioned state handles
long stateSize = 0;
long checkpointedSize = 0;
for (OperatorSnapshotFutures operatorSnapshotResult : operatorSnapshotsInProgress.values()) {
if (operatorSnapshotResult != null) {
try {
Tuple2<Long, Long> tuple2 = operatorSnapshotResult.cancel();
stateSize += tuple2.f0;
checkpointedSize += tuple2.f1;
} catch (Exception cancelException) {
exception = ExceptionUtils.firstOrSuppressed(cancelException, exception);
}
}
}
if (null != exception) {
throw exception;
}
return Tuple2.of(stateSize, checkpointedSize);
} | 3.26 |
flink_HiveCatalogLock_createFactory_rdh | /**
* Create a hive lock factory.
*/
public static Factory createFactory(HiveConf hiveConf) {
return new HiveCatalogLockFactory(hiveConf);
} | 3.26 |
flink_ResourceManagerRuntimeServices_fromConfiguration_rdh | // -------------------- Static methods --------------------------------------
public static ResourceManagerRuntimeServices fromConfiguration(ResourceManagerRuntimeServicesConfiguration configuration, HighAvailabilityServices highAvailabilityServices, ScheduledExecutor scheduledExecutor, SlotManagerMetricGroup slotManagerMetricGroup) {
final SlotManager slotManager = createSlotManager(configuration, scheduledExecutor, slotManagerMetricGroup);
final JobLeaderIdService jobLeaderIdService = new DefaultJobLeaderIdService(highAvailabilityServices, scheduledExecutor, configuration.getJobTimeout());
return new ResourceManagerRuntimeServices(slotManager, jobLeaderIdService);
} | 3.26 |
flink_ExternalSerializer_of_rdh | /**
* Creates an instance of a {@link ExternalSerializer} defined by the given {@link DataType}.
*/
public static <I, E> ExternalSerializer<I, E> of(DataType dataType, boolean isInternalInput) {
return new ExternalSerializer<>(dataType, InternalSerializers.create(dataType.getLogicalType()), isInternalInput);
} | 3.26 |
flink_ExternalSerializer_readObject_rdh | // ---------------------------------------------------------------------------------
private void readObject(ObjectInputStream serialized) throws IOException, ClassNotFoundException {
serialized.defaultReadObject();
initializeConverter();
} | 3.26 |
flink_TPCHQuery10_m0_rdh | // *************************************************************************
// PROGRAM
// *************************************************************************
public static void m0(String[] args) throws Exception {
LOGGER.warn(DATASET_DEPRECATION_INFO);
final ParameterTool params = ParameterTool.fromArgs(args);
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
if ((((!params.has("customer")) && (!params.has("orders"))) && (!params.has("lineitem"))) && (!params.has("nation"))) {
System.err.println(" This program expects data from the TPC-H benchmark as input data.");
System.err.println(" Due to legal restrictions, we can not ship generated data.");
System.err.println(" You can find the TPC-H data generator at http://www.tpc.org/tpch/.");
System.err.println(" Usage: TPCHQuery10 --customer <path> --orders <path> --lineitem <path> --nation <path> [--output <path>]");
return;
}
// get customer data set: (custkey, name, address, nationkey, acctbal)
DataSet<Tuple5<Integer, String, String, Integer, Double>> customers = getCustomerDataSet(env, params.get("customer"));
// get orders data set: (orderkey, custkey, orderdate)
DataSet<Tuple3<Integer, Integer, String>> orders = getOrdersDataSet(env, params.get("orders"));
// get lineitem data set: (orderkey, extendedprice, discount, returnflag)
DataSet<Tuple4<Integer, Double, Double, String>> lineitems = getLineitemDataSet(env, params.get("lineitem"));
// get nation data set: (nationkey, name)
DataSet<Tuple2<Integer, String>> nations = getNationsDataSet(env, params.get("nation"));
// orders filtered by year: (orderkey, custkey)
// filter by year
DataSet<Tuple2<Integer, Integer>> ordersFilteredByYear = // project fields out that are no longer required
orders.filter(order -> Integer.parseInt(order.f2.substring(0, 4)) > 1990).project(0, 1);
// lineitems filtered by flag: (orderkey, revenue)
// filter by flag
DataSet<Tuple2<Integer, Double>> lineitemsFilteredByFlag = // compute revenue and project out return flag
// revenue per item = l_extendedprice * (1 - l_discount)
lineitems.filter(lineitem -> lineitem.f3.equals("R")).map(lineitem -> new Tuple2<>(lineitem.f0, lineitem.f1 * (1 - lineitem.f2))).returns(Types.TUPLE(Types.INT, Types.DOUBLE));// for lambda with generics
// join orders with lineitems: (custkey, revenue)
DataSet<Tuple2<Integer, Double>> revenueByCustomer =
ordersFilteredByYear.joinWithHuge(lineitemsFilteredByFlag).where(0).equalTo(0).projectFirst(1).projectSecond(1);
revenueByCustomer = revenueByCustomer.groupBy(0).aggregate(Aggregations.SUM, 1);
// join customer with nation (custkey, name, address, nationname, acctbal)
DataSet<Tuple5<Integer, String, String, String, Double>> customerWithNation = customers.joinWithTiny(nations).where(3).equalTo(0).projectFirst(0, 1, 2).projectSecond(1).projectFirst(4);
// join customer (with nation) with revenue (custkey, name, address, nationname, acctbal,
// revenue)
DataSet<Tuple6<Integer, String, String, String, Double, Double>> result = customerWithNation.join(revenueByCustomer).where(0).equalTo(0).projectFirst(0, 1, 2, 3, 4).projectSecond(1);
// emit result
if (params.has("output")) {
result.writeAsCsv(params.get("output"), "\n", "|");
// execute program
env.execute("TPCH Query 10 Example");
} else {
System.out.println("Printing result to stdout. Use --output to specify output path.");
result.print();
}
} | 3.26 |
flink_HiveASTParseDriver_create_rdh | /**
* Creates a HiveParserASTNode for the given token. The HiveParserASTNode is a
* wrapper around antlr's CommonTree class that implements the Node interface.
*
* @param payload
* The token.
* @return Object (which is actually an HiveParserASTNode) for the token.
*/
@Override
public Object create(Token payload) {
return new HiveParserASTNode(payload);
} | 3.26 |
flink_HiveASTParseDriver_parse_rdh | /**
* Parses a command, optionally assigning the parser's token stream to the given context.
*
* @param command
* command to parse
* @param ctx
* context with which to associate this parser's token stream, or null if either no
* context is available or the context already has an existing stream
* @return parsed AST
*/
public HiveParserASTNode parse(String command, HiveParserContext ctx, String viewFullyQualifiedName) throws HiveASTParseException {
if (LOG.isDebugEnabled()) {
LOG.debug("Parsing command: " + command);
}
HiveLexerX lexer = new HiveLexerX(new ANTLRNoCaseStringStream(command));
TokenRewriteStream tokens = new TokenRewriteStream(lexer);
if (ctx != null) {
if (viewFullyQualifiedName == null) {
// Top level query
ctx.setTokenRewriteStream(tokens);
} else {
// It is a view
ctx.addViewTokenRewriteStream(viewFullyQualifiedName, tokens);
}
lexer.setHiveConf(ctx.getConf());
}
HiveASTParser parser = new HiveASTParser(tokens);
if (ctx != null) {
parser.setHiveConf(ctx.getConf());
}
parser.setTreeAdaptor(ADAPTOR);
statement_return r = null;
try {
r = parser.statement();
} catch (RecognitionException e) {
throw new HiveASTParseException(parser.errors);
}
if ((lexer.getErrors().size() == 0) && (parser.errors.size() == 0)) {
LOG.debug("Parse Completed");
} else if (lexer.getErrors().size() != 0) {
throw new HiveASTParseException(lexer.getErrors());
} else {
throw new HiveASTParseException(parser.errors);
}
HiveParserASTNode tree = r.getTree();
tree.setUnknownTokenBoundaries();
return tree;
} | 3.26 |
flink_ResultPartition_setup_rdh | /**
* Registers a buffer pool with this result partition.
*
* <p>There is one pool for each result partition, which is shared by all its sub partitions.
*
* <p>The pool is registered with the partition *after* it has been constructed in order to
* conform to the life-cycle of task registrations in the {@link TaskExecutor}.
*/
@Override
public void setup() throws IOException {
checkState(this.bufferPool == null, "Bug in result partition setup logic: Already registered buffer pool.");
this.bufferPool = checkNotNull(bufferPoolFactory.get());
setupInternal();
partitionManager.registerResultPartition(this);
} | 3.26 |
flink_ResultPartition_canBeCompressed_rdh | /**
* Whether the buffer can be compressed or not. Note that events are not compressed because they are
* usually small and their size can become even larger after compression.
*/
protected boolean canBeCompressed(Buffer buffer) {
return ((bufferCompressor != null) && buffer.isBuffer()) && (buffer.readableBytes() > 0);
} | 3.26 |
flink_ResultPartition_notifyEndOfData_rdh | // ------------------------------------------------------------------------
@Override
public void notifyEndOfData(StopMode mode) throws IOException {
throw new UnsupportedOperationException();
} | 3.26 |
flink_ResultPartition_onSubpartitionAllDataProcessed_rdh | /**
* The subpartition notifies that the corresponding downstream task has processed all the user
* records.
*
* @see EndOfData
* @param subpartition
* The index of the subpartition sending the notification.
*/
public void onSubpartitionAllDataProcessed(int subpartition) {
} | 3.26 |
flink_ResultPartition_onConsumedSubpartition_rdh | // ------------------------------------------------------------------------
/**
* Notification when a subpartition is released.
*/
void onConsumedSubpartition(int subpartitionIndex) {
if (isReleased.get()) {
return;
}
LOG.debug("{}: Received release notification for subpartition {}.", this, subpartitionIndex);
} | 3.26 |
flink_ResultPartition_finish_rdh | /**
* Finishes the result partition.
*
* <p>After this operation, it is not possible to add further data to the result partition.
*
* <p>For BLOCKING results, this will trigger the deployment of consuming tasks.
*/
@Override
public void finish() throws IOException {
checkInProduceState();
isFinished = true;
} | 3.26 |
flink_ResultPartition_isReleased_rdh | /**
* Whether this partition is released.
*
* <p>A partition is released when each subpartition is either consumed and communication is
* closed by consumer or failed. A partition is also released if task is cancelled.
*/
@Override
public boolean isReleased() {
return isReleased.get();
} | 3.26 |
flink_BuiltInFunctionDefinition_internal_rdh | /**
* Specifies that this {@link BuiltInFunctionDefinition} is meant for internal purposes only
* and should not be exposed when listing functions.
*/
public Builder internal() {
this.isInternal = true;
return this;
} | 3.26 |
flink_BuiltInFunctionDefinition_runtimeProvided_rdh | /**
* Specifies that this {@link BuiltInFunctionDefinition} is implemented during code
* generation.
*/
public Builder runtimeProvided() {
this.isRuntimeProvided = true;
return this;
}
/**
* Specifies the runtime class implementing this {@link BuiltInFunctionDefinition} | 3.26 |
flink_BuiltInFunctionDefinition_newBuilder_rdh | /**
* Builder for configuring and creating instances of {@link BuiltInFunctionDefinition}.
*/
public static BuiltInFunctionDefinition.Builder newBuilder() {
return new BuiltInFunctionDefinition.Builder();
} | 3.26 |
flink_BuiltInFunctionDefinition_name_rdh | /**
* Specifies a name that uniquely identifies a built-in function.
*
* <p>Please adhere to the following naming convention:
*
* <ul>
* <li>Use upper case and separate words with underscore.
* <li>Depending on the importance of the function, the underscore is sometimes omitted
* e.g. for {@code IFNULL} or {@code TYPEOF} but not for {@code TO_TIMESTAMP_LTZ}.
* <li>Internal functions must start with $ and include a version starting from 1. The
* following format is enforced: {@code $NAME$VERSION} such as {@code $REPLICATE_ROWS$1}.
* </ul>
*/
public Builder name(String name) {
this.name = name;
return this;
} | 3.26 |
flink_BuiltInFunctionDefinition_version_rdh | /**
* Specifies a version that will be persisted in the plan together with the function's name.
* The default version is 1 for non-internal functions.
*
* <p>Note: Internal functions don't need to specify a version as we enforce a unique name
* that includes a version (see {@link #name(String)}).
*/
public Builder version(int version) {
this.version = version;
return this;
} | 3.26 |
flink_BuiltInFunctionDefinition_runtimeDeferred_rdh | /**
* Specifies that this {@link BuiltInFunctionDefinition} will be mapped to a Calcite
* function.
*/
public Builder runtimeDeferred() {
// This method is just a marker method for clarity. It is equivalent to calling
// neither {@link #runtimeProvided} nor {@link #runtimeClass}.
return this;
} | 3.26 |
flink_FloatValueComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_InterestingProperties_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = (prime * result) + (globalProps == null ? 0 : globalProps.hashCode());
result = (prime * result) + (localProps == null ? 0 : localProps.hashCode());
return result;
} | 3.26 |
flink_InterestingProperties_getLocalProperties_rdh | /**
* Gets the interesting local properties.
*
* @return The interesting local properties.
*/
public Set<RequestedLocalProperties> getLocalProperties() {
return this.localProps;
} | 3.26 |
flink_InterestingProperties_addGlobalProperties_rdh | // ------------------------------------------------------------------------
public void addGlobalProperties(RequestedGlobalProperties props) {
this.globalProps.add(props);
} | 3.26 |
flink_JobManagerMetricGroup_putVariables_rdh | // ------------------------------------------------------------------------
// Component Metric Group Specifics
// ------------------------------------------------------------------------
@Override
protected void putVariables(Map<String, String> variables) {
variables.put(ScopeFormat.SCOPE_HOST, hostname);
} | 3.26 |
flink_JobManagerMetricGroup_addJob_rdh | // ------------------------------------------------------------------------
public JobManagerJobMetricGroup addJob(JobID jobId, String jobName) {
// get or create a jobs metric group
JobManagerJobMetricGroup currentJobGroup;
synchronized(this) {
if (!isClosed()) {
currentJobGroup = jobs.get(jobId);
if ((currentJobGroup == null) || currentJobGroup.isClosed()) {
currentJobGroup = new JobManagerJobMetricGroup(registry, this, jobId, jobName);
jobs.put(jobId, currentJobGroup);
}
return currentJobGroup;
} else {
return null;
}
}
} | 3.26 |
flink_ResultPartitionManager_onConsumedPartition_rdh | // ------------------------------------------------------------------------
// Notifications
// ------------------------------------------------------------------------
void onConsumedPartition(ResultPartition partition) {
LOG.debug("Received consume notification from {}.", partition);
synchronized(registeredPartitions) {
final ResultPartition previous = registeredPartitions.remove(partition.getPartitionId());
// Release the partition if it was successfully removed
if (partition == previous) {
partition.release();
ResultPartitionID partitionId = partition.getPartitionId();
LOG.debug("Released partition {} produced by {}.", partitionId.getPartitionId(), partitionId.getProducerId());
}
PartitionRequestListenerManager listenerManager = listenerManagers.remove(partition.getPartitionId());
checkState((listenerManager == null) || listenerManager.isEmpty(), "The partition request listeners is not empty for " + partition.getPartitionId());
}
} | 3.26 |
flink_ResultPartitionManager_checkRequestPartitionListeners_rdh | /**
* Check whether the partition request listener is timeout.
*/
private void checkRequestPartitionListeners() {
List<PartitionRequestListener> timeoutPartitionRequestListeners = new LinkedList<>();
synchronized(registeredPartitions) {
if (isShutdown) {
return;
}
long now = System.currentTimeMillis();
Iterator<Map.Entry<ResultPartitionID, PartitionRequestListenerManager>> iterator = listenerManagers.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<ResultPartitionID, PartitionRequestListenerManager> entry = iterator.next();
PartitionRequestListenerManager partitionRequestListeners = entry.getValue();
partitionRequestListeners.removeExpiration(now, partitionListenerTimeout, timeoutPartitionRequestListeners);
if (partitionRequestListeners.isEmpty()) {
iterator.remove();
}
}
}
for (PartitionRequestListener partitionRequestListener : timeoutPartitionRequestListeners) {
partitionRequestListener.notifyPartitionCreatedTimeout();
}
} | 3.26 |
flink_StatusWatermarkValve_markWatermarkUnaligned_rdh | /**
* Mark the {@link InputChannelStatus} as watermark-unaligned and remove it from the {@link #alignedChannelStatuses}.
*
* @param inputChannelStatus
* the input channel status to be marked
*/
private void markWatermarkUnaligned(InputChannelStatus inputChannelStatus) {
if (inputChannelStatus.isWatermarkAligned) {
inputChannelStatus.isWatermarkAligned = false;
inputChannelStatus.removeFrom(f0);
}
} | 3.26 |
flink_StatusWatermarkValve_inputWatermarkStatus_rdh | /**
* Feed a {@link WatermarkStatus} into the valve. This may trigger the valve to output either a
* new Watermark Status, for which {@link DataOutput#emitWatermarkStatus(WatermarkStatus)} will
* be called, or a new Watermark, for which {@link DataOutput#emitWatermark(Watermark)} will be
* called.
*
* @param watermarkStatus
* the watermark status to feed to the valve
* @param channelIndex
* the index of the channel that the fed watermark status belongs to (index
* starting from 0)
*/
public void inputWatermarkStatus(WatermarkStatus watermarkStatus, int channelIndex, DataOutput<?> output) throws Exception {
// only account for watermark status inputs that will result in a status change for the input channel
if (watermarkStatus.isIdle() && channelStatuses[channelIndex].watermarkStatus.isActive()) {
// handle active -> idle toggle for the input channel
channelStatuses[channelIndex].watermarkStatus = WatermarkStatus.IDLE;
// the channel is now idle, therefore not aligned
markWatermarkUnaligned(channelStatuses[channelIndex]);
// if all input channels of the valve are now idle, we need to output an idle stream
// status from the valve (this also marks the valve as idle)
if (!InputChannelStatus.hasActiveChannels(channelStatuses)) {
// now that all input channels are idle and no channels will continue to advance its
// watermark,
// we should "flush" all watermarks across all channels; effectively, this means
// emitting
// the max watermark across all channels as the new watermark. Also, since we
// already try to advance
// the min watermark as channels individually become IDLE, here we only need to
// perform the flush
// if the watermark of the last active channel that just became idle is the current
// min watermark.
if (channelStatuses[channelIndex].watermark == lastOutputWatermark) {
findAndOutputMaxWatermarkAcrossAllChannels(output);
}
lastOutputWatermarkStatus = WatermarkStatus.IDLE;
output.emitWatermarkStatus(lastOutputWatermarkStatus);
} else if (channelStatuses[channelIndex].watermark == lastOutputWatermark) {
// if the watermark of the channel that just became idle equals the last output
// watermark (the previous overall min watermark), we may be able to find a new
// min watermark from the remaining aligned channels
findAndOutputNewMinWatermarkAcrossAlignedChannels(output);
}
} else if (watermarkStatus.isActive() && channelStatuses[channelIndex].watermarkStatus.isIdle()) {
// handle idle -> active toggle for the input channel
channelStatuses[channelIndex].watermarkStatus = WatermarkStatus.ACTIVE;
// if the last watermark of the input channel, before it was marked idle, is still
// larger than
// the overall last output watermark of the valve, then we can set the channel to be
// aligned already.
if (channelStatuses[channelIndex].watermark >= lastOutputWatermark) {
markWatermarkAligned(channelStatuses[channelIndex]);
}
// if the valve was previously marked to be idle, mark it as active and output an active
// stream
// status because at least one of the input channels is now active
if (lastOutputWatermarkStatus.isIdle()) {
lastOutputWatermarkStatus = WatermarkStatus.ACTIVE;
output.emitWatermarkStatus(lastOutputWatermarkStatus);
}
}
} | 3.26 |
flink_StatusWatermarkValve_markWatermarkAligned_rdh | /**
* Mark the {@link InputChannelStatus} as watermark-aligned and add it to the {@link #alignedChannelStatuses}.
*
* @param inputChannelStatus
* the input channel status to be marked
*/
private void markWatermarkAligned(InputChannelStatus inputChannelStatus) {
if (!inputChannelStatus.isWatermarkAligned) {
inputChannelStatus.isWatermarkAligned = true;
inputChannelStatus.addTo(f0);
}
} | 3.26 |
flink_StatusWatermarkValve_adjustAlignedChannelStatuses_rdh | /**
* Adjust the {@link #alignedChannelStatuses} when an element({@link InputChannelStatus}) in it
* was modified. The {@link #alignedChannelStatuses} is a priority queue, when an element in it
* was modified, we need to adjust the element's position to ensure its priority order.
*
* @param inputChannelStatus
* the modified input channel status
*/
private void adjustAlignedChannelStatuses(InputChannelStatus inputChannelStatus) {
f0.adjustModifiedElement(inputChannelStatus);
} | 3.26 |
flink_StatusWatermarkValve_inputWatermark_rdh | /**
* Feed a {@link Watermark} into the valve. If the input triggers the valve to output a new
* Watermark, {@link DataOutput#emitWatermark(Watermark)} will be called to process the new
* Watermark.
*
* @param watermark
* the watermark to feed to the valve
* @param channelIndex
* the index of the channel that the fed watermark belongs to (index
* starting from 0)
*/
public void inputWatermark(Watermark watermark, int channelIndex, DataOutput<?> output) throws Exception {
// ignore the input watermark if its input channel, or all input channels are idle (i.e.
// overall the valve is idle).
if (lastOutputWatermarkStatus.isActive() && channelStatuses[channelIndex].watermarkStatus.isActive()) {
long watermarkMillis = watermark.getTimestamp();
// if the input watermark's value is less than the last received watermark for its input
// channel, ignore it also.
if (watermarkMillis > channelStatuses[channelIndex].watermark) {
channelStatuses[channelIndex].watermark = watermarkMillis;
if (channelStatuses[channelIndex].isWatermarkAligned) {
adjustAlignedChannelStatuses(channelStatuses[channelIndex]);
} else if (watermarkMillis >= lastOutputWatermark) {
// previously unaligned input channels are now aligned if its watermark has
// caught up
markWatermarkAligned(channelStatuses[channelIndex]);
}
// now, attempt to find a new min watermark across all aligned channels
findAndOutputNewMinWatermarkAcrossAlignedChannels(output);
}
}
} | 3.26 |
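Editor's note on the valve rows above: both methods enforce a simple invariant, namely that each channel's watermark only moves forward and that the valve emits a new overall watermark only when the minimum across (aligned) channels advances. The following self-contained sketch illustrates just that min-across-channels rule; it is not the Flink valve itself and deliberately ignores idleness and alignment.

```java
import java.util.Arrays;

/** Simplified illustration of the min-watermark rule; not the Flink valve itself. */
public class MinWatermarkTracker {
    private final long[] channelWatermarks;
    private long lastOutput = Long.MIN_VALUE;

    public MinWatermarkTracker(int numChannels) {
        channelWatermarks = new long[numChannels];
        Arrays.fill(channelWatermarks, Long.MIN_VALUE);
    }

    /** Feeds a watermark for one channel; returns the new overall watermark, or null if unchanged. */
    public Long inputWatermark(int channel, long watermark) {
        if (watermark <= channelWatermarks[channel]) {
            return null; // per-channel watermarks never move backwards
        }
        channelWatermarks[channel] = watermark;
        long min = Long.MAX_VALUE;
        for (long w : channelWatermarks) {
            min = Math.min(min, w); // the overall watermark is the minimum across all channels
        }
        if (min > lastOutput) {
            lastOutput = min;
            return min;
        }
        return null;
    }

    public static void main(String[] args) {
        MinWatermarkTracker tracker = new MinWatermarkTracker(2);
        System.out.println(tracker.inputWatermark(0, 10)); // null: channel 1 has not emitted yet
        System.out.println(tracker.inputWatermark(1, 7));  // 7: the minimum across channels advanced
        System.out.println(tracker.inputWatermark(1, 20)); // 10: channel 0's watermark is now the minimum
    }
}
```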
flink_Module_getTableSourceFactory_rdh | /**
* Returns a {@link DynamicTableSourceFactory} for creating source tables.
*
* <p>A factory is determined with the following precedence rule:
*
* <ul>
* <li>1. Factory provided by the corresponding catalog of a persisted table.
* <li>2. Factory provided by a module.
* <li>3. Factory discovered using Java SPI.
* </ul>
*
* <p>This will be called on loaded modules in the order in which they have been loaded. The
* first factory returned will be used.
*
* <p>This method can be useful to disable Java SPI completely or influence how temporary table
* sources should be created without a corresponding catalog.
*/
default Optional<DynamicTableSourceFactory> getTableSourceFactory() {
return Optional.empty();
} | 3.26 |
flink_Module_listFunctions_rdh | /**
* List names of all functions in this module.
*
* <p>A module can decide to hide certain functions. For example, internal functions that can be
* resolved via {@link #getFunctionDefinition(String)} but should not be listed by default.
*
* @param includeHiddenFunctions
* whether to list hidden functions or not
* @return a set of function names
*/
default Set<String> listFunctions(boolean includeHiddenFunctions) {
return listFunctions();
} | 3.26 |
flink_Module_getTableSinkFactory_rdh | /**
* Returns a {@link DynamicTableSinkFactory} for creating sink tables.
*
* <p>A factory is determined with the following precedence rule:
*
* <ul>
* <li>1. Factory provided by the corresponding catalog of a persisted table.
* <li>2. Factory provided by a module.
* <li>3. Factory discovered using Java SPI.
* </ul>
*
* <p>This will be called on loaded modules in the order in which they have been loaded. The
* first factory returned will be used.
*
* <p>This method can be useful to disable Java SPI completely or influence how temporary table
* sinks should be created without a corresponding catalog.
*/
default Optional<DynamicTableSinkFactory> getTableSinkFactory() {
return Optional.empty();
} | 3.26 |
flink_Module_getFunctionDefinition_rdh | /**
* Get an optional of {@link FunctionDefinition} by a given name.
*
* <p>It includes hidden functions even though not listed in {@link #listFunctions()}.
*
* @param name
* name of the {@link FunctionDefinition}.
* @return an optional function definition
*/
default Optional<FunctionDefinition> getFunctionDefinition(String name) {
return Optional.empty();
} | 3.26 |
flink_RowDataLocalTimeZoneConverter_getSessionTimeZone_rdh | /**
* Get time zone from the given session config.
*/
private static ZoneId getSessionTimeZone(ReadableConfig sessionConfig) {
final String zone = sessionConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
return TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone) ? ZoneId.systemDefault() : ZoneId.of(zone);
} | 3.26 |
flink_Checkpoints_storeCheckpointMetadata_rdh | // ------------------------------------------------------------------------
public static void storeCheckpointMetadata(CheckpointMetadata checkpointMetadata, OutputStream out) throws IOException {
DataOutputStream dos = new DataOutputStream(out);
storeCheckpointMetadata(checkpointMetadata, dos);
} | 3.26 |
flink_Checkpoints_disposeSavepoint_rdh | // ------------------------------------------------------------------------
// Savepoint Disposal Hooks
// ------------------------------------------------------------------------
public static void disposeSavepoint(String pointer, CheckpointStorage checkpointStorage, ClassLoader classLoader) throws IOException, FlinkException {
checkNotNull(pointer, "location");
checkNotNull(checkpointStorage, "stateBackend");
checkNotNull(classLoader, "classLoader");
final CompletedCheckpointStorageLocation checkpointLocation =
checkpointStorage.resolveCheckpoint(pointer);
final StreamStateHandle metadataHandle = checkpointLocation.getMetadataHandle();
// load the savepoint object (the metadata) to have all the state handles that we need
// to dispose of all state
final CheckpointMetadata metadata;
try (InputStream in = metadataHandle.openInputStream(); DataInputStream dis = new DataInputStream(in)) {
metadata = loadCheckpointMetadata(dis, classLoader, pointer);
}
Exception exception = null;
// first dispose the savepoint metadata, so that the savepoint is not
// addressable any more even if the following disposal fails
try {
metadataHandle.discardState();
} catch (Exception e) {
exception = e;
}
// now dispose the savepoint data
try {
metadata.dispose();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
// now dispose the location (directory, table, whatever)
try {
checkpointLocation.disposeStorageLocation();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
// forward exceptions caught in the process
if (exception != null) {
ExceptionUtils.rethrowIOException(exception);
}
} | 3.26 |
flink_TimestampData_fromTimestamp_rdh | /**
* Creates an instance of {@link TimestampData} from an instance of {@link Timestamp}.
*
* @param timestamp
* an instance of {@link Timestamp}
*/
public static TimestampData fromTimestamp(Timestamp timestamp) {
return fromLocalDateTime(timestamp.toLocalDateTime());
} | 3.26 |
flink_TimestampData_m0_rdh | /**
* Converts this {@link TimestampData} object to a {@link Timestamp}.
*/
public Timestamp m0() {
return Timestamp.valueOf(toLocalDateTime());
} | 3.26 |
flink_TimestampData_fromEpochMillis_rdh | /**
* Creates an instance of {@link TimestampData} from milliseconds and a nanos-of-millisecond.
*
* @param milliseconds
* the number of milliseconds since {@code 1970-01-01 00:00:00}; a negative
* number is the number of milliseconds before {@code 1970-01-01 00:00:00}
* @param nanosOfMillisecond
* the nanoseconds within the millisecond, from 0 to 999,999
*/
public static TimestampData fromEpochMillis(long milliseconds, int nanosOfMillisecond) {
return new TimestampData(milliseconds, nanosOfMillisecond);
} | 3.26 |
flink_TimestampData_getMillisecond_rdh | /**
* Returns the number of milliseconds since {@code 1970-01-01 00:00:00}.
*/
public long getMillisecond() {
return millisecond;
} | 3.26 |
flink_TimestampData_fromInstant_rdh | /**
* Creates an instance of {@link TimestampData} from an instance of {@link Instant}.
*
* @param instant
* an instance of {@link Instant}
*/
public static TimestampData fromInstant(Instant instant) {
long epochSecond = instant.getEpochSecond();
int nanoSecond = instant.getNano();
long millisecond = (epochSecond * 1000) + (nanoSecond / 1000000);
int nanoOfMillisecond = nanoSecond % 1000000;
return new TimestampData(millisecond, nanoOfMillisecond);
} | 3.26 |
flink_TimestampData_isCompact_rdh | /**
* Returns whether the timestamp data is small enough to be stored in a long of milliseconds.
*/
public static boolean isCompact(int precision) {
return precision <= 3;
} | 3.26 |
flink_TimestampData_toLocalDateTime_rdh | /**
* Converts this {@link TimestampData} object to a {@link LocalDateTime}.
*/
public LocalDateTime toLocalDateTime() {
int date = ((int) (millisecond / MILLIS_PER_DAY));
int time = ((int) (millisecond % MILLIS_PER_DAY));
if (time < 0) {
--date;
time += MILLIS_PER_DAY;
}
long nanoOfDay = (time * 1000000L) + nanoOfMillisecond;
LocalDate localDate = LocalDate.ofEpochDay(date);
LocalTime localTime = LocalTime.ofNanoOfDay(nanoOfDay);
return LocalDateTime.of(localDate, localTime);
} | 3.26 |
flink_TimestampData_fromLocalDateTime_rdh | /**
* Creates an instance of {@link TimestampData} from an instance of {@link LocalDateTime}.
*
* @param dateTime
* an instance of {@link LocalDateTime}
*/
public static TimestampData fromLocalDateTime(LocalDateTime dateTime) {
long epochDay = dateTime.toLocalDate().toEpochDay();
long nanoOfDay = dateTime.toLocalTime().toNanoOfDay();
long millisecond = (epochDay * MILLIS_PER_DAY) + (nanoOfDay / 1000000);
int nanoOfMillisecond = ((int) (nanoOfDay % 1000000));
return new TimestampData(millisecond, nanoOfMillisecond);
} | 3.26 |
flink_TimestampData_toInstant_rdh | /**
* Converts this {@link TimestampData} object to an {@link Instant}.
*/
public Instant toInstant() {
long epochSecond = millisecond / 1000;
int milliOfSecond = ((int) (millisecond % 1000));
if (milliOfSecond < 0) {
--epochSecond;
milliOfSecond += 1000;
}
long nanoAdjustment = (milliOfSecond * 1000000) + nanoOfMillisecond;
return Instant.ofEpochSecond(epochSecond, nanoAdjustment);
} | 3.26 |
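Editor's note on the TimestampData rows above: the factory and conversion methods shown there compose as in the sketch below. This is only an illustrative usage note; the import path is assumed (the package is not shown in the snippets).

```java
import org.apache.flink.table.data.TimestampData; // package assumed; not shown in the snippets above

import java.time.Instant;
import java.time.LocalDateTime;

public class TimestampDataExample {
    public static void main(String[] args) {
        // 1500 ms after the epoch plus 250,000 ns inside that millisecond
        TimestampData ts = TimestampData.fromEpochMillis(1_500L, 250_000);

        System.out.println(ts.getMillisecond());        // 1500

        Instant instant = ts.toInstant();               // 1970-01-01T00:00:01.500250Z
        LocalDateTime local = ts.toLocalDateTime();     // 1970-01-01T00:00:01.500250

        // round trip through fromLocalDateTime preserves the instant
        System.out.println(instant.equals(TimestampData.fromLocalDateTime(local).toInstant())); // true

        // precision <= 3 fits into the compact, long-only representation
        System.out.println(TimestampData.isCompact(3)); // true
        System.out.println(TimestampData.isCompact(6)); // false
    }
}
```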
flink_CliStrings_messageInfo_rdh | // --------------------------------------------------------------------------------------------
public static AttributedString messageInfo(String message) {
return new AttributedStringBuilder().style(AttributedStyle.DEFAULT.bold().foreground(AttributedStyle.BLUE)).append("[INFO] ").append(message).toAttributedString();
} | 3.26 |
flink_StopWithSavepoint_onSavepointFailure_rdh | /**
* Restarts the checkpoint scheduler and, if only the savepoint failed without a task failure /
* job termination, transitions back to {@link Executing}.
*
* <p>This method must assume that {@link #onFailure}/{@link #onGloballyTerminalState} MAY
* already be waiting for the savepoint operation to complete, itching to trigger a state
* transition (hence the {@link #hasPendingStateTransition} check).
*
* <p>If the above is violated (e.g., by always transitioning into another state), then
* depending on other implementation details something very bad will happen, like the scheduler
* crashing the JVM because it attempted multiple state transitions OR effectively dropping the
* onFailure/onGloballyTerminalState call OR we trigger state transitions while we are already
* in another state.
*
* <p>For maintainability reasons this method should not mutate any state that affects state
* transitions in other methods.
*/
private void onSavepointFailure(Throwable cause) {
// revert side-effect of Executing#stopWithSavepoint
checkpointScheduling.startCheckpointScheduler();
// a task failed concurrently; defer the error handling to onFailure()
// otherwise we will attempt 2 state transitions, which is forbidden
if (!hasPendingStateTransition) {
operationFailureCause = cause;
context.goToExecuting(getExecutionGraph(), getExecutionGraphHandler(), getOperatorCoordinatorHandler(), getFailures());
}
} | 3.26 |
flink_SystemProcessingTimeService_shutdownAndAwaitPending_rdh | /**
* Shuts down and cleans up the timer service provider hard and immediately. This does wait for
* all timers to complete or until the time limit is exceeded. Any call to {@link #registerTimer(long, ProcessingTimeCallback)} will result in a hard exception after calling
* this method.
*
* @param time
* time to wait for termination.
* @param timeUnit
* time unit of parameter time.
* @return {@code true} if this timer service and all pending timers are terminated and {@code false} if the timeout elapsed before this happened.
*/
@VisibleForTesting
boolean shutdownAndAwaitPending(long time, TimeUnit timeUnit) throws InterruptedException {
shutdownService();
return timerService.awaitTermination(time, timeUnit);
} | 3.26 |
flink_SystemProcessingTimeService_finalize_rdh | // safety net to destroy the thread pool
@Override
protected void finalize() throws Throwable {
super.finalize();
timerService.shutdownNow();
} | 3.26 |
flink_ResultPartitionMetrics_refreshAndGetTotal_rdh | // ------------------------------------------------------------------------
// these methods are package private to make access from the nested classes faster
/**
* Iterates over all sub-partitions and collects the total number of queued buffers in a
* best-effort way.
*
* @return total number of queued buffers
*/
long refreshAndGetTotal() {
return partition.getNumberOfQueuedBuffers();
} | 3.26 |
flink_ResultPartitionMetrics_getTotalQueueLenGauge_rdh | // ------------------------------------------------------------------------
// Gauges to access the stats
// ------------------------------------------------------------------------
private Gauge<Long> getTotalQueueLenGauge() {
return new Gauge<Long>() {
@Override
public Long getValue() {
return refreshAndGetTotal();
}
};
} | 3.26 |
flink_ResultPartitionMetrics_refreshAndGetAvg_rdh | /**
* Iterates over all sub-partitions and collects the average number of queued buffers in a
* sub-partition in a best-effort way.
*
* @return average number of queued buffers per sub-partition
*/
float refreshAndGetAvg() {
return partition.getNumberOfQueuedBuffers() / ((float) (partition.getNumberOfSubpartitions()));
} | 3.26 |
flink_ResultPartitionMetrics_refreshAndGetMax_rdh | /**
* Iterates over all sub-partitions and collects the maximum number of queued buffers in a
* sub-partition in a best-effort way.
*
* @return maximum number of queued buffers per sub-partition
*/
int refreshAndGetMax() {
int max = 0;
int numSubpartitions = partition.getNumberOfSubpartitions();
for (int targetSubpartition = 0; targetSubpartition < numSubpartitions; ++targetSubpartition) {
int size = partition.getNumberOfQueuedBuffers(targetSubpartition);
max = Math.max(max, size);
}
return max;
} | 3.26 |
flink_ResultPartitionMetrics_registerQueueLengthMetrics_rdh | // ------------------------------------------------------------------------
// Static access
// ------------------------------------------------------------------------
public static void registerQueueLengthMetrics(MetricGroup parent, ResultPartition[] partitions) {
for (int i = 0; i < partitions.length; i++) {
ResultPartitionMetrics metrics = new ResultPartitionMetrics(partitions[i]);
MetricGroup group = parent.addGroup(i);
group.gauge("totalQueueLen", metrics.getTotalQueueLenGauge());
group.gauge("minQueueLen", metrics.getMinQueueLenGauge());
group.gauge("maxQueueLen", metrics.getMaxQueueLenGauge());
group.gauge("avgQueueLen", metrics.getAvgQueueLenGauge());
}
} | 3.26 |
flink_CatalogView_of_rdh | /**
* Creates a basic implementation of this interface.
*
* <p>The signature is similar to a SQL {@code CREATE VIEW} statement.
*
* @param schema
* unresolved schema
* @param comment
* optional comment
* @param originalQuery
* original text of the view definition
* @param expandedQuery
* expanded text of the original view definition with materialized
* identifiers
* @param options
* options to configure the connector
*/
static CatalogView of(Schema schema, @Nullable String comment, String originalQuery, String expandedQuery, Map<String, String> options) {
return new DefaultCatalogView(schema, comment, originalQuery, expandedQuery, options);
} | 3.26 |
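Editor's note: a hypothetical usage sketch of CatalogView.of(...) as shown above. The Schema builder calls, package names, and the getOriginalQuery() accessor are assumptions based on a typical Flink Table API setup and are not part of the snippet.

```java
import org.apache.flink.table.api.Schema;          // assumed package
import org.apache.flink.table.catalog.CatalogView; // assumed package

import java.util.Collections;

public class CatalogViewExample {
    public static void main(String[] args) {
        // unresolved schema of the view
        Schema schema = Schema.newBuilder()
                .column("id", "INT")
                .column("name", "STRING")
                .build();

        CatalogView view = CatalogView.of(
                schema,
                "users view",                                   // optional comment
                "SELECT id, name FROM users",                   // original query text
                "SELECT `id`, `name` FROM `cat`.`db`.`users`",  // expanded query text
                Collections.emptyMap());                        // connector options

        System.out.println(view.getOriginalQuery());
    }
}
```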
flink_ReduceOperator_setCombineHint_rdh | /**
* Sets the strategy to use for the combine phase of the reduce.
*
* <p>If this method is not called, then the default hint will be used. ({@link org.apache.flink.api.common.operators.base.ReduceOperatorBase.CombineHint#OPTIMIZER_CHOOSES})
*
* @param strategy
* The hint to use.
* @return The ReduceOperator object, for function call chaining.
*/
@PublicEvolving
public ReduceOperator<IN> setCombineHint(CombineHint strategy) {
this.hint = strategy;
return this;
} | 3.26 |
flink_ReduceOperator_translateSelectorFunctionReducer_rdh | // --------------------------------------------------------------------------------------------
private static <T, K> SingleInputOperator<?, T, ?> translateSelectorFunctionReducer(SelectorFunctionKeys<T, ?> rawKeys, ReduceFunction<T> function, TypeInformation<T> inputType, String name, Operator<T> input, int parallelism, CombineHint hint) {
@SuppressWarnings("unchecked")
final SelectorFunctionKeys<T, K> keys = ((SelectorFunctionKeys<T, K>) (rawKeys));TypeInformation<Tuple2<K, T>> typeInfoWithKey = KeyFunctions.createTypeWithKey(keys);
Operator<Tuple2<K, T>> keyedInput = KeyFunctions.appendKeyExtractor(input, keys);
PlanUnwrappingReduceOperator<T, K> reducer = new PlanUnwrappingReduceOperator<>(function, keys, name, inputType, typeInfoWithKey);
reducer.setInput(keyedInput);
reducer.setParallelism(parallelism);
reducer.setCombineHint(hint);
return KeyFunctions.appendKeyRemover(reducer, keys);
} | 3.26 |
flink_SharedResources_release_rdh | /**
* Releases a lease (identified by the lease holder object) for the given type. If no further
* leases exist, the resource is disposed.
*
* <p>This method takes an additional hook that is called when the resource is disposed.
*/
public void release(String type, Object leaseHolder, LongConsumer releaser) throws Exception {
lock.lock();
try {
final LeasedResource<?> resource = reservedResources.get(type);
if (resource == null) {
return;
}
if (resource.removeLeaseHolder(leaseHolder)) {
try {
reservedResources.remove(type);
resource.dispose();
} finally {
releaser.accept(resource.size());
}
}
} finally {
lock.unlock();
}
} | 3.26 |
flink_SharedResources_getOrAllocateSharedResource_rdh | /**
* Gets the shared memory resource for the given owner and registers a lease. If the resource
* does not yet exist, it will be created via the given initializer function.
*
* <p>The resource must be released when no longer used. That releases the lease. When all
* leases are released, the resource is disposed.
*/
public <T extends AutoCloseable> ResourceAndSize<T> getOrAllocateSharedResource(String type, Object leaseHolder, LongFunctionWithException<T, Exception> initializer, long sizeForInitialization) throws Exception {
// We could be stuck on this lock for a while, in cases where another initialization is currently
// happening and the initialization is expensive.
// We lock interruptibly here to allow for faster exit in case of cancellation errors.
try {
lock.lockInterruptibly();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new MemoryAllocationException("Interrupted while acquiring memory");
}
try {
// we cannot use "computeIfAbsent()" here because the computing function may throw an
// exception.
@SuppressWarnings("unchecked")
LeasedResource<T> resource = ((LeasedResource<T>) (reservedResources.get(type)));
if (resource == null) {
resource = createResource(initializer, sizeForInitialization);
reservedResources.put(type, resource);
}
resource.addLeaseHolder(leaseHolder);
return resource;
} finally {
lock.unlock();
}
} | 3.26 |
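Editor's note on the SharedResources rows above: the acquire/release pair implements a lease-counted shared resource, where the first acquirer creates the resource, later acquirers only add lease holders, and the resource is disposed once the last lease is released. A stripped-down, self-contained sketch of that pattern (not the Flink class, which additionally tracks sizes and locks interruptibly) could look like this:

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

/** Simplified lease-counting sketch; not the Flink SharedResources class. */
public class LeasedResourcePool<T extends AutoCloseable> {
    private final ReentrantLock lock = new ReentrantLock();
    private final Map<String, T> resources = new HashMap<>();
    private final Map<String, Set<Object>> leases = new HashMap<>();

    /** First caller creates the resource; later callers only register another lease holder. */
    public T acquire(String type, Object leaseHolder, Supplier<T> initializer) {
        lock.lock();
        try {
            T resource = resources.computeIfAbsent(type, k -> initializer.get());
            leases.computeIfAbsent(type, k -> new HashSet<>()).add(leaseHolder);
            return resource;
        } finally {
            lock.unlock();
        }
    }

    /** Removes one lease; the resource is closed once its last lease is gone. */
    public void release(String type, Object leaseHolder) throws Exception {
        lock.lock();
        try {
            Set<Object> holders = leases.get(type);
            if (holders == null || !holders.remove(leaseHolder)) {
                return; // unknown type or lease holder: nothing to do
            }
            if (holders.isEmpty()) {
                leases.remove(type);
                resources.remove(type).close(); // last lease released: dispose the resource
            }
        } finally {
            lock.unlock();
        }
    }
}
```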
flink_TaskDeploymentDescriptor_loadBigData_rdh | /**
* Loads externalized data from the BLOB store back to the object.
*
* @param blobService
* the blob store to use (may be <tt>null</tt> if {@link #serializedJobInformation} and {@link #serializedTaskInformation} are non-<tt>null</tt>)
* @param shuffleDescriptorsCache
* cache of shuffle descriptors to reduce the cost of
* deserialization
* @throws IOException
* during errors retrieving or reading the BLOBs
* @throws ClassNotFoundException
* Class of a serialized object cannot be found.
*/
public void loadBigData(@Nullable PermanentBlobService blobService, GroupCache<JobID, PermanentBlobKey, JobInformation> jobInformationCache, GroupCache<JobID, PermanentBlobKey, TaskInformation> taskInformationCache, GroupCache<JobID, PermanentBlobKey, ShuffleDescriptorGroup> shuffleDescriptorsCache) throws IOException, ClassNotFoundException {
// re-integrate offloaded job info from blob
// here, if this fails, we need to throw the exception as there is no backup path anymore
if (serializedJobInformation instanceof Offloaded) {
PermanentBlobKey jobInfoKey = ((Offloaded<JobInformation>) (serializedJobInformation)).serializedValueKey;
Preconditions.checkNotNull(blobService);
JobInformation jobInformation = jobInformationCache.get(jobId, jobInfoKey);
if (jobInformation == null) {
final File dataFile = blobService.getFile(jobId, jobInfoKey);
// NOTE: Do not delete the job info BLOB since it may be needed again during
// recovery. (it is deleted automatically on the BLOB server and cache when the job
// enters a terminal state)
jobInformation = InstantiationUtil.deserializeObject(new BufferedInputStream(Files.newInputStream(dataFile.toPath())), getClass().getClassLoader());
jobInformationCache.put(jobId, jobInfoKey, jobInformation);
}
this.jobInformation = jobInformation.deepCopy();
}
// re-integrate offloaded task info from blob
if (f0 instanceof Offloaded) {
PermanentBlobKey taskInfoKey = ((Offloaded<TaskInformation>) (f0)).serializedValueKey;
Preconditions.checkNotNull(blobService);
TaskInformation taskInformation = taskInformationCache.get(jobId, taskInfoKey);
if (taskInformation == null) {
final File dataFile = blobService.getFile(jobId, taskInfoKey);
// NOTE: Do not delete the task info BLOB since it may be needed again during
// recovery. (it is deleted automatically on the BLOB server and cache when the job
// enters a terminal state)
taskInformation = InstantiationUtil.deserializeObject(new BufferedInputStream(Files.newInputStream(dataFile.toPath())), getClass().getClassLoader());
taskInformationCache.put(jobId, taskInfoKey, taskInformation);
}
this.taskInformation = taskInformation.deepCopy();
}
for (InputGateDeploymentDescriptor inputGate : inputGates) {
inputGate.tryLoadAndDeserializeShuffleDescriptors(blobService, jobId, shuffleDescriptorsCache);
}
} | 3.26 |
flink_TaskDeploymentDescriptor_m0_rdh | /**
* Returns the task's job ID.
*
* @return the job ID this task belongs to
*/
public JobID m0() {
return jobId;
} | 3.26 |
flink_TaskDeploymentDescriptor_getAttemptNumber_rdh | /**
* Returns the attempt number of the subtask.
*/
public int getAttemptNumber() {
return executionId.getAttemptNumber();
} | 3.26 |
flink_ChainedMapDriver_getStub_rdh | // --------------------------------------------------------------------------------------------
public Function getStub() {
return this.mapper;
} | 3.26 |
flink_ChainedMapDriver_setup_rdh | // --------------------------------------------------------------------------------------------
@Override
public void setup(AbstractInvokable parent) {
final MapFunction<IT, OT> mapper = BatchTask.instantiateUserCode(this.config, userCodeClassLoader, MapFunction.class);
this.mapper = mapper;
FunctionUtils.setFunctionRuntimeContext(mapper, getUdfRuntimeContext());
} | 3.26 |
flink_ChainedMapDriver_collect_rdh | // --------------------------------------------------------------------------------------------
@Override
public void collect(IT record) {
try {
this.numRecordsIn.inc();
this.outputCollector.collect(this.mapper.map(record));
} catch (Exception ex) {
throw new ExceptionInChainedStubException(this.taskName, ex);
}
} | 3.26 |
flink_ExternalServiceDecorator_getNamespacedExternalServiceName_rdh | /**
* Generates the namespaced name of the external rest Service from the cluster Id. This is used by
* other projects, so do not delete it.
*/
public static String getNamespacedExternalServiceName(String clusterId, String namespace) {
return (getExternalServiceName(clusterId) + ".") + namespace;
} | 3.26 |
flink_ExternalServiceDecorator_getExternalServiceName_rdh | /**
* Generate name of the external rest Service.
*/
public static String getExternalServiceName(String clusterId) {
return clusterId + Constants.FLINK_REST_SERVICE_SUFFIX;
} | 3.26 |
flink_InputFormatProvider_of_rdh | /**
* Helper method for creating a static provider with a provided source parallelism.
*/
static InputFormatProvider of(InputFormat<RowData, ?> inputFormat, @Nullable Integer sourceParallelism) {
return new InputFormatProvider() {
@Override
public InputFormat<RowData, ?> createInputFormat() {
return inputFormat;
}
@Override
public boolean isBounded() {
return true;
}
@Override
public Optional<Integer> getParallelism() {
return Optional.ofNullable(sourceParallelism);
}
};
} | 3.26 |
flink_RpcSerializedValue_getSerializedDataLength_rdh | /**
* Return length of serialized data, zero if no serialized data.
*/
public int getSerializedDataLength() {
return serializedData == null ? 0 : serializedData.length;
} | 3.26 |
flink_RpcSerializedValue_m0_rdh | /**
* Construct a serialized value to transfer on wire.
*
* @param value
* nullable value
* @return serialized value to transfer on wire
* @throws IOException
* exception during value serialization
*/
public static RpcSerializedValue m0(@Nullable Object value) throws IOException {
byte[] v0 = (value == null) ? null : InstantiationUtil.serializeObject(value);
return new RpcSerializedValue(v0);
} | 3.26 |
flink_DefaultRollingPolicy_getMaxPartSize_rdh | /**
* Returns the maximum part file size before rolling.
*
* @return Max size in bytes
*/
public long getMaxPartSize() {
return partSize;
} | 3.26 |
flink_DefaultRollingPolicy_getInactivityInterval_rdh | /**
* Returns time duration of allowed inactivity after which a part file will have to roll.
*
* @return Time duration in milliseconds
*/
public long getInactivityInterval() {
return inactivityInterval;
} | 3.26 |
flink_DefaultRollingPolicy_m2_rdh | /**
* Sets the interval of allowed inactivity after which a part file will have to roll. The
* frequency at which this is checked is controlled by the {@link org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.RowFormatBuilder#withBucketCheckInterval(long)}
* setting.
*
* @param interval
* the allowed inactivity interval.
* @deprecated Use {@link #withInactivityInterval(Duration)} instead.
*/
@Deprecated
public DefaultRollingPolicy.PolicyBuilder m2(final long interval) {
Preconditions.checkState(interval > 0L);
return new PolicyBuilder(partSize, rolloverInterval, interval);
} | 3.26 |
flink_DefaultRollingPolicy_builder_rdh | /**
* Creates a new {@link PolicyBuilder} that is used to configure and build an instance of {@code DefaultRollingPolicy}.
*/
public static DefaultRollingPolicy.PolicyBuilder builder() {
return new DefaultRollingPolicy.PolicyBuilder(DEFAULT_MAX_PART_SIZE, DEFAULT_ROLLOVER_INTERVAL, DEFAULT_INACTIVITY_INTERVAL);
} | 3.26 |
flink_DefaultRollingPolicy_withRolloverInterval_rdh | /**
* Sets the max time a part file can stay open before having to roll. The frequency at which
* this is checked is controlled by the {@link org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.RowFormatBuilder#withBucketCheckInterval(long)}
* setting.
*
* @param interval
* the desired rollover interval.
*/
public DefaultRollingPolicy.PolicyBuilder withRolloverInterval(final Duration interval) {
Preconditions.checkNotNull(interval, "Rolling policy rollover interval cannot be null");
return new PolicyBuilder(partSize, interval.toMillis(), inactivityInterval);
} | 3.26 |
flink_DefaultRollingPolicy_build_rdh | /**
* Creates the actual policy.
*/
public <IN, BucketID> DefaultRollingPolicy<IN, BucketID> build() {
return new DefaultRollingPolicy<>(partSize, rolloverInterval, inactivityInterval);
} | 3.26 |
flink_DefaultRollingPolicy_withInactivityInterval_rdh | /**
* Sets the interval of allowed inactivity after which a part file will have to roll. The
* frequency at which this is checked is controlled by the {@link org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.RowFormatBuilder#withBucketCheckInterval(long)}
* setting.
*
* @param interval
* the allowed inactivity interval.
*/
public DefaultRollingPolicy.PolicyBuilder withInactivityInterval(final Duration interval) {
Preconditions.checkNotNull(interval, "Rolling policy inactivity interval cannot be null");
return new PolicyBuilder(partSize, rolloverInterval, interval.toMillis());
} | 3.26 |
flink_DefaultRollingPolicy_m1_rdh | /**
* Sets the part size above which a part file will have to roll.
*
* @param size
* the allowed part size.
* @deprecated Use {@link #withMaxPartSize(MemorySize)} instead.
*/
@Deprecated
public DefaultRollingPolicy.PolicyBuilder m1(final long size) {
Preconditions.checkState(size > 0L);
return new PolicyBuilder(size, rolloverInterval, inactivityInterval);
} | 3.26 |
flink_DefaultRollingPolicy_withMaxPartSize_rdh | /**
* Sets the part size above which a part file will have to roll.
*
* @param size
* the allowed part size.
*/
public DefaultRollingPolicy.PolicyBuilder withMaxPartSize(final MemorySize size) {
Preconditions.checkNotNull(size, "Rolling policy memory size cannot be null");
return new PolicyBuilder(size.getBytes(), rolloverInterval, inactivityInterval);
} | 3.26 |
flink_DefaultRollingPolicy_create_rdh | /**
* This method is {@link Deprecated}, use {@link DefaultRollingPolicy#builder()} instead.
*/
@Deprecated
public static DefaultRollingPolicy.PolicyBuilder create() {
return builder();
}
/**
* A helper class that holds the configuration properties for the {@link DefaultRollingPolicy}.
* The {@link PolicyBuilder#build()} | 3.26 |
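Editor's note on the DefaultRollingPolicy rows above: the builder methods are typically chained as in the sketch below. The import paths and the MemorySize.ofMebiBytes helper are assumptions based on a common Flink setup rather than part of the snippets.

```java
import org.apache.flink.configuration.MemorySize;                                               // assumed package
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy; // assumed package

import java.time.Duration;

public class RollingPolicyExample {
    public static void main(String[] args) {
        DefaultRollingPolicy<String, String> policy =
                DefaultRollingPolicy.builder()
                        // roll once a part file exceeds 128 MiB ...
                        .withMaxPartSize(MemorySize.ofMebiBytes(128))
                        // ... or has been open for 15 minutes ...
                        .withRolloverInterval(Duration.ofMinutes(15))
                        // ... or has received no records for 5 minutes
                        .withInactivityInterval(Duration.ofMinutes(5))
                        .build();
        System.out.println(policy);
    }
}
```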
flink_DoubleValueComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_FileDataIndexCache_handleRemove_rdh | // This is a callback after internal cache removed an entry from itself.
private void handleRemove(RemovalNotification<CachedRegionKey, Object> removedEntry) {
CachedRegionKey removedKey = removedEntry.getKey();
// remove the corresponding region from memory.
T removedRegion = subpartitionFirstBufferIndexRegions.get(removedKey.getSubpartition()).remove(removedKey.getFirstBufferIndex());
// write this region to file. After that, no strong reference point to this region, it can
// be safely released by gc.
writeRegion(removedKey.getSubpartition(), removedRegion);
} | 3.26 |
flink_FileDataIndexCache_put_rdh | /**
* Put regions to cache.
*
* @param subpartition
* the subpartition's id of regions.
* @param fileRegions
* regions to be cached.
*/
public void put(int subpartition, List<T> fileRegions) {
TreeMap<Integer, T> treeMap = subpartitionFirstBufferIndexRegions.get(subpartition);
for (T region : fileRegions) {
internalCache.put(new CachedRegionKey(subpartition, region.getFirstBufferIndex()), PLACEHOLDER);
treeMap.put(region.getFirstBufferIndex(), region);
}
} | 3.26 |
flink_EntropyInjector_isEntropyInjecting_rdh | // ------------------------------------------------------------------------
public static boolean isEntropyInjecting(FileSystem fs, Path target) {
final EntropyInjectingFileSystem entropyFs = getEntropyFs(fs);
return ((entropyFs != null) && (entropyFs.getEntropyInjectionKey() != null)) && target.getPath().contains(entropyFs.getEntropyInjectionKey());
} | 3.26 |
flink_EntropyInjector_addEntropy_rdh | /**
* Handles entropy injection across regular and entropy-aware file systems.
*
* <p>If the given file system is entropy-aware (i.e., implements {@link EntropyInjectingFileSystem}), then this method replaces the entropy marker in the path with
* random characters. The entropy marker is defined by {@link EntropyInjectingFileSystem#getEntropyInjectionKey()}.
*
* <p>If the given file system does not implement {@code EntropyInjectingFileSystem}, then this
* method returns the same path.
*/
public static Path addEntropy(FileSystem fs, Path path) throws IOException {
// check and possibly inject entropy into the path
final EntropyInjectingFileSystem efs = getEntropyFs(fs);
return efs == null ? path : resolveEntropy(path, efs, true);
} | 3.26 |
flink_EntropyInjector_createEntropyAware_rdh | /**
* Handles entropy injection across regular and entropy-aware file systems.
*
* <p>If the given file system is entropy-aware (i.e., implements {@link EntropyInjectingFileSystem}), then this method replaces the entropy marker in the path with
* random characters. The entropy marker is defined by {@link EntropyInjectingFileSystem#getEntropyInjectionKey()}.
*
* <p>If the given file system does not implement {@code EntropyInjectingFileSystem}, then this
* method delegates to {@link FileSystem#create(Path, WriteMode)} and returns the same path in
* the resulting {@code OutputStreamAndPath}.
*/
public static OutputStreamAndPath createEntropyAware(FileSystem fs, Path path, WriteMode writeMode) throws IOException {
final Path processedPath = addEntropy(fs, path);
// create the stream on the original file system to let the safety net
// take its effect
final FSDataOutputStream out = fs.create(processedPath, writeMode);
return new OutputStreamAndPath(out, processedPath);
}
/**
* Removes the entropy marker string from the path, if the given file system is an
* entropy-injecting file system (implements {@link EntropyInjectingFileSystem} | 3.26 |
flink_BinaryStringData_fromBytes_rdh | /**
* Creates a {@link BinaryStringData} instance from the given UTF-8 bytes with offset and number
* of bytes.
*/
public static BinaryStringData fromBytes(byte[] bytes, int offset, int numBytes) {
return new BinaryStringData(new MemorySegment[]{ MemorySegmentFactory.wrap(bytes) }, offset, numBytes);
} | 3.26 |
flink_BinaryStringData_copy_rdh | /**
* Copy a new {@code BinaryStringData}.
*/
public BinaryStringData copy() {
ensureMaterialized();
byte[] copy = BinarySegmentUtils.copyToBytes(binarySection.segments, binarySection.offset, binarySection.sizeInBytes);
return new BinaryStringData(new MemorySegment[]{ MemorySegmentFactory.wrap(copy) }, 0, binarySection.sizeInBytes, javaObject);
}
/**
* Returns a binary string that is a substring of this binary string. The substring begins at
* the specified {@code beginIndex} and extends to the character at index {@code endIndex - 1} | 3.26 |
flink_BinaryStringData_getByteOneSegment_rdh | // ------------------------------------------------------------------------------------------
// Internal methods on BinaryStringData
// ------------------------------------------------------------------------------------------
byte getByteOneSegment(int i) {
return binarySection.segments[0].get(binarySection.offset + i);
} | 3.26 |
flink_BinaryStringData_numBytesForFirstByte_rdh | /**
* Returns the number of bytes for a code point with the first byte as `b`.
*
* @param b
* The first byte of a code point
*/
static int numBytesForFirstByte(final byte b) {
if (b >= 0) {
// 1 byte, 7 bits: 0xxxxxxx
return 1;
} else if (((b >> 5) == (-2)) && ((b & 0x1e) != 0)) {
// 2 bytes, 11 bits: 110xxxxx 10xxxxxx
return 2;
} else if ((b >> 4) == (-2)) {
// 3 bytes, 16 bits: 1110xxxx 10xxxxxx 10xxxxxx
return 3;
} else if ((b >> 3) == (-2)) {
// 4 bytes, 21 bits: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
return 4;
} else {
// Skip the first byte disallowed in UTF-8
// Handling errors quietly, same semantics to java String.
return 1;
}
} | 3.26 |
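Editor's note on numBytesForFirstByte above: the bit tests follow directly from the UTF-8 leading-byte patterns listed in its comments. The JDK-only demo below re-declares the same helper so it runs without Flink and checks the predicted length against the actual encoded length.

```java
import java.nio.charset.StandardCharsets;

public class Utf8FirstByteDemo {
    // Same logic as the snippet above, copied here so the demo runs without Flink.
    static int numBytesForFirstByte(final byte b) {
        if (b >= 0) {
            return 1;                                    // 0xxxxxxx
        } else if (((b >> 5) == -2) && ((b & 0x1e) != 0)) {
            return 2;                                    // 110xxxxx
        } else if ((b >> 4) == -2) {
            return 3;                                    // 1110xxxx
        } else if ((b >> 3) == -2) {
            return 4;                                    // 11110xxx
        } else {
            return 1;                                    // invalid first byte: skip quietly
        }
    }

    public static void main(String[] args) {
        String[] samples = {"A", "é", "€", new String(Character.toChars(0x1F600))};
        for (String s : samples) {
            byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
            System.out.printf("%-2s actual=%d predicted=%d%n",
                    s, utf8.length, numBytesForFirstByte(utf8[0]));
        }
    }
}
```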
flink_BinaryStringData_blankString_rdh | /**
* Creates a {@link BinaryStringData} instance that contains `length` spaces.
*/
public static BinaryStringData blankString(int length) {
byte[] spaces = new byte[length];
Arrays.fill(spaces, ((byte) (' ')));
return fromBytes(spaces);
} | 3.26 |
flink_BinaryStringData_byteAt_rdh | /**
* Returns the {@code byte} value at the specified index. An index ranges from {@code 0} to
* {@code binarySection.sizeInBytes - 1}.
*
* @param index
* the index of the {@code byte} value.
* @return the {@code byte} value at the specified index of this UTF-8 bytes.
* @exception IndexOutOfBoundsException
* if the {@code index} argument is negative or not less
* than the length of this UTF-8 bytes.
*/
public byte byteAt(int index) {
ensureMaterialized();
int globalOffset = binarySection.offset + index;
int size = binarySection.segments[0].size();
if (globalOffset < size) {
return binarySection.segments[0].get(globalOffset);
} else {
return binarySection.segments[globalOffset / size].get(globalOffset % size);
}
} | 3.26 |
flink_BinaryStringData_numChars_rdh | // ------------------------------------------------------------------------------------------
// Public methods on BinaryStringData
// ------------------------------------------------------------------------------------------
/**
* Returns the number of UTF-8 code points in the string.
*/
public int numChars() {
ensureMaterialized();
if (m1()) {
int len = 0;
for (int i = 0; i < binarySection.sizeInBytes; i += numBytesForFirstByte(getByteOneSegment(i))) {
len++;
}
return len;
} else {
return numCharsMultiSegs();
}
} | 3.26 |
flink_BinaryStringData_toBytes_rdh | // ------------------------------------------------------------------------------------------
// Public Interfaces
// ------------------------------------------------------------------------------------------
@Override
public byte[] toBytes() {
ensureMaterialized();
return BinarySegmentUtils.getBytes(binarySection.segments, binarySection.offset, binarySection.sizeInBytes);
} | 3.26 |
flink_BinaryStringData_toLowerCase_rdh | /**
* Converts all of the characters in this {@code BinaryStringData} to lower case.
*
* @return the {@code BinaryStringData}, converted to lowercase.
*/
public BinaryStringData toLowerCase() {
if (javaObject != null) {
return javaToLowerCase();
}
if (binarySection.sizeInBytes == 0) {
return EMPTY_UTF8;
}
int size = binarySection.segments[0].size();
BinaryStringData.SegmentAndOffset segmentAndOffset = startSegmentAndOffset(size);
byte[] bytes = new byte[binarySection.sizeInBytes];
bytes[0] = ((byte) (Character.toTitleCase(segmentAndOffset.value())));
for (int i = 0; i < binarySection.sizeInBytes; i++) {
byte b = segmentAndOffset.value();
if (numBytesForFirstByte(b) != 1) {
// fallback
return javaToLowerCase();
}
int lower = Character.toLowerCase(((int) (b)));
if (lower > 127) {
// fallback
return javaToLowerCase();
}
bytes[i] = ((byte) (lower));
segmentAndOffset.nextByte(size);
}
return fromBytes(bytes);
} | 3.26 |