name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
flink_GenericInputFormat_open_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void open(GenericInputSplit split) throws IOException {
this.partitionNumber = split.getSplitNumber();
}
| 3.26 |
flink_ActiveResourceManager_m0_rdh
|
/**
* Allocates a resource using the worker resource specification.
*
 * @param workerResourceSpec
 *            specifies the size of the resource to be allocated
*/
@VisibleForTesting
public void m0(WorkerResourceSpec workerResourceSpec) {
final TaskExecutorProcessSpec taskExecutorProcessSpec = TaskExecutorProcessUtils.processSpecFromWorkerResourceSpec(flinkConfig, workerResourceSpec);
final int pendingCount = pendingWorkerCounter.increaseAndGet(workerResourceSpec);
totalWorkerCounter.increaseAndGet(workerResourceSpec);
log.info("Requesting new worker with resource spec {}, current pending count: {}.", workerResourceSpec, pendingCount);
final CompletableFuture<WorkerType> requestResourceFuture = resourceManagerDriver.requestResource(taskExecutorProcessSpec);
unallocatedWorkerFutures.put(requestResourceFuture, workerResourceSpec);
FutureUtils.assertNoException(requestResourceFuture.handle((worker, exception) -> {
unallocatedWorkerFutures.remove(requestResourceFuture);
if (exception != null) {
final int count = pendingWorkerCounter.decreaseAndGet(workerResourceSpec);
totalWorkerCounter.decreaseAndGet(workerResourceSpec);
if (exception instanceof CancellationException) {
log.info("Requesting worker with resource spec {} canceled, current pending count: {}", workerResourceSpec, count);
} else {
log.warn("Failed requesting worker with resource spec {}, current pending count: {}", workerResourceSpec, count, exception);
recordWorkerFailureAndPauseWorkerCreationIfNeeded();
checkResourceDeclarations();
}
} else {
final ResourceID resourceId = worker.getResourceID();
workerNodeMap.put(resourceId, worker);
workerResourceSpecs.put(resourceId, workerResourceSpec);
currentAttemptUnregisteredWorkers.add(resourceId);
scheduleWorkerRegistrationTimeoutCheck(resourceId);
log.info("Requested worker {} with resource spec {}.", resourceId.getStringWithMetadata(), workerResourceSpec);
}
return null;
}));
}
| 3.26 |
flink_ActiveResourceManager_onPreviousAttemptWorkersRecovered_rdh
|
// ------------------------------------------------------------------------
// ResourceEventListener
// ------------------------------------------------------------------------
@Override
public void onPreviousAttemptWorkersRecovered(Collection<WorkerType> recoveredWorkers) {
getMainThreadExecutor().assertRunningInMainThread();
log.info("Recovered {} workers from previous attempt.", recoveredWorkers.size());
for (WorkerType worker : recoveredWorkers) {
final ResourceID resourceId = worker.getResourceID();
            workerNodeMap.put(resourceId, worker);
            previousAttemptUnregisteredWorkers.add(resourceId);
scheduleWorkerRegistrationTimeoutCheck(resourceId);
log.info("Worker {} recovered from previous attempt.", resourceId.getStringWithMetadata());
}
        if ((recoveredWorkers.size() > 0) && (!f1.isZero())) {
            scheduleRunAsync(() -> {
                readyToServeFuture.complete(null);
                log.info("Timed out waiting for recovered TaskManagers; completing the recovery future.");
            }, f1.toMillis(), TimeUnit.MILLISECONDS);
        } else {
readyToServeFuture.complete(null);
}
}
| 3.26 |
flink_ActiveResourceManager_m3_rdh
|
// ------------------------------------------------------------------------
// Testing
// ------------------------------------------------------------------------
@VisibleForTesting
<T> CompletableFuture<T> m3(Callable<T> callable, Time timeout) {
    return callAsync(callable, TimeUtils.toDuration(timeout));
}
| 3.26 |
flink_ActiveResourceManager_recordStartWorkerFailure_rdh
|
/**
 * Records a failure to start a worker in the ResourceManager and checks whether the maximum
 * failure rate has been reached.
 *
 * @return whether the maximum failure rate has been reached
 */
private boolean recordStartWorkerFailure() {
f0.markEvent();
try {
f0.checkAgainstThreshold();
} catch (ThresholdMeter.ThresholdExceedException e) {
log.warn("Reaching max start worker failure rate: {}", e.getMessage());
return true;
}
return false;
}
| 3.26 |
flink_ActiveResourceManager_initialize_rdh
|
// ------------------------------------------------------------------------
// ResourceManager
// ------------------------------------------------------------------------
@Override
protected void initialize() throws ResourceManagerException {
try {
resourceManagerDriver.initialize(this, new GatewayMainThreadExecutor(), ioExecutor, blocklistHandler::getAllBlockedNodeIds);
} catch (Exception e) {
throw new ResourceManagerException("Cannot initialize resource provider.", e);
}
}
| 3.26 |
flink_ActiveResourceManager_checkResourceDeclarations_rdh
|
// ------------------------------------------------------------------------
// Internal
// ------------------------------------------------------------------------
private void checkResourceDeclarations() {
validateRunsInMainThread();
for (ResourceDeclaration resourceDeclaration : resourceDeclarations) {
            WorkerResourceSpec workerResourceSpec = resourceDeclaration.getSpec();
            int declaredWorkerNumber = resourceDeclaration.getNumNeeded();
final int releaseOrRequestWorkerNumber = totalWorkerCounter.getNum(workerResourceSpec) - declaredWorkerNumber;
if (releaseOrRequestWorkerNumber > 0) {
log.info("need release {} workers, current worker number {}, declared worker number {}", releaseOrRequestWorkerNumber, totalWorkerCounter.getNum(workerResourceSpec), declaredWorkerNumber);// release unwanted workers.
int remainingReleasingWorkerNumber = releaseUnWantedResources(resourceDeclaration.getUnwantedWorkers(),
releaseOrRequestWorkerNumber);
if (remainingReleasingWorkerNumber > 0) {
// release not allocated workers
remainingReleasingWorkerNumber = releaseUnallocatedWorkers(workerResourceSpec, remainingReleasingWorkerNumber);
}
if (remainingReleasingWorkerNumber > 0) {
// release starting workers
remainingReleasingWorkerNumber = releaseAllocatedWorkers(currentAttemptUnregisteredWorkers, workerResourceSpec, remainingReleasingWorkerNumber);
}
if (remainingReleasingWorkerNumber > 0) {
// release registered workers
remainingReleasingWorkerNumber = releaseAllocatedWorkers(workerNodeMap.keySet(), workerResourceSpec, remainingReleasingWorkerNumber);
}
checkState(remainingReleasingWorkerNumber == 0, "there are no more workers to release");
} else if (releaseOrRequestWorkerNumber < 0) {
                // In case of start worker failures, we should wait for an interval before
                // trying to start new workers.
                // Otherwise, the ActiveResourceManager would keep re-requesting the worker,
                // which keeps the main thread busy.
if (startWorkerCoolDown.isDone()) {
int requestWorkerNumber = -releaseOrRequestWorkerNumber;
log.info("need request {} new workers, current worker number {}, declared worker number {}", requestWorkerNumber, totalWorkerCounter.getNum(workerResourceSpec), declaredWorkerNumber);
for (int i = 0; i < requestWorkerNumber; i++) {
m0(workerResourceSpec);
                    }
                } else {
                    startWorkerCoolDown.thenRun(this::checkResourceDeclarations);
                }
} else {
log.debug("current worker number {} meets the declared worker {}", totalWorkerCounter.getNum(workerResourceSpec), declaredWorkerNumber);
}
        }
    }
| 3.26 |
flink_BigIntParser_parseField_rdh
|
/**
* Static utility to parse a field of type BigInteger from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException
 * Thrown when the value cannot be parsed because the text does not
 * represent a correct number.
*/
public static final BigInteger parseField(byte[] bytes, int startPos, int length, char delimiter) {
    final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
    if ((limitedLen > 0) && (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[(startPos + limitedLen) - 1]))) {
throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return new BigInteger(str);
}
| 3.26 |
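For orientation, a minimal usage sketch of the parseField helper above. The input bytes and the '|' delimiter are illustrative assumptions; the enclosing class name is taken from the row name (BigIntParser):

    byte[] record = "9007199254740993|rest".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
    // parses only up to the delimiter and rejects leading/trailing whitespace
    java.math.BigInteger value = BigIntParser.parseField(record, 0, record.length, '|');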
flink_StringValueUtils_m0_rdh
|
/**
* Gets the next token from the string. If another token is available, the token is stored
* in the given target StringValue object.
*
* @param target
* The StringValue object to store the next token in.
* @return True, if there was another token, false if not.
*/
public boolean m0(StringValue target) {
    final char[] data = this.toTokenize.getCharArray();
final int limit = this.limit;
int pos = this.pos;
// skip the delimiter
for (; (pos < limit) && Character.isWhitespace(data[pos]); pos++) {
}
if (pos >= limit) {
        this.pos = pos;
        return false;
}
final int start = pos;
for (; (pos < limit) && (!Character.isWhitespace(data[pos])); pos++) {}
this.pos = pos;
target.setValue(this.toTokenize, start, pos - start);
return true;
}
| 3.26 |
flink_StringValueUtils_setStringToTokenize_rdh
|
/**
* Sets the string to be tokenized and resets the state of the tokenizer.
*
* @param string
* The string value to be tokenized.
*/
public void setStringToTokenize(StringValue string) {
this.toTokenize = string;
this.pos = 0;
this.limit = string.length();
}
| 3.26 |
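A short usage sketch combining the two tokenizer methods above. The enclosing class name (StringValueUtils.WhitespaceTokenizer) is an assumption based on the row names, and m0 is the obfuscated next-token method shown above:

    StringValueUtils.WhitespaceTokenizer tokenizer = new StringValueUtils.WhitespaceTokenizer();
    StringValue token = new StringValue();
    tokenizer.setStringToTokenize(new StringValue("to be or not to be"));
    while (tokenizer.m0(token)) {
        System.out.println(token.toString());
    }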
flink_StringValueUtils_replaceNonWordChars_rdh
|
/**
* Replaces all non-word characters in a string by a given character. The only characters not
* replaced are the characters that qualify as word characters or digit characters with respect
* to {@link Character#isLetter(char)} or {@link Character#isDigit(char)}, as well as the
* underscore character.
*
* <p>This operation is intended to simplify strings for counting distinct words.
*
* @param string
* The string value to have the non-word characters replaced.
* @param replacement
* The character to use as the replacement.
*/
public static void replaceNonWordChars(StringValue string, char replacement) {
final char[] chars = string.getCharArray();
final int len = string.length();
for (int i = 0; i < len; i++) {
        final char c = chars[i];
        if (!((Character.isLetter(c) || Character.isDigit(c)) || (c == '_'))) {
chars[i] = replacement;
}
}
}
| 3.26 |
flink_StringValueUtils_toLowerCase_rdh
|
/**
* Converts the given <code>StringValue</code> into a lower case variant.
*
* @param string
* The string to convert to lower case.
*/
public static void toLowerCase(StringValue string) {
final char[] v0 = string.getCharArray();
final int len = string.length();
for (int i = 0; i < len; i++) {
v0[i] = Character.toLowerCase(v0[i]);
}
}
| 3.26 |
flink_RestartBackoffTimeStrategyFactoryLoader_createRestartBackoffTimeStrategyFactory_rdh
|
/**
* Creates {@link RestartBackoffTimeStrategy.Factory} from the given configuration.
*
* <p>The strategy factory is decided in order as follows:
*
* <ol>
* <li>Strategy set within job graph, i.e. {@link RestartStrategies.RestartStrategyConfiguration}, unless the config is {@link RestartStrategies.FallbackRestartStrategyConfiguration}.
* <li>Strategy set in the cluster(server-side) config (flink-conf.yaml), unless the strategy
* is not specified
* <li>{@link FixedDelayRestartBackoffTimeStrategy.FixedDelayRestartBackoffTimeStrategyFactory} if
* checkpointing is enabled. Otherwise {@link NoRestartBackoffTimeStrategy.NoRestartBackoffTimeStrategyFactory}
* </ol>
*
* @param jobRestartStrategyConfiguration
* restart configuration given within the job graph
* @param clusterConfiguration
* cluster(server-side) configuration
* @param isCheckpointingEnabled
* if checkpointing is enabled for the job
* @return new version restart strategy factory
*/
public static Factory createRestartBackoffTimeStrategyFactory(final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration, final Configuration clusterConfiguration, final boolean isCheckpointingEnabled) {
checkNotNull(jobRestartStrategyConfiguration);
checkNotNull(clusterConfiguration);
return getJobRestartStrategyFactory(jobRestartStrategyConfiguration).orElse(getClusterRestartStrategyFactory(clusterConfiguration).orElse(getDefaultRestartStrategyFactory(isCheckpointingEnabled)));
}
| 3.26 |
flink_UnmodifiableConfiguration_m0_rdh
|
// --------------------------------------------------------------------------------------------
// All mutating methods must fail
// --------------------------------------------------------------------------------------------
@Override
public void m0(Properties props) {
// override to make the UnmodifiableConfigurationTest happy
super.addAllToProperties(props);
}
| 3.26 |
flink_KeyedOperatorTransformation_transform_rdh
|
/**
* Method for passing user defined operators along with the type information that will transform
* the OperatorTransformation.
*
* <p><b>IMPORTANT:</b> Any output from this operator will be discarded.
*
* @param factory
* A factory returning transformation logic type of the return stream
 * @return A {@link BootstrapTransformation} that can be added to a {@link Savepoint}.
*/
public BootstrapTransformation<T> transform(SavepointWriterOperatorFactory factory) {
return new BootstrapTransformation<>(dataSet, operatorMaxParallelism, timestamper, factory, keySelector, keyType);
}
| 3.26 |
flink_KeyedOperatorTransformation_window_rdh
|
/**
* Windows this transformation into a {@code WindowedOperatorTransformation}, which bootstraps
* state that can be restored by a {@code WindowOperator}. Elements are put into windows by a
* {@link WindowAssigner}. The grouping of elements is done both by key and by window.
*
* <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
* specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger} that is used if a {@code Trigger} is not specified.
*
* @param assigner
* The {@code WindowAssigner} that assigns elements to windows.
*/
public <W extends Window> WindowedOperatorTransformation<T, K, W> window(WindowAssigner<? super T, W> assigner) {
return new WindowedOperatorTransformation<>(dataSet, operatorMaxParallelism, timestamper, keySelector, keyType, assigner);
}
| 3.26 |
flink_ContinuousProcessingTimeTrigger_m0_rdh
|
/**
* Creates a trigger that continuously fires based on the given interval.
*
* @param interval
* The time interval at which to fire.
* @param <W>
* The type of {@link Window Windows} on which this trigger can operate.
 */
public static <W extends Window> ContinuousProcessingTimeTrigger<W> m0(Time interval) {
return new ContinuousProcessingTimeTrigger<>(interval.toMilliseconds());
}
| 3.26 |
flink_AbstractOneInputTransformationTranslator_translateInternal_rdh
|
/**
* A utility base class for one input {@link Transformation transformations} that provides a
* function for configuring common graph properties.
*/
abstract class AbstractOneInputTransformationTranslator<IN, OUT, OP extends Transformation<OUT>> extends SimpleTransformationTranslator<OUT, OP> {

    protected Collection<Integer> translateInternal(final Transformation<OUT> transformation, final StreamOperatorFactory<OUT> operatorFactory, final TypeInformation<IN> inputType, @Nullable final KeySelector<IN, ?> stateKeySelector, @Nullable final TypeInformation<?> stateKeyType, final Context context) {
checkNotNull(transformation);
checkNotNull(operatorFactory);
checkNotNull(inputType);
checkNotNull(context);
final StreamGraph streamGraph = context.getStreamGraph();
final String slotSharingGroup = context.getSlotSharingGroup();
final int transformationId = transformation.getId();
final ExecutionConfig executionConfig = streamGraph.getExecutionConfig();
streamGraph.addOperator(transformationId, slotSharingGroup, transformation.getCoLocationGroupKey(), operatorFactory, inputType, transformation.getOutputType(), transformation.getName());
if (stateKeySelector != null) {
TypeSerializer<?> keySerializer = stateKeyType.createSerializer(executionConfig);
streamGraph.setOneInputStateKey(transformationId, stateKeySelector, keySerializer);
}
int parallelism = (transformation.getParallelism() != ExecutionConfig.PARALLELISM_DEFAULT) ? transformation.getParallelism() : executionConfig.getParallelism();
streamGraph.setParallelism(transformationId, parallelism, transformation.isParallelismConfigured());
streamGraph.setMaxParallelism(transformationId, transformation.getMaxParallelism());
final List<Transformation<?>> parentTransformations = transformation.getInputs();
checkState(parentTransformations.size() == 1, "Expected exactly one input transformation but found " + parentTransformations.size());
for (Integer inputId : context.getStreamNodeIds(parentTransformations.get(0))) {
streamGraph.addEdge(inputId, transformationId, 0);
}
if (transformation instanceof PhysicalTransformation) {
streamGraph.setSupportsConcurrentExecutionAttempts(transformationId, ((PhysicalTransformation<OUT>) (transformation)).isSupportsConcurrentExecutionAttempts());
}
return Collections.singleton(transformationId);
}
}
| 3.26 |
flink_OverWindowPartitioned_orderBy_rdh
|
/**
* Specifies the time attribute on which rows are ordered.
*
* <p>For streaming tables, reference a rowtime or proctime time attribute here to specify the
* time mode.
*
* <p>For batch tables, refer to a timestamp or long attribute.
*
* @param orderBy
* field reference
* @return an over window with defined order
*/
public OverWindowPartitionedOrdered orderBy(Expression orderBy) {
return new OverWindowPartitionedOrdered(partitionBy, orderBy);
}
| 3.26 |
flink_LocalDateComparator_compareSerializedLocalDate_rdh
|
// Static Helpers for Date Comparison
// --------------------------------------------------------------------------------------------
public static int compareSerializedLocalDate(DataInputView firstSource, DataInputView secondSource, boolean ascendingComparison) throws IOException {
    int cmp = firstSource.readInt() - secondSource.readInt();
    if (cmp == 0) {
        cmp = firstSource.readByte() - secondSource.readByte();
        if (cmp == 0) {
            cmp = firstSource.readByte() - secondSource.readByte();
        }
}
return ascendingComparison ? cmp : -cmp;
}
| 3.26 |
flink_FunctionLookup_lookupBuiltInFunction_rdh
|
/**
* Helper method for looking up a built-in function.
*/
default ContextResolvedFunction lookupBuiltInFunction(BuiltInFunctionDefinition definition) {
    return lookupFunction(UnresolvedIdentifier.of(definition.getName())).orElseThrow(() -> new TableException(String.format("Required built-in function [%s] could not be found in any catalog.", definition.getName())));
}
| 3.26 |
flink_GettingStartedExample_eval_rdh
|
// the 'eval()' method defines input and output types (reflectively extracted)
// and contains the runtime logic
public String eval(String street, String zipCode, String city) {
    return (((normalize(street) + ", ") + normalize(zipCode)) + ", ") + normalize(city);
}
| 3.26 |
flink_ShortValueComparator_supportsSerializationWithKeyNormalization_rdh
|
// --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
}
| 3.26 |
flink_JobClient_reportHeartbeat_rdh
|
/**
* The client reports the heartbeat to the dispatcher for aliveness.
*/
default void reportHeartbeat(long expiredTimestamp) {
}
| 3.26 |
flink_JobClient_stopWithSavepoint_rdh
|
/**
 * Stops the associated job on the Flink cluster.
 *
 * <p>Stopping works only for streaming programs. Be aware that the job might continue to run
 * for a while after the stop command is sent, because once the sources have stopped emitting
 * data, all operators still need to finish processing.
*
* @param advanceToEndOfEventTime
* flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
* @param savepointDirectory
* directory the savepoint should be written to
* @return a {@link CompletableFuture} containing the path where the savepoint is located
* @deprecated pass the format explicitly
*/
@Deprecated
default CompletableFuture<String> stopWithSavepoint(boolean advanceToEndOfEventTime, @Nullable
String savepointDirectory) {
return stopWithSavepoint(advanceToEndOfEventTime, savepointDirectory, SavepointFormatType.DEFAULT);
}
| 3.26 |
flink_JobClient_triggerSavepoint_rdh
|
/**
* Triggers a savepoint for the associated job. The savepoint will be written to the given
* savepoint directory, or {@link org.apache.flink.configuration.CheckpointingOptions#SAVEPOINT_DIRECTORY} if it is null.
*
* @param savepointDirectory
* directory the savepoint should be written to
* @return a {@link CompletableFuture} containing the path where the savepoint is located
* @deprecated pass the format explicitly
*/
@Deprecated
default CompletableFuture<String> triggerSavepoint(@Nullable
String savepointDirectory) {
return triggerSavepoint(savepointDirectory, SavepointFormatType.DEFAULT);
}
| 3.26 |
flink_JobGraphJobInformation_copyJobGraph_rdh
|
/**
* Returns a copy of a jobGraph that can be mutated.
*/
public JobGraph copyJobGraph() {
    return InstantiationUtil.cloneUnchecked(jobGraph);
}
| 3.26 |
flink_PojoTestUtils_assertSerializedAsPojoWithoutKryo_rdh
|
/**
* Verifies that instances of the given class fulfill all conditions to be serialized with the
* {@link PojoSerializer}, as documented <a
* href="https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/datastream/fault-tolerance/serialization/types_serialization/#pojos">here</a>,
* without any field being serialized with Kryo.
*
* @param clazz
* class to analyze
* @param <T>
* class type
* @throws AssertionError
 * if instances of the class cannot be serialized as a POJO or require
 * Kryo for one or more fields
*/
public static <T> void assertSerializedAsPojoWithoutKryo(Class<T> clazz) throws AssertionError {
    final ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.disableGenericTypes();
    final TypeInformation<T> typeInformation = TypeInformation.of(clazz);
final TypeSerializer<T> actualSerializer;
try {
actualSerializer = typeInformation.createSerializer(executionConfig);
} catch (UnsupportedOperationException e) {
throw new AssertionError(e);
}
    assertThat(actualSerializer).withFailMessage("Instances of the class '%s' cannot be serialized as a POJO, but would use a '%s' instead. %n" + "Re-run this test with INFO logging enabled and check messages from the '%s' for possible reasons.", clazz.getSimpleName(), actualSerializer.getClass().getSimpleName(), TypeExtractor.class.getCanonicalName()).isInstanceOf(PojoSerializer.class);
}
| 3.26 |
flink_PojoTestUtils_assertSerializedAsPojo_rdh
|
/**
* Verifies that instances of the given class fulfill all conditions to be serialized with the
* {@link PojoSerializer}, as documented <a
* href="https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/datastream/fault-tolerance/serialization/types_serialization/#pojos">here</a>.
*
* <p>Note that this check will succeed even if the Pojo is partially serialized with Kryo. If
* this is not desired, use {@link #assertSerializedAsPojoWithoutKryo(Class)} instead.
*
* @param clazz
* class to analyze
* @param <T>
* class type
* @throws AssertionError
* if instances of the class cannot be serialized as a POJO
*/
public static <T> void assertSerializedAsPojo(Class<T> clazz) throws AssertionError {
final TypeInformation<T> typeInformation = TypeInformation.of(clazz);
final TypeSerializer<T> actualSerializer = typeInformation.createSerializer(new ExecutionConfig());
assertThat(actualSerializer).withFailMessage("Instances of the class '%s' cannot be serialized as a POJO, but would use a '%s' instead. %n" + "Re-run this test with INFO logging enabled and check messages from the '%s' for possible reasons.", clazz.getSimpleName(), actualSerializer.getClass().getSimpleName(), TypeExtractor.class.getCanonicalName()).isInstanceOf(PojoSerializer.class);
}
| 3.26 |
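A minimal test sketch for the two assertions above. The WordCount POJO and the JUnit test method are made-up examples; PojoTestUtils is the class named in the row names:

    public class WordCount {
        public String word;
        public long count;
        public WordCount() {}
    }

    @Test
    void wordCountIsSerializedAsPojo() {
        PojoTestUtils.assertSerializedAsPojo(WordCount.class);
        // stricter variant that also forbids a Kryo fallback for individual fields:
        PojoTestUtils.assertSerializedAsPojoWithoutKryo(WordCount.class);
    }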
flink_BinaryRowDataSerializer_getSerializedRowFixedPartLength_rdh
|
/**
* Return fixed part length to serialize one row.
*/
public int getSerializedRowFixedPartLength() {
return getFixedLengthPartSize() + LENGTH_SIZE_IN_BYTES;
}
| 3.26 |
flink_BinaryRowDataSerializer_copyFromPagesToView_rdh
|
/**
* Copy a binaryRow which stored in paged input view to output view.
*
* @param source
* source paged input view where the binary row stored
* @param target
* the target output view.
*/
public void copyFromPagesToView(AbstractPagedInputView source, DataOutputView target) throws IOException {
checkSkipReadForFixLengthPart(source);
int length = source.readInt();
    target.writeInt(length);
    target.write(source, length);
}
| 3.26 |
flink_BinaryRowDataSerializer_serializeToPages_rdh
|
// ============================ Page related operations ===================================
@Override
public int serializeToPages(BinaryRowData record, AbstractPagedOutputView headerLessView) throws IOException {
checkArgument(headerLessView.getHeaderLength() == 0);
int skip = checkSkipWriteForFixLengthPart(headerLessView);
headerLessView.writeInt(record.getSizeInBytes());
serializeWithoutLength(record, headerLessView);
return skip;
}
| 3.26 |
flink_BinaryRowDataSerializer_checkSkipWriteForFixLengthPart_rdh
|
/**
 * We need to skip bytes when the remaining bytes of the current segment are not enough to
 * hold the binary row's fixed-length part. See {@link BinaryRowData}.
*/
private int checkSkipWriteForFixLengthPart(AbstractPagedOutputView out) throws IOException {
// skip if there is no enough size.
int v22 = out.getSegmentSize() - out.getCurrentPositionInSegment();
if (v22 < getSerializedRowFixedPartLength()) {
out.advance();
        return v22;
    }
return 0;
}
| 3.26 |
flink_BinaryRowDataSerializer_pointTo_rdh
|
/**
* Point row to memory segments with offset(in the AbstractPagedInputView) and length.
*
* @param length
* row length.
* @param reuse
* reuse BinaryRowData object.
* @param headerLessView
* source memory segments container.
*/
public void pointTo(int length, BinaryRowData reuse, AbstractPagedInputView headerLessView) throws IOException {
checkArgument(headerLessView.getHeaderLength() == 0);
if (length < 0) {
throw new IOException(String.format("Read unexpected bytes in source of positionInSegment[%d] and limitInSegment[%d]",
headerLessView.getCurrentPositionInSegment(), headerLessView.getCurrentSegmentLimit()));
}
int remainInSegment = headerLessView.getCurrentSegmentLimit() - headerLessView.getCurrentPositionInSegment();
MemorySegment currSeg = headerLessView.getCurrentSegment();
int currPosInSeg = headerLessView.getCurrentPositionInSegment();
if (remainInSegment >= length) {
// all in one segment, that's good.
reuse.pointTo(currSeg, currPosInSeg, length);
        headerLessView.skipBytesToRead(length);
    } else {
pointToMultiSegments(reuse, headerLessView, length, length - remainInSegment, currSeg, currPosInSeg);
}
}
| 3.26 |
flink_BinaryRowDataSerializer_checkSkipReadForFixLengthPart_rdh
|
/**
 * We need to skip bytes when the remaining bytes of the current segment are not enough to
 * hold the binary row's fixed-length part. See {@link BinaryRowData}.
*/
public void checkSkipReadForFixLengthPart(AbstractPagedInputView source) throws IOException {
// skip if there is no enough size.
// Note: Use currentSegmentLimit instead of segmentSize.
int available = source.getCurrentSegmentLimit() - source.getCurrentPositionInSegment();
if (available < getSerializedRowFixedPartLength()) {
source.advance();
}
}
| 3.26 |
flink_CopyOnWriteStateTable_stateSnapshot_rdh
|
// Snapshotting
// ----------------------------------------------------------------------------------------------------
/**
* Creates a snapshot of this {@link CopyOnWriteStateTable}, to be written in checkpointing.
*
* @return a snapshot from this {@link CopyOnWriteStateTable}, for checkpointing.
*/
@Nonnull
@Override
public CopyOnWriteStateTableSnapshot<K, N, S> stateSnapshot() {
    return new CopyOnWriteStateTableSnapshot<>(this, getKeySerializer().duplicate(), getNamespaceSerializer().duplicate(), getStateSerializer().duplicate(), getMetaInfo().getStateSnapshotTransformFactory().createForDeserializedState().orElse(null));
}
| 3.26 |
flink_PythonConfig_getLocalTimeZone_rdh
|
/**
* Returns the current session time zone id. It is used when converting to/from {@code TIMESTAMP
* WITH LOCAL TIME ZONE}.
*
* @see org.apache.flink.table.types.logical.LocalZonedTimestampType
*/
private static ZoneId getLocalTimeZone(ReadableConfig config) {
    String v3 = config.get(TableConfigOptions.LOCAL_TIME_ZONE);
    return TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(v3) ? ZoneId.systemDefault() : ZoneId.of(v3);
}
| 3.26 |
flink_GeneratorFunction_close_rdh
|
/**
* Tear-down method for the function.
*/
default void close() throws Exception {
}
| 3.26 |
flink_BulkDecodingFormat_applyFilters_rdh
|
/**
* Provides a list of filters in conjunctive form for filtering on a best-effort basis.
*/
default void applyFilters(List<ResolvedExpression> filters) {
}
| 3.26 |
flink_ConfigurationParserUtils_loadCommonConfiguration_rdh
|
/**
* Generate configuration from only the config file and dynamic properties.
*
* @param args
* the commandline arguments
* @param cmdLineSyntax
* the syntax for this application
* @return generated configuration
* @throws FlinkParseException
* if the configuration cannot be generated
*/
public static Configuration loadCommonConfiguration(String[] args, String cmdLineSyntax) throws FlinkParseException {
final CommandLineParser<ClusterConfiguration> commandLineParser = new CommandLineParser<>(new ClusterConfigurationParserFactory());
final ClusterConfiguration clusterConfiguration;
try {
clusterConfiguration = commandLineParser.parse(args);
} catch (FlinkParseException e) {
LOG.error("Could not parse the command line options.", e);
commandLineParser.printHelp(cmdLineSyntax);
throw e;
}
final Configuration dynamicProperties = ConfigurationUtils.createConfiguration(clusterConfiguration.getDynamicProperties());
return GlobalConfiguration.loadConfiguration(clusterConfiguration.getConfigDir(), dynamicProperties);
}
| 3.26 |
flink_ConfigurationParserUtils_getPageSize_rdh
|
/**
* Parses the configuration to get the page size and validates the value.
*
* @param configuration
* configuration object
* @return size of memory segment
*/
public static int getPageSize(Configuration configuration) {
final int pageSize = checkedDownCast(configuration.get(TaskManagerOptions.MEMORY_SEGMENT_SIZE).getBytes());
    // check page size for minimum size
checkConfigParameter(pageSize >= MemoryManager.MIN_PAGE_SIZE, pageSize,
TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(), "Minimum memory segment size is " + MemoryManager.MIN_PAGE_SIZE);
// check page size for power of two
checkConfigParameter(MathUtils.isPowerOf2(pageSize), pageSize, TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(), "Memory segment size must be a power of 2.");
return pageSize;
}
| 3.26 |
flink_ConfigurationParserUtils_checkConfigParameter_rdh
|
/**
* Validates a condition for a config parameter and displays a standard exception, if the
* condition does not hold.
*
* @param condition
* The condition that must hold. If the condition is false, an exception is
* thrown.
* @param parameter
* The parameter value. Will be shown in the exception message.
* @param name
* The name of the config parameter. Will be shown in the exception message.
* @param errorMessage
* The optional custom error message to append to the exception message.
* @throws IllegalConfigurationException
* if the condition does not hold
*/
public static void checkConfigParameter(boolean condition, Object parameter, String name, String errorMessage)
throws IllegalConfigurationException {
if (!condition) {
throw new IllegalConfigurationException((((("Invalid configuration value for " + name) + " : ") +
parameter) + " - ") + errorMessage);
}
}
| 3.26 |
flink_FileSystemCheckpointStorage_getMinFileSizeThreshold_rdh
|
/**
* Gets the threshold below which state is stored as part of the metadata, rather than in files.
* This threshold ensures that the backend does not create a large amount of very small files,
* where potentially the file pointers are larger than the state itself.
*
* <p>If not explicitly configured, this is the default value of {@link CheckpointingOptions#FS_SMALL_FILE_THRESHOLD}.
*
* @return The file size threshold, in bytes.
*/
public int getMinFileSizeThreshold() {
return fileStateThreshold >= 0 ? fileStateThreshold : MathUtils.checkedDownCast(FS_SMALL_FILE_THRESHOLD.defaultValue().getBytes());
}
| 3.26 |
flink_FileSystemCheckpointStorage_getSavepointPath_rdh
|
/**
*
* @return The default location where savepoints will be externalized if set.
*/
@Nullable
public Path getSavepointPath() {
return location.getBaseSavepointPath();
}
| 3.26 |
flink_FileSystemCheckpointStorage_getCheckpointPath_rdh
|
/**
* Gets the base directory where all the checkpoints are stored. The job-specific checkpoint
* directory is created inside this directory.
*
* @return The base directory for checkpoints.
*/
@Nonnull
public Path getCheckpointPath() {
// we know that this can never be null by the way of constructor checks
// noinspection ConstantConditions
return location.getBaseCheckpointPath();
}
| 3.26 |
flink_FileSystemCheckpointStorage_m0_rdh
|
/**
* Gets the write buffer size for created checkpoint stream.
*
* <p>If not explicitly configured, this is the default value of {@link CheckpointingOptions#FS_WRITE_BUFFER_SIZE}.
*
* @return The write buffer size, in bytes.
*/
public int m0() {
return writeBufferSize >= 0 ? writeBufferSize : CheckpointingOptions.FS_WRITE_BUFFER_SIZE.defaultValue();
}
| 3.26 |
flink_FileSystemCheckpointStorage_createFromConfig_rdh
|
/**
* Creates a new {@link FileSystemCheckpointStorage} using the given configuration.
*
* @param config
* The Flink configuration (loaded by the TaskManager).
* @param classLoader
* The class loader that should be used to load the checkpoint storage.
* @return The created checkpoint storage.
* @throws IllegalConfigurationException
* If the configuration misses critical values, or
* specifies invalid values
*/
public static FileSystemCheckpointStorage createFromConfig(ReadableConfig config, ClassLoader classLoader) throws IllegalConfigurationException {
// we need to explicitly read the checkpoint directory here, because that
// is a required constructor parameter
    final String checkpointDir = config.get(CheckpointingOptions.CHECKPOINTS_DIRECTORY);
if (checkpointDir == null) {
throw new IllegalConfigurationException((("Cannot create the file system state backend: The configuration does not specify the "
+ "checkpoint directory '") + CheckpointingOptions.CHECKPOINTS_DIRECTORY.key()) + '\'');
}
try {
return new FileSystemCheckpointStorage(checkpointDir).configure(config, classLoader);
} catch (IllegalArgumentException e) {
throw new IllegalConfigurationException("Invalid configuration for the state backend", e);
}
}
| 3.26 |
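A minimal sketch of the configuration that createFromConfig above expects; the directory URI and the choice of class loader are illustrative assumptions:

    Configuration config = new Configuration();
    config.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "file:///tmp/flink-checkpoints");
    FileSystemCheckpointStorage storage =
            FileSystemCheckpointStorage.createFromConfig(config, Thread.currentThread().getContextClassLoader());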
flink_FlinkMetricContainer_updateMetrics_rdh
|
/**
* Update Flink's internal metrics ({@link #flinkCounterCache}) with the latest metrics for a
* given step.
*/
private void updateMetrics(String stepName) {
MetricResults metricResults = asAttemptedOnlyMetricResults(metricsContainers);
MetricQueryResults metricQueryResults = metricResults.queryMetrics(MetricsFilter.builder().addStep(stepName).build());
updateCounterOrMeter(metricQueryResults.getCounters());
updateDistributions(metricQueryResults.getDistributions());
updateGauge(metricQueryResults.getGauges());
}
| 3.26 |
flink_BackendRestorerProcedure_createAndRestore_rdh
|
/**
* Creates a new state backend and restores it from the provided set of state snapshot
* alternatives.
*
* @param restoreOptions
* list of prioritized state snapshot alternatives for recovery.
* @return the created (and restored) state backend.
* @throws Exception
* if the backend could not be created or restored.
*/
@Nonnull
public T createAndRestore(@Nonnull List<? extends Collection<S>> restoreOptions) throws Exception {
if (restoreOptions.isEmpty()) {
        restoreOptions = Collections.singletonList(Collections.emptyList());
    }
int alternativeIdx = 0;
Exception collectedException = null;
while (alternativeIdx < restoreOptions.size()) {
Collection<S> restoreState = restoreOptions.get(alternativeIdx);
++alternativeIdx;
// IMPORTANT: please be careful when modifying the log statements because they are used
// for validation in
// the automatic end-to-end tests. Those tests might fail if they are not aligned with
// the log message!
if (restoreState.isEmpty()) {
LOG.debug("Creating {} with empty state.", logDescription);
} else if (LOG.isTraceEnabled()) {
LOG.trace("Creating {} and restoring with state {} from alternative ({}/{}).", logDescription, restoreState, alternativeIdx, restoreOptions.size());
} else {
LOG.debug("Creating {} and restoring with state from alternative ({}/{}).", logDescription, alternativeIdx, restoreOptions.size());
}
try {
return attemptCreateAndRestore(restoreState);
} catch (Exception ex) {
collectedException = ExceptionUtils.firstOrSuppressed(ex, collectedException);
if (backendCloseableRegistry.isClosed()) {
throw new FlinkException("Stopping restore attempts for already cancelled task.", collectedException);
}
LOG.warn("Exception while restoring {} from alternative ({}/{}), will retry while more " + "alternatives are available.", logDescription, alternativeIdx, restoreOptions.size(), ex);
}
}
throw new FlinkException(((("Could not restore " + logDescription) + " from any of the ") + restoreOptions.size()) + " provided restore options.", collectedException);
}
| 3.26 |
flink_AbstractCachedBuildSideJoinDriver_isInputResettable_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public boolean isInputResettable(int inputNum) {
if ((inputNum < 0) || (inputNum > 1)) {
throw new IndexOutOfBoundsException();
    }
    return inputNum == buildSideIndex;
}
| 3.26 |
flink_JsonFormatOptionsUtil_getMapNullKeyMode_rdh
|
/**
* Creates handling mode for null key map data.
*
* <p>See {@link #JSON_MAP_NULL_KEY_MODE_FAIL}, {@link #JSON_MAP_NULL_KEY_MODE_DROP}, and {@link #JSON_MAP_NULL_KEY_MODE_LITERAL} for more information.
*/
public static MapNullKeyMode getMapNullKeyMode(ReadableConfig config) {
    String mapNullKeyMode = config.get(MAP_NULL_KEY_MODE);
    switch (mapNullKeyMode.toUpperCase()) {
case JSON_MAP_NULL_KEY_MODE_FAIL :
return MapNullKeyMode.FAIL;
case JSON_MAP_NULL_KEY_MODE_DROP :
            return MapNullKeyMode.DROP;
        case JSON_MAP_NULL_KEY_MODE_LITERAL :
return MapNullKeyMode.LITERAL;
default :
throw new TableException(String.format("Unsupported map null key handling mode '%s'. Validator should have checked that.", mapNullKeyMode));
}
}
| 3.26 |
flink_JsonFormatOptionsUtil_getTimestampFormat_rdh
|
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
public static TimestampFormat getTimestampFormat(ReadableConfig config) {
String v0 = config.get(TIMESTAMP_FORMAT);
switch (v0) {
case SQL :
return TimestampFormat.SQL;
case ISO_8601 :
return TimestampFormat.ISO_8601;
default :
throw new TableException(String.format("Unsupported timestamp format '%s'. Validator should have checked that.", v0));
}
}
| 3.26 |
flink_JsonFormatOptionsUtil_validateTimestampFormat_rdh
|
/**
 * Validates the timestamp format, whose value should be SQL or ISO-8601.
 */
static void validateTimestampFormat(ReadableConfig tableOptions) {
    String timestampFormat = tableOptions.get(TIMESTAMP_FORMAT);
    if (!TIMESTAMP_FORMAT_ENUM.contains(timestampFormat)) {
throw new ValidationException(String.format("Unsupported value '%s' for %s. Supported values are [SQL, ISO-8601].", timestampFormat, TIMESTAMP_FORMAT.key()));
}
}
| 3.26 |
flink_JsonFormatOptionsUtil_validateDecodingFormatOptions_rdh
|
// --------------------------------------------------------------------------------------------
// Validation
// --------------------------------------------------------------------------------------------
/**
* Validator for json decoding format.
*/
public static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
boolean failOnMissingField = tableOptions.get(FAIL_ON_MISSING_FIELD);
boolean ignoreParseErrors = tableOptions.get(IGNORE_PARSE_ERRORS);
if (ignoreParseErrors && failOnMissingField) {
        throw new ValidationException(((FAIL_ON_MISSING_FIELD.key() + " and ") + IGNORE_PARSE_ERRORS.key()) + " shouldn't both be true.");
    }
    validateTimestampFormat(tableOptions);
}
| 3.26 |
flink_JsonFormatOptionsUtil_validateEncodingFormatOptions_rdh
|
/**
* Validator for json encoding format.
*/
public static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
// validator for {@link MAP_NULL_KEY_MODE}
    Set<String> nullKeyModes = Arrays.stream(JsonFormatOptions.MapNullKeyMode.values()).map(Objects::toString).collect(Collectors.toSet());
    if (!nullKeyModes.contains(tableOptions.get(MAP_NULL_KEY_MODE).toUpperCase())) {
throw new ValidationException(String.format("Unsupported value '%s' for option %s. Supported values are %s.", tableOptions.get(MAP_NULL_KEY_MODE), MAP_NULL_KEY_MODE.key(), nullKeyModes));
}
validateTimestampFormat(tableOptions);
}
| 3.26 |
flink_NodeId_m0_rdh
|
// ------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<NodeId> m0() {
return new NodeIdSerializerSnapshot(this);
}
| 3.26 |
flink_NodeId_readObject_rdh
|
// ------------------------------------------------------------------------
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
    if (eventIdSerializer == null) {
        // the nested serializer will be null if this was read from a savepoint taken with
        // versions lower than Flink 1.7; in this case, we explicitly create an instance for
        // the nested serializer.
this.eventIdSerializer = EventIdSerializer.INSTANCE;
}
}
| 3.26 |
flink_BoundedOutOfOrdernessWatermarks_onEvent_rdh
|
// ------------------------------------------------------------------------
@Override
public void onEvent(T event, long eventTimestamp, WatermarkOutput output) {
    maxTimestamp = Math.max(maxTimestamp, eventTimestamp);
}
| 3.26 |
flink_ClockService_of_rdh
|
/**
* Creates a {@link ClockService} from the given {@link InternalTimerService}.
*/
static ClockService of(InternalTimerService<?> timerService) {
return timerService::currentProcessingTime;
}
| 3.26 |
flink_ClockService_m0_rdh
|
/**
* Creates a {@link ClockService} which assigns as current processing time the result of calling
* {@link System#currentTimeMillis()}.
*/
static ClockService m0() {
    return System::currentTimeMillis;
}
| 3.26 |
flink_CheckedThread_sync_rdh
|
/**
* Waits with timeout until the thread is completed and checks whether any error occurred during
* the execution. In case of timeout an {@link Exception} is thrown.
*
* <p>This method blocks like {@link #join()}, but performs an additional check for exceptions
* thrown from the {@link #go()} method.
*/
public void sync(long timeout) throws Exception {
trySync(timeout);
checkFinished();
}
| 3.26 |
flink_CheckedThread_run_rdh
|
// ------------------------------------------------------------------------
/**
* This method is final - thread work should go into the {@link #go()} method instead.
*/
@Override
public final void run() {
try {
go();
} catch (Throwable t) {
        error = t;
}
}
| 3.26 |
flink_CheckedThread_trySync_rdh
|
/**
* Waits with timeout until the thread is completed and checks whether any error occurred during
* the execution.
*
* <p>This method blocks like {@link #join()}, but performs an additional check for exceptions
* thrown from the {@link #go()} method.
*/
public void trySync(long timeout) throws Exception {
join(timeout);
checkError();
}
| 3.26 |
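A short sketch of how the CheckedThread methods above are typically used; the body of go() is a made-up example:

    CheckedThread worker = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // work that may throw; any exception is captured by run() and rethrown by sync()
            Thread.sleep(10);
        }
    };
    worker.start();
    worker.sync(1000);  // rethrows exceptions from go(), fails if the thread is not done in time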
flink_TableConnectorUtils_generateRuntimeName_rdh
|
/**
* Returns the table connector name used for logging and web UI.
 */
public static String generateRuntimeName(Class<?> clazz, String[] fields) {
String className = clazz.getSimpleName();
if (null == fields) {
return className + "(*)";
} else {
return ((className + "(") + String.join(", ", fields)) + ")";
}
}
| 3.26 |
flink_HadoopBlockLocation_stripHostname_rdh
|
// ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
/**
* Looks for a domain suffix in a FQDN and strips it if present.
*
* @param originalHostname
* the original hostname, possibly an FQDN
* @return the stripped hostname without the domain suffix
*/
private static String stripHostname(final String originalHostname) {
    // Check if the hostname contains the domain separator character
final int index = originalHostname.indexOf(DOMAIN_SEPARATOR);
if (index == (-1)) {
return originalHostname;
}
// Make sure we are not stripping an IPv4 address
    final Matcher matcher = IPV4_PATTERN.matcher(originalHostname);
if (matcher.matches()) {
return originalHostname;
}
if (index == 0) {
throw new IllegalStateException((("Hostname " + originalHostname) + " starts with a ") + DOMAIN_SEPARATOR);
}
return originalHostname.substring(0, index);
}
| 3.26 |
flink_Either_setValue_rdh
|
/**
* Sets the encapsulated value to another value
*
* @param value
* the new value of the encapsulated value
*/
public void setValue(R value) {
this.value = value;
}
| 3.26 |
flink_Either_m0_rdh
|
/**
* Creates a left value of {@link Either}
*/
public static <L, R> Left<L, R> m0(L left) {
return new Left<L, R>(left);
}
| 3.26 |
flink_Either_obtainLeft_rdh
|
/**
* Utility function for {@link EitherSerializer} to support object reuse.
*
* <p>To support object reuse both subclasses of Either contain a reference to an instance of
* the other type. This method provides access to and initializes the cross-reference.
*
* @param input
* container for Left or Right value
* @param leftSerializer
* for creating an instance of the left type
* @param <L>
* the type of Left
* @param <R>
* the type of Right
* @return input if Left type else input's Left reference
*/
@Internal
public static <L, R> Left<L, R> obtainLeft(Either<L, R> input, TypeSerializer<L> leftSerializer) {
if (input.isLeft()) {
return ((Left<L, R>) (input));
} else {
Right<L, R> right = ((Right<L, R>) (input));
        if (right.left == null) {
            right.left = Left.m0(leftSerializer.createInstance());
            right.left.right = right;
        }
        return right.left;
}
}
/**
* Utility function for {@link EitherSerializer}
| 3.26 |
flink_Either_Left_rdh
|
/**
* Create a Left value of Either
*/
public static <L, R> Either<L, R> Left(L value) {
return new Left<L, R>(value);
}
| 3.26 |
flink_Either_Right_rdh
|
/**
* Create a Right value of Either
*/
public static <L, R> Either<L, R> Right(R value) {
return new Right<L, R>(value);
}
| 3.26 |
flink_Either_isLeft_rdh
|
/**
*
* @return true if this is a Left value, false if this is a Right value
*/
public final boolean isLeft() {
    return getClass() == Either.Left.class;
}
| 3.26 |
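A brief sketch of constructing and inspecting Either values with the factory methods shown above; the String/Long payloads are made-up examples, and left()/right() are assumed to be the usual accessors of Flink's Either type:

    Either<String, Long> result = Either.Right(42L);
    if (result.isLeft()) {
        System.out.println("error: " + result.left());
    } else {
        System.out.println("value: " + result.right());
    }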
flink_KeyGroupRangeOffsets_setKeyGroupOffset_rdh
|
/**
* Sets the offset for the given key-group. The key-group must be contained in the range.
*
* @param keyGroup
* Key-group for which we set the offset. Must be contained in the range.
* @param offset
* Offset for the key-group.
*/
public void setKeyGroupOffset(int keyGroup, long offset) {
offsets[computeKeyGroupIndex(keyGroup)] = offset;
}
| 3.26 |
flink_KeyGroupRangeOffsets_getKeyGroupOffset_rdh
|
/**
* Returns the offset for the given key-group. The key-group must be contained in the range.
*
* @param keyGroup
* Key-group for which we query the offset. Key-group must be contained in the
* range.
* @return The offset for the given key-group which must be contained in the range.
*/
public long getKeyGroupOffset(int keyGroup) {
return offsets[computeKeyGroupIndex(keyGroup)];
}
| 3.26 |
flink_TimestampStringUtils_toLocalDateTime_rdh
|
/**
* Convert a calcite's {@link TimestampString} to a {@link LocalDateTime}.
*/
public static LocalDateTime toLocalDateTime(TimestampString timestampString) {
    final String v = timestampString.toString();
    final int year = Integer.parseInt(v.substring(0, 4));
final int month = Integer.parseInt(v.substring(5, 7));
final int day = Integer.parseInt(v.substring(8, 10));
final int h = Integer.parseInt(v.substring(11, 13));
final int m = Integer.parseInt(v.substring(14, 16));
final int s = Integer.parseInt(v.substring(17, 19));
final int nano = getNanosInSecond(v);
return LocalDateTime.of(year, month, day, h, m, s, nano);
}
| 3.26 |
flink_TimestampStringUtils_fromLocalDateTime_rdh
|
/**
* Convert a {@link LocalDateTime} to a calcite's {@link TimestampString}.
*/
public static TimestampString fromLocalDateTime(LocalDateTime ldt) {
return new TimestampString(ldt.getYear(), ldt.getMonthValue(), ldt.getDayOfMonth(), ldt.getHour(), ldt.getMinute(), ldt.getSecond()).withNanos(ldt.getNano());
}
| 3.26 |
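A round-trip sketch for the two conversions above; the timestamp value is a made-up example:

    LocalDateTime original = LocalDateTime.of(2023, 5, 17, 10, 30, 45, 123_000_000);
    TimestampString calciteTs = TimestampStringUtils.fromLocalDateTime(original);
    LocalDateTime restored = TimestampStringUtils.toLocalDateTime(calciteTs);
    // restored is expected to equal original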
flink_KryoUtils_applyRegistrations_rdh
|
/**
* Apply a list of {@link KryoRegistration} to a Kryo instance. The list of registrations is
* assumed to already be a final resolution of all possible registration overwrites.
*
* <p>The registrations are applied in the given order and always specify the registration id,
* using the given {@code firstRegistrationId} and incrementing it for each registration.
*
* @param kryo
* the Kryo instance to apply the registrations
* @param resolvedRegistrations
* the registrations, which should already be resolved of all
* possible registration overwrites
* @param firstRegistrationId
* the first registration id to use
*/
public static void applyRegistrations(Kryo kryo, Collection<KryoRegistration> resolvedRegistrations, int firstRegistrationId) {
int v2 = firstRegistrationId;
Serializer<?> serializer;
for (KryoRegistration registration : resolvedRegistrations) {
serializer = registration.getSerializer(kryo);
if (serializer != null) {
kryo.register(registration.getRegisteredClass(), serializer, v2);
} else {
kryo.register(registration.getRegisteredClass(), v2);
}
// if Kryo already had a serializer for that type then it ignores the registration
        if (kryo.getRegistration(v2) != null) {
v2++;
        }
    }
}
| 3.26 |
flink_KryoUtils_m0_rdh
|
/**
* Tries to copy the given record from using the provided Kryo instance. If this fails, then the
* record from is copied by serializing it into a byte buffer and deserializing it from there.
*
* @param from
* Element to copy
* @param kryo
* Kryo instance to use
* @param serializer
* TypeSerializer which is used in case of a Kryo failure
* @param <T>
* Type of the element to be copied
* @return Copied element
*/
public static <T> T m0(T from, Kryo kryo, TypeSerializer<T> serializer) {
try {
return kryo.copy(from);
} catch (KryoException ke) {
// Kryo could not copy the object --> try to serialize/deserialize the object
try {
byte[] byteArray = InstantiationUtil.serializeToByteArray(serializer, from);
return InstantiationUtil.deserializeFromByteArray(serializer, byteArray);
} catch (IOException ioe) {
throw new RuntimeException("Could not copy object by serializing/deserializing" + " it.", ioe);
}
}
}
| 3.26 |
flink_NettyShuffleMaster_computeShuffleMemorySizeForTask_rdh
|
/**
* JM announces network memory requirement from the calculating result of this method. Please
* note that the calculating algorithm depends on both I/O details of a vertex and network
* configuration, e.g. {@link NettyShuffleEnvironmentOptions#NETWORK_BUFFERS_PER_CHANNEL} and
* {@link NettyShuffleEnvironmentOptions#NETWORK_EXTRA_BUFFERS_PER_GATE}, which means we should
* always keep the consistency of configurations between JM, RM and TM in fine-grained resource
* management, thus to guarantee that the processes of memory announcing and allocating respect
* each other.
*/
@Override
public MemorySize computeShuffleMemorySizeForTask(TaskInputsOutputsDescriptor desc) {
checkNotNull(desc);
int numRequiredNetworkBuffers = NettyShuffleUtils.computeNetworkBuffersForAnnouncing(buffersPerInputChannel, floatingBuffersPerGate, maxRequiredBuffersPerGate, sortShuffleMinParallelism, sortShuffleMinBuffers, desc.getInputChannelNums(), desc.getPartitionReuseCount(), desc.getSubpartitionNums(), desc.getInputPartitionTypes(), desc.getPartitionTypes());
return new MemorySize(((long) (networkBufferSize)) * numRequiredNetworkBuffers);
}
| 3.26 |
flink_StatePathExtractor_getStateFilePathFromStreamStateHandle_rdh
|
/**
* This method recursively looks for the contained {@link FileStateHandle}s in a given {@link StreamStateHandle}.
*
* @param handle
* the {@code StreamStateHandle} to check for a contained {@code FileStateHandle}
* @return the file path if the given {@code StreamStateHandle} contains a {@code FileStateHandle} object, null otherwise
*/
@Nullable
private Path getStateFilePathFromStreamStateHandle(StreamStateHandle handle) {
if (handle instanceof FileStateHandle) {
return ((FileStateHandle) (handle)).getFilePath();
} else if (handle instanceof OperatorStateHandle) {
return getStateFilePathFromStreamStateHandle(((OperatorStateHandle) (handle)).getDelegateStateHandle());
} else if (handle instanceof KeyedStateHandle) {
        if (handle instanceof KeyGroupsStateHandle) {
return getStateFilePathFromStreamStateHandle(((KeyGroupsStateHandle) (handle)).getDelegateStateHandle());
}
        // other KeyedStateHandles either do not contain a FileStateHandle, or are not part of
        // a savepoint
}
return null;
}
| 3.26 |
flink_SavepointLoader_loadSavepointMetadata_rdh
|
/**
* Takes the given string (representing a pointer to a checkpoint) and resolves it to a file
* status for the checkpoint's metadata file.
*
* <p>This should only be used when the user code class loader is the current classloader for
* the thread.
*
* @param savepointPath
* The path to an external savepoint.
* @return A state handle to savepoint's metadata.
* @throws IOException
* Thrown, if the path cannot be resolved, the file system not accessed, or
* the path points to a location that does not seem to be a savepoint.
 */
public static CheckpointMetadata loadSavepointMetadata(String savepointPath) throws IOException {
CompletedCheckpointStorageLocation location = AbstractFsCheckpointStorageAccess.resolveCheckpointPointer(savepointPath);
try (DataInputStream stream = new DataInputStream(location.getMetadataHandle().openInputStream())) {
return Checkpoints.loadCheckpointMetadata(stream, Thread.currentThread().getContextClassLoader(), savepointPath);
}
}
| 3.26 |
flink_InputFormatTableSource_isBounded_rdh
|
/**
* Always returns true which indicates this is a bounded source.
*/
@Override
public final boolean isBounded() {
return true;
}
| 3.26 |
flink_DefaultJobGraphStore_localCleanupAsync_rdh
|
/**
* Releases the locks on the specified {@link JobGraph}.
*
* <p>Releasing the locks allows that another instance can delete the job from the {@link JobGraphStore}.
*
* @param jobId
* specifying the job to release the locks for
* @param executor
* the executor being used for the asynchronous execution of the local cleanup.
 * @return The cleanup result future.
*/
@Override
public CompletableFuture<Void> localCleanupAsync(JobID jobId, Executor executor) {
checkNotNull(jobId, "Job ID");
return runAsyncWithLockAssertRunning(() -> {
LOG.debug("Releasing job graph {} from {}.", jobId, jobGraphStateHandleStore);jobGraphStateHandleStore.release(jobGraphStoreUtil.jobIDToName(jobId));
addedJobGraphs.remove(jobId);
LOG.info("Released job graph {} from {}.", jobId, jobGraphStateHandleStore);
}, executor);
}
| 3.26 |
flink_DefaultJobGraphStore_verifyIsRunning_rdh
|
/**
* Verifies that the state is running.
*/
private void verifyIsRunning() {
checkState(running, "Not running. Forgot to call start()?");
}
| 3.26 |
flink_WriteSinkFunction_invoke_rdh
|
/**
* Implementation of the invoke method of the SinkFunction class. Collects the incoming tuples
* in tupleList and appends the list to the end of the target file if updateCondition() is true
* or the current tuple is the endTuple.
*/
@Override
public void invoke(IN tuple) {
tupleList.add(tuple);
if (updateCondition()) {
format.write(path, tupleList);
resetParameters();
}
}
| 3.26 |
flink_WriteSinkFunction_cleanFile_rdh
|
/**
* Creates target file if it does not exist, cleans it if it exists.
*
* @param path
* is the path to the location where the tuples are written
*/
protected void cleanFile(String path) {
try {
PrintWriter writer;
writer = new PrintWriter(path);
writer.print("");
writer.close();
    } catch (FileNotFoundException e) {
throw new RuntimeException("An error occurred while cleaning the file: " + e.getMessage(), e);
}
}
| 3.26 |
flink_QueryableStateConfiguration_getProxyPortRange_rdh
|
// ------------------------------------------------------------------------
/**
* Returns the port range where the queryable state client proxy can listen. See {@link org.apache.flink.configuration.QueryableStateOptions#PROXY_PORT_RANGE
* QueryableStateOptions.PROXY_PORT_RANGE}.
*/
public Iterator<Integer> getProxyPortRange() {
return proxyPortRange;
}
| 3.26 |
flink_QueryableStateConfiguration_m0_rdh
|
/**
* Returns the number of query threads for the queryable state client proxy.
*/
public int m0() {
return numPQueryThreads;
}
| 3.26 |
flink_QueryableStateConfiguration_fromConfiguration_rdh
|
/**
* Creates the {@link QueryableStateConfiguration} from the given Configuration.
*/
public static QueryableStateConfiguration fromConfiguration(Configuration config) {
if (!config.getBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER)) {
return null;
}
final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString(config.getString(QueryableStateOptions.PROXY_PORT_RANGE));
final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString(config.getString(QueryableStateOptions.SERVER_PORT_RANGE));
final int numProxyServerNetworkThreads = config.getInteger(QueryableStateOptions.PROXY_NETWORK_THREADS);
final int numProxyServerQueryThreads = config.getInteger(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS);
final int numStateServerNetworkThreads = config.getInteger(QueryableStateOptions.SERVER_NETWORK_THREADS);
final int numStateServerQueryThreads = config.getInteger(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS);
return new QueryableStateConfiguration(proxyPorts, serverPorts, numProxyServerNetworkThreads, numProxyServerQueryThreads, numStateServerNetworkThreads, numStateServerQueryThreads);
}
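// Hedged usage sketch (the port range value is hypothetical): enable the proxy server in a
// Configuration and derive the queryable state settings from it; fromConfiguration returns
// null when the proxy server is not enabled.
Configuration config = new Configuration();
config.setBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER, true);
config.setString(QueryableStateOptions.PROXY_PORT_RANGE, "9069-9075");
QueryableStateConfiguration qsConfig = QueryableStateConfiguration.fromConfiguration(config);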
| 3.26 |
flink_QueryableStateConfiguration_disabled_rdh
|
// ------------------------------------------------------------------------
/**
* Gets the configuration describing the queryable state as deactivated.
*/
public static QueryableStateConfiguration disabled() {
final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString(QueryableStateOptions.PROXY_PORT_RANGE.defaultValue());
final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString(QueryableStateOptions.SERVER_PORT_RANGE.defaultValue());
return new QueryableStateConfiguration(proxyPorts, serverPorts, 0, 0, 0, 0);
}
| 3.26 |
flink_QueryableStateConfiguration_getStateServerPortRange_rdh
|
/**
* Returns the port range where the queryable state server can listen. See {@link org.apache.flink.configuration.QueryableStateOptions#SERVER_PORT_RANGE
* QueryableStateOptions.SERVER_PORT_RANGE}.
*/
public Iterator<Integer> getStateServerPortRange() {
return qserverPortRange;
}
| 3.26 |
flink_QueryableStateConfiguration_toString_rdh
|
// ------------------------------------------------------------------------
@Override
public String toString() {
return "QueryableStateConfiguration{"
        + "numProxyServerThreads=" + numProxyThreads
        + ", numProxyQueryThreads=" + numPQueryThreads
        + ", numStateServerThreads=" + numServerThreads
        + ", numStateQueryThreads=" + numSQueryThreads
        + '}';
}
| 3.26 |
flink_DeweyNumber_increase_rdh
|
/**
* Creates a new dewey number from this such that its last digit is increased by the supplied
* number.
*
* @param times
* how many times to increase the Dewey number
* @return A new dewey number derived from this whose last digit is increased by given number
*/
public DeweyNumber increase(int times) {
int[] newDeweyNumber = Arrays.copyOf(deweyNumber, deweyNumber.length);
newDeweyNumber[deweyNumber.length - 1] += times;
return new DeweyNumber(newDeweyNumber);
}
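// Hedged worked example (the single-int constructor is implied by the parse helper m1 further
// below, and addStage() is also shown further below): "1" -> addStage() -> "1.0" -> increase(2) -> "1.2".
DeweyNumber number = new DeweyNumber(1).addStage(); // last digit is now 0
DeweyNumber increased = number.increase(2);         // last digit becomes 2, i.e. 1.2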
| 3.26 |
flink_DeweyNumber_isCompatibleWith_rdh
|
/**
* Checks whether this dewey number is compatible to the other dewey number.
*
 * <p>True iff this contains other as a prefix, or iff they differ only in the last digit and
 * the last digit of this is greater than or equal to the last digit of other.
*
* @param other
* The other dewey number to check compatibility against
* @return Whether this dewey number is compatible to the other dewey number
*/
public boolean isCompatibleWith(DeweyNumber other) {
if (length() > other.length()) {
// prefix case
for (int i = 0; i < other.length(); i++) {
if (other.deweyNumber[i] != deweyNumber[i]) {
return false;
}
}
return true;
} else if (length() == other.length()) {
// check init digits for equality
int lastIndex = length() - 1;
for (int i = 0; i < lastIndex; i++) {
if (other.deweyNumber[i] != deweyNumber[i]) {
return false;
}
}
// check that the last digit is greater or equal
return deweyNumber[lastIndex] >= other.deweyNumber[lastIndex];
} else {
return false;
}
}
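// Hedged worked example of the two compatible cases described above, using the parse helper m1
// shown further below:
DeweyNumber base = DeweyNumber.m1("1.0.2");
boolean prefixCase = DeweyNumber.m1("1.0.2.0").isCompatibleWith(base); // true: contains 1.0.2 as a prefix
boolean lastDigitCase = DeweyNumber.m1("1.0.5").isCompatibleWith(base); // true: last digit 5 >= 2
boolean incompatible = base.isCompatibleWith(DeweyNumber.m1("1.0.5"));  // false: last digit 2 < 5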
| 3.26 |
flink_DeweyNumber_snapshotConfiguration_rdh
|
// -----------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<DeweyNumber> snapshotConfiguration() {
return new DeweyNumberSerializerSnapshot();
}
| 3.26 |
flink_DeweyNumber_m1_rdh
|
/**
* Creates a dewey number from a string representation. The input string must be a dot separated
* string of integers.
*
* @param deweyNumberString
* Dot separated string of integers
* @return Dewey number generated from the given input string
*/
public static DeweyNumber m1(final String deweyNumberString) {
String[] splits = deweyNumberString.split("\\.");
if (splits.length == 1) {
return new DeweyNumber(Integer.parseInt(deweyNumberString));
} else if (splits.length > 0) {
int[] v9 = new int[splits.length];
for (int i = 0; i < splits.length; i++) {
v9[i] = Integer.parseInt(splits[i]);
}
return new DeweyNumber(v9);
} else {
throw new IllegalArgumentException("Failed to parse " + deweyNumberString + " as a Dewey number");
}
}
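// Hedged usage sketch: both a single integer and a dot separated sequence parse successfully.
DeweyNumber single = DeweyNumber.m1("7");     // one digit, length() == 1
DeweyNumber nested = DeweyNumber.m1("1.0.2"); // three digits, length() == 3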
| 3.26 |
flink_DeweyNumber_addStage_rdh
|
/**
* Creates a new dewey number from this such that a 0 is appended as new last digit.
*
* @return A new dewey number which contains this as a prefix and has 0 as last digit
*/
public DeweyNumber addStage() {
int[] newDeweyNumber = Arrays.copyOf(deweyNumber, deweyNumber.length + 1);
return new DeweyNumber(newDeweyNumber);
}
| 3.26 |
flink_LambdaUtil_withContextClassLoader_rdh
|
/**
 * Runs the given supplier with the given ClassLoader as the thread's {@link Thread#setContextClassLoader(ClassLoader) context class loader} and returns its result.
*
* <p>The method will make sure to set the context class loader of the calling thread back to
 * what it was before, once the supplier has completed.
*/
public static <R, E extends Throwable> R withContextClassLoader(final ClassLoader cl, final SupplierWithException<R, E> s) throws E {
try (TemporaryClassLoaderContext v3 = TemporaryClassLoaderContext.of(cl)) {
return s.get();
}
}
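// Hedged usage sketch (pluginClassLoader is an assumed ClassLoader variable): code executed
// inside the supplier observes the given loader as the thread's context class loader, and the
// previous loader is restored afterwards.
ClassLoader seenInside = LambdaUtil.withContextClassLoader(
        pluginClassLoader,
        () -> Thread.currentThread().getContextClassLoader()); // == pluginClassLoader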
| 3.26 |
flink_LambdaUtil_applyToAllWhileSuppressingExceptions_rdh
|
/**
* This method supplies all elements from the input to the consumer. Exceptions that happen on
* elements are suppressed until all elements are processed. If exceptions happened for one or
* more of the inputs, they are reported in a combining suppressed exception.
*
* @param inputs
* iterator for all inputs to the throwingConsumer.
* @param throwingConsumer
* this consumer will be called for all elements delivered by the input
* iterator.
* @param <T>
* the type of input.
* @throws Exception
* collected exceptions that happened during the invocation of the consumer on
* the input elements.
*/
public static <T> void applyToAllWhileSuppressingExceptions(Iterable<T> inputs, ThrowingConsumer<T, ? extends Exception> throwingConsumer) throws Exception {
if ((inputs != null) && (throwingConsumer != null)) {
Exception exception = null;
for (T input : inputs) {
if (input != null) {
try {
throwingConsumer.accept(input);
} catch (Exception ex) {
exception = ExceptionUtils.firstOrSuppressed(ex, exception);
}
}
}
if (exception != null) {
throw exception;
}
}
}
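// Hedged usage sketch (resourceA and resourceB are assumed AutoCloseable variables, and the
// caller is assumed to declare "throws Exception"): close all resources, letting every close()
// run and surfacing failures as one exception with the rest attached as suppressed exceptions.
List<AutoCloseable> closeables = Arrays.asList(resourceA, resourceB);
LambdaUtil.applyToAllWhileSuppressingExceptions(closeables, AutoCloseable::close);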
| 3.26 |
flink_PythonOperatorChainingOptimizer_of_rdh
|
/**
 * Creates a {@code ChainInfo} for which no chaining happens.
*/
public static ChainInfo of(Transformation<?> newTransformation) {
return new ChainInfo(newTransformation, Collections.emptyList());
}
| 3.26 |
flink_PythonOperatorChainingOptimizer_apply_rdh
|
/**
 * Performs chaining optimization. It iterates the transformations defined in the given
 * StreamExecutionEnvironment and updates them with the chained transformations. In addition, it
 * returns the chaining-optimized transformation corresponding to the given transformation.
*/
@SuppressWarnings("unchecked")
public static Transformation<?> apply(StreamExecutionEnvironment env, Transformation<?> transformation) throws Exception {
if (env.getConfiguration().get(PythonOptions.PYTHON_OPERATOR_CHAINING_ENABLED)) {
final Field transformationsField = StreamExecutionEnvironment.class.getDeclaredField("transformations");
transformationsField.setAccessible(true);
final List<Transformation<?>> transformations = ((List<Transformation<?>>) (transformationsField.get(env)));
final Tuple2<List<Transformation<?>>, Transformation<?>> resultTuple = optimize(transformations, transformation);
transformationsField.set(env, resultTuple.f0);
return resultTuple.f1;
} else {
return transformation;
}
}
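// Hedged usage sketch (env and pythonTransformation are assumed variables in a PyFlink job
// translation context, and the caller is assumed to declare "throws Exception"): obtain the
// chaining-optimized transformation before translating the job graph.
Transformation<?> optimized = PythonOperatorChainingOptimizer.apply(env, pythonTransformation);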
| 3.26 |