name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
flink_MiniBatchGlobalGroupAggFunction_addInput_rdh | /**
* The {@code previousAcc} is an accumulator, but the input is a row in <key, accumulator>
* schema; the specific generated {@link #localAgg} will project the {@code input} to an
* accumulator in its merge method.
*/
@Override
public RowData addInput(@Nullable RowData previousAcc, RowData input) throws Exception {
RowData currentAcc;
if (previousAcc == null) {
currentAcc = localAgg.createAccumulators();
} else {
currentAcc = previousAcc;
}
localAgg.setAccumulators(currentAcc);
localAgg.merge(input);
return localAgg.getAccumulators();
} | 3.26 |
flink_RegistrationResponse_getReason_rdh | /**
* Gets the reason for the failure.
*/
public SerializedThrowable getReason() {
return reason;
} | 3.26 |
flink_DynamicPropertiesUtil_encodeDynamicProperties_rdh | /**
* Parses dynamic properties from the given {@link CommandLine} and sets them on the {@link Configuration}.
*/
public static void encodeDynamicProperties(final CommandLine commandLine,
final Configuration effectiveConfiguration) {
final Properties properties = commandLine.getOptionProperties(DYNAMIC_PROPERTIES.getOpt());
properties.stringPropertyNames().forEach(key -> {
final String value = properties.getProperty(key);
if (value != null) {
effectiveConfiguration.setString(key, value);
} else {
effectiveConfiguration.setString(key, "true");
}
});
} | 3.26 |
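For context, a hedged sketch of how such dynamic "-D" properties are typically parsed with Apache Commons CLI before a method like the one above copies them into the configuration; the option definition and the sample keys are illustrative assumptions, not taken from the row above.

```java
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

import java.util.Properties;

public class DynamicPropertiesSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical "-D" option, analogous to the DYNAMIC_PROPERTIES option used above.
        Options options = new Options();
        options.addOption(Option.builder("D").hasArgs().valueSeparator('=').build());

        CommandLine commandLine = new DefaultParser()
                .parse(options, new String[] {"-Dtaskmanager.numberOfTaskSlots=4", "-Drest.port=8082"});

        // getOptionProperties exposes the parsed key/value pairs, which the method above
        // would then write into the effective Configuration.
        Properties properties = commandLine.getOptionProperties("D");
        properties.stringPropertyNames()
                .forEach(key -> System.out.println(key + " -> " + properties.getProperty(key)));
    }
}
```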
flink_AccumulatorRegistry_getSnapshot_rdh | /**
* Creates a snapshot of this accumulator registry.
*
* @return a serialized accumulator map
*/
public AccumulatorSnapshot getSnapshot() {
try {
return new AccumulatorSnapshot(jobID, taskID, f0);
} catch (Throwable e) {
LOG.warn("Failed to serialize accumulators for task.", e);
return null;
}
} | 3.26 |
flink_AccumulatorRegistry_getUserMap_rdh | /**
* Gets the map for user-defined accumulators.
*/
public Map<String, Accumulator<?, ?>> getUserMap() {
return f0;
} | 3.26 |
flink_HashPartition_spillPartition_rdh | /**
* Spills this partition to disk and sets it up such that it continues spilling records that are
* added to it. The spilling process must free at least one buffer, either in the partition's
* record buffers, or in the memory segments for overflow buckets. The partition immediately
* takes back one buffer to use it for further spilling.
*
* @param target
* The list to which memory segments from overflow buckets are added.
* @param ioAccess
* The I/O manager to be used to create a writer to disk.
* @param targetChannel
* The id of the target channel for this partition.
* @return The number of buffers that were freed by spilling this partition.
* @throws IOException
* Thrown, if the writing failed.
*/
public int spillPartition(List<MemorySegment> target, IOManager ioAccess, FileIOChannel.ID targetChannel, LinkedBlockingQueue<MemorySegment> bufferReturnQueue) throws IOException {
// sanity checks
if (!isInMemory()) {
throw new RuntimeException("Bug in Hybrid Hash Join: " + "Request to spill a partition that has already been spilled.");
}
if (getNumOccupiedMemorySegments() < 2) {
throw new RuntimeException("Bug in Hybrid Hash Join: " + "Request to spill a partition with less than two buffers.");
}
// return the memory from the overflow segments
for (int i = 0; i < this.numOverflowSegments; i++) {
target.add(this.overflowSegments[i]);
}
this.overflowSegments = null;
this.numOverflowSegments = 0;
this.nextOverflowBucket = 0;
// create the channel block writer and spill the current buffers
// that keep the build side buffers current block, as it is most likely not full, yet
// we return the number of blocks that become available
this.buildSideChannel = ioAccess.createBlockChannelWriter(targetChannel, bufferReturnQueue);
return this.buildSideWriteBuffer.spill(this.buildSideChannel);
} | 3.26 |
flink_HashPartition_insertIntoProbeBuffer_rdh | /**
* Inserts the given record into the probe side buffers. This method is only applicable when the
* partition was spilled while processing the build side.
*
* <p>If this method is invoked when the partition is still being built, it has undefined
* behavior.
*
* @param record
* The record to be inserted into the probe side buffers.
* @throws IOException
* Thrown, if the buffer is full, needs to be spilled, and spilling causes
* an error.
*/
public final void insertIntoProbeBuffer(PT record) throws IOException {
this.probeSideSerializer.serialize(record, this.probeSideBuffer);
this.probeSideRecordCounter++;
} | 3.26 |
flink_HashPartition_getRecursionLevel_rdh | /**
* Gets this partition's recursion level.
*
* @return The partition's recursion level.
*/
public int getRecursionLevel() {
return this.recursionLevel;
} | 3.26 |
flink_HashPartition_finalizeProbePhase_rdh | /**
*
* @param keepUnprobedSpilledPartitions
* If true then partitions that were spilled but received
* no further probe requests will be retained; used for build-side outer joins.
* @return The number of write-behind buffers reclaimable after this method call.
* @throws IOException
*/
public int finalizeProbePhase(List<MemorySegment> freeMemory, List<HashPartition<BT, PT>> spilledPartitions, boolean keepUnprobedSpilledPartitions) throws IOException {
if (isInMemory()) {
// in this case, return all memory buffers
// return the overflow segments
for (int k = 0; k < this.numOverflowSegments; k++) {
freeMemory.add(this.overflowSegments[k]);
}
this.overflowSegments = null;
this.numOverflowSegments = 0;
this.nextOverflowBucket = 0;
// return the partition buffers
for (MemorySegment partitionBuffer : this.partitionBuffers) {
freeMemory.add(partitionBuffer);
}
this.partitionBuffers = null;
return 0;
} else if ((this.probeSideRecordCounter == 0) && (!keepUnprobedSpilledPartitions)) {
// partition is empty, no spilled buffers
// return the memory buffer
freeMemory.add(this.probeSideBuffer.getCurrentSegment());
// delete the spill files
this.probeSideChannel.close();
this.buildSideChannel.deleteChannel();
this.probeSideChannel.deleteChannel();
return 0;
} else {
// flush the last probe side buffer and register this partition as pending
this.probeSideBuffer.close();
this.probeSideChannel.close();
spilledPartitions.add(this);
return 1;
}
} | 3.26 |
flink_HashPartition_setReadPosition_rdh | // --------------------------------------------------------------------------------------------------
// Methods to provide input view abstraction for reading probe records
// --------------------------------------------------------------------------------------------------
public void setReadPosition(long pointer) {
final int bufferNum = ((int) (pointer >>> this.segmentSizeBits));
final int offset = ((int) (pointer & (this.memorySegmentSize - 1)));
this.currentBufferNum = bufferNum;
seekInput(this.partitionBuffers[bufferNum], offset, bufferNum < (this.partitionBuffers.length - 1) ? this.memorySegmentSize : this.finalBufferLimit);
} | 3.26 |
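The bit arithmetic above packs a buffer index and an intra-buffer offset into a single long pointer. Below is a minimal, self-contained sketch of that encoding/decoding, assuming a hypothetical 32 KiB segment size (the mask trick requires the segment size to be a power of two).

```java
public class ReadPointerSketch {
    public static void main(String[] args) {
        final int memorySegmentSize = 32 * 1024;                                       // hypothetical segment size
        final int segmentSizeBits = Integer.numberOfTrailingZeros(memorySegmentSize);  // 15

        // Encode: buffer number in the high bits, offset within that buffer in the low bits.
        int bufferNum = 3;
        int offset = 1234;
        long pointer = (((long) bufferNum) << segmentSizeBits) | offset;

        // Decode, mirroring setReadPosition above.
        int decodedBufferNum = (int) (pointer >>> segmentSizeBits);
        int decodedOffset = (int) (pointer & (memorySegmentSize - 1));

        System.out.println(decodedBufferNum + " / " + decodedOffset);                  // prints 3 / 1234
    }
}
```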
flink_HashPartition_getPartitionNumber_rdh | // --------------------------------------------------------------------------------------------------
/**
* Gets the partition number of this partition.
*
* @return This partition's number.
*/
public int getPartitionNumber() {
return this.partitionNumber;
} | 3.26 |
flink_HashPartition_prepareProbePhase_rdh | // --------------------------------------------------------------------------------------------------
// ReOpenableHashTable related methods
// --------------------------------------------------------------------------------------------------
public void prepareProbePhase(IOManager ioAccess, FileIOChannel.Enumerator probeChannelEnumerator, LinkedBlockingQueue<MemorySegment> bufferReturnQueue) throws IOException {
if (isInMemory()) {
return;
}
// ATTENTION: The following lines are duplicated code from finalizeBuildPhase
this.probeSideChannel = ioAccess.createBlockChannelWriter(probeChannelEnumerator.next(), bufferReturnQueue);
this.probeSideBuffer = new ChannelWriterOutputView(this.probeSideChannel, this.memorySegmentSize);
} | 3.26 |
flink_HashPartition_getNumOccupiedMemorySegments_rdh | /**
* Gets the number of memory segments used by this partition, which includes build side memory
* buffers and overflow memory segments.
*
* @return The number of occupied memory segments.
*/
public int getNumOccupiedMemorySegments() {
// either the number of memory segments, or one for spilling
final int numPartitionBuffers = (this.partitionBuffers != null) ? this.partitionBuffers.length : this.buildSideWriteBuffer.getNumOccupiedMemorySegments();
return numPartitionBuffers + numOverflowSegments;
} | 3.26 |
flink_OpaqueMemoryResource_getSize_rdh | /**
* Gets the size, in bytes.
*/
public long getSize() {
return size;
} | 3.26 |
flink_OpaqueMemoryResource_getResourceHandle_rdh | /**
* Gets the handle to the resource.
*/
public T getResourceHandle() {
return resourceHandle;
} | 3.26 |
flink_OpaqueMemoryResource_close_rdh | /**
* Releases this resource. This method is idempotent.
*/
@Override
public void close() throws Exception {
if (closed.compareAndSet(false, true)) {
disposer.run();
}
} | 3.26 |
flink_PrintStyle_tableauWithTypeInferredColumnWidths_rdh | /**
* Create a new {@link TableauStyle} using column widths computed from the type.
*
* @param schema
* the schema of the data to print
* @param converter
* the converter to use to convert field values to string
* @param maxColumnWidth
* Max column width
* @param printNullAsEmpty
* A flag to indicate whether null should be printed as an empty string
* rather than {@code <NULL>}
* @param printRowKind
* A flag to indicate whether to print row kind info.
*/
static TableauStyle tableauWithTypeInferredColumnWidths(ResolvedSchema schema, RowDataToStringConverter converter, int maxColumnWidth, boolean printNullAsEmpty, boolean printRowKind) {
Preconditions.checkArgument(maxColumnWidth > 0, "maxColumnWidth should be greater than 0");
return new TableauStyle(schema, converter, TableauStyle.columnWidthsByType(schema.getColumns(), maxColumnWidth, printNullAsEmpty, printRowKind), maxColumnWidth, printNullAsEmpty, printRowKind);
}
/**
* Like {@link #tableauWithTypeInferredColumnWidths(ResolvedSchema, RowDataToStringConverter,
* int, boolean, boolean)} | 3.26 |
flink_PrintStyle_tableauWithDataInferredColumnWidths_rdh | /**
* Like {@link #tableauWithDataInferredColumnWidths(ResolvedSchema, RowDataToStringConverter,
* int, boolean, boolean)}, but using default values.
*
* <p><b>NOTE:</b> please make sure the data to print is small enough to be stored in java heap
* memory.
*/
static TableauStyle tableauWithDataInferredColumnWidths(ResolvedSchema schema, RowDataToStringConverter converter) {
return PrintStyle.m0(schema, converter, DEFAULT_MAX_COLUMN_WIDTH, false, false);
} | 3.26 |
flink_LongCounter_add_rdh | // ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(long value) {
this.localValue += value;
} | 3.26 |
flink_LongCounter_toString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "LongCounter " + this.localValue;
} | 3.26 |
flink_ListenableCollector_onCollect_rdh | /**
* A callback method invoked when an original record is collected; does nothing by default.
*/
default void onCollect(T record) {
} | 3.26 |
flink_HistoryServerArchiveFetcher_updateJobOverview_rdh | /**
* This method replicates the JSON response that would be given by the JobsOverviewHandler when
* listing both running and finished jobs.
*
* <p>Every job archive contains a joboverview.json file containing the same structure. Since
* jobs are archived on their own however the list of finished jobs only contains a single job.
*
* <p>For the display in the HistoryServer WebFrontend we have to combine these overviews.
*/
private static void updateJobOverview(File webOverviewDir, File webDir) {
try (JsonGenerator gen = jacksonFactory.createGenerator(HistoryServer.createOrGetFile(webDir, JobsOverviewHeaders.URL))) {
File[] overviews = new File(webOverviewDir.getPath()).listFiles();
if (overviews != null) {
Collection<JobDetails> allJobs = new ArrayList<>(overviews.length);
for (File overview : overviews) {
MultipleJobsDetails subJobs = mapper.readValue(overview, MultipleJobsDetails.class);
allJobs.addAll(subJobs.getJobs());
}
mapper.writeValue(gen, new MultipleJobsDetails(allJobs));
}
} catch (IOException ioe) {
LOG.error("Failed to update job overview.", ioe);
}
} | 3.26 |
flink_BufferBuilder_createBufferConsumer_rdh | /**
* This method always creates a {@link BufferConsumer} starting from the current writer offset.
* Data written to {@link BufferBuilder} before creation of {@link BufferConsumer} won't be
* visible for that {@link BufferConsumer}.
*
* @return created matching instance of {@link BufferConsumer} to this {@link BufferBuilder}.
*/
public BufferConsumer createBufferConsumer() {
return createBufferConsumer(f0.cachedPosition);
} | 3.26 |
flink_BufferBuilder_createBufferConsumerFromBeginning_rdh | /**
* This method always creates a {@link BufferConsumer} starting from position 0 of {@link MemorySegment}.
*
* @return created matching instance of {@link BufferConsumer} to this {@link BufferBuilder}.
*/
public BufferConsumer createBufferConsumerFromBeginning() {
return createBufferConsumer(0);
} | 3.26 |
flink_BufferBuilder_commit_rdh | /**
* Make the change visible to the readers. This is a costly operation (volatile access), so for
* bulk writes it is better to commit them all together instead of one by one.
*/
public void commit() {
f0.commit();
} | 3.26 |
flink_BufferBuilder_append_rdh | /**
* Appends as much data as possible from {@code source}. Not everything might be copied if there
* is not enough space in the underlying {@link MemorySegment}.
*
* @return number of copied bytes
*/
public int append(ByteBuffer source) {
checkState(!isFinished());
int needed = source.remaining();
int available = getMaxCapacity() - f0.getCached();
int toCopy = Math.min(needed, available);
memorySegment.put(f0.getCached(), source, toCopy);
f0.move(toCopy);
return toCopy;
} | 3.26 |
flink_BufferBuilder_markFinished_rdh | /**
* Marks this position as finished and returns the current position.
*
* @return current position as of {@link #getCached()}
*/
public int markFinished() {
int currentPosition = getCached();
int newValue = -currentPosition;
if (newValue == 0) {
newValue = FINISHED_EMPTY;
}
set(newValue);
return currentPosition;
} | 3.26 |
flink_BufferBuilder_appendAndCommit_rdh | /**
* Same as {@link #append(ByteBuffer)} but additionally {@link #commit()} the appending.
*/
public int appendAndCommit(ByteBuffer source) {
int writtenBytes = append(source);
commit();
return writtenBytes;
} | 3.26 |
flink_BufferBuilder_finish_rdh | /**
* Mark this {@link BufferBuilder} and associated {@link BufferConsumer} as finished - no new
* data writes will be allowed.
*
* <p>This method should be idempotent to handle failures and task interruptions. Check
* FLINK-8948 for more details.
*
* @return number of written bytes.
*/
public int finish() {
int writtenBytes = f0.markFinished();
commit();
return writtenBytes;
} | 3.26 |
flink_BufferBuilder_trim_rdh | /**
* The resulting capacity cannot be greater than the allocated memorySegment. It also cannot be
* less than the data already written.
*/
public void trim(int newSize) {
maxCapacity = Math.min(Math.max(newSize, f0.getCached()), buffer.getMaxCapacity());
} | 3.26 |
flink_OperatorStateCheckpointOutputStream_closeAndGetHandle_rdh | /**
* This method should not be public so as to not expose internals to user code.
*/
@Override
OperatorStateHandle closeAndGetHandle() throws IOException {
StreamStateHandle streamStateHandle = super.closeAndGetHandleAfterLeasesReleased();
if (null == streamStateHandle) {
return null;
}
if (partitionOffsets.isEmpty() && (delegate.getPos() > initialPosition)) {
startNewPartition();
}
Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = CollectionUtil.newHashMapWithExpectedSize(1);
OperatorStateHandle.StateMetaInfo metaInfo = new OperatorStateHandle.StateMetaInfo(partitionOffsets.toArray(), Mode.SPLIT_DISTRIBUTE);
offsetsMap.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, metaInfo);
return new OperatorStreamStateHandle(offsetsMap, streamStateHandle);
} | 3.26 |
flink_SequenceGeneratorSource_getRandomKey_rdh | /**
* Returns a random key that belongs to this key range.
*/
int getRandomKey(Random random) {
return random.nextInt(endKey - startKey) + startKey;
} | 3.26 |
flink_SequenceGeneratorSource_incrementAndGet_rdh | /**
* Increments and returns the current sequence number for the given key.
*/
long incrementAndGet(int key) {
return ++statesPerKey[key - startKey];
} | 3.26 |
flink_CancelCheckpointMarker_write_rdh | // ------------------------------------------------------------------------
// These known and common event go through special code paths, rather than
// through generic serialization.
@Override
public void write(DataOutputView out) throws IOException {
throw new UnsupportedOperationException("this method should never be called");
} | 3.26 |
flink_ThreadSafeSimpleCounter_dec_rdh | /**
* Decrement the current count by the given value.
*
* @param n
* value to decrement the current count by
*/
@Override
public void dec(long n) {
longAdder.add(-n);
} | 3.26 |
flink_ThreadSafeSimpleCounter_m0_rdh | /**
* Increment the current count by the given value.
*
* @param n
* value to increment the current count by
*/
@Override
public void m0(long n) {
longAdder.add(n);
} | 3.26 |
flink_ThreadSafeSimpleCounter_inc_rdh | /**
* Increment the current count by 1.
*/
@Override
public void inc() {
longAdder.increment();
} | 3.26 |
flink_ThreadSafeSimpleCounter_getCount_rdh | /**
* Returns the current count.
*
* @return current count
*/
@Override
public long getCount() {
return longAdder.longValue();
} | 3.26 |
flink_FileRegionWriteReadUtils_writeHsInternalRegionToFile_rdh | /**
* Write {@link InternalRegion} to {@link FileChannel}.
*
* <p>Note that this type of region's length may be variable because it contains an array to
* indicate each buffer's release state.
*
* @param channel
* the file's channel to write.
* @param headerBuffer
* the buffer to write {@link InternalRegion}'s header.
* @param region
* the region to be written to channel.
*/
public static void writeHsInternalRegionToFile(FileChannel channel, ByteBuffer headerBuffer, InternalRegion region) throws IOException {
// write header buffer.
headerBuffer.clear();
headerBuffer.putInt(region.getFirstBufferIndex());
headerBuffer.putInt(region.getNumBuffers());
headerBuffer.putLong(region.getRegionStartOffset());
headerBuffer.flip();
// write payload buffer.
ByteBuffer payloadBuffer = allocateAndConfigureBuffer(region.getNumBuffers());
boolean[] released = region.getReleased();
for (boolean b : released) {
payloadBuffer.put(b ? ((byte) (1)) : ((byte) (0)));
}
payloadBuffer.flip();
BufferReaderWriterUtil.writeBuffers(channel, headerBuffer.capacity() + payloadBuffer.capacity(), headerBuffer, payloadBuffer);
} | 3.26 |
flink_FileRegionWriteReadUtils_readHsInternalRegionFromFile_rdh | /**
* Read {@link InternalRegion} from {@link FileChannel}.
*
* <p>Note that this type of region's length may be variable because it contains an array to
* indicate each buffer's release state.
*
* @param channel
* the channel to read.
* @param headerBuffer
* the buffer to read {@link InternalRegion}'s header.
* @param fileOffset
* the file offset to start read.
* @return the {@link InternalRegion} that read from this channel.
*/
public static InternalRegion readHsInternalRegionFromFile(FileChannel channel, ByteBuffer headerBuffer, long fileOffset) throws IOException {
headerBuffer.clear();
BufferReaderWriterUtil.readByteBufferFully(channel, headerBuffer, fileOffset);
headerBuffer.flip();
int firstBufferIndex = headerBuffer.getInt();
int v5 = headerBuffer.getInt();
long firstBufferOffset = headerBuffer.getLong();
ByteBuffer payloadBuffer = allocateAndConfigureBuffer(v5);
BufferReaderWriterUtil.readByteBufferFully(channel, payloadBuffer, fileOffset + InternalRegion.HEADER_SIZE);
boolean[] released = new boolean[v5];
payloadBuffer.flip();
for (int i = 0; i < v5; i++) {
released[i] = payloadBuffer.get() != 0;
}
return new InternalRegion(firstBufferIndex, firstBufferOffset, v5, released);
} | 3.26 |
flink_FileRegionWriteReadUtils_allocateAndConfigureBuffer_rdh | /**
* Allocate a buffer with specific size and configure it to native order.
*
* @param bufferSize
* the size of buffer to allocate.
* @return a native order buffer with expected size.
*/
public static ByteBuffer allocateAndConfigureBuffer(int bufferSize) {
ByteBuffer buffer = ByteBuffer.allocateDirect(bufferSize);
buffer.order(ByteOrder.nativeOrder());
return buffer;
} | 3.26 |
flink_FileRegionWriteReadUtils_readFixedSizeRegionFromFile_rdh | /**
* Read {@link FixedSizeRegion} from {@link FileChannel}.
*
* <p>Note that this type of region's length is fixed.
*
* @param channel
* the channel to read.
* @param regionBuffer
* the buffer to read {@link FixedSizeRegion}'s header.
* @param fileOffset
* the file offset to start read.
* @return the {@link FixedSizeRegion} that read from this channel.
*/
public static FixedSizeRegion readFixedSizeRegionFromFile(FileChannel channel, ByteBuffer regionBuffer, long fileOffset) throws IOException {
regionBuffer.clear();
BufferReaderWriterUtil.readByteBufferFully(channel, regionBuffer, fileOffset);
regionBuffer.flip();
int firstBufferIndex = regionBuffer.getInt();
int numBuffers = regionBuffer.getInt();
long firstBufferOffset = regionBuffer.getLong();
long lastBufferEndOffset = regionBuffer.getLong();
return new FixedSizeRegion(firstBufferIndex, firstBufferOffset, lastBufferEndOffset, numBuffers);
} | 3.26 |
flink_FileRegionWriteReadUtils_writeFixedSizeRegionToFile_rdh | /**
* Write {@link FixedSizeRegion} to {@link FileChannel}.
*
* <p>Note that this type of region's length is fixed.
*
* @param channel
* the file's channel to write.
* @param regionBuffer
* the buffer to write {@link FixedSizeRegion}'s header.
* @param region
* the region to be written to channel.
*/
public static void writeFixedSizeRegionToFile(FileChannel channel, ByteBuffer regionBuffer, FileDataIndexRegionHelper.Region region) throws IOException {
regionBuffer.clear();
regionBuffer.putInt(region.getFirstBufferIndex());
regionBuffer.putInt(region.getNumBuffers());
regionBuffer.putLong(region.getRegionStartOffset());
regionBuffer.putLong(region.getRegionEndOffset());
regionBuffer.flip();
BufferReaderWriterUtil.writeBuffers(channel, regionBuffer.capacity(), regionBuffer);
} | 3.26 |
flink_InputFormatSourceFunction_getFormat_rdh | /**
* Returns the {@code InputFormat}. This is only needed because we need to set the input split
* assigner on the {@code StreamGraph}.
*/
public InputFormat<OUT, InputSplit> getFormat() {
return format;
} | 3.26 |
flink_TieredStorageConsumerClient_createTierConsumerAgents_rdh | // Internal methods
// --------------------------------------------------------------------------------------------
private List<TierConsumerAgent> createTierConsumerAgents(List<TieredStorageConsumerSpec> tieredStorageConsumerSpecs) {
ArrayList<TierConsumerAgent> tierConsumerAgents = new ArrayList<>();
for (TierFactory tierFactory : tierFactories) {
tierConsumerAgents.add(tierFactory.createConsumerAgent(tieredStorageConsumerSpecs, nettyService));
}
return tierConsumerAgents;
} | 3.26 |
flink_UpdatingTopCityExample_createTemporaryDirectory_rdh | /**
* Creates an empty temporary directory for CSV files and returns the absolute path.
*/
private static String createTemporaryDirectory() throws IOException {
final Path tempDirectory = Files.createTempDirectory("population");
return tempDirectory.toString();
} | 3.26 |
flink_SqlGatewayStreamExecutionEnvironment_setAsContext_rdh | /**
* The SqlGatewayStreamExecutionEnvironment is a {@link StreamExecutionEnvironment} that runs the
* program with SQL gateway.
*/
| 3.26 |
flink_SpanningWrapper_transferFrom_rdh | /**
* Copies the data and transfers the "ownership" (i.e. clears the passed wrapper).
*/
void transferFrom(NonSpanningWrapper partial, int nextRecordLength) throws IOException {
updateLength(nextRecordLength);
accumulatedRecordBytes = (isAboveSpillingThreshold()) ? spill(partial) : partial.copyContentTo(buffer);
partial.clear();
} | 3.26 |
flink_SpanningWrapper_transferLeftOverTo_rdh | /**
* Copies the leftover data and transfers the "ownership" (i.e. clears this wrapper).
*/
void transferLeftOverTo(NonSpanningWrapper nonSpanningWrapper) {
nonSpanningWrapper.clear();
if (leftOverData != null) {
nonSpanningWrapper.initializeFromMemorySegment(leftOverData, leftOverStart, leftOverLimit);
}
clear();
} | 3.26 |
flink_MessageAcknowledgingSourceBase_addId_rdh | /**
* Adds an ID to be stored with the current checkpoint. In order to achieve exactly-once
* guarantees, implementing classes should only emit records with IDs for which this method
* return true.
*
* @param uid
* The ID to add.
* @return True if the id has not been processed previously.
*/
protected boolean addId(UId uid) {
idsForCurrentCheckpoint.add(uid);
return idsProcessedButNotAcknowledged.add(uid);
} | 3.26 |
flink_MessageAcknowledgingSourceBase_snapshotState_rdh | // ------------------------------------------------------------------------
// Checkpointing the data
// ------------------------------------------------------------------------
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
Preconditions.checkState(this.checkpointedState != null, ("The " + getClass().getSimpleName()) + " has not been properly initialized.");
if (LOG.isDebugEnabled()) {
LOG.debug("Checkpointing: Messages: {}, checkpoint id: {}, timestamp: {}", idsForCurrentCheckpoint, context.getCheckpointId(), context.getCheckpointTimestamp());
}
pendingCheckpoints.addLast(new Tuple2<>(context.getCheckpointId(), idsForCurrentCheckpoint));
idsForCurrentCheckpoint = CollectionUtil.newHashSetWithExpectedSize(64);
this.checkpointedState.update(Collections.singletonList(SerializedCheckpointData.fromDeque(pendingCheckpoints, idSerializer)));
} | 3.26 |
flink_ProcTimeMiniBatchAssignerOperator_processWatermark_rdh | /**
* Override the base implementation to completely ignore watermarks propagated from upstream (we
* rely only on the {@link AssignerWithPeriodicWatermarks} to emit watermarks from here).
*/
@Override
public void processWatermark(Watermark mark) throws Exception {
// if we receive a Long.MAX_VALUE watermark we forward it since it is used
// to signal the end of input and to not block watermark progress downstream
if ((mark.getTimestamp() == Long.MAX_VALUE) && (currentWatermark != Long.MAX_VALUE)) {
currentWatermark = Long.MAX_VALUE;
output.emitWatermark(mark);
}
} | 3.26 |
flink_IterativeDataSet_registerAggregationConvergenceCriterion_rdh | /**
* Registers an {@link Aggregator} for the iteration together with a {@link ConvergenceCriterion}. For a general description of aggregators, see {@link #registerAggregator(String, Aggregator)} and {@link Aggregator}. At the end of each
* iteration, the convergence criterion takes the aggregator's global aggregate value and
* decides whether the iteration should terminate. A typical use case is to have an aggregator
* that sums up the total error of change in an iteration step and a convergence
* criterion that signals termination as soon as the aggregate value is below a certain
* threshold.
*
* @param name
* The name under which the aggregator is registered.
* @param aggregator
* The aggregator class.
* @param convergenceCheck
* The convergence criterion.
* @return The IterativeDataSet itself, to allow chaining function calls.
*/
@PublicEvolving
public <X extends Value> IterativeDataSet<T> registerAggregationConvergenceCriterion(String name, Aggregator<X> aggregator, ConvergenceCriterion<X> convergenceCheck) {
this.aggregators.registerAggregationConvergenceCriterion(name, aggregator, convergenceCheck);
return this;
}
/**
* Gets the registry for aggregators. On the registry, one can add {@link Aggregator}s and an
* aggregator-based {@link ConvergenceCriterion}. This method offers an alternative way to
* registering the aggregators via {@link #registerAggregator(String, Aggregator)} and {@link #registerAggregationConvergenceCriterion(String, Aggregator, ConvergenceCriterion)} | 3.26 |
flink_IterativeDataSet_getMaxIterations_rdh | /**
* Gets the maximum number of iterations.
*
* @return The maximum number of iterations.
*/
public int getMaxIterations() {
return maxIterations;
}
/**
* Registers an {@link Aggregator} for the iteration. Aggregators can be used to maintain simple
* statistics during the iteration, such as number of elements processed. The aggregators
* compute global aggregates: After each iteration step, the values are globally aggregated to
* produce one aggregate that represents statistics across all parallel instances. The value of
* an aggregator can be accessed in the next iteration.
*
* <p>Aggregators can be accessed inside a function via the {@link org.apache.flink.api.common.functions.AbstractRichFunction#getIterationRuntimeContext()} | 3.26 |
flink_IterativeDataSet_translateToDataFlow_rdh | // --------------------------------------------------------------------------------------------
@Override
protected SingleInputOperator<T, T, ?> translateToDataFlow(Operator<T> input) {
// All the translation magic happens when the iteration end is encountered.
throw new InvalidProgramException("A data set that is part of an iteration was used as a sink or action." + " Did you forget to close the iteration?");
} | 3.26 |
flink_DefaultVertexParallelismStore_applyJobResourceRequirements_rdh | /**
* Create a new {@link VertexParallelismStore} that reflects given {@link JobResourceRequirements}.
*
* @param oldVertexParallelismStore
* old vertex parallelism store that serves as a base for the
* new one
* @param jobResourceRequirements
* to apply over the old vertex parallelism store
* @return new vertex parallelism store iff it was updated
*/
public static Optional<VertexParallelismStore> applyJobResourceRequirements(VertexParallelismStore oldVertexParallelismStore, JobResourceRequirements jobResourceRequirements) {
final DefaultVertexParallelismStore newVertexParallelismStore = new DefaultVertexParallelismStore();
boolean changed = false;
for (final JobVertexID jobVertexId : jobResourceRequirements.getJobVertices()) {
final VertexParallelismInformation oldVertexParallelismInfo = oldVertexParallelismStore.getParallelismInfo(jobVertexId);
final JobVertexResourceRequirements.Parallelism v4 = jobResourceRequirements.getParallelism(jobVertexId);
final int minParallelism = v4.getLowerBound();
final int parallelism = v4.getUpperBound();
newVertexParallelismStore.setParallelismInfo(jobVertexId, new DefaultVertexParallelismInfo(minParallelism, parallelism, oldVertexParallelismInfo.getMaxParallelism(), RESCALE_MAX_REJECT));
changed |= (oldVertexParallelismInfo.getMinParallelism() != minParallelism) || (oldVertexParallelismInfo.getParallelism() != parallelism);
}
return changed ? Optional.of(newVertexParallelismStore) : Optional.empty();
} | 3.26 |
flink_TableEnvironmentInternal_explainInternal_rdh | /**
* Returns the AST of this table and the execution plan to compute the result of this table.
*
* @param operations
* The operations to be explained.
* @param extraDetails
* The extra explain details which the explain result should include, e.g.
* estimated cost, changelog mode for streaming
* @return AST and the execution plan.
*/
default String explainInternal(List<Operation> operations, ExplainDetail... extraDetails) {
return explainInternal(operations, ExplainFormat.TEXT, extraDetails);
} | 3.26 |
flink_SimpleVersionedListState_serialize_rdh | // ------------------------------------------------------------------------
// utils
// ------------------------------------------------------------------------
private byte[] serialize(T value) throws IOException {
return SimpleVersionedSerialization.writeVersionAndSerialize(serializer, value);
} | 3.26 |
flink_UnresolvedDataType_toDataType_rdh | /**
* Converts this instance to a resolved {@link DataType} possibly enriched with additional
* nullability and conversion class information.
*/
public DataType toDataType(DataTypeFactory factory) {
DataType resolvedDataType = resolutionFactory.apply(factory);
if (isNullable == Boolean.TRUE) {
resolvedDataType = resolvedDataType.nullable();
} else if (isNullable == Boolean.FALSE) {
resolvedDataType = resolvedDataType.notNull();
}
if (conversionClass != null) {
resolvedDataType = resolvedDataType.bridgedTo(conversionClass);
}
return resolvedDataType;
} | 3.26 |
flink_Description_list_rdh | /**
* Adds a bulleted list to the description.
*/
public DescriptionBuilder list(InlineElement... elements) {
blocks.add(ListElement.list(elements));
return this;
} | 3.26 |
flink_Description_linebreak_rdh | /**
* Creates a line break in the description.
*/
public DescriptionBuilder linebreak() {
blocks.add(LineBreakElement.linebreak());
return this;
} | 3.26 |
flink_Description_text_rdh | /**
* Creates a simple block of text.
*
* @param text
* a simple block of text
* @return block of text
*/
public DescriptionBuilder text(String text) {
blocks.add(TextElement.text(text));
return this;
} | 3.26 |
flink_Description_build_rdh | /**
* Creates description representation.
*/
public Description build() {
return new Description(blocks);
} | 3.26 |
flink_Description_add_rdh | /**
* Adds a block of description.
*
* @param block
* block of description to add
* @return this builder, to allow chaining
*/
public DescriptionBuilder add(BlockElement block) {
blocks.add(block);
return this;
} | 3.26 |
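A short usage sketch of the builder methods collected in the rows above (text, linebreak, list, add, build); the description text is made up for illustration, and the static Description.builder() factory and imports are assumed from the Flink configuration description package.

```java
import org.apache.flink.configuration.description.Description;

import static org.apache.flink.configuration.description.TextElement.text;

public class DescriptionSketch {
    public static void main(String[] args) {
        // Chains the DescriptionBuilder methods shown in the rows above.
        Description description = Description.builder()
                .text("Controls how the job reacts to task failures.")
                .linebreak()
                .list(
                        text("fixed-delay: restart with a fixed delay between attempts"),
                        text("failure-rate: restart unless a failure rate is exceeded"))
                .build();

        System.out.println(description);
    }
}
```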
flink_TestUserClassLoaderJobLib_getValue_rdh | /**
* This class is depended on by {@link TestUserClassLoaderJob}.
*/
class TestUserClassLoaderJobLib {
int getValue() {
return 0;
} | 3.26 |
flink_HeartbeatServices_fromConfiguration_rdh | /**
* Creates an HeartbeatServices instance from a {@link Configuration}.
*
* @param configuration
* Configuration to be used for the HeartbeatServices creation
* @return An HeartbeatServices instance created from the given configuration
*/
static HeartbeatServices fromConfiguration(Configuration configuration) {
long heartbeatInterval = configuration.getLong(HeartbeatManagerOptions.HEARTBEAT_INTERVAL);
long heartbeatTimeout = configuration.getLong(HeartbeatManagerOptions.HEARTBEAT_TIMEOUT);
int failedRpcRequestsUntilUnreachable = configuration.get(HeartbeatManagerOptions.HEARTBEAT_RPC_FAILURE_THRESHOLD);
return new HeartbeatServicesImpl(heartbeatInterval, heartbeatTimeout, failedRpcRequestsUntilUnreachable);
} | 3.26 |
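A minimal sketch of setting the heartbeat options that the factory above reads; the concrete values are illustrative assumptions, and defaults apply when the options are not set.

```java
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.HeartbeatManagerOptions;

public class HeartbeatConfigSketch {
    public static void main(String[] args) {
        Configuration configuration = new Configuration();
        // Illustrative values in milliseconds.
        configuration.setLong(HeartbeatManagerOptions.HEARTBEAT_INTERVAL, 10_000L);
        configuration.setLong(HeartbeatManagerOptions.HEARTBEAT_TIMEOUT, 50_000L);

        // fromConfiguration(configuration) above reads exactly these options
        // (plus the RPC failure threshold).
        System.out.println(configuration.getLong(HeartbeatManagerOptions.HEARTBEAT_INTERVAL));
    }
}
```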
flink_TaskEventHandler_publish_rdh | /**
* Publishes the task event to all subscribed event listeners.
*
* @param event
* The event to publish.
*/
public void publish(TaskEvent event) {
synchronized(listeners) {
for (EventListener<TaskEvent> listener : listeners.get(event.getClass())) {
listener.onEvent(event);
}
}
} | 3.26 |
flink_WikipediaEditEvent_getTimestamp_rdh | /**
* Returns the timestamp when this event arrived at the source.
*
* @return The timestamp assigned at the source.
*/
public long getTimestamp() {
return timestamp;
} | 3.26 |
flink_DelegationTokenReceiverRepository_onNewTokensObtained_rdh | /**
* Callback function when new delegation tokens obtained.
*
* @param container
* Serialized form of delegation tokens stored in DelegationTokenContainer. All
* the available tokens will be forwarded to the appropriate {@link DelegationTokenReceiver}
* based on service name.
*/
public void onNewTokensObtained(DelegationTokenContainer container) throws Exception {
LOG.info("New delegation tokens arrived, sending them to receivers");
for (Map.Entry<String, byte[]> entry : container.getTokens().entrySet()) {
String serviceName = entry.getKey();
byte[] tokens = entry.getValue();
if (!delegationTokenReceivers.containsKey(serviceName)) {
throw new IllegalStateException("Tokens arrived for service but no receiver found for it: " + serviceName);
}
try {
delegationTokenReceivers.get(serviceName).onNewTokensObtained(tokens);
} catch (Exception e) {
LOG.warn("Failed to send tokens to delegation token receiver {}", serviceName, e);
}
}
LOG.info("Delegation tokens sent to receivers");
} | 3.26 |
flink_ValueSerializer_snapshotConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<T> snapshotConfiguration() {
return new ValueSerializerSnapshot<>(type);
} | 3.26 |
flink_ValueSerializer_isImmutableType_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
} | 3.26 |
flink_ValueSerializer_readObject_rdh | // --------------------------------------------------------------------------------------------
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
// kryoRegistrations may be null if this value serializer is deserialized from an old
// version
if (kryoRegistrations == null) {
this.kryoRegistrations = asKryoRegistrations(type);
}
} | 3.26 |
flink_ValueSerializer_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return this.type.hashCode();
} | 3.26 |
flink_TableConfig_get_rdh | /**
* {@inheritDoc }
*
* <p>This method gives read-only access to the full configuration. However,
* application-specific configuration has precedence. Configuration of outer layers is used for
* defaults and fallbacks. See the docs of {@link TableConfig} for more information.
*
* @param option
* metadata of the option to read
* @param <T>
* type of the value to read
* @return read value or {@link ConfigOption#defaultValue()} if not found
*/
@Override
public <T> T get(ConfigOption<T> option) {
return f0.getOptional(option).orElseGet(() -> rootConfiguration.get(option));
} | 3.26 |
flink_TableConfig_setSqlDialect_rdh | /**
* Sets the current SQL dialect to parse a SQL query. Flink's SQL behavior by default.
*/
public void setSqlDialect(SqlDialect sqlDialect) {
set(TableConfigOptions.TABLE_SQL_DIALECT, sqlDialect.name().toLowerCase());
} | 3.26 |
flink_TableConfig_setPlannerConfig_rdh | /**
* Sets the configuration of Planner for Table API and SQL queries. Changing the configuration
* has no effect after the first query has been defined.
*/
public void setPlannerConfig(PlannerConfig plannerConfig) {
this.plannerConfig = Preconditions.checkNotNull(plannerConfig);
} | 3.26 |
flink_TableConfig_getIdleStateRetention_rdh | /**
*
* @return The duration until state which was not updated will be retained.
*/
public Duration getIdleStateRetention() {
return f0.get(ExecutionConfigOptions.IDLE_STATE_RETENTION);
} | 3.26 |
flink_TableConfig_addJobParameter_rdh | /**
* Sets a custom user parameter that can be accessed via {@link FunctionContext#getJobParameter(String, String)}.
*
* <p>This will add an entry to the current value of {@link PipelineOptions#GLOBAL_JOB_PARAMETERS}.
*
* <p>It is also possible to set multiple parameters at once, which will override any previously
* set parameters:
*
* <pre>{@code Map<String, String> params = ...
* TableConfig config = tEnv.getConfig();
* config.set(PipelineOptions.GLOBAL_JOB_PARAMETERS, params);}</pre>
*/
public void addJobParameter(String key, String value) {
final Map<String, String> params = getOptional(PipelineOptions.GLOBAL_JOB_PARAMETERS).map(HashMap::new).orElseGet(HashMap::new);
params.put(key, value);
set(PipelineOptions.GLOBAL_JOB_PARAMETERS, params);
} | 3.26 |
flink_TableConfig_setRootConfiguration_rdh | /**
* Sets the given configuration as {@link #rootConfiguration}, which contains any configuration
* set in the execution context. See the docs of {@link TableConfig} for more information.
*
* @param rootConfiguration
* root configuration to be set
*/
@Internal
public void setRootConfiguration(ReadableConfig rootConfiguration) {
this.rootConfiguration = rootConfiguration;
} | 3.26 |
flink_TableConfig_setMaxGeneratedCodeLength_rdh | /**
* Sets current threshold where generated code will be split into sub-function calls. Java has a
* maximum method length of 64 KB. This setting allows for finer granularity if necessary.
*
* <p>Default value is 4000 instead of 64KB as by default JIT refuses to work on methods with
* more than 8K byte code.
*/
public void setMaxGeneratedCodeLength(Integer maxGeneratedCodeLength) {
this.f0.setInteger(TableConfigOptions.MAX_LENGTH_GENERATED_CODE, maxGeneratedCodeLength);
} | 3.26 |
flink_TableConfig_getMaxIdleStateRetentionTime_rdh | /**
* NOTE: Currently the concept of min/max idle state retention has been deprecated and only idle
* state retention time is supported. The min idle state retention is regarded as idle state
* retention and the max idle state retention is derived from idle state retention as 1.5 x idle
* state retention.
*
* @return The maximum time until state which was not updated will be retained.
* @deprecated use {@link #getIdleStateRetention()} instead.
*/
@Deprecated
public long getMaxIdleStateRetentionTime() {
return (getMinIdleStateRetentionTime() * 3) / 2;
} | 3.26 |
flink_TableConfig_getLocalTimeZone_rdh | /**
* Returns the current session time zone id. It is used when converting to/from {@code TIMESTAMP
* WITH LOCAL TIME ZONE}. See {@link #setLocalTimeZone(ZoneId)} for more details.
*
* @see org.apache.flink.table.types.logical.LocalZonedTimestampType
*/
public ZoneId getLocalTimeZone() {
final String zone = f0.getString(TableConfigOptions.LOCAL_TIME_ZONE);
if (TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)) {
return ZoneId.systemDefault();
}
validateTimeZone(zone);
return ZoneId.of(zone);
} | 3.26 |
flink_TableConfig_getConfiguration_rdh | /**
* Gives direct access to the underlying application-specific key-value map for advanced
* configuration.
*/
public Configuration getConfiguration() {
return f0;
} | 3.26 |
flink_TableConfig_setIdleStateRetentionTime_rdh | /**
* Specifies a minimum and a maximum time interval for how long idle state, i.e., state which
* was not updated, will be retained. State will never be cleared until it was idle for less
* than the minimum time and will never be kept if it was idle for more than the maximum time.
*
* <p>When new data arrives for previously cleaned-up state, the new data will be handled as if
* it was the first data. This can result in previous results being overwritten.
*
* <p>Set to 0 (zero) to never clean-up the state.
*
* <p>NOTE: Cleaning up state requires additional bookkeeping which becomes less expensive for
* larger differences of minTime and maxTime. The difference between minTime and maxTime must be
* at least 5 minutes.
*
* <p>NOTE: Currently maxTime will be ignored and it will automatically derived from minTime as
* 1.5 x minTime.
*
* @param minTime
* The minimum time interval for which idle state is retained. Set to 0 (zero) to
* never clean-up the state.
* @param maxTime
* The maximum time interval for which idle state is retained. Must be at least 5
* minutes greater than minTime. Set to 0 (zero) to never clean-up the state.
* @deprecated use {@link #setIdleStateRetention(Duration)} instead.
*/
@Deprecated
public void setIdleStateRetentionTime(Time minTime, Time maxTime) {
if (((maxTime.toMilliseconds() - minTime.toMilliseconds()) < 300000) && (!((maxTime.toMilliseconds() == 0) && (minTime.toMilliseconds() == 0)))) {
throw new IllegalArgumentException(((("Difference between minTime: " + minTime) + " and maxTime: ") + maxTime) + " should be at least 5 minutes.");
}
setIdleStateRetention(Duration.ofMillis(minTime.toMilliseconds()));
} | 3.26 |
flink_TableConfig_getMaxGeneratedCodeLength_rdh | /**
* Returns the current threshold where generated code will be split into sub-function calls.
* Java has a maximum method length of 64 KB. This setting allows for finer granularity if
* necessary.
*
* <p>Default value is 4000 instead of 64KB as by default JIT refuses to work on methods with
* more than 8K byte code.
*/
public Integer getMaxGeneratedCodeLength() {
return this.f0.getInteger(TableConfigOptions.MAX_LENGTH_GENERATED_CODE);
} | 3.26 |
flink_TableConfig_getPlannerConfig_rdh | /**
* Returns the current configuration of Planner for Table API and SQL queries.
*/
public PlannerConfig getPlannerConfig() {
return plannerConfig;
} | 3.26 |
flink_TableConfig_getSqlDialect_rdh | /**
* Returns the current SQL dialect.
*/
public SqlDialect getSqlDialect() {
return SqlDialect.valueOf(get(TableConfigOptions.TABLE_SQL_DIALECT).toUpperCase());
} | 3.26 |
flink_TableConfig_setIdleStateRetention_rdh | /**
* Specifies a retention time interval for how long idle state, i.e., state which was not
* updated, will be retained. State will never be cleared until it was idle for less than the
* retention time and will be cleared on a best effort basis after the retention time.
*
* <p>When new data arrives for previously cleaned-up state, the new data will be handled as if
* it was the first data. This can result in previous results being overwritten.
*
* <p>Set to 0 (zero) to never clean-up the state.
*
* @param duration
* The retention time interval for which idle state is retained. Set to 0 (zero)
* to never clean-up the state.
* @see org.apache.flink.api.common.state.StateTtlConfig
*/
public void setIdleStateRetention(Duration duration) {
f0.set(ExecutionConfigOptions.IDLE_STATE_RETENTION, duration);
} | 3.26 |
flink_TableConfig_addConfiguration_rdh | /**
* Adds the given key-value configuration to the underlying application-specific configuration.
* It overwrites existing keys.
*
* @param configuration
* key-value configuration to be added
*/
public void addConfiguration(Configuration configuration) {
Preconditions.checkNotNull(configuration);
this.f0.addAll(configuration);
} | 3.26 |
flink_TableConfig_set_rdh | /**
* Sets an application-specific string-based value for the given string-based key.
*
* <p>The value will be parsed by the framework on access.
*
* <p>This method exists for convenience when configuring a session with string-based
* properties. Use {@link #set(ConfigOption, Object)} for more type-safety and inline
* documentation.
*
* @see TableConfigOptions
* @see ExecutionConfigOptions
* @see OptimizerConfigOptions
*/
public TableConfig set(String key, String value) {
f0.setString(key, value);
return this;
} | 3.26 |
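A hedged usage sketch combining the string-based setter above with the typed idle-state setter from an earlier row; TableConfig.getDefault() and the mini-batch option key are assumptions drawn from general Flink usage (in practice the config would come from tEnv.getConfig()).

```java
import org.apache.flink.table.api.TableConfig;

import java.time.Duration;

public class TableConfigSketch {
    public static void main(String[] args) {
        // Standalone instance for illustration; usually obtained via tEnv.getConfig().
        TableConfig config = TableConfig.getDefault();

        // String-based configuration, parsed by the framework on access.
        config.set("table.exec.mini-batch.enabled", "true");

        // Typed configuration via the dedicated setter shown above.
        config.setIdleStateRetention(Duration.ofHours(1));

        System.out.println(config.getIdleStateRetention()); // PT1H
    }
}
```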
flink_TableConfig_setLocalTimeZone_rdh | /**
* Sets the current session time zone id. It is used when converting to/from {@link DataTypes#TIMESTAMP_WITH_LOCAL_TIME_ZONE()}. Internally, timestamps with local time zone are
* always represented in the UTC time zone. However, when converting to data types that don't
* include a time zone (e.g. TIMESTAMP, TIME, or simply STRING), the session time zone is used
* during conversion.
*
* <p>Example:
*
* <pre>{@code TableConfig config = tEnv.getConfig();
* config.setLocalTimeZone(ZoneOffset.ofHours(2));
* tEnv.executeSql("CREATE TABLE testTable (id BIGINT, tmstmp TIMESTAMP WITH LOCAL TIME ZONE)");
* tEnv.executeSql("INSERT INTO testTable VALUES ((1, '2000-01-01 2:00:00'), (2, TIMESTAMP '2000-01-01 2:00:00'))");
* tEnv.executeSql("SELECT * FROM testTable"); // query with local time zone set to UTC+2}</pre>
*
* <p>should produce:
*
* <pre>
* =============================
* id | tmstmp
* =============================
* 1 | 2000-01-01 2:00:00'
* 2 | 2000-01-01 2:00:00'
* </pre>
*
* <p>If we change the local time zone and query the same table:
*
* <pre>{@code config.setLocalTimeZone(ZoneOffset.ofHours(0));
* tEnv.executeSql("SELECT * FROM testTable"); // query with local time zone set to UTC+0}</pre>
*
* <p>we should get:
*
* <pre>
* =============================
* id | tmstmp
* =============================
* 1 | 2000-01-01 0:00:00'
* 2 | 2000-01-01 0:00:00'
* </pre>
*
* @see org.apache.flink.table.types.logical.LocalZonedTimestampType
*/
public void setLocalTimeZone(ZoneId zoneId) {
final String zone;
if (zoneId instanceof ZoneOffset) {
// Give ZoneOffset a timezone for backwards compatibility reasons.
// In general, advertising either TZDB ID, GMT+xx:xx, or UTC is the best we can do.
zone = ZoneId.ofOffset("GMT", ((ZoneOffset) (zoneId))).toString();
} else {
zone = zoneId.toString();
}
validateTimeZone(zone);
f0.setString(TableConfigOptions.LOCAL_TIME_ZONE, zone);
} | 3.26 |
flink_TableConfig_getMinIdleStateRetentionTime_rdh | /**
* NOTE: Currently the concept of min/max idle state retention has been deprecated and only idle
* state retention time is supported. The min idle state retention is regarded as idle state
* retention and the max idle state retention is derived from idle state retention as 1.5 x idle
* state retention.
*
* @return The minimum time until state which was not updated will be retained.
* @deprecated use {@link #getIdleStateRetention()} instead.
*/
@Deprecated
public long getMinIdleStateRetentionTime() {
return f0.get(ExecutionConfigOptions.IDLE_STATE_RETENTION).toMillis();
} | 3.26 |
flink_DataInputDecoder_readFixed_rdh | // --------------------------------------------------------------------------------------------
// bytes
// --------------------------------------------------------------------------------------------
@Override
public void readFixed(byte[] bytes, int start, int length) throws IOException {
in.readFully(bytes, start, length);
} | 3.26 |
flink_DataInputDecoder_readString_rdh | // --------------------------------------------------------------------------------------------
// strings
// --------------------------------------------------------------------------------------------
@Override
public Utf8 readString(Utf8 old) throws IOException {
int length = readInt();
Utf8 result = (old != null) ? old : new Utf8();
result.setByteLength(length);
if (length > 0) {
in.readFully(result.getBytes(), 0, length);
}
return result;
} | 3.26 |
flink_DataInputDecoder_readArrayStart_rdh | // --------------------------------------------------------------------------------------------
// collection types
// --------------------------------------------------------------------------------------------
@Override
public long readArrayStart() throws IOException {
return readVarLongCount(in);
} | 3.26 |
flink_SecurityOptions_isRestSSLAuthenticationEnabled_rdh | /**
* Checks whether mutual SSL authentication for the external REST endpoint is enabled.
*/
public static boolean isRestSSLAuthenticationEnabled(Configuration sslConfig) {
checkNotNull(sslConfig, "sslConfig");
return isRestSSLEnabled(sslConfig) && sslConfig.getBoolean(SSL_REST_AUTHENTICATION_ENABLED);
} | 3.26 |
flink_SecurityOptions_isRestSSLEnabled_rdh | /**
* Checks whether SSL for the external REST endpoint is enabled.
*/
public static boolean isRestSSLEnabled(Configuration sslConfig) {
@SuppressWarnings("deprecation")
final boolean fallbackFlag = sslConfig.getBoolean(SSL_ENABLED);
return sslConfig.getBoolean(SSL_REST_ENABLED, fallbackFlag);
} | 3.26 |
flink_TaskSlotTable_freeSlot_rdh | /**
* Try to free the slot. If the slot is empty it will set the state of the task slot to free and
* return its index. If the slot is not empty, then it will set the state of the task slot to
* releasing, fail all tasks and return -1.
*
* @param allocationId
* identifying the task slot to be freed
* @throws SlotNotFoundException
* if there is not task slot for the given allocation id
* @return Index of the freed slot if the slot could be freed; otherwise -1
*/
default int freeSlot(AllocationID allocationId) throws SlotNotFoundException {
return freeSlot(allocationId, new Exception("The task slot of this task is being freed."));
} | 3.26 |
flink_FlinkTestcontainersConfigurator_configure_rdh | /**
* Configures and creates {@link FlinkContainers}.
*/
public FlinkContainers configure() {
// Create temporary directory for building Flink image
final Path imageBuildingTempDir;
try {
imageBuildingTempDir = Files.createTempDirectory("flink-image-build");
} catch (IOException e) {
throw new RuntimeException("Failed to create temporary directory",
e);
}
// Build JobManager
final GenericContainer<?> jobManager = configureJobManagerContainer(imageBuildingTempDir);
// Build TaskManagers
final List<GenericContainer<?>> taskManagers = configureTaskManagerContainers(imageBuildingTempDir);
// Setup Zookeeper HA
GenericContainer<?> zookeeper = null;
// Mount HA storage to JobManager
if (flinkContainersSettings.isZookeeperHA()) {
zookeeper = m0();
createTempDirAndMountToContainer("flink-recovery", flinkContainersSettings.getHaStoragePath(), jobManager);
}
// Mount checkpoint storage to JobManager
createTempDirAndMountToContainer("flink-checkpoint", flinkContainersSettings.getCheckpointPath(), jobManager); return new FlinkContainers(jobManager, taskManagers, zookeeper, flinkContainersSettings.getFlinkConfig());
} | 3.26 |
flink_CliResultView_init_rdh | // --------------------------------------------------------------------------------------------
@Override
protected void init() {
refreshThread.start();
} | 3.26 |