| name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
---|---|---|
flink_SortUtil_putTimestampNormalizedKey_rdh | /**
 * Supports only the compact-precision TimestampData.
*/
public static void putTimestampNormalizedKey(TimestampData value, MemorySegment target, int offset, int numBytes) {
assert value.getNanoOfMillisecond() == 0;
putLongNormalizedKey(value.getMillisecond(), target, offset, numBytes);
} | 3.26 |
flink_SortUtil_putDoubleNormalizedKey_rdh | /**
* See http://stereopsis.com/radix.html for more details.
*/
public static void putDoubleNormalizedKey(double value, MemorySegment target, int offset, int numBytes) {
long lValue = Double.doubleToLongBits(value);
lValue ^= (lValue >> (Long.SIZE - 1)) | Long.MIN_VALUE;
NormalizedKeyUtil.putUnsignedLongNormalizedKey(lValue, target, offset, numBytes);
} | 3.26 |
flink_SortUtil_putDecimalNormalizedKey_rdh | /**
 * Supports only the compact-precision decimal.
*/
public static void putDecimalNormalizedKey(DecimalData record, MemorySegment target, int offset, int len) {
assert record.isCompact();
putLongNormalizedKey(record.toUnscaledLong(), target, offset, len);
} | 3.26 |
flink_SortUtil_putFloatNormalizedKey_rdh | /**
* See http://stereopsis.com/radix.html for more details.
*/
public static void putFloatNormalizedKey(float value, MemorySegment target, int offset, int numBytes) {
int iValue = Float.floatToIntBits(value);
iValue ^= (iValue >> (Integer.SIZE - 1)) | Integer.MIN_VALUE;
NormalizedKeyUtil.putUnsignedIntegerNormalizedKey(iValue, target, offset, numBytes);
} | 3.26 |
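The two normalized-key methods above rely on the IEEE 754 bit trick referenced at stereopsis.com/radix.html: flip only the sign bit for non-negative values and all bits for negative ones, so the transformed bits sort in numeric order when compared as unsigned integers. A minimal, self-contained illustration of that idea; the class and method names below are hypothetical and not part of the snippets:

```java
public class SortableDoubleBitsDemo {
    /** Maps a double to bits whose unsigned order matches the numeric order of the double. */
    static long toSortableBits(double value) {
        long bits = Double.doubleToLongBits(value);
        // non-negative: flip only the sign bit; negative: flip all bits
        return bits ^ ((bits >> (Long.SIZE - 1)) | Long.MIN_VALUE);
    }

    public static void main(String[] args) {
        double[] ordered = {-Double.MAX_VALUE, -1.5, -0.0, 0.0, 2.25, Double.MAX_VALUE};
        for (int i = 1; i < ordered.length; i++) {
            long prev = toSortableBits(ordered[i - 1]);
            long curr = toSortableBits(ordered[i]);
            // unsigned comparison of the transformed bits preserves the numeric order
            if (Long.compareUnsigned(prev, curr) > 0) {
                throw new AssertionError("order not preserved at index " + i);
            }
        }
        System.out.println("transformed bits sort like the original doubles");
    }
}
```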
flink_SortUtil_maxNormalizedKey_rdh | /**
 * The max unsigned byte value is -1 (0xFF).
*/
public static void maxNormalizedKey(MemorySegment target, int offset, int numBytes) {
// write max value.
for (int i = 0; i < numBytes; i++) {
target.put(offset + i, ((byte) (-1)));
}
} | 3.26 |
flink_SortUtil_putStringNormalizedKey_rdh | /**
 * UTF-8 encoding preserves the order under byte-wise comparison.
*/
public static void putStringNormalizedKey(StringData value, MemorySegment target, int offset, int numBytes) {
BinaryStringData binaryString = ((BinaryStringData) (value));
final int limit = offset + numBytes;
final int end = binaryString.getSizeInBytes();
for (int i = 0; (i < end) && (offset < limit); i++) {
target.put(offset++, binaryString.byteAt(i));
}
for (int i = offset; i < limit; i++) {
target.put(i, ((byte) (0)));
}
} | 3.26 |
flink_AbstractFileSource_monitorContinuously_rdh | /**
* Sets this source to streaming ("continuous monitoring") mode.
*
* <p>This makes the source a "continuous streaming" source that keeps running, monitoring
* for new files, and reads these files when they appear and are discovered by the
* monitoring.
*
 * <p>The interval in which the source checks for new files is the {@code discoveryInterval}.
 * Shorter intervals mean that files are discovered more quickly, but also imply more frequent
 * listing or directory traversal of the file system / object store.
*/
public SELF monitorContinuously(Duration discoveryInterval) {
checkNotNull(discoveryInterval, "discoveryInterval");
checkArgument(!(discoveryInterval.isNegative() || discoveryInterval.isZero()), "discoveryInterval must be > 0");
this.continuousSourceSettings = new ContinuousEnumerationSettings(discoveryInterval);
return self();
} | 3.26 |
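As a usage note for monitorContinuously(Duration), here is a hedged sketch of how a file source is typically switched into continuous-monitoring mode. The builder entry point and format class (FileSource.forRecordStreamFormat, TextLineInputFormat) are recalled from Flink's file connector and may differ across versions; the input path is a placeholder:

```java
import java.time.Duration;

import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.core.fs.Path;

public class ContinuousFileSourceSketch {
    public static void main(String[] args) {
        // Streaming mode: re-list the directory every 30 seconds for newly added files.
        FileSource<String> source =
                FileSource.forRecordStreamFormat(new TextLineInputFormat(), new Path("/tmp/input"))
                        .monitorContinuously(Duration.ofSeconds(30))
                        .build();

        // Because continuous monitoring is enabled, getBoundedness() reports CONTINUOUS_UNBOUNDED.
        System.out.println(source.getBoundedness());
    }
}
```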
flink_AbstractFileSource_createSplitEnumerator_rdh | // ------------------------------------------------------------------------
// helpers
// ------------------------------------------------------------------------
private SplitEnumerator<SplitT, PendingSplitsCheckpoint<SplitT>> createSplitEnumerator(SplitEnumeratorContext<SplitT> context, FileEnumerator enumerator, Collection<FileSourceSplit> splits, @Nullable Collection<Path> alreadyProcessedPaths) {
// cast this to a collection of FileSourceSplit because the enumerator code works
// non-generically just on that base split type
@SuppressWarnings("unchecked")
final SplitEnumeratorContext<FileSourceSplit> fileSplitContext = ((SplitEnumeratorContext<FileSourceSplit>) (context));
final FileSplitAssigner splitAssigner = assignerFactory.create(splits);
if (continuousEnumerationSettings == null) {
// bounded case
return castGeneric(new StaticFileSplitEnumerator(fileSplitContext, splitAssigner));
} else {
// unbounded case
if (alreadyProcessedPaths == null) {
alreadyProcessedPaths = splitsToPaths(splits);
}
return castGeneric(new ContinuousFileSplitEnumerator(fileSplitContext, enumerator, splitAssigner, inputPaths, alreadyProcessedPaths, continuousEnumerationSettings.getDiscoveryInterval().toMillis()));
}
} | 3.26 |
flink_AbstractFileSource_setFileEnumerator_rdh | /**
* Configures the {@link FileEnumerator} for the source. The File Enumerator is responsible
* for selecting from the input path the set of files that should be processed (and which to
* filter out). Furthermore, the File Enumerator may split the files further into
* sub-regions, to enable parallelization beyond the number of files.
*/
public SELF setFileEnumerator(FileEnumerator.Provider fileEnumerator) {
this.fileEnumerator = checkNotNull(fileEnumerator);
return self();
} | 3.26 |
flink_AbstractFileSource_processStaticFileSet_rdh | /**
* Sets this source to bounded (batch) mode.
*
* <p>In this mode, the source processes the files that are under the given paths when the
* application is started. Once all files are processed, the source will finish.
*
* <p>This setting is also the default behavior. This method is mainly here to "switch back"
* to bounded (batch) mode, or to make it explicit in the source construction.
*/
public SELF processStaticFileSet() {
this.continuousSourceSettings = null;
return self();
} | 3.26 |
flink_AbstractFileSource_setSplitAssigner_rdh | /**
* Configures the {@link FileSplitAssigner} for the source. The File Split Assigner
* determines which parallel reader instance gets which {@link FileSourceSplit}, and in
* which order these splits are assigned.
*/
public SELF setSplitAssigner(FileSplitAssigner.Provider splitAssigner) {
this.splitAssigner = checkNotNull(splitAssigner);
return self();
} | 3.26 |
flink_AbstractFileSource_getBoundedness_rdh | // ------------------------------------------------------------------------
// Source API Methods
// ------------------------------------------------------------------------
@Override
public Boundedness getBoundedness() {
return continuousEnumerationSettings == null ? Boundedness.BOUNDED : Boundedness.CONTINUOUS_UNBOUNDED;
} | 3.26 |
flink_AbstractFileSource_getAssignerFactory_rdh | // ------------------------------------------------------------------------
// Getters
// ------------------------------------------------------------------------
public Provider getAssignerFactory() {
return assignerFactory;
} | 3.26 |
flink_RemoteTierProducerAgent_releaseAllResources_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void releaseAllResources() {
cacheDataManager.release();
} | 3.26 |
flink_StreamNonDeterministicUpdatePlanVisitor_extractSourceMapping_rdh | /**
 * Extracts the mapping from output field index to source field indices for the given projects.
*/
private Map<Integer, List<Integer>> extractSourceMapping(final List<RexNode> projects) {
Map<Integer, List<Integer>> mapOutFromInPos = new HashMap<>();
for (int index = 0; index < projects.size(); index++) {
RexNode expr = projects.get(index);
mapOutFromInPos.put(index, FlinkRexUtil.findAllInputRefs(expr).stream().mapToInt(RexSlot::getIndex).boxed().collect(Collectors.toList()));
}
return mapOutFromInPos;
} | 3.26 |
flink_StreamNonDeterministicUpdatePlanVisitor_inputInsertOnly_rdh | // helper methods
private boolean inputInsertOnly(final StreamPhysicalRel rel) {
return ChangelogPlanUtils.inputInsertOnly(rel);
} | 3.26 |
flink_DualInputOperator_setSecondInput_rdh | /**
* Clears all previous connections and connects the second input to the task wrapped in this
 * contract.
*
* @param input
* The contract that is connected as the second input.
*/
public void setSecondInput(Operator<IN2> input) {
this.input2 = input;
}
/**
* Sets the first input to the union of the given operators.
*
* @param inputs
* The operator(s) that form the first input.
* @deprecated This method will be removed in future versions. Use the {@link Union} | 3.26 |
flink_DualInputOperator_clearSecondInput_rdh | /**
* Clears this operator's second input.
*/
public void clearSecondInput() {
this.input2 = null;
} | 3.26 |
flink_DualInputOperator_accept_rdh | // --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<Operator<?>> visitor) {
boolean descend = visitor.preVisit(this);
if (descend) {
this.input1.accept(visitor);
this.input2.accept(visitor);
for (Operator<?> c : this.broadcastInputs.values()) {
c.accept(visitor);
}
visitor.postVisit(this);
}
} | 3.26 |
flink_DualInputOperator_clearFirstInput_rdh | /**
* Clears this operator's first input.
*/
public void clearFirstInput() {
this.input1 = null;
} | 3.26 |
flink_DualInputOperator_setFirstInput_rdh | /**
* Clears all previous connections and connects the first input to the task wrapped in this
 * contract.
*
* @param input
* The contract that is connected as the first input.
*/
public void setFirstInput(Operator<IN1> input) {
this.input1 = input;
} | 3.26 |
flink_DualInputOperator_getNumberOfInputs_rdh | // --------------------------------------------------------------------------------------------
@Override
public final int getNumberOfInputs() {
return 2;
} | 3.26 |
flink_DualInputOperator_getSemanticProperties_rdh | // --------------------------------------------------------------------------------------------
public DualInputSemanticProperties getSemanticProperties() {
return this.semanticProperties;
} | 3.26 |
flink_DualInputOperator_getSecondInput_rdh | /**
* Returns the second input, or null, if none is set.
*
* @return The contract's second input.
*/
public Operator<IN2> getSecondInput() {
return this.input2;
} | 3.26 |
flink_LogicalFile_discardWithCheckpointId_rdh | /**
* When a checkpoint that uses this logical file is subsumed or aborted, discard this logical
 * file. If this file is used by a later checkpoint, the file should not be discarded. Note that
 * the removal of the logical file may cause the deletion of the physical file.
*
* @param checkpointId
* the checkpoint that is notified subsumed or aborted.
* @throws IOException
* if anything goes wrong with file system.
*/
public void discardWithCheckpointId(long checkpointId) throws IOException {
if ((!discarded) && (checkpointId >= lastUsedCheckpointID)) {
physicalFile.decRefCount();
discarded = true;
}
} | 3.26 |
flink_LogicalFile_advanceLastCheckpointId_rdh | /**
 * A logical file may be shared across checkpoints (especially for shared state). When this logical
* file is used/reused by a checkpoint, update the last checkpoint id that uses this logical
* file.
*
* @param checkpointId
* the checkpoint that uses this logical file.
*/
public void advanceLastCheckpointId(long checkpointId) {
if (checkpointId > lastUsedCheckpointID) {
this.lastUsedCheckpointID = checkpointId;
}
} | 3.26 |
flink_PrintingOutputFormat_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return writer.toString();
} | 3.26 |
flink_CheckpointStorageLocationReference_getReferenceBytes_rdh | // ------------------------------------------------------------------------
/**
* Gets the reference bytes.
*
* <p><b>Important:</b> For efficiency, this method does not make a defensive copy, so the
* caller must not modify the bytes in the array.
*/
public byte[] getReferenceBytes() {
// return a non null object always
return encodedReference != null ? encodedReference : new byte[0];
} | 3.26 |
flink_CheckpointStorageLocationReference_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
return encodedReference == null ? 2059243550 : Arrays.hashCode(encodedReference);
} | 3.26 |
flink_CheckpointStorageLocationReference_readResolve_rdh | /**
* readResolve() preserves the singleton property of the default value.
*/
protected final Object readResolve() throws ObjectStreamException {
return encodedReference == null ? DEFAULT : this;
} | 3.26 |
flink_CheckpointStorageLocationReference_isDefaultReference_rdh | /**
* Returns true, if this object is the default reference.
*/
public boolean isDefaultReference() {
return encodedReference == null;
} | 3.26 |
flink_FlinkImageBuilder_asTaskManager_rdh | /**
* Use this image for building a TaskManager.
*/
public FlinkImageBuilder asTaskManager() {
checkStartupCommandNotSet();
this.startupCommand = "bin/taskmanager.sh start-foreground && tail -f /dev/null";
this.imageNameSuffix = "taskmanager";
return this;
} | 3.26 |
flink_FlinkImageBuilder_m0_rdh | /**
* Sets log4j properties.
*
* <p>Containers will use "log4j-console.properties" under flink-dist as the base configuration
* of loggers. Properties specified by this method will be appended to the config file, or
 * overwrite the property if it already exists in the base config file.
*/
public FlinkImageBuilder m0(Properties logProperties) {
this.f0.putAll(logProperties);
return this;
} | 3.26 |
flink_FlinkImageBuilder_copyFile_rdh | /**
* Copies file into the image.
*/
public FlinkImageBuilder copyFile(Path localPath, Path containerPath) {
filesToCopy.put(localPath, containerPath);
return this;
} | 3.26 |
flink_FlinkImageBuilder_setTimeout_rdh | /**
* Sets timeout for building the image.
*/
public FlinkImageBuilder setTimeout(Duration timeout) {
this.timeout = timeout;
return this;
} | 3.26 |
flink_FlinkImageBuilder_setImageNamePrefix_rdh | /**
 * Sets the name prefix of the image being built.
*
* <p>If the name is not specified, {@link #DEFAULT_IMAGE_NAME_BUILD_PREFIX} will be used.
*/
public FlinkImageBuilder setImageNamePrefix(String imageNamePrefix) {
this.f1 = imageNamePrefix;
return this;
} | 3.26 |
flink_FlinkImageBuilder_setConfiguration_rdh | /**
* Sets Flink configuration. This configuration will be used for generating flink-conf.yaml for
* configuring JobManager and TaskManager.
*/
public FlinkImageBuilder setConfiguration(Configuration conf) {
this.conf = conf;
return this;
} | 3.26 |
flink_FlinkImageBuilder_useCustomStartupCommand_rdh | /**
* Use a custom command for starting up the container.
*/
public FlinkImageBuilder useCustomStartupCommand(String command) {
checkStartupCommandNotSet();
this.startupCommand = command;
this.imageNameSuffix = "custom";
return this;
} | 3.26 |
flink_FlinkImageBuilder_asJobManager_rdh | /**
* Use this image for building a JobManager.
*/
public FlinkImageBuilder asJobManager() {
checkStartupCommandNotSet();
this.startupCommand = "bin/jobmanager.sh start-foreground && tail -f /dev/null";
this.imageNameSuffix = "jobmanager";
return this;
} | 3.26 |
flink_FlinkImageBuilder_setTempDirectory_rdh | /**
* Sets temporary path for holding temp files when building the image.
*
* <p>Note that this parameter is required, because the builder doesn't have lifecycle
* management, and it is the caller's responsibility to create and remove the temp directory.
*/
public FlinkImageBuilder setTempDirectory(Path tempDirectory) {
this.tempDirectory = tempDirectory;
return this;
} | 3.26 |
flink_FlinkImageBuilder_build_rdh | /**
* Build the image.
*/
public ImageFromDockerfile build() throws ImageBuildException {
sanityCheck();
final String finalImageName = (f1 + "-") + imageNameSuffix;
try {
if (baseImage == null) {
baseImage = FLINK_BASE_IMAGE_BUILD_NAME;
if (flinkDist == null) {
flinkDist = FileUtils.findFlinkDist();
}
// Build base image first
buildBaseImage(flinkDist);
}
final Path flinkConfFile = createTemporaryFlinkConfFile(conf, tempDirectory);
final Path log4jPropertiesFile = createTemporaryLog4jPropertiesFile(tempDirectory);
// Copy flink-conf.yaml into image
filesToCopy.put(flinkConfFile, Paths.get(flinkHome, "conf", GlobalConfiguration.FLINK_CONF_FILENAME));
filesToCopy.put(log4jPropertiesFile, Paths.get(flinkHome, "conf", LOG4J_PROPERTIES_FILENAME));
final ImageFromDockerfile image = new ImageFromDockerfile(finalImageName).withDockerfileFromBuilder(builder -> {
// Build from base image
builder.from(baseImage);
// Copy files into image
filesToCopy.forEach((from, to) -> builder.copy(to.toString(), to.toString()));
builder.cmd(startupCommand);
});
filesToCopy.forEach((from, to) -> image.withFileFromPath(to.toString(), from));
return image;
} catch (Exception e) {
throw new ImageBuildException(finalImageName, e);
}
} | 3.26 |
flink_FlinkImageBuilder_setBaseImage_rdh | /**
* Sets base image.
*
* @param baseImage
* The base image.
* @return A reference to this Builder.
*/
public FlinkImageBuilder setBaseImage(String baseImage) {
this.baseImage = baseImage;
return this;
} | 3.26 |
flink_FlinkImageBuilder_setFlinkHome_rdh | /**
* Sets flink home.
*
* @param flinkHome
* The flink home.
 * @return A reference to this Builder.
*/
public FlinkImageBuilder setFlinkHome(String flinkHome) {
this.flinkHome = flinkHome;
return this;
} | 3.26 |
flink_FlinkImageBuilder_buildBaseImage_rdh | // ----------------------- Helper functions -----------------------
private void buildBaseImage(Path flinkDist) throws TimeoutException {
if (baseImageExists()) {
return;
}
LOG.info("Building Flink base image with flink-dist at {}", flinkDist);
new ImageFromDockerfile(FLINK_BASE_IMAGE_BUILD_NAME)
        .withDockerfileFromBuilder(builder -> builder.from(("eclipse-temurin:" + getJavaVersionSuffix()) + "-jre-jammy").copy(flinkHome, flinkHome).build())
        .withFileFromPath(flinkHome, flinkDist)
        .get(timeout.toMillis(), TimeUnit.MILLISECONDS);
} | 3.26 |
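Putting the FlinkImageBuilder setters above together, a hedged usage sketch; only methods shown in the snippets are used, while the no-arg constructor and the import of this test utility are assumptions (its package is omitted here):

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;

import org.apache.flink.configuration.Configuration;

public class FlinkImageBuilderSketch {
    public static void main(String[] args) throws Exception {
        // The caller owns the temp directory's lifecycle, as noted in setTempDirectory().
        Path tempDir = Files.createTempDirectory("flink-image-build");

        Configuration conf = new Configuration();

        new FlinkImageBuilder()          // assumed no-arg constructor
                .setTempDirectory(tempDir)
                .setConfiguration(conf)
                .setTimeout(Duration.ofMinutes(5))
                .asJobManager()          // or asTaskManager() / useCustomStartupCommand(...)
                .build();
    }
}
```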
flink_Tuple0Serializer_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
return Tuple0Serializer.class.hashCode();
} | 3.26 |
flink_Tuple0Serializer_duplicate_rdh | // ------------------------------------------------------------------------
@Override
public Tuple0Serializer duplicate() {
return this;
} | 3.26 |
flink_AsynchronousBlockReader_readBlock_rdh | /**
* Issues a read request, which will asynchronously fill the given segment with the next block
* in the underlying file channel. Once the read request is fulfilled, the segment will be added
* to this reader's return queue.
*
* @param segment
* The segment to read the block into.
* @throws IOException
* Thrown, when the reader encounters an I/O error. Due to the asynchronous
* nature of the reader, the exception thrown here may have been caused by an earlier read
* request.
*/
@Override
public void readBlock(MemorySegment segment) throws IOException {
addRequest(new SegmentReadRequest(this, segment));
} | 3.26 |
flink_AsynchronousBlockReader_getNextReturnedBlock_rdh | /**
* Gets the next memory segment that has been filled with data by the reader. This method blocks
* until such a segment is available, or until an error occurs in the reader, or the reader is
* closed.
*
* <p>WARNING: If this method is invoked without any segment ever returning (for example,
* because the {@link #readBlock(MemorySegment)} method has not been invoked appropriately), the
* method may block forever.
*
* @return The next memory segment from the reader's return queue.
* @throws IOException
* Thrown, if an I/O error occurs in the reader while waiting for the
* request to return.
*/
@Override
public MemorySegment getNextReturnedBlock() throws IOException {
try {
while (true) {
final MemorySegment next = this.returnSegments.poll(1000, TimeUnit.MILLISECONDS);
if (next != null) {
return next;
} else {
if (this.closed) {
throw new IOException("The reader has been asynchronously closed.");
}
checkErroneous();
}
}
} catch (InterruptedException iex) {
throw new IOException("Reader was interrupted while waiting for the next returning segment.");
}
} | 3.26 |
flink_AsynchronousBlockReader_getReturnQueue_rdh | /**
* Gets the queue in which the full memory segments are queued after the asynchronous read is
* complete.
*
* @return The queue with the full memory segments.
*/
@Override
public LinkedBlockingQueue<MemorySegment> getReturnQueue() {
return this.returnSegments;
} | 3.26 |
flink_MemoryManager_m1_rdh | /**
* Reserves a memory chunk of a certain size for an owner from this memory manager.
*
* @param owner
* The owner to associate with the memory reservation, for the fallback release.
* @param size
* size of memory to reserve.
* @throws MemoryReservationException
* Thrown, if this memory manager does not have the requested
* amount of memory any more.
*/
public void m1(Object owner, long size) throws MemoryReservationException {
checkMemoryReservationPreconditions(owner, size);
if (size == 0L) {
return;
}
memoryBudget.reserveMemory(size);
reservedMemory.compute(owner, (o, memoryReservedForOwner) -> memoryReservedForOwner == null ? size : memoryReservedForOwner + size);
Preconditions.checkState(!isShutDown, "Memory manager has been concurrently shut down.");
} | 3.26 |
flink_MemoryManager_shutdown_rdh | // ------------------------------------------------------------------------
// Shutdown
// ------------------------------------------------------------------------
/**
* Shuts the memory manager down, trying to release all the memory it managed. Depending on
* implementation details, the memory does not necessarily become reclaimable by the garbage
* collector, because there might still be references to allocated segments in the code that
* allocated them from the memory manager.
*/
public void shutdown() {
if (!isShutDown) {
// mark as shutdown and release memory
isShutDown = true;
reservedMemory.clear();
// go over all allocated segments and release them
for (Set<MemorySegment> segments : allocatedSegments.values()) {
for (MemorySegment seg : segments) {
seg.free();
}
segments.clear();
}
allocatedSegments.clear();
}
} | 3.26 |
flink_MemoryManager_getPageSize_rdh | // ------------------------------------------------------------------------
// Properties, sizes and size conversions
// ------------------------------------------------------------------------
/**
* Gets the size of the pages handled by the memory manager.
*
* @return The size of the pages handled by the memory manager.
 */
public int getPageSize() {
return ((int) (pageSize));
} | 3.26 |
flink_MemoryManager_getSharedMemoryResourceForManagedMemory_rdh | // ------------------------------------------------------------------------
// Shared opaque memory resources
// ------------------------------------------------------------------------
/**
* Acquires a shared memory resource, identified by a type string. If the resource already
* exists, this returns a descriptor to the resource. If the resource does not yet exist, the
* given memory fraction is reserved and the resource is initialized with that size.
*
* <p>The memory for the resource is reserved from the memory budget of this memory manager
* (thus determining the size of the resource), but resource itself is opaque, meaning the
* memory manager does not understand its structure.
*
* <p>The OpaqueMemoryResource object returned from this method must be closed once not used any
* further. Once all acquisitions have closed the object, the resource itself is closed.
*
* <p><b>Important:</b> The failure semantics are as follows: If the memory manager fails to
* reserve the memory, the external resource initializer will not be called. If an exception is
* thrown when the opaque resource is closed (last lease is released), the memory manager will
* still un-reserve the memory to make sure its own accounting is clean. The exception will need
* to be handled by the caller of {@link OpaqueMemoryResource#close()}. For example, if this
* indicates that native memory was not released and the process might thus have a memory leak,
* the caller can decide to kill the process as a result.
*/
public <T extends AutoCloseable> OpaqueMemoryResource<T> getSharedMemoryResourceForManagedMemory(String type, LongFunctionWithException<T, Exception> initializer, double fractionToInitializeWith) throws Exception {
// if we need to allocate the resource (no shared resource allocated, yet), this would be
// the size to use
final long numBytes = computeMemorySize(fractionToInitializeWith);
// initializer and releaser as functions that are pushed into the SharedResources,
// so that the SharedResources can decide in (thread-safely execute) when initialization
// and release should happen
final LongFunctionWithException<T, Exception> reserveAndInitialize = size -> {
try {
reserveMemory(type, size);
} catch (MemoryReservationException e) {
throw new MemoryAllocationException(("Could not create the shared memory resource of size " + size) + ". Not enough memory left to reserve from the slot's managed memory.", e);
}
try {
return initializer.apply(size);
} catch (Throwable t) {
releaseMemory(type, size);
throw t;
}
};
final LongConsumer releaser = size -> releaseMemory(type, size);
// This object identifies the lease in this request. It is used only to identify the release
// operation.
// Using the object to represent the lease is a bit nicer and safer than just using a
// reference counter.
final Object leaseHolder = new Object();
final SharedResources.ResourceAndSize<T> resource = sharedResources.getOrAllocateSharedResource(type, leaseHolder, reserveAndInitialize, numBytes);
// the actual size may theoretically be different from what we requested, if it was allocated
// by someone else before with a different value for fraction (should not happen in practice,
// though).
final long size = resource.size();
final ThrowingRunnable<Exception> disposer = () -> sharedResources.release(type, leaseHolder, releaser);
return new OpaqueMemoryResource<>(resource.resourceHandle(), size, disposer);
} | 3.26 |
flink_MemoryManager_allocatePages_rdh | /**
* Allocates a set of memory segments from this memory manager.
*
* <p>The total allocated memory will not exceed its size limit, announced in the constructor.
*
* @param owner
* The owner to associate with the memory segment, for the fallback release.
* @param target
* The list into which to put the allocated memory pages.
* @param numberOfPages
* The number of pages to allocate.
* @throws MemoryAllocationException
* Thrown, if this memory manager does not have the requested
* amount of memory pages any more.
*/
public void allocatePages(Object owner, Collection<MemorySegment> target, int numberOfPages) throws MemoryAllocationException {
// sanity check
Preconditions.checkNotNull(owner, "The memory owner must not be null.");
Preconditions.checkState(!isShutDown, "Memory manager has been shut down.");
Preconditions.checkArgument(numberOfPages <= totalNumberOfPages, "Cannot allocate more segments %s than the max number %s", numberOfPages, totalNumberOfPages);
// reserve array space, if applicable
if (target instanceof ArrayList) {
((ArrayList<MemorySegment>) (target)).ensureCapacity(numberOfPages);
}
long memoryToReserve = numberOfPages * pageSize;
try {
memoryBudget.reserveMemory(memoryToReserve);
} catch (MemoryReservationException e) {
throw new MemoryAllocationException(String.format("Could not allocate %d pages", numberOfPages), e);
}
Runnable pageCleanup = this::releasePage;
allocatedSegments.compute(owner, (o, currentSegmentsForOwner) -> {
Set<MemorySegment> segmentsForOwner = (currentSegmentsForOwner == null) ? CollectionUtil.newHashSetWithExpectedSize(numberOfPages) : currentSegmentsForOwner;
for (long i = numberOfPages; i > 0; i--) {
MemorySegment segment = allocateOffHeapUnsafeMemory(getPageSize(), owner, pageCleanup);
target.add(segment);
segmentsForOwner.add(segment);
}
return segmentsForOwner;
});
Preconditions.checkState(!isShutDown, "Memory manager has been concurrently shut down.");
} | 3.26 |
flink_MemoryManager_m2_rdh | /**
* Acquires a shared resource, identified by a type string. If the resource already exists, this
* returns a descriptor to the resource. If the resource does not yet exist, the method
* initializes a new resource using the initializer function and given size.
*
 * <p>The resource is opaque, meaning the memory manager does not understand its structure.
*
* <p>The OpaqueMemoryResource object returned from this method must be closed once not used any
* further. Once all acquisitions have closed the object, the resource itself is closed.
 */
public <T extends AutoCloseable> OpaqueMemoryResource<T> m2(String type, LongFunctionWithException<T, Exception> initializer, long numBytes) throws Exception {
// This object identifies the lease in this request. It is used only to identify the release
// operation.
// Using the object to represent the lease is a bit nicer and safer than just using a reference
// counter.
final Object leaseHolder = new Object();
final SharedResources.ResourceAndSize<T> resource = sharedResources.getOrAllocateSharedResource(type, leaseHolder, initializer, numBytes);
final ThrowingRunnable<Exception> disposer = () -> sharedResources.release(type, leaseHolder);
return new OpaqueMemoryResource<>(resource.resourceHandle(), resource.size(), disposer);
} | 3.26 |
flink_MemoryManager_availableMemory_rdh | /**
* Returns the available amount of memory handled by this memory manager.
*
* @return The available amount of memory.
*/
public long availableMemory() {
return memoryBudget.getAvailableMemorySize();
} | 3.26 |
flink_MemoryManager_getMemorySize_rdh | /**
* Returns the total size of memory handled by this memory manager.
*
* @return The total size of memory.
*/
public long getMemorySize() {
return memoryBudget.getTotalMemorySize();
} | 3.26 |
flink_MemoryManager_create_rdh | /**
* Creates a memory manager with the given capacity and given page size.
*
* <p>This is a production version of MemoryManager which checks for memory leaks ({@link #verifyEmpty()}) once the owner of the MemoryManager is ready to dispose.
*
* @param memorySize
* The total size of the off-heap memory to be managed by this memory manager.
* @param pageSize
* The size of the pages handed out by the memory manager.
*/
public static MemoryManager create(long memorySize, int pageSize) {
return new MemoryManager(memorySize, pageSize);
} | 3.26 |
flink_MemoryManager_releaseAllMemory_rdh | /**
* Releases all reserved memory chunks from an owner to this memory manager.
*
* @param owner
* The owner to associate with the memory reservation, for the fallback release.
*/
public void releaseAllMemory(Object owner) {
checkMemoryReservationPreconditions(owner, 0L);
Long memoryReservedForOwner = reservedMemory.remove(owner);
if (memoryReservedForOwner != null) {
memoryBudget.releaseMemory(memoryReservedForOwner);
}
} | 3.26 |
flink_MemoryManager_release_rdh | /**
* Tries to release many memory segments together.
*
* <p>The segment is only freed and made eligible for reclamation by the GC. Each segment will
* be returned to the memory pool, increasing its available limit for the later allocations.
*
* @param segments
* The segments to be released.
*/
public void release(Collection<MemorySegment> segments) {
if (segments == null) {
return;
}
Preconditions.checkState(!isShutDown, "Memory manager has been shut down.");
// since concurrent modifications to the collection
// can disturb the release, we need to try potentially multiple times
boolean successfullyReleased = false;
do {
// We could just pre-sort the segments by owner and release them in a loop by owner.
// It would simplify the code but require this additional step and memory for the sorted
// map of segments by owner.
// Current approach is more complicated but it traverses the input segments only once
// w/o any additional buffer.
// Later, we can check whether the simpler approach actually leads to any performance
// penalty and
// if not, we can change it to the simpler approach for the better readability.
Iterator<MemorySegment> segmentsIterator = segments.iterator();
try {
MemorySegment segment = null;
while ((segment == null) && segmentsIterator.hasNext()) {
segment = segmentsIterator.next();
}
while (segment != null) {
segment = releaseSegmentsForOwnerUntilNextOwner(segment, segmentsIterator);
}
segments.clear();
// the only way to exit the loop
successfullyReleased = true;
} catch (ConcurrentModificationException | NoSuchElementException e) {
// this may happen in the case where an asynchronous
// call releases the memory. fall through the loop and try again
}
} while (!successfullyReleased);
} | 3.26 |
flink_MemoryManager_computeMemorySize_rdh | /**
* Computes the memory size corresponding to the fraction of all memory governed by this
* MemoryManager.
*
* @param fraction
* The fraction of all memory governed by this MemoryManager
* @return The memory size corresponding to the memory fraction
*/
public long computeMemorySize(double fraction) {
validateFraction(fraction);
return ((long) (Math.floor(memoryBudget.getTotalMemorySize() * fraction)));
} | 3.26 |
flink_MemoryManager_releaseAll_rdh | /**
* Releases all memory segments for the given owner.
*
* @param owner
* The owner memory segments are to be released.
*/
public void releaseAll(Object owner) {
if (owner == null) {
return;
}
Preconditions.checkState(!isShutDown, "Memory manager has been shut down.");
// get all segments
Set<MemorySegment> segments = allocatedSegments.remove(owner);
// all segments may have been freed previously individually
if ((segments == null) || segments.isEmpty()) {
return;
}
// free each segment
for (MemorySegment segment : segments) {
segment.free();
}
segments.clear();
} | 3.26 |
flink_MemoryManager_computeNumberOfPages_rdh | /**
* Computes to how many pages the given number of bytes corresponds. If the given number of
* bytes is not an exact multiple of a page size, the result is rounded down, such that a
* portion of the memory (smaller than the page size) is not included.
*
* @param fraction
* the fraction of the total memory per slot
 * @return The number of pages to which the fraction of memory corresponds.
*/
public int computeNumberOfPages(double fraction) {
validateFraction(fraction);
return ((int) (totalNumberOfPages * fraction));
} | 3.26 |
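Tying the MemoryManager snippets together, a hedged usage sketch of the page-based allocation API shown above (create, allocatePages, release, shutdown). The concrete sizes are placeholders; the page size is assumed to need to be a power of two, and the package locations are recalled from Flink's runtime and core modules:

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.memory.MemoryManager;

public class MemoryManagerSketch {
    public static void main(String[] args) throws Exception {
        // 16 MiB of managed memory, handed out in 32 KiB pages.
        MemoryManager memoryManager = MemoryManager.create(16 * 1024 * 1024, 32 * 1024);

        Object owner = new Object();
        List<MemorySegment> pages = new ArrayList<>();
        memoryManager.allocatePages(owner, pages, 8);

        System.out.println("page size:        " + memoryManager.getPageSize());
        System.out.println("available memory: " + memoryManager.availableMemory());

        // Returning the segments makes the memory available for later allocations again.
        memoryManager.release(pages);
        memoryManager.shutdown();
    }
}
```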
flink_RoundRobinOperatorStateRepartitioner_m0_rdh | /**
* Repartition SPLIT_DISTRIBUTE state.
*/
private void m0(Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToDistributeState, int newParallelism, List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) {
int startParallelOp = 0;
// Iterate all named states and repartition one named state at a time per iteration
for (Map.Entry<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> e : nameToDistributeState.entrySet()) {
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>> current = e.getValue();
// Determine actual number of partitions for this named state
int totalPartitions = 0;
for (Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> offsets : current) {
totalPartitions += offsets.f1.getOffsets().length;
}
// Repartition the state across the parallel operator instances
int lstIdx = 0;
int offsetIdx = 0;
int baseFraction = totalPartitions / newParallelism;
int v38 = totalPartitions % newParallelism;
int newStartParallelOp = startParallelOp;
for (int i = 0; i < newParallelism; ++i) {
// Preparation: calculate the actual index considering wrap around
int parallelOpIdx = (i + startParallelOp) % newParallelism;
// Now calculate the number of partitions we will assign to the parallel instance in
// this round ...
int numberOfPartitionsToAssign = baseFraction;
// ... and distribute odd partitions while we still have some, one at a time
if (v38 > 0) {
++numberOfPartitionsToAssign;
--v38;
} else if (v38 == 0) {
// We are out of odd partitions now and begin our next redistribution round with
// the current
// parallel operator to ensure fair load balance
newStartParallelOp = parallelOpIdx;
--v38;
}
// Now start collection the partitions for the parallel instance into this list
while (numberOfPartitionsToAssign > 0) {
Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithOffsets = current.get(lstIdx);
long[] offsets = handleWithOffsets.f1.getOffsets();
int v45 = offsets.length - offsetIdx;
// Repartition offsets
long[] offs;
if (v45 > numberOfPartitionsToAssign) {
offs = Arrays.copyOfRange(offsets, offsetIdx, offsetIdx + numberOfPartitionsToAssign);
offsetIdx += numberOfPartitionsToAssign;
} else {
if (OPTIMIZE_MEMORY_USE) {
handleWithOffsets.f1 = null; // GC
}
offs = Arrays.copyOfRange(offsets, offsetIdx, offsets.length);
offsetIdx = 0;
++lstIdx;
}
numberOfPartitionsToAssign -= v45;
// As a last step we merge partitions that use the same StreamStateHandle in a
// single
// OperatorStateHandle
Map<StreamStateHandle, OperatorStateHandle> mergeMap = mergeMapList.get(parallelOpIdx);
OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithOffsets.f0);
if (operatorStateHandle == null) {
operatorStateHandle = new OperatorStreamStateHandle(CollectionUtil.newHashMapWithExpectedSize(nameToDistributeState.size()), handleWithOffsets.f0);
mergeMap.put(handleWithOffsets.f0, operatorStateHandle);
}
operatorStateHandle.getStateNameToPartitionOffsets().put(e.getKey(), new OperatorStateHandle.StateMetaInfo(offs, Mode.SPLIT_DISTRIBUTE));
}
}
startParallelOp = newStartParallelOp;
e.setValue(null);
}
} | 3.26 |
flink_RoundRobinOperatorStateRepartitioner_collectStates_rdh | /**
* Collect the states from given parallelSubtaskStates with the specific {@code mode}.
*/
private Map<String, StateEntry> collectStates(List<List<OperatorStateHandle>> parallelSubtaskStates, OperatorStateHandle.Mode mode) {
Map<String, StateEntry> states = CollectionUtil.newHashMapWithExpectedSize(parallelSubtaskStates.size());
for (int i = 0; i < parallelSubtaskStates.size(); ++i) {
final int subtaskIndex = i;
List<OperatorStateHandle> subTaskState = parallelSubtaskStates.get(i);
for (OperatorStateHandle operatorStateHandle : subTaskState) {
if (operatorStateHandle == null) {
continue;
}
final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>> partitionOffsetEntries = operatorStateHandle.getStateNameToPartitionOffsets().entrySet();
partitionOffsetEntries.stream().filter(entry -> entry.getValue().getDistributionMode().equals(mode)).forEach(entry -> {
StateEntry stateEntry = states.computeIfAbsent(entry.getKey(), k -> new StateEntry(parallelSubtaskStates.size() * partitionOffsetEntries.size(), parallelSubtaskStates.size()));
stateEntry.addEntry(subtaskIndex, Tuple2.of(operatorStateHandle.getDelegateStateHandle(), entry.getValue()));
});
}
}
return states;
} | 3.26 |
flink_RoundRobinOperatorStateRepartitioner_repartitionUnionState_rdh | /**
* Repartition UNION state.
*/
private void repartitionUnionState(Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> unionState, List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) {
for (Map<StreamStateHandle, OperatorStateHandle> mergeMap : mergeMapList) {
for (Map.Entry<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> e : unionState.entrySet()) {
for (Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithMetaInfo : e.getValue()) {
OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithMetaInfo.f0);
if (operatorStateHandle == null) {
operatorStateHandle = new OperatorStreamStateHandle(CollectionUtil.newHashMapWithExpectedSize(unionState.size()), handleWithMetaInfo.f0);
mergeMap.put(handleWithMetaInfo.f0, operatorStateHandle);
}
operatorStateHandle.getStateNameToPartitionOffsets().put(e.getKey(), handleWithMetaInfo.f1);
}
}
}
} | 3.26 |
flink_RoundRobinOperatorStateRepartitioner_repartitionBroadcastState_rdh | /**
* Repartition BROADCAST state.
*/
private void repartitionBroadcastState(Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> broadcastState, List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) {
int newParallelism = mergeMapList.size();
for (int i = 0; i < newParallelism; ++i) {
final Map<StreamStateHandle, OperatorStateHandle> mergeMap = mergeMapList.get(i);
// for each name, pick the i-th entry
for (Map.Entry<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> e : broadcastState.entrySet()) {
int previousParallelism = e.getValue().size();
Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithMetaInfo = e.getValue().get(i % previousParallelism);
OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithMetaInfo.f0);
if (operatorStateHandle == null) {
operatorStateHandle = new OperatorStreamStateHandle(CollectionUtil.newHashMapWithExpectedSize(broadcastState.size()), handleWithMetaInfo.f0);
mergeMap.put(handleWithMetaInfo.f0, operatorStateHandle);
}
operatorStateHandle.getStateNameToPartitionOffsets().put(e.getKey(), handleWithMetaInfo.f1);
}
}
} | 3.26 |
flink_RoundRobinOperatorStateRepartitioner_groupByStateMode_rdh | /**
* Group by the different named states.
 */
@SuppressWarnings("unchecked, rawtype")
private GroupByStateNameResults groupByStateMode(List<List<OperatorStateHandle>> previousParallelSubtaskStates) {
// Reorganize: group by (State Name -> StreamStateHandle + StateMetaInfo)
EnumMap<OperatorStateHandle.Mode, Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>> nameToStateByMode = new EnumMap<>(Mode.class);
for (OperatorStateHandle.Mode mode : OperatorStateHandle.Mode.values()) {
nameToStateByMode.put(mode, new HashMap<>());
}
for (List<OperatorStateHandle> previousParallelSubtaskState : previousParallelSubtaskStates) {
for (OperatorStateHandle operatorStateHandle : previousParallelSubtaskState) {
if (operatorStateHandle == null) {
continue;
}
final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>> partitionOffsetEntries = operatorStateHandle.getStateNameToPartitionOffsets().entrySet();
for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> e : partitionOffsetEntries) {
OperatorStateHandle.StateMetaInfo metaInfo = e.getValue();
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToState = nameToStateByMode.get(metaInfo.getDistributionMode());
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>> stateLocations = nameToState.computeIfAbsent(e.getKey(), k -> new ArrayList<>(previousParallelSubtaskStates.size() * partitionOffsetEntries.size()));
stateLocations.add(Tuple2.of(operatorStateHandle.getDelegateStateHandle(), e.getValue()));
}
}
}
return new GroupByStateNameResults(nameToStateByMode);
} | 3.26 |
flink_RoundRobinOperatorStateRepartitioner_repartition_rdh | /**
* Repartition all named states.
*/
private List<Map<StreamStateHandle, OperatorStateHandle>> repartition(GroupByStateNameResults nameToStateByMode, int newParallelism) {
// We will use this to merge w.r.t. StreamStateHandles for each parallel subtask inside the
// maps
List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList = new ArrayList<>(newParallelism);
// Initialize
for (int i = 0; i < newParallelism; ++i) {
mergeMapList.add(new HashMap<>());
}
// Start with the state handles we distribute round robin by splitting by offsets
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToDistributeState = nameToStateByMode.getByMode(Mode.SPLIT_DISTRIBUTE);
m0(nameToDistributeState, newParallelism, mergeMapList);
// Now we also add the state handles marked for union to all parallel instances
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToUnionState = nameToStateByMode.getByMode(Mode.UNION);
repartitionUnionState(nameToUnionState, mergeMapList);
// Now we also add the state handles marked for uniform broadcast to all parallel instances
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToBroadcastState = nameToStateByMode.getByMode(Mode.BROADCAST);
repartitionBroadcastState(nameToBroadcastState, mergeMapList);
return mergeMapList;
} | 3.26 |
flink_RoundRobinOperatorStateRepartitioner_initMergeMapList_rdh | /**
* Init the list of StreamStateHandle -> OperatorStateHandle map with given
* parallelSubtaskStates when parallelism not changed.
*/
private List<Map<StreamStateHandle, OperatorStateHandle>> initMergeMapList(List<List<OperatorStateHandle>> parallelSubtaskStates) {
int parallelism = parallelSubtaskStates.size();
final List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList = new ArrayList<>(parallelism);
for (List<OperatorStateHandle> previousParallelSubtaskState : parallelSubtaskStates) {
mergeMapList.add(previousParallelSubtaskState.stream().collect(Collectors.toMap(OperatorStateHandle::getDelegateStateHandle, Function.identity())));
}
return mergeMapList;
} | 3.26 |
flink_MergeTableLikeUtil_mergePartitions_rdh | /**
* Merges the partitions part of {@code CREATE TABLE} statement.
*
* <p>Partitioning is a single property of a Table, thus there can be at most a single instance
* of partitioning. Therefore it is not possible to use {@link MergingStrategy#INCLUDING} with
* partitioning defined in both source and derived table.
*/
public List<String> mergePartitions(MergingStrategy mergingStrategy, List<String> sourcePartitions, List<String> derivedPartitions) {
if (((!derivedPartitions.isEmpty()) && (!sourcePartitions.isEmpty())) && (mergingStrategy != MergingStrategy.EXCLUDING)) {
throw new ValidationException("The base table already has partitions defined. You might want to specify " + "EXCLUDING PARTITIONS.");
}
if (!derivedPartitions.isEmpty()) {
return derivedPartitions;
}
return sourcePartitions;
}
/**
* Merges the options part of {@code CREATE TABLE} | 3.26 |
flink_MergeTableLikeUtil_mergeTables_rdh | /**
* Merges the schema part of {@code CREATE TABLE} statement. It merges
*
* <ul>
* <li>columns
* <li>computed columns
* <li>watermarks
* <li>primary key
* </ul>
*
* <p>Additionally it performs validation of the features of the derived table. This is not done
* in the {@link SqlCreateTable#validate()} anymore because the validation should be done on top
* of the merged properties. E.g. Some of the columns used in computed columns of the derived
* table can be defined in the source table.
*/
public Schema mergeTables(Map<FeatureOption, MergingStrategy> mergingStrategies, Schema sourceSchema, List<SqlNode> derivedColumns, List<SqlWatermark> derivedWatermarkSpecs, SqlTableConstraint derivedPrimaryKey) {
SchemaBuilder schemaBuilder = new SchemaBuilder(mergingStrategies, sourceSchema, ((FlinkTypeFactory) (f0.getTypeFactory())), dataTypeFactory, f0, escapeExpression);
schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns);
schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
schemaBuilder.m0(derivedPrimaryKey);
return schemaBuilder.build();
} | 3.26 |
flink_MergeTableLikeUtil_computeMergingStrategies_rdh | /**
* Calculates merging strategies for all options. It applies options given by a user to the
* {@link #defaultMergingStrategies}. The {@link MergingStrategy} specified for {@link FeatureOption#ALL} overwrites all the default options. Those can be further changed with a
* specific {@link FeatureOption}.
*/
public Map<FeatureOption, MergingStrategy> computeMergingStrategies(List<SqlTableLike.SqlTableLikeOption> mergingOptions) {
Map<FeatureOption, MergingStrategy> result = new HashMap<>(defaultMergingStrategies);
Optional<SqlTableLike.SqlTableLikeOption> maybeAllOption = mergingOptions.stream().filter(option -> option.getFeatureOption() == FeatureOption.ALL).findFirst();
maybeAllOption.ifPresent(allOption -> {
MergingStrategy strategy = allOption.getMergingStrategy();
for (FeatureOption featureOption : FeatureOption.values()) {
if (featureOption != FeatureOption.ALL) {
result.put(featureOption, strategy);
}
}
});
for (SqlTableLike.SqlTableLikeOption mergingOption : mergingOptions) {
result.put(mergingOption.getFeatureOption(), mergingOption.getMergingStrategy());
}
return result;
} | 3.26 |
flink_AbstractS3FileSystemFactory_configure_rdh | // ------------------------------------------------------------------------
@Override
public void configure(Configuration config) {
flinkConfig = config;
f0.setFlinkConfig(config);
} | 3.26 |
flink_DeltaTrigger_of_rdh | /**
* Creates a delta trigger from the given threshold and {@code DeltaFunction}.
*
* @param threshold
* The threshold at which to trigger.
* @param deltaFunction
* The delta function to use
* @param stateSerializer
* TypeSerializer for the data elements.
* @param <T>
* The type of elements on which this trigger can operate.
* @param <W>
* The type of {@link Window Windows} on which this trigger can operate.
*/
public static <T, W extends Window> DeltaTrigger<T, W> of(double threshold, DeltaFunction<T> deltaFunction, TypeSerializer<T> stateSerializer) {
return new DeltaTrigger<>(threshold, deltaFunction, stateSerializer);
} | 3.26 |
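A hedged usage sketch of DeltaTrigger.of(...): the DeltaFunction lambda and serializer below are illustrative, and the package locations are recalled from Flink's DataStream API, so they may vary by version:

```java
import org.apache.flink.api.common.typeutils.base.DoubleSerializer;
import org.apache.flink.streaming.api.functions.windowing.delta.DeltaFunction;
import org.apache.flink.streaming.api.windowing.triggers.DeltaTrigger;
import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;

public class DeltaTriggerSketch {
    public static void main(String[] args) {
        // Fire whenever the new data point differs from the last triggering point by more than 10.
        DeltaFunction<Double> delta = (oldPoint, newPoint) -> Math.abs(newPoint - oldPoint);

        DeltaTrigger<Double, GlobalWindow> trigger =
                DeltaTrigger.of(10.0, delta, DoubleSerializer.INSTANCE);

        // Typically passed to a windowed stream via .trigger(trigger).
        System.out.println(trigger);
    }
}
```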
flink_PendingSplitsCheckpointSerializer_getVersion_rdh | // ------------------------------------------------------------------------
@Override
public int getVersion() {
return VERSION;
} | 3.26 |
flink_ConnectionLimitingFactory_getClassLoader_rdh | // ------------------------------------------------------------------------
@Override
public ClassLoader getClassLoader() {
return factory.getClassLoader();
} | 3.26 |
flink_TestingReaderContext_getNumSplitRequests_rdh | // ------------------------------------------------------------------------
public int getNumSplitRequests() {
return numSplitRequests;
} | 3.26 |
flink_TestingReaderContext_metricGroup_rdh | // ------------------------------------------------------------------------
@Override
public SourceReaderMetricGroup metricGroup() {
return metrics;
} | 3.26 |
flink_CollectCoordinationResponse_getResults_rdh | // TODO the following two methods might not be so efficient;
// optimize them with MemorySegment if needed
public <T> List<T> getResults(TypeSerializer<T> elementSerializer) throws IOException {
List<T> results = new ArrayList<>();
for (byte[] serializedResult : serializedResults) {
ByteArrayInputStream bais = new ByteArrayInputStream(serializedResult);
DataInputViewStreamWrapper wrapper = new DataInputViewStreamWrapper(bais);
results.add(elementSerializer.deserialize(wrapper));
}
return results;
} | 3.26 |
flink_DefaultContext_m0_rdh | // -------------------------------------------------------------------------------------------
/**
 * Builds the {@link DefaultContext} from flink-conf.yaml, the dynamic configuration, and
 * user-specified jars.
*
* @param dynamicConfig
* user specified configuration.
* @param dependencies
* user specified jars
* @param discoverExecutionConfig
* flag whether to load the execution configuration
*/
public static DefaultContext m0(Configuration dynamicConfig, List<URL> dependencies, boolean discoverExecutionConfig) {
// 1. find the configuration directory
String flinkConfigDir = CliFrontend.getConfigurationDirectoryFromEnv();
// 2. load the global configuration
Configuration configuration = GlobalConfiguration.loadConfiguration(flinkConfigDir);
configuration.addAll(dynamicConfig);
// 3. load the custom command lines
List<CustomCommandLine> commandLines = CliFrontend.loadCustomCommandLines(configuration, flinkConfigDir);
// initialize default file system
FileSystem.initialize(configuration, PluginUtils.createPluginManagerFromRootFolder(configuration));
if (discoverExecutionConfig) {
Options commandLineOptions = collectCommandLineOptions(commandLines);
try {
CommandLine deploymentCommandLine = CliFrontendParser.parse(commandLineOptions, new String[]{ }, true);
configuration.addAll(createExecutionConfig(deploymentCommandLine, commandLineOptions, commandLines, dependencies));
} catch (Exception e) {
throw new SqlGatewayException("Could not load available CLI with Environment Deployment entry.", e);
}
}
return new DefaultContext(configuration, dependencies);
} | 3.26 |
flink_ChannelStateWriteRequest_getReadyFuture_rdh | /**
 * Indicates whether the request is ready; e.g. some requests carry a channel state data
 * future, and that future may not be ready yet.
*
* <p>The ready future is used for {@link ChannelStateWriteRequestExecutorImpl}, executor will
* process ready requests first to avoid deadlock.
*/
public CompletableFuture<?> getReadyFuture() {
return AvailabilityProvider.AVAILABLE;
} | 3.26 |
flink_PlanNode_isPruneMarkerSet_rdh | /**
* Checks whether the pruning marker was set.
*
* @return True, if the pruning marker was set, false otherwise.
*/
public boolean isPruneMarkerSet() {
return this.pFlag;
} | 3.26 |
flink_PlanNode_setRelativeMemoryPerSubtask_rdh | /**
* Sets the memory dedicated to each task for this node.
*
* @param relativeMemoryPerSubtask
* The relative memory per sub-task
*/
public void setRelativeMemoryPerSubtask(double relativeMemoryPerSubtask) {
this.relativeMemoryPerSubTask = relativeMemoryPerSubtask;
} | 3.26 |
flink_PlanNode_updatePropertiesWithUniqueSets_rdh | // --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
public void updatePropertiesWithUniqueSets(Set<FieldSet> uniqueFieldCombinations) {
if ((uniqueFieldCombinations == null) || uniqueFieldCombinations.isEmpty()) {
return;
}
for (FieldSet fields : uniqueFieldCombinations) {
this.globalProps.addUniqueFieldCombination(fields);
this.localProps = this.localProps.addUniqueFields(fields);
}
} | 3.26 |
flink_PlanNode_m1_rdh | /**
* Gets a list of all outgoing channels leading to successors.
*
* @return A list of all channels leading to successors.
*/
public List<Channel> m1() {
return this.outChannels;
} | 3.26 |
flink_PlanNode_getOriginalOptimizerNode_rdh | // --------------------------------------------------------------------------------------------
// Accessors
// --------------------------------------------------------------------------------------------
/**
* Gets the node from the optimizer DAG for which this plan candidate node was created.
*
* @return The optimizer's DAG node.
*/
public OptimizerNode getOriginalOptimizerNode() {
return this.template;
} | 3.26 |
flink_PlanNode_getCumulativeCosts_rdh | /**
 * Gets the cumulative costs of this node. The cumulative costs are the sum of the costs of this
* node and of all nodes in the subtree below this node.
*
* @return The cumulative costs, or null, if not yet set.
*/
public Costs getCumulativeCosts() {
return this.cumulativeCosts;
} | 3.26 |
flink_PlanNode_setPruningMarker_rdh | /**
* Sets the pruning marker to true.
 */
public void setPruningMarker() {
this.pFlag = true;
} | 3.26 |
flink_PlanNode_setBroadcastInputs_rdh | /**
* Sets a list of all broadcast inputs attached to this node.
*/
public void setBroadcastInputs(List<NamedChannel> broadcastInputs) {
if (broadcastInputs != null) {
this.broadcastInputs = broadcastInputs;
// update the branch map
for (NamedChannel nc : broadcastInputs) {
PlanNode source = nc.getSource();
m0(branchPlan, source.branchPlan);
}
}
// do a sanity check that if we are branching, we have now candidates for each branch point
if (this.template.hasUnclosedBranches()) {
if (this.branchPlan == null) {
throw new CompilerException("Branching and rejoining logic did not find a candidate for the branching point.");
}
for (UnclosedBranchDescriptor v11 : this.template.getOpenBranches()) {
OptimizerNode brancher = v11.getBranchingNode();
if (this.branchPlan.get(brancher) == null) {
throw new CompilerException("Branching and rejoining logic did not find a candidate for the branching point.");
}
}
}
} | 3.26 |
flink_PlanNode_setDriverStrategy_rdh | /**
* Sets the driver strategy for this node. Usually should not be changed.
*
* @param newDriverStrategy
* The driver strategy.
*/
public void setDriverStrategy(DriverStrategy newDriverStrategy) {
this.driverStrategy = newDriverStrategy;
} | 3.26 |
flink_PlanNode_getNodeName_rdh | /**
* Gets the name of the plan node.
*
* @return The name of the plan node.
 */
public String getNodeName() {
return this.nodeName;
} | 3.26 |
flink_PlanNode_getOptimizerNode_rdh | // --------------------------------------------------------------------------------------------
@Override
public OptimizerNode getOptimizerNode() {
return this.template;
} | 3.26 |
flink_PlanNode_getBroadcastInputs_rdh | /**
* Gets a list of all broadcast inputs attached to this node.
*/
public List<NamedChannel> getBroadcastInputs() {
return this.broadcastInputs;
} | 3.26 |
flink_PlanNode_setCosts_rdh | /**
* Sets the basic cost for this node to the given value, and sets the cumulative costs to those
* costs plus the cost shares of all inputs (regular and broadcast).
*
* @param nodeCosts
 * The already known costs for this node (this cost is produced by a concrete
 * {@code OptimizerNode} subclass).
 */
public void setCosts(Costs nodeCosts) {
// set the node costs
this.nodeCosts = nodeCosts;
// the cumulative costs are the node costs plus the costs of all inputs
this.cumulativeCosts = nodeCosts.clone();
// add all the normal inputs
for (PlanNode pred : getPredecessors()) {
Costs parentCosts = pred.getCumulativeCostsShare();
if (parentCosts != null) {
this.cumulativeCosts.addCosts(parentCosts);
} else {
throw new CompilerException("Trying to set the costs of an operator before the predecessor costs are computed.");
}
}
// add all broadcast variable inputs
if (this.broadcastInputs != null) {
for (NamedChannel nc : this.broadcastInputs) {
Costs bcInputCost = nc.getSource().getCumulativeCostsShare();
if (bcInputCost != null) {
this.cumulativeCosts.addCosts(bcInputCost);
} else {
throw new CompilerException("Trying to set the costs of an operator before the broadcast input costs are computed.");
}
}
}
} | 3.26 |
flink_PlanNode_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return ((((((((this.template.getOperatorName() + " \"") + getProgramOperator().getName()) + "\" : ") + this.driverStrategy) + " [[ ") + this.globalProps) + " ]] [[ ") + this.localProps) + " ]]";
} | 3.26 |
flink_PlanNode_addOutgoingChannel_rdh | /**
* Adds a channel to a successor node to this node.
*
* @param channel
* The channel to the successor.
*/
public void addOutgoingChannel(Channel channel) {
this.outChannels.add(channel);
} | 3.26 |
flink_SqlTimeParser_parseField_rdh | /**
* Static utility to parse a field of type Time from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException
* Thrown when the value cannot be parsed because the text
* represents not a correct number.
*/
public static final Time parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if ((limitedLen > 0) && (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[(startPos + limitedLen) - 1]))) {
throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Time.valueOf(str);
} | 3.26 |
flink_DoubleParser_parseField_rdh | /**
* Static utility to parse a field of type double from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @param delimiter
* The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException
* Thrown when the value cannot be parsed because the text
 * does not represent a correct number.
*/
public static final double parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if ((limitedLen > 0) && (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[(startPos + limitedLen) - 1]))) {
throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Double.parseDouble(str);
} | 3.26 |
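A small usage sketch of the static parseField(...) shown above, assuming DoubleParser lives in org.apache.flink.types.parser as in recent Flink versions; the record content and delimiter are placeholders:

```java
import java.nio.charset.StandardCharsets;

import org.apache.flink.types.parser.DoubleParser;

public class DoubleParserSketch {
    public static void main(String[] args) {
        byte[] record = "3.1415|second-field".getBytes(StandardCharsets.US_ASCII);

        // Parses the bytes up to (not including) the '|' delimiter.
        double value = DoubleParser.parseField(record, 0, record.length, '|');
        System.out.println(value); // 3.1415
    }
}
```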
flink_Transition_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
} else if ((obj == null) || (getClass() != obj.getClass())) {
return false;
} else {
final Transition v0 = ((Transition) (obj));
return ((this.eventType == v0.eventType) && (this.targetState == v0.targetState)) && (Float.compare(this.prob, v0.prob) == 0);
}
} | 3.26 |