Columns:
  name          string  (length 12 to 178)
  code_snippet  string  (length 8 to 36.5k)
  score         float64 (range 3.26 to 3.68)
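Each record below follows this three-column schema. As a purely illustrative sketch (the record type and field names are hypothetical; only the column names, value ranges, and the sample values are taken from this dump), one row could be represented in Java as:

// Hypothetical representation of one record of this dump.
public record SnippetRecord(String name, String codeSnippet, double score) {
    public static void main(String[] args) {
        SnippetRecord r = new SnippetRecord(
                "flink_Costs_addCpuCost_rdh",                   // name: 12-178 chars
                "public void addCpuCost(double cost) { ... }",  // code_snippet: 8-36.5k chars (abbreviated here)
                3.26);                                          // score: 3.26-3.68
        System.out.println(r.name() + " scored " + r.score());
    }
}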
flink_AbstractBlockResettableIterator_open_rdh
// -------------------------------------------------------------------------------------------- public void open() {if (f0.isDebugEnabled()) { f0.debug("Block Resettable Iterator opened."); } }
3.26
flink_AbstractBlockResettableIterator_close_rdh
/** * This method closes the iterator and releases all resources. This method works both as a * regular shutdown and as a canceling method. The method may be called multiple times and will * not produce an error. */ public void close() { synchronized(this) { if (this.closed) { return;} this.closed = true; } this.numRecordsInBuffer = 0; this.numRecordsReturned = 0; // add the full segments to the empty ones for (int i = this.fullSegments.size() - 1; i >= 0; i--) { this.emptySegments.add(this.fullSegments.remove(i)); } // release the memory segment this.memoryManager.release(this.emptySegments); this.emptySegments.clear(); if (f0.isDebugEnabled()) { f0.debug("Block Resettable Iterator closed."); } }
3.26
flink_Costs_setCpuCost_rdh
/** * Sets the cost for the CPU. * * @param cost * The CPU Cost. */ public void setCpuCost(double cost) { if ((cost == UNKNOWN) || (cost >= 0)) { this.cpuCost = cost; } else { throw new IllegalArgumentException(); } }
3.26
flink_Costs_addCpuCost_rdh
/** * Adds the given CPU cost to the current CPU cost for this Costs object. * * @param cost * The CPU cost to add. */ public void addCpuCost(double cost) { this.cpuCost = ((this.cpuCost < 0) || (cost < 0)) ? UNKNOWN : this.cpuCost + cost; }
3.26
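The addCpuCost snippet above treats any negative operand as UNKNOWN and makes UNKNOWN absorbing under addition. A standalone sketch of that accumulation rule follows; UNKNOWN = -1 is an assumption, since these snippets only show that UNKNOWN is negative.

// Sketch of the UNKNOWN-absorbing accumulation used by Costs.addCpuCost above.
public class CostAccumulationSketch {
    static final double UNKNOWN = -1; // assumed sentinel value

    static double add(double current, double delta) {
        return (current < 0 || delta < 0) ? UNKNOWN : current + delta;
    }

    public static void main(String[] args) {
        double cpu = 0;
        cpu = add(cpu, 100);      // 100.0
        cpu = add(cpu, 50);       // 150.0
        cpu = add(cpu, UNKNOWN);  // one unknown operand poisons the sum
        System.out.println(cpu);  // prints -1.0
    }
}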
flink_Costs_setHeuristicDiskCost_rdh
/** * Sets the heuristic costs for disk for this Costs object. * * @param cost * The heuristic disk cost to set. */public void setHeuristicDiskCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicDiskCost = cost; }
3.26
flink_Costs_addCosts_rdh
// -------------------------------------------------------------------------------------------- /** * Adds the given costs to these costs. If for one of the different cost components (network, * disk), the costs are unknown, the resulting costs will be unknown. * * @param other * The costs to add. */ public void addCosts(Costs other) { // ---------- quantifiable costs ---------- if ((this.networkCost == UNKNOWN) || (other.networkCost == UNKNOWN)) { this.networkCost = UNKNOWN; } else { this.networkCost += other.networkCost; } if ((this.diskCost == UNKNOWN) || (other.diskCost == UNKNOWN)) { this.diskCost = UNKNOWN; } else { this.diskCost += other.diskCost; } if ((this.cpuCost == UNKNOWN) || (other.cpuCost == UNKNOWN)) { this.cpuCost = UNKNOWN; } else { this.cpuCost += other.cpuCost; } // ---------- heuristic costs ---------- this.heuristicNetworkCost += other.heuristicNetworkCost; this.heuristicDiskCost += other.heuristicDiskCost; this.heuristicCpuCost += other.heuristicCpuCost; }
3.26
flink_Costs_compareTo_rdh
// -------------------------------------------------------------------------------------------- /** * The order of comparison is: network first, then disk, then CPU. For each component, the * comparison uses the quantifiable costs if both are known, and falls back to the heuristic * costs otherwise. * * @see java.lang.Comparable#compareTo(java.lang.Object) */ @Override public int compareTo(Costs o) { // check the network cost. if we have actual costs on both, use them, otherwise use the // heuristic costs. if ((this.networkCost != UNKNOWN) && (o.networkCost != UNKNOWN)) { if (this.networkCost != o.networkCost) { return this.networkCost < o.networkCost ? -1 : 1; } } else if (this.heuristicNetworkCost < o.heuristicNetworkCost) { return -1; } else if (this.heuristicNetworkCost > o.heuristicNetworkCost) { return 1; } // next, check the disk cost. again, if we have actual costs on both, use them, otherwise // use the heuristic costs. if ((this.diskCost != UNKNOWN) && (o.diskCost != UNKNOWN)) { if (this.diskCost != o.diskCost) {return this.diskCost < o.diskCost ? -1 : 1; } } else if (this.heuristicDiskCost < o.heuristicDiskCost) { return -1; } else if (this.heuristicDiskCost > o.heuristicDiskCost) { return 1; } // next, check the CPU cost. again, if we have actual costs on both, use them, otherwise use // the heuristic costs. if ((this.cpuCost != UNKNOWN) && (o.cpuCost != UNKNOWN)) { return this.cpuCost < o.cpuCost ? -1 : this.cpuCost > o.cpuCost ? 1 : 0; } else if (this.heuristicCpuCost < o.heuristicCpuCost) { return -1; } else if (this.heuristicCpuCost > o.heuristicCpuCost) { return 1; } else { return 0; } }
3.26
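A standalone sketch of the per-component rule used by compareTo above: prefer the quantifiable costs when both sides know them, otherwise fall back to the heuristic costs. Again, UNKNOWN = -1 is an assumption; the snippets only show it is negative.

// Sketch of one comparison step from Costs.compareTo above, e.g. for the network component.
public class CostCompareSketch {
    static final double UNKNOWN = -1; // assumed sentinel value

    // Returns <0, 0 or >0, mirroring the network/disk branches of compareTo.
    static int compareComponent(double actualA, double actualB, double heuristicA, double heuristicB) {
        if (actualA != UNKNOWN && actualB != UNKNOWN) {
            return Double.compare(actualA, actualB);   // both quantifiable: use them
        }
        return Double.compare(heuristicA, heuristicB); // otherwise: fall back to heuristics
    }

    public static void main(String[] args) {
        // actual costs known on both sides, so the heuristic values are ignored
        System.out.println(compareComponent(1_000, 2_000, 5, 1)); // negative
        // one side unknown, so the heuristic values decide
        System.out.println(compareComponent(UNKNOWN, 2_000, 5, 1)); // positive
    }
}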
flink_Costs_setDiskCost_rdh
/** * Sets the costs for disk for this Costs object. * * @param bytes * The disk cost to set, in bytes to be written and read. */ public void setDiskCost(double bytes) { if ((bytes == UNKNOWN) || (bytes >= 0)) { this.diskCost = bytes; } else { throw new IllegalArgumentException(); } }
3.26
flink_Costs_addHeuristicDiskCost_rdh
/** * Adds the heuristic costs for disk to the current heuristic disk costs for this Costs object. * * @param cost * The heuristic disk cost to add. */ public void addHeuristicDiskCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicDiskCost += cost; // check for overflow if (this.heuristicDiskCost < 0) { this.heuristicDiskCost = Double.MAX_VALUE; } }
3.26
flink_Costs_subtractCosts_rdh
/** * Subtracts the given costs from these costs. If the given costs are unknown, then these costs * remain unchanged. * * @param other * The costs to subtract. */ public void subtractCosts(Costs other) { if ((this.networkCost != UNKNOWN) && (other.networkCost != UNKNOWN)) { this.networkCost -= other.networkCost; if (this.networkCost < 0) { throw new IllegalArgumentException("Cannot subtract more cost than there is."); } } if ((this.diskCost != UNKNOWN) && (other.diskCost != UNKNOWN)) { this.diskCost -= other.diskCost; if (this.diskCost < 0) { throw new IllegalArgumentException("Cannot subtract more cost than there is."); } } if ((this.cpuCost != UNKNOWN) && (other.cpuCost != UNKNOWN)) { this.cpuCost -= other.cpuCost; if (this.cpuCost < 0) { throw new IllegalArgumentException("Cannot subtract more cost than there is."); } } // ---------- relative costs ---------- this.heuristicNetworkCost -= other.heuristicNetworkCost; if (this.heuristicNetworkCost < 0) { throw new IllegalArgumentException("Cannot subtract more cost than there is."); } this.heuristicDiskCost -= other.heuristicDiskCost; if (this.heuristicDiskCost < 0) { throw new IllegalArgumentException("Cannot subtract more cost than there is."); } this.heuristicCpuCost -= other.heuristicCpuCost; if (this.heuristicCpuCost < 0) { throw new IllegalArgumentException("Cannot subtract more cost than there is."); } }
3.26
flink_Costs_setHeuristicNetworkCost_rdh
/** * Sets the heuristic network cost for this Costs object. * * @param cost * The heuristic network cost to set, in bytes to be transferred. */ public void setHeuristicNetworkCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicNetworkCost = cost; }
3.26
flink_Costs_setNetworkCost_rdh
/** * Sets the network cost for this Costs object. * * @param bytes * The network cost to set, in bytes to be transferred. */ public void setNetworkCost(double bytes) { if ((bytes == UNKNOWN) || (bytes >= 0)) { this.networkCost = bytes; } else { throw new IllegalArgumentException(); } }
3.26
flink_Costs_addHeuristicNetworkCost_rdh
/** * Adds the heuristic costs for network to the current heuristic network costs for this Costs * object. * * @param cost * The heuristic network cost to add. */ public void addHeuristicNetworkCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicNetworkCost += cost; // check for overflow if (this.heuristicNetworkCost < 0) { this.heuristicNetworkCost = Double.MAX_VALUE; } }
3.26
flink_Costs_getCpuCost_rdh
/** * Gets the cost for the CPU. * * @return The CPU Cost. */ public double getCpuCost() { return this.cpuCost; }
3.26
flink_Costs_addHeuristicCpuCost_rdh
/** * Adds the given heuristic CPU cost to the current heuristic CPU cost for this Costs object. * * @param cost * The heuristic CPU cost to add. */ public void addHeuristicCpuCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicCpuCost += cost; // check for overflow if (this.heuristicCpuCost < 0) { this.heuristicCpuCost = Double.MAX_VALUE; } }
3.26
flink_Costs_addDiskCost_rdh
/** * Adds the costs for disk to the current disk costs for this Costs object. * * @param bytes * The disk cost to add, in bytes to be written and read. */ public void addDiskCost(double bytes) { this.diskCost = ((this.diskCost < 0) || (bytes < 0)) ? UNKNOWN : this.diskCost + bytes; }
3.26
flink_Costs_addNetworkCost_rdh
/** * Adds the costs for network to the current network costs for this Costs object. * * @param bytes * The network cost to add, in bytes to be transferred. */ public void addNetworkCost(double bytes) { this.networkCost = ((this.networkCost < 0) || (bytes < 0)) ? UNKNOWN : this.networkCost + bytes; }
3.26
flink_Costs_getHeuristicCpuCost_rdh
/** * Gets the heuristic cost for the CPU. * * @return The heuristic CPU Cost. */ public double getHeuristicCpuCost() { return this.heuristicCpuCost; }
3.26
flink_Costs_getHeuristicNetworkCost_rdh
// -------------------------------------------------------------------------------------------- /** * Gets the heuristic network cost. * * @return The heuristic network cost, in bytes to be transferred. */ public double getHeuristicNetworkCost() { return this.heuristicNetworkCost; }
3.26
flink_Costs_getNetworkCost_rdh
// -------------------------------------------------------------------------------------------- /** * Gets the network cost. * * @return The network cost, in bytes to be transferred. */ public double getNetworkCost() { return networkCost; }
3.26
flink_Costs_setHeuristicCpuCost_rdh
/** * Sets the heuristic cost for the CPU. * * @param cost * The heuristic CPU Cost. */ public void setHeuristicCpuCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicCpuCost = cost; }
3.26
flink_SlicingWindowOperator_m2_rdh
// ------------------------------------------------------------------------------ // Visible For Testing // ------------------------------------------------------------------------------ @VisibleForTesting public Counter m2() { return numLateRecordsDropped; }
3.26
flink_DefaultBlocklistTracker_tryAddOrMerge_rdh
/** * Try to add a new blocked node record. If the node (identified by node id) already exists, the * newly added one will be merged with the existing one. * * @param newNode * the new blocked node record * @return the add status */ private AddStatus tryAddOrMerge(BlockedNode newNode) { checkNotNull(newNode); final String nodeId = newNode.getNodeId(); final BlockedNode existingNode = f0.get(nodeId); if (existingNode == null) { f0.put(nodeId, newNode); return AddStatus.ADDED; } else { BlockedNode merged = (newNode.getEndTimestamp() >= existingNode.getEndTimestamp()) ? newNode : existingNode; if (!merged.equals(existingNode)) { f0.put(nodeId, merged); return AddStatus.MERGED; } return AddStatus.f1; } }
3.26
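The merge rule in tryAddOrMerge above keeps whichever record has the later (or equal) end timestamp. A minimal sketch of that rule in isolation; Node is a hypothetical stand-in for BlockedNode, whose constructor is not shown in these snippets.

// Sketch of the "later end timestamp wins" merge rule from tryAddOrMerge above.
public class BlocklistMergeSketch {
    record Node(String nodeId, long endTimestamp) {}

    static Node merge(Node existing, Node incoming) {
        return incoming.endTimestamp() >= existing.endTimestamp() ? incoming : existing;
    }

    public static void main(String[] args) {
        Node existing = new Node("node-1", 1_000L);
        Node incoming = new Node("node-1", 2_000L);
        System.out.println(merge(existing, incoming)); // keeps the record with the later end timestamp
    }
}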
flink_EndOfSegmentEvent_hashCode_rdh
// ------------------------------------------------------------------------ @Override public int hashCode() { return 1965146672; }
3.26
flink_CreditBasedPartitionRequestClientHandler_exceptionCaught_rdh
/** * Called on exceptions in the client handler pipeline. * * <p>Remote exceptions are received as regular payload. */ @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { if (cause instanceof TransportException) { notifyAllChannelsOfErrorAndClose(cause); } else {final SocketAddress remoteAddr = ctx.channel().remoteAddress(); final TransportException tex; // Improve on the connection reset by peer error message if ((cause.getMessage() != null) && cause.getMessage().contains("Connection reset by peer")) { tex = new RemoteTransportException(((((("Lost connection to task manager '" + remoteAddr) + " [ ") + connectionID.getResourceID().getStringWithMetadata()) + " ] ") + "'. ") + "This indicates that the remote task manager was lost.", remoteAddr, cause); } else { final SocketAddress localAddr = ctx.channel().localAddress(); tex = new LocalTransportException(String.format("%s (connection to '%s [%s]')", cause.getMessage(), remoteAddr, connectionID.getResourceID().getStringWithMetadata()), localAddr, cause); } notifyAllChannelsOfErrorAndClose(tex); } }
3.26
flink_CreditBasedPartitionRequestClientHandler_addInputChannel_rdh
// ------------------------------------------------------------------------ // Input channel/receiver registration // ------------------------------------------------------------------------ @Override public void addInputChannel(RemoteInputChannel listener) throws IOException { checkError(); inputChannels.putIfAbsent(listener.getInputChannelId(), listener); }
3.26
flink_CreditBasedPartitionRequestClientHandler_channelActive_rdh
// ------------------------------------------------------------------------ // Network events // ------------------------------------------------------------------------ @Override public void channelActive(final ChannelHandlerContext ctx) throws Exception { if (this.ctx == null) { this.ctx = ctx; } super.channelActive(ctx); }
3.26
flink_CreditBasedPartitionRequestClientHandler_checkError_rdh
// ------------------------------------------------------------------------ /** * Checks for an error and rethrows it if one was reported. */ @VisibleForTesting void checkError() throws IOException { final Throwable t = channelError.get(); if (t != null) { if (t instanceof IOException) { throw ((IOException) (t)); } else { throw new IOException("There has been an error in the channel.", t);} } }
3.26
flink_CreditBasedPartitionRequestClientHandler_writeAndFlushNextMessageIfPossible_rdh
/** * Tries to write&flush unannounced credits for the next input channel in queue. * * <p>This method may be called by the first input channel enqueuing, or the complete future's * callback of the previous input channel, or the channel writability changed event. */ private void writeAndFlushNextMessageIfPossible(Channel channel) { if ((channelError.get() != null) || (!channel.isWritable())) { return; } while (true) { ClientOutboundMessage outboundMessage = clientOutboundMessages.poll(); // The input channel may be null because of the write callbacks // that are executed after each write. if (outboundMessage == null) { return; } // There is no need to notify credit or resume data consumption for a released channel. if (!outboundMessage.inputChannel.isReleased()) { Object msg = outboundMessage.buildMessage(); if (msg == null) { continue; } // Write and flush and wait until this is done before // trying to continue with the next input channel. channel.writeAndFlush(msg).addListener(writeListener); return; } } }
3.26
flink_CreditBasedPartitionRequestClientHandler_userEventTriggered_rdh
/** * Triggered by notifying credit available in the client handler pipeline. * * <p>Enqueues the input channel and will trigger write&flush unannounced credits for this input * channel if it is the first one in the queue. */ @Override public void userEventTriggered(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof ClientOutboundMessage) { boolean triggerWrite = clientOutboundMessages.isEmpty(); clientOutboundMessages.add(((ClientOutboundMessage) (msg))); if (triggerWrite) { writeAndFlushNextMessageIfPossible(ctx.channel()); } } else if (msg instanceof ConnectionErrorMessage) { notifyAllChannelsOfErrorAndClose(((ConnectionErrorMessage) (msg)).getCause()); } else { ctx.fireUserEventTriggered(msg); } }
3.26
flink_StructuredType_newBuilder_rdh
/** * Creates a builder for a {@link StructuredType} that is not stored in a catalog and is * identified by an implementation {@link Class}. */ public static StructuredType.Builder newBuilder(Class<?> implementationClass) { return new StructuredType.Builder(implementationClass); }
3.26
flink_UnionIterator_iterator_rdh
// ------------------------------------------------------------------------ @Override public Iterator<T> iterator() { if (iteratorAvailable) { iteratorAvailable = false; return this; } else { throw new TraversableOnceException(); } }
3.26
flink_LogicalScopeProvider_castFrom_rdh
/** * Casts the given metric group to a {@link LogicalScopeProvider}, if it implements the * interface. * * @param metricGroup * metric group to cast * @return cast metric group * @throws IllegalStateException * if the metric group did not implement the LogicalScopeProvider * interface */static LogicalScopeProvider castFrom(MetricGroup metricGroup) throws IllegalStateException {if (metricGroup instanceof LogicalScopeProvider) { return ((LogicalScopeProvider) (metricGroup));} else { throw new IllegalStateException("The given metric group does not implement the LogicalScopeProvider interface."); } }
3.26
flink_FlinkAggregateRemoveRule_onMatch_rdh
// ~ Methods ---------------------------------------------------------------- public void onMatch(RelOptRuleCall call) { final Aggregate aggregate = call.rel(0); final RelNode input = call.rel(1); // Distinct is "GROUP BY c1, c2" (where c1, c2 are a set of columns on // which the input is unique, i.e. contain a key) and has no aggregate // functions or the functions we enumerated. It can be removed. final RelNode newInput = convert(input, aggregate.getTraitSet().simplify()); // If aggregate was projecting a subset of columns, add a project for the // same effect. final RelBuilder relBuilder = call.builder(); relBuilder.push(newInput); List<Integer> projectIndices = new ArrayList<>(aggregate.getGroupSet().asList()); for (AggregateCall aggCall : aggregate.getAggCallList()) { projectIndices.addAll(aggCall.getArgList()); } relBuilder.project(relBuilder.fields(projectIndices)); // Create a project if some of the columns have become // NOT NULL because the aggregate functions were removed relBuilder.convert(aggregate.getRowType(), true); call.transformTo(relBuilder.build()); }
3.26
flink_TieredStorageNettyServiceImpl_setupInputChannels_rdh
/** * Set up input channels in {@link SingleInputGate}. The method will be invoked by the pekko rpc * thread first, and then the method {@link TieredStorageNettyService#registerConsumer(TieredStoragePartitionId, * TieredStorageSubpartitionId)} will be invoked by the same thread sequentially, which ensures * thread safety. * * @param tieredStorageConsumerSpecs * specs indicating the {@link TieredResultPartition} and {@link TieredStorageSubpartitionId}. * @param inputChannelProviders * providers of input channels for the subpartitions. */ public void setupInputChannels(List<TieredStorageConsumerSpec> tieredStorageConsumerSpecs, List<Supplier<InputChannel>> inputChannelProviders) { checkState(tieredStorageConsumerSpecs.size() == inputChannelProviders.size()); for (int index = 0; index < tieredStorageConsumerSpecs.size(); ++index) { setupInputChannel(index, tieredStorageConsumerSpecs.get(index).getPartitionId(), tieredStorageConsumerSpecs.get(index).getSubpartitionId(), inputChannelProviders.get(index)); } }
3.26
flink_TieredStorageNettyServiceImpl_createResultSubpartitionView_rdh
/** * Creates a {@link ResultSubpartitionView} for the netty server. * * @param partitionId * the unique id of the {@link TieredResultPartition}. * @param subpartitionId * the unique id of the subpartition. * @param availabilityListener * listener used to listen for the availability of data. * @return the {@link TieredStorageResultSubpartitionView}. */ public ResultSubpartitionView createResultSubpartitionView(TieredStoragePartitionId partitionId, TieredStorageSubpartitionId subpartitionId, BufferAvailabilityListener availabilityListener) { List<NettyServiceProducer> serviceProducers = registeredServiceProducers.get(partitionId); if (serviceProducers == null) { return new TieredStorageResultSubpartitionView(availabilityListener, new ArrayList<>(), new ArrayList<>(), new ArrayList<>()); } List<NettyPayloadManager> nettyPayloadManagers = new ArrayList<>(); List<NettyConnectionId> nettyConnectionIds = new ArrayList<>(); for (NettyServiceProducer serviceProducer : serviceProducers) { NettyPayloadManager nettyPayloadManager = new NettyPayloadManager(); NettyConnectionWriterImpl writer = new NettyConnectionWriterImpl(nettyPayloadManager, availabilityListener); serviceProducer.connectionEstablished(subpartitionId, writer); nettyConnectionIds.add(writer.getNettyConnectionId()); nettyPayloadManagers.add(nettyPayloadManager); } return new TieredStorageResultSubpartitionView(availabilityListener, nettyPayloadManagers, nettyConnectionIds, registeredServiceProducers.get(partitionId)); }
3.26
flink_ProcessingTimeTriggers_every_rdh
/** * Creates a trigger that fires by a certain interval after reception of the first element. * * @param time * the certain interval */ public static <W extends Window> AfterFirstElementPeriodic<W> every(Duration time) { return new AfterFirstElementPeriodic<>(time.toMillis()); }
3.26
flink_ProcessingTimeTriggers_afterEndOfWindow_rdh
/** * Creates a trigger that fires when the processing time passes the end of the window. */ public static <W extends Window> AfterEndOfWindow<W> afterEndOfWindow() { return new AfterEndOfWindow<>(); }
3.26
flink_BlobCacheSizeTracker_untrackAll_rdh
/** * Unregister all the tracked BLOBs related to the given job. */ public void untrackAll(JobID jobId) { checkNotNull(jobId); synchronized(lock) { Set<BlobKey> keysToRemove = blobKeyByJob.remove(jobId); if (keysToRemove != null) { for (BlobKey key : keysToRemove) { untrack(jobId, key); } } } }
3.26
flink_BlobCacheSizeTracker_untrack_rdh
/** * Remove the BLOB from the tracker. */ private void untrack(JobID jobId, BlobKey blobKey) { checkNotNull(jobId); checkNotNull(blobKey); untrack(Tuple2.of(jobId, blobKey)); }
3.26
flink_BlobCacheSizeTracker_track_rdh
/** * Register the BLOB to the tracker. */ public void track(JobID jobId, BlobKey blobKey, long size) { checkNotNull(jobId); checkNotNull(blobKey); checkArgument(size >= 0); synchronized(lock) { if (caches.putIfAbsent(Tuple2.of(jobId, blobKey), size) == null) {blobKeyByJob.computeIfAbsent(jobId, ignore -> new HashSet<>()).add(blobKey); total += size; if (total > sizeLimit) { LOG.warn(("The overall size of BLOBs in the cache exceeds " + "the limit. Limit = [{}], Current: [{}], ") + "The size of next BLOB: [{}].", sizeLimit, total, size); } } else { LOG.warn(("Attempt to track a duplicated BLOB. This may indicate a duplicate upload " + "or a hash collision. Ignoring newest upload. ") + "JobID = [{}], BlobKey = [{}]", jobId, blobKey); } } }
3.26
flink_BlobCacheSizeTracker_update_rdh
/** * Update the least used index for the BLOBs so that the tracker can easily find out the least * recently used BLOBs. */ public void update(JobID jobId, BlobKey blobKey) { checkNotNull(jobId); checkNotNull(blobKey); synchronized(lock) { caches.get(Tuple2.of(jobId, blobKey)); } }
3.26
flink_BlobCacheSizeTracker_checkLimit_rdh
/** * Check the size limit and return the BLOBs to delete. * * @param size * size of the BLOB intended to put into the cache * @return list of BLOBs to delete before putting into the target BLOB */ public List<Tuple2<JobID, BlobKey>> checkLimit(long size) { checkArgument(size >= 0); synchronized(lock) { List<Tuple2<JobID, BlobKey>> blobsToDelete = new ArrayList<>(); long current = total; for (Map.Entry<Tuple2<JobID, BlobKey>, Long> entry : caches.entrySet()) { if ((current + size) > sizeLimit) { blobsToDelete.add(entry.getKey()); current -= entry.getValue(); } } return blobsToDelete; } }
3.26
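checkLimit above walks the cache in iteration order and collects entries until the incoming BLOB fits under the limit; together with update(), which only calls caches.get(...) to refresh recency, this suggests an access-ordered map. A standalone sketch of that eviction-candidate scan, assuming a LinkedHashMap with accessOrder=true (the map's actual construction is not shown in these snippets):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch of the size-limit scan in BlobCacheSizeTracker.checkLimit above.
public class CacheLimitSketch {
    public static void main(String[] args) {
        Map<String, Long> caches = new LinkedHashMap<>(16, 0.75f, true); // assumed access order
        caches.put("blobA", 40L);
        caches.put("blobB", 30L);
        caches.get("blobA"); // refresh recency: blobA becomes the most recently used

        long sizeLimit = 100, total = 70, incoming = 50;
        List<String> toDelete = new ArrayList<>();
        long current = total;
        for (Map.Entry<String, Long> e : caches.entrySet()) { // least recently used first
            if (current + incoming > sizeLimit) {
                toDelete.add(e.getKey());
                current -= e.getValue();
            }
        }
        System.out.println(toDelete); // [blobB]: evicting it makes room for the new BLOB
    }
}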
flink_SharedBufferNode_snapshotConfiguration_rdh
// ----------------------------------------------------------------------------------- @Override public TypeSerializerSnapshot<SharedBufferNode> snapshotConfiguration() { return new SharedBufferNodeSerializerSnapshot(this); }
3.26
flink_HiveFunction_createTypeInference_rdh
/** * Creates {@link TypeInference} for the function. */ default TypeInference createTypeInference() { TypeInference.Builder builder = TypeInference.newBuilder(); builder.inputTypeStrategy(new HiveFunctionInputStrategy(this));builder.outputTypeStrategy(new HiveFunctionOutputStrategy(this)); return builder.build(); }
3.26
flink_TaskManagerServices_shutDown_rdh
// -------------------------------------------------------------------------------------------- // Shut down method // -------------------------------------------------------------------------------------------- /** * Shuts the {@link TaskExecutor} services down. */ public void shutDown() throws FlinkException { Exception exception = null; try { f0.shutdown(); } catch (Exception e) { exception = e; } try { ioManager.close(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception);} try { shuffleEnvironment.close(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { kvStateService.shutdown(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { taskSlotTable.close(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception);} try { jobLeaderService.stop(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { ioExecutor.shutdown(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { jobTable.close(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } try { libraryCacheManager.shutdown(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } taskEventDispatcher.clearAll(); if (exception != null) { throw new FlinkException("Could not properly shut down the TaskManager services.", exception); } }
3.26
flink_TaskManagerServices_checkTempDirs_rdh
/** * Validates that all the directories denoted by the strings do actually exist or can be * created, are proper directories (not files), and are writable. * * @param tmpDirs * The array of directory paths to check. * @throws IOException * Thrown if any of the directories does not exist and cannot be created or * is not writable or is a file, rather than a directory. */ private static void checkTempDirs(String[] tmpDirs) throws IOException { for (String dir : tmpDirs) { if ((dir != null) && (!dir.equals(""))) { File file = new File(dir); if (!file.exists()) { if (!file.mkdirs()) { throw new IOException(("Temporary file directory " + file.getAbsolutePath()) + " does not exist and could not be created."); } } if (!file.isDirectory()) { throw new IOException(("Temporary file directory " + file.getAbsolutePath()) + " is not a directory."); } if (!file.canWrite()) { throw new IOException(("Temporary file directory " + file.getAbsolutePath()) + " is not writable."); } if (LOG.isInfoEnabled()) { long totalSpaceGb = file.getTotalSpace() >> 30; long usableSpaceGb = file.getUsableSpace() >> 30; double usablePercentage = (((double) (usableSpaceGb)) / totalSpaceGb) * 100; String path = file.getAbsolutePath(); LOG.info(String.format("Temporary file directory '%s': total %d GB, " + "usable %d GB (%.2f%% usable)", path, totalSpaceGb, usableSpaceGb, usablePercentage)); } } else { throw new IllegalArgumentException("Temporary file directory is null or empty."); } }}
3.26
flink_TaskManagerServices_fromConfiguration_rdh
// -------------------------------------------------------------------------------------------- // Static factory methods for task manager services // -------------------------------------------------------------------------------------------- /** * Creates and returns the task manager services. * * @param taskManagerServicesConfiguration * task manager configuration * @param permanentBlobService * permanentBlobService used by the services * @param taskManagerMetricGroup * metric group of the task manager * @param ioExecutor * executor for async IO operations * @param scheduledExecutor * scheduled executor in rpc service * @param fatalErrorHandler * to handle class loading OOMs * @param workingDirectory * the working directory of the process * @return task manager components * @throws Exception */ public static TaskManagerServices fromConfiguration(TaskManagerServicesConfiguration taskManagerServicesConfiguration, PermanentBlobService permanentBlobService, MetricGroup taskManagerMetricGroup, ExecutorService ioExecutor, ScheduledExecutor scheduledExecutor, FatalErrorHandler fatalErrorHandler, WorkingDirectory workingDirectory) throws Exception { // pre-start checks checkTempDirs(taskManagerServicesConfiguration.getTmpDirPaths()); final TaskEventDispatcher taskEventDispatcher = new TaskEventDispatcher();// start the I/O manager, it will create some temp directories. final IOManager ioManager = new IOManagerAsync(taskManagerServicesConfiguration.getTmpDirPaths()); final ShuffleEnvironment<?, ?> shuffleEnvironment = createShuffleEnvironment(taskManagerServicesConfiguration, taskEventDispatcher, taskManagerMetricGroup, ioExecutor, scheduledExecutor); final int listeningDataPort = shuffleEnvironment.start(); LOG.info("TaskManager data connection initialized successfully; listening internally on port: {}", listeningDataPort); final KvStateService kvStateService = KvStateService.fromConfiguration(taskManagerServicesConfiguration); kvStateService.start(); final UnresolvedTaskManagerLocation unresolvedTaskManagerLocation = // we expose the task manager location with the listening port // iff the external data port is not explicitly defined new UnresolvedTaskManagerLocation(taskManagerServicesConfiguration.getResourceID(), taskManagerServicesConfiguration.getExternalAddress(), taskManagerServicesConfiguration.getExternalDataPort() > 0 ? 
taskManagerServicesConfiguration.getExternalDataPort() : listeningDataPort, taskManagerServicesConfiguration.getNodeId()); final BroadcastVariableManager broadcastVariableManager = new BroadcastVariableManager(); final TaskSlotTable<Task> taskSlotTable = createTaskSlotTable(taskManagerServicesConfiguration.getNumberOfSlots(), taskManagerServicesConfiguration.getTaskExecutorResourceSpec(), taskManagerServicesConfiguration.getTimerServiceShutdownTimeout(), taskManagerServicesConfiguration.getPageSize(), ioExecutor); final JobTable jobTable = DefaultJobTable.create(); final JobLeaderService jobLeaderService = new DefaultJobLeaderService(unresolvedTaskManagerLocation, taskManagerServicesConfiguration.getRetryingRegistrationConfiguration()); final TaskExecutorLocalStateStoresManager taskStateManager = new TaskExecutorLocalStateStoresManager(taskManagerServicesConfiguration.isLocalRecoveryEnabled(), taskManagerServicesConfiguration.getLocalRecoveryStateDirectories(), ioExecutor); final TaskExecutorStateChangelogStoragesManager changelogStoragesManager = new TaskExecutorStateChangelogStoragesManager(); final TaskExecutorChannelStateExecutorFactoryManager channelStateExecutorFactoryManager = new TaskExecutorChannelStateExecutorFactoryManager(); final TaskExecutorFileMergingManager fileMergingManager = new TaskExecutorFileMergingManager(); final boolean failOnJvmMetaspaceOomError = taskManagerServicesConfiguration.getConfiguration().getBoolean(CoreOptions.FAIL_ON_USER_CLASS_LOADING_METASPACE_OOM); final boolean checkClassLoaderLeak = taskManagerServicesConfiguration.getConfiguration().getBoolean(CoreOptions.CHECK_LEAKED_CLASSLOADER); final LibraryCacheManager libraryCacheManager = new BlobLibraryCacheManager(permanentBlobService, BlobLibraryCacheManager.defaultClassLoaderFactory(taskManagerServicesConfiguration.getClassLoaderResolveOrder(), taskManagerServicesConfiguration.getAlwaysParentFirstLoaderPatterns(), failOnJvmMetaspaceOomError ? fatalErrorHandler : null, checkClassLoaderLeak), false); final SlotAllocationSnapshotPersistenceService slotAllocationSnapshotPersistenceService; if (taskManagerServicesConfiguration.isLocalRecoveryEnabled()) { slotAllocationSnapshotPersistenceService = new FileSlotAllocationSnapshotPersistenceService(workingDirectory.getSlotAllocationSnapshotDirectory()); } else { slotAllocationSnapshotPersistenceService = NoOpSlotAllocationSnapshotPersistenceService.INSTANCE; } final GroupCache<JobID, PermanentBlobKey, JobInformation> v19 = new DefaultGroupCache.Factory<JobID, PermanentBlobKey, JobInformation>().create(); final GroupCache<JobID, PermanentBlobKey, TaskInformation> taskInformationCache = new DefaultGroupCache.Factory<JobID, PermanentBlobKey, TaskInformation>().create(); final GroupCache<JobID, PermanentBlobKey, ShuffleDescriptorGroup> shuffleDescriptorsCache = new DefaultGroupCache.Factory<JobID, PermanentBlobKey, ShuffleDescriptorGroup>().create(); return new TaskManagerServices(unresolvedTaskManagerLocation, taskManagerServicesConfiguration.getManagedMemorySize().getBytes(), ioManager, shuffleEnvironment, kvStateService, broadcastVariableManager, taskSlotTable, jobTable, jobLeaderService, taskStateManager, fileMergingManager, changelogStoragesManager, channelStateExecutorFactoryManager, taskEventDispatcher, ioExecutor, libraryCacheManager, slotAllocationSnapshotPersistenceService, new SharedResources(), v19, taskInformationCache, shuffleDescriptorsCache); }
3.26
flink_TaskManagerServices_getManagedMemorySize_rdh
// -------------------------------------------------------------------------------------------- // Getter/Setter // -------------------------------------------------------------------------------------------- public long getManagedMemorySize() { return managedMemorySize; }
3.26
flink_MemorySegment_getShort_rdh
/** * Reads a short integer value (16 bit, 2 bytes) from the given position, composing them into a * short value according to the current byte order. * * @param index * The position from which the memory will be read. * @return The short value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public short getShort(int index) { final long pos = address + index; if ((index >= 0) && (pos <= (f0 - 2))) { return UNSAFE.getShort(heapMemory, pos); } else if (address > f0) { throw new IllegalStateException("segment has been freed"); } else { // index is in fact invalid throw new IndexOutOfBoundsException(); } } /** * Reads a short integer value (16 bit, 2 bytes) from the given position, in little-endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #getShort(int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #getShort(int)}
3.26
flink_MemorySegment_putFloatBigEndian_rdh
/** * Writes the given single-precision float value (32bit, 4 bytes) to the given position in big * endian byte order. This method's speed depends on the system's native byte order, and it is * possibly slower than {@link #putFloat(int, float)}. For most cases (such as transient storage * in memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link #putFloat(int, float)} is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The float value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public void putFloatBigEndian(int index, float value) { putIntBigEndian(index, Float.floatToRawIntBits(value)); }
3.26
flink_MemorySegment_putShortBigEndian_rdh
/** * Writes the given short integer value (16 bit, 2 bytes) to the given position in big-endian * byte order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #putShort(int, short)}. For most cases (such as transient storage in * memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link #putShort(int, short)} is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The short value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public void putShortBigEndian(int index, short value) { if (LITTLE_ENDIAN) { putShort(index, Short.reverseBytes(value)); } else { putShort(index, value); } }
3.26
flink_MemorySegment_copyFromUnsafe_rdh
/** * Bulk copy method. Copies {@code numBytes} bytes from source unsafe object and pointer. NOTE: * This is an unsafe method, no check here, please be careful. * * @param offset * The position in this memory segment at which the bytes will be written. * @param source * The unsafe memory to copy the bytes from. * @param sourcePointer * The position in the source unsafe memory to copy the chunk from. * @param numBytes * The number of bytes to copy. * @throws IndexOutOfBoundsException * If this segment cannot contain the given number of bytes * (starting from offset). */public void copyFromUnsafe(int offset, Object source, int sourcePointer, int numBytes) {final long thisPointer = this.address + offset; if ((thisPointer + numBytes) > f0) { throw new IndexOutOfBoundsException(String.format("offset=%d, numBytes=%d, address=%d", offset, numBytes, this.address)); } UNSAFE.copyMemory(source, sourcePointer, this.heapMemory, thisPointer, numBytes); }
3.26
flink_MemorySegment_getDoubleLittleEndian_rdh
/** * Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in * little endian byte order. This method's speed depends on the system's native byte order, and * it is possibly slower than {@link #getDouble(int)}. For most cases (such as transient storage * in memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link #getDouble(int)} is the preferable choice. * * @param index * The position from which the value will be read. * @return The double value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public double getDoubleLittleEndian(int index) { return Double.longBitsToDouble(getLongLittleEndian(index)); }
3.26
flink_MemorySegment_getFloat_rdh
/** * Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in * the system's native byte order. This method offers the best speed for float reading and * should be used unless a specific byte order is required. In most cases, it suffices to know * that the byte order in which the value is written is the same as the one in which it is read * (such as transient storage in memory, or serialization for I/O and network), making this * method the preferable choice. * * @param index * The position from which the value will be read. * @return The float value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public float getFloat(int index) { return Float.intBitsToFloat(m0(index)); }
3.26
flink_MemorySegment_getLong_rdh
/** * Reads a long value (64bit, 8 bytes) from the given position, in the system's native byte * order. This method offers the best speed for long integer reading and should be used unless a * specific byte order is required. In most cases, it suffices to know that the byte order in * which the value is written is the same as the one in which it is read (such as transient * storage in memory, or serialization for I/O and network), making this method the preferable * choice. * * @param index * The position from which the value will be read. * @return The long value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public long getLong(int index) { final long pos = address + index; if ((index >= 0) && (pos <= (f0 - 8))) { return UNSAFE.getLong(heapMemory, pos); } else if (address > f0) { throw new IllegalStateException("segment has been freed"); } else { // index is in fact invalid throw new IndexOutOfBoundsException(); } }
3.26
flink_MemorySegment_getArray_rdh
/** * Returns the byte array of on-heap memory segments. * * @return underlying byte array * @throws IllegalStateException * if the memory segment does not represent on-heap memory */ public byte[] getArray() { if (heapMemory != null) { return heapMemory; } else { throw new IllegalStateException("Memory segment does not represent heap memory"); } }
3.26
flink_MemorySegment_get_rdh
/** * Bulk get method. Copies {@code numBytes} bytes from this memory segment, starting at position * {@code offset} to the target {@code ByteBuffer}. The bytes will be put into the target buffer * starting at the buffer's current position. If this method attempts to write more bytes than * the target byte buffer has remaining (with respect to {@link ByteBuffer#remaining()}), this * method will cause a {@link java.nio.BufferOverflowException}. * * @param offset * The position in this memory segment from which the bytes will be read. * @param target * The ByteBuffer to copy the bytes to. * @param numBytes * The number of bytes to copy. * @throws IndexOutOfBoundsException * If the offset is invalid, or this segment does not contain * the given number of bytes (starting from offset), or the target byte buffer does not have * enough space for the bytes. * @throws ReadOnlyBufferException * If the target buffer is read-only. */public void get(int offset, ByteBuffer target, int numBytes) { // check the byte array offset and length if (((offset | numBytes) | (offset + numBytes)) < 0) { throw new IndexOutOfBoundsException(); } if (target.isReadOnly()) { throw new ReadOnlyBufferException(); } final int targetOffset = target.position(); final int remaining = target.remaining(); if (remaining < numBytes) { throw new BufferOverflowException(); } if (target.isDirect()) { // copy to the target memory directly final long targetPointer = getByteBufferAddress(target) + targetOffset; final long sourcePointer = address + offset; if (sourcePointer <= (f0 - numBytes)) { UNSAFE.copyMemory(heapMemory, sourcePointer, null, targetPointer, numBytes); target.position(targetOffset + numBytes); } else if (address > f0) { throw new IllegalStateException("segment has been freed"); } else { throw new IndexOutOfBoundsException(); } } else if (target.hasArray()) { // move directly into the byte array get(offset, target.array(), targetOffset + target.arrayOffset(), numBytes); // this must be after the get() call to ensure that the byte buffer is not // modified in case the call fails target.position(targetOffset + numBytes); } else { // other types of byte buffers throw new IllegalArgumentException("The target buffer is not direct, and has no array."); } }
3.26
flink_MemorySegment_getIntLittleEndian_rdh
/** * Reads an int value (32bit, 4 bytes) from the given position, in little-endian byte order. * This method's speed depends on the system's native byte order, and it is possibly slower than * {@link #getInt(int)}. For most cases (such as transient storage in memory or serialization * for I/O and network), it suffices to know that the byte order in which the value is written * is the same as the one in which it is read, and {@link #getInt(int)} is the preferable * choice. * * @param index * The position from which the value will be read. * @return The int value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public int getIntLittleEndian(int index) { if (LITTLE_ENDIAN) { return m0(index); } else { return Integer.reverseBytes(m0(index)); } }
3.26
flink_MemorySegment_getDoubleBigEndian_rdh
/** * Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in * big endian byte order. This method's speed depends on the system's native byte order, and it * is possibly slower than {@link #getDouble(int)}. For most cases (such as transient storage in * memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link #getDouble(int)} is the preferable choice. * * @param index * The position from which the value will be read. * @return The double value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public double getDoubleBigEndian(int index) { return Double.longBitsToDouble(getLongBigEndian(index)); }
3.26
flink_MemorySegment_wrap_rdh
/** * Wraps the chunk of the underlying memory located between <tt>offset</tt> and <tt>offset + * length</tt> in a NIO ByteBuffer. The ByteBuffer has the full segment as capacity and the * offset and length parameters set the buffer's position and limit. * * @param offset * The offset in the memory segment. * @param length * The number of bytes to be wrapped as a buffer. * @return A <tt>ByteBuffer</tt> backed by the specified portion of the memory segment. * @throws IndexOutOfBoundsException * Thrown, if offset is negative or larger than the memory * segment size, or if the offset plus the length is larger than the segment size. */ public ByteBuffer wrap(int offset, int length) { if (!allowWrap) { throw new UnsupportedOperationException("Wrap is not supported by this segment. This usually indicates that the underlying memory is unsafe, thus transferring of ownership is not allowed."); } return wrapInternal(offset, length);}
3.26
flink_MemorySegment_getIntBigEndian_rdh
/** * Reads an int value (32bit, 4 bytes) from the given position, in big-endian byte order. This * method's speed depends on the system's native byte order, and it is possibly slower than * {@link #getInt(int)}. For most cases (such as transient storage in memory or serialization * for I/O and network), it suffices to know that the byte order in which the value is written * is the same as the one in which it is read, and {@link #getInt(int)} is the preferable * choice. * * @param index * The position from which the value will be read. * @return The int value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public int getIntBigEndian(int index) {if (LITTLE_ENDIAN) { return Integer.reverseBytes(m0(index)); } else { return m0(index); } }
3.26
flink_MemorySegment_processAsByteBuffer_rdh
/** * Supplies a {@link ByteBuffer} that represents this entire segment to the given process * consumer. * * <p>Note: The {@link ByteBuffer} passed into the process consumer is temporary and could * become invalid after the processing. Thus, the process consumer should not try to keep any * reference of the {@link ByteBuffer}. * * @param processConsumer * to accept the segment as {@link ByteBuffer}. */ public void processAsByteBuffer(Consumer<ByteBuffer> processConsumer) { Preconditions.checkNotNull(processConsumer).accept(wrapInternal(0, size)); }
3.26
flink_MemorySegment_getCharLittleEndian_rdh
/** * Reads a character value (16 bit, 2 bytes) from the given position, in little-endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #getChar(int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #getChar(int)} is the * preferable choice. * * @param index * The position from which the value will be read. * @return The character value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public char getCharLittleEndian(int index) { if (LITTLE_ENDIAN) { return getChar(index); } else { return Character.reverseBytes(getChar(index)); } }
3.26
flink_MemorySegment_getFloatLittleEndian_rdh
/** * Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in * little endian byte order. This method's speed depends on the system's native byte order, and * it is possibly slower than {@link #getFloat(int)}. For most cases (such as transient storage * in memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link #getFloat(int)} is the preferable choice. * * @param index * The position from which the value will be read. * @return The float value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public float getFloatLittleEndian(int index) { return Float.intBitsToFloat(getIntLittleEndian(index)); }
3.26
flink_MemorySegment_m0_rdh
/** * Reads an int value (32bit, 4 bytes) from the given position, in the system's native byte * order. This method offers the best speed for integer reading and should be used unless a * specific byte order is required. In most cases, it suffices to know that the byte order in * which the value is written is the same as the one in which it is read (such as transient * storage in memory, or serialization for I/O and network), making this method the preferable * choice. * * @param index * The position from which the value will be read. * @return The int value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public int m0(int index) { final long pos = address + index; if ((index >= 0) && (pos <= (f0 - 4))) { return UNSAFE.getInt(heapMemory, pos); } else if (address > f0) {throw new IllegalStateException("segment has been freed"); } else {// index is in fact invalid throw new IndexOutOfBoundsException(); } }
3.26
flink_MemorySegment_copyToUnsafe_rdh
/** * Bulk copy method. Copies {@code numBytes} bytes to target unsafe object and pointer. NOTE: * This is an unsafe method, no check here, please be careful. * * @param offset * The position in this memory segment from which the bytes will be read. * @param target * The unsafe memory to copy the bytes to. * @param targetPointer * The position in the target unsafe memory to copy the chunk to. * @param numBytes * The number of bytes to copy. * @throws IndexOutOfBoundsException * If the source segment does not contain the given number of * bytes (starting from offset). */ public void copyToUnsafe(int offset, Object target, int targetPointer, int numBytes) { final long thisPointer = this.address + offset; if ((thisPointer + numBytes) > f0) {throw new IndexOutOfBoundsException(String.format("offset=%d, numBytes=%d, address=%d", offset, numBytes, this.address)); } UNSAFE.copyMemory(this.heapMemory, thisPointer, target, targetPointer, numBytes); }
3.26
flink_MemorySegment_putLongLittleEndian_rdh
/** * Writes the given long value (64bit, 8 bytes) to the given position in little endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #putLong(int, long)}. For most cases (such as transient storage in memory * or serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #putLong(int, long)} * is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The long value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public void putLongLittleEndian(int index, long value) { if (LITTLE_ENDIAN) { putLong(index, value); } else {putLong(index, Long.reverseBytes(value)); } }
3.26
flink_MemorySegment_getChar_rdh
/** * Reads a char value from the given position, in the system's native byte order. * * @param index * The position from which the memory will be read. * @return The char value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ @SuppressWarnings("restriction") public char getChar(int index) { final long pos = address + index; if ((index >= 0) && (pos <= (f0 - 2))) { return UNSAFE.getChar(heapMemory, pos); } else if (address > f0) { throw new IllegalStateException("This segment has been freed."); } else { // index is in fact invalid throw new IndexOutOfBoundsException(); } }
3.26
flink_MemorySegment_compare_rdh
/** * Compares two memory segment regions with different length. * * @param seg2 * Segment to compare this segment with * @param offset1 * Offset of this segment to start comparing * @param offset2 * Offset of seg2 to start comparing * @param len1 * Length of this memory region to compare * @param len2 * Length of seg2 to compare * @return 0 if equal, -1 if seg1 &lt; seg2, 1 otherwise */ public int compare(MemorySegment seg2, int offset1, int offset2, int len1, int len2) { final int minLength = Math.min(len1, len2); int c = compare(seg2, offset1, offset2, minLength); return c == 0 ? len1 - len2 : c; }
3.26
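The two-length compare above reduces to comparing the common prefix and breaking ties by length. A standalone sketch of that rule on plain byte arrays; Arrays.compare stands in for the segment-level compare that these snippets do not show.

import java.util.Arrays;

// Sketch of the "compare common prefix, then lengths" rule from compare(seg2, ...) above.
public class RegionCompareSketch {
    static int compareRegions(byte[] a, byte[] b, int len1, int len2) {
        int minLength = Math.min(len1, len2);
        int c = Arrays.compare(a, 0, minLength, b, 0, minLength);
        return c == 0 ? len1 - len2 : c;
    }

    public static void main(String[] args) {
        byte[] x = {1, 2, 3};
        byte[] y = {1, 2, 3, 4};
        System.out.println(compareRegions(x, y, 3, 4)); // negative: x is a proper prefix of y
    }
}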
flink_MemorySegment_putDoubleLittleEndian_rdh
/** * Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position * in little endian byte order. This method's speed depends on the system's native byte order, * and it is possibly slower than {@link #putDouble(int, double)}. For most cases (such as * transient storage in memory or serialization for I/O and network), it suffices to know that * the byte order in which the value is written is the same as the one in which it is read, and * {@link #putDouble(int, double)} is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The double value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public void putDoubleLittleEndian(int index, double value) { putLongLittleEndian(index, Double.doubleToRawLongBits(value)); }
3.26
flink_MemorySegment_getShortBigEndian_rdh
/** * Reads a short integer value (16 bit, 2 bytes) from the given position, in big-endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #getShort(int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #getShort(int)} is * the preferable choice. * * @param index * The position from which the value will be read. * @return The short value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public short getShortBigEndian(int index) { if (LITTLE_ENDIAN) { return Short.reverseBytes(getShort(index)); } else { return getShort(index); } }
3.26
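The big-endian accessors in the rows above all follow the same pattern: read or write in native order and byte-swap only when the platform is little-endian. A self-contained sketch of that pattern; the LITTLE_ENDIAN flag here is derived from java.nio.ByteOrder as an assumption about how the snippets' constant is defined, and nativeShort stands in for MemorySegment's native-order read.

import java.nio.ByteOrder;

// Sketch of the "native read + conditional byte swap" pattern used by getShortBigEndian above.
public class EndianSketch {
    static final boolean LITTLE_ENDIAN =
            ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN; // assumed definition

    static short getShortBigEndian(short nativeShort) {
        return LITTLE_ENDIAN ? Short.reverseBytes(nativeShort) : nativeShort;
    }

    public static void main(String[] args) {
        short stored = 0x1234;
        System.out.printf("big-endian view: 0x%04x%n", getShortBigEndian(stored));
    }
}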
flink_MemorySegment_getFloatBigEndian_rdh
/** * Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in * big endian byte order. This method's speed depends on the system's native byte order, and it * is possibly slower than {@link #getFloat(int)}. For most cases (such as transient storage in * memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link #getFloat(int)} is the preferable choice. * * @param index * The position from which the value will be read. * @return The float value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public float getFloatBigEndian(int index) { return Float.intBitsToFloat(getIntBigEndian(index)); }
3.26
flink_MemorySegment_putDoubleBigEndian_rdh
/** * Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position * in big endian byte order. This method's speed depends on the system's native byte order, and * it is possibly slower than {@link #putDouble(int, double)}. For most cases (such as transient * storage in memory or serialization for I/O and network), it suffices to know that the byte * order in which the value is written is the same as the one in which it is read, and {@link #putDouble(int, double)} is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The double value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public void putDoubleBigEndian(int index, double value) { putLongBigEndian(index, Double.doubleToRawLongBits(value)); }
3.26
flink_MemorySegment_putShortLittleEndian_rdh
/** * Writes the given short integer value (16 bit, 2 bytes) to the given position in little-endian * byte order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #putShort(int, short)}. For most cases (such as transient storage in * memory or serialization for I/O and network), it suffices to know that the byte order in * which the value is written is the same as the one in which it is read, and {@link #putShort(int, short)} is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The short value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public void putShortLittleEndian(int index, short value) { if (LITTLE_ENDIAN) { putShort(index, value); } else { putShort(index, Short.reverseBytes(value)); } }
3.26
flink_MemorySegment_swapBytes_rdh
/** * Swaps bytes between two memory segments, using the given auxiliary buffer. * * @param tempBuffer * The auxiliary buffer in which to put data during triangle swap. * @param seg2 * Segment to swap bytes with * @param offset1 * Offset of this segment to start swapping * @param offset2 * Offset of seg2 to start swapping * @param len * Length of the swapped memory region */ public void swapBytes(byte[] tempBuffer, MemorySegment seg2, int offset1, int offset2, int len) { if ((((offset1 | offset2) | len) | (tempBuffer.length - len)) >= 0) { final long thisPos = this.address + offset1; final long otherPos = seg2.address + offset2; if ((thisPos <= (this.f0 - len)) && (otherPos <= (seg2.f0 - len))) { // this -> temp buffer UNSAFE.copyMemory(this.heapMemory, thisPos, tempBuffer, BYTE_ARRAY_BASE_OFFSET, len); // other -> this UNSAFE.copyMemory(seg2.heapMemory, otherPos, this.heapMemory, thisPos, len); // temp buffer -> other UNSAFE.copyMemory(tempBuffer, BYTE_ARRAY_BASE_OFFSET, seg2.heapMemory, otherPos, len); return; } else if (this.address > this.f0) { throw new IllegalStateException("this memory segment has been freed."); } else if (seg2.address > seg2.f0) { throw new IllegalStateException("other memory segment has been freed."); } } // index is in fact invalid throw new IndexOutOfBoundsException(String.format("offset1=%d, offset2=%d, len=%d, bufferSize=%d, address1=%d, address2=%d", offset1, offset2, len, tempBuffer.length, this.address, seg2.address)); }
3.26
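A usage sketch for swapBytes with a small auxiliary buffer. MemorySegmentFactory.wrap is an assumed factory; the swap and the single-byte get come from the snippets in this section.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class SwapBytesExample {
    public static void main(String[] args) {
        MemorySegment a = MemorySegmentFactory.wrap(new byte[] {1, 2, 3, 4});
        MemorySegment b = MemorySegmentFactory.wrap(new byte[] {9, 8, 7, 6});

        // the auxiliary buffer must be at least as long as the swapped region
        byte[] temp = new byte[2];

        // swap the first two bytes of both segments
        a.swapBytes(temp, b, 0, 0, 2);

        System.out.println(a.get(0)); // 9
        System.out.println(b.get(0)); // 1
    }
}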
flink_MemorySegment_free_rdh
/** * Frees this memory segment. * * <p>After this operation has been called, no further operations are possible on the memory * segment; any attempt will fail. The actual memory (heap or off-heap) will only be released after this * memory segment object has been garbage collected. */ public void free() { if (isFreedAtomic.getAndSet(true)) { // the segment has already been freed if (checkMultipleFree) { throw new IllegalStateException("MemorySegment can be freed only once!"); } } else { // this ensures we can place no more data and trigger // the checks for the freed segment address = f0 + 1; offHeapBuffer = null; // to enable GC of unsafe memory if (cleaner != null) { cleaner.run(); cleaner = null; } } }
3.26
flink_MemorySegment_isFreed_rdh
/** * Checks whether the memory segment was freed. * * @return <tt>true</tt>, if the memory segment has been freed, <tt>false</tt> otherwise. */ @VisibleForTesting public boolean isFreed() { return address > f0; }
3.26
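A sketch of the free/isFreed life cycle described in the two snippets above: after free(), any further access throws. MemorySegmentFactory.allocateUnpooledSegment is an assumed factory method, and isFreed is annotated @VisibleForTesting, so production code would normally not call it.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class FreeExample {
    public static void main(String[] args) {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(8);
        segment.putInt(0, 42);

        segment.free();
        System.out.println(segment.isFreed()); // true

        try {
            segment.putInt(0, 7); // any access after free() fails
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // "segment has been freed"
        }
    }
}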
flink_MemorySegment_putShort_rdh
/** * Writes the given short value into this buffer at the given position, using the native byte * order of the system. * * @param index * The position at which the value will be written. * @param value * The short value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public void putShort(int index, short value) { final long pos = address + index; if ((index >= 0) && (pos <= (f0 - 2))) { UNSAFE.putShort(heapMemory, pos, value); } else if (address > f0) { throw new IllegalStateException("segment has been freed");} else { // index is in fact invalid throw new IndexOutOfBoundsException(); } }
3.26
flink_MemorySegment_putInt_rdh
/** * Writes the given int value (32bit, 4 bytes) to the given position in the system's native byte * order. This method offers the best speed for integer writing and should be used unless a * specific byte order is required. In most cases, it suffices to know that the byte order in * which the value is written is the same as the one in which it is read (such as transient * storage in memory, or serialization for I/O and network), making this method the preferable * choice. * * @param index * The position at which the value will be written. * @param value * The int value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public void putInt(int index, int value) {final long pos = address + index; if ((index >= 0) && (pos <= (f0 - 4))) { UNSAFE.putInt(heapMemory, pos, value); } else if (address > f0) { throw new IllegalStateException("segment has been freed"); } else { // index is in fact invalid throw new IndexOutOfBoundsException(); } }
3.26
flink_MemorySegment_putLongBigEndian_rdh
/** * Writes the given long value (64bit, 8 bytes) to the given position in big endian byte order. * This method's speed depends on the system's native byte order, and it is possibly slower than * {@link #putLong(int, long)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #putLong(int, long)} * is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The long value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public void putLongBigEndian(int index, long value) { if (LITTLE_ENDIAN) { putLong(index, Long.reverseBytes(value)); } else { putLong(index, value); }}
3.26
flink_MemorySegment_put_rdh
/** * Bulk put method. Copies {@code numBytes} bytes from the given {@code ByteBuffer}, into this * memory segment. The bytes will be read from the source buffer starting at the buffer's * current position, and will be written to this memory segment starting at {@code offset}. If * this method attempts to read more bytes than the source byte buffer has remaining (with * respect to {@link ByteBuffer#remaining()}), this method will cause a {@link java.nio.BufferUnderflowException}. * * @param offset * The position where the bytes are started to be written to in this memory * segment. * @param source * The ByteBuffer to copy the bytes from. * @param numBytes * The number of bytes to copy. * @throws IndexOutOfBoundsException * If the offset is invalid, or the source buffer does not * contain the given number of bytes, or this segment does not have enough space for the * bytes (counting from offset). */ public void put(int offset, ByteBuffer source, int numBytes) { // check the byte array offset and length if (((offset | numBytes) | (offset + numBytes)) < 0) { throw new IndexOutOfBoundsException(); } final int sourceOffset = source.position(); final int remaining = source.remaining(); if (remaining < numBytes) { throw new BufferUnderflowException(); } if (source.isDirect()) { // copy to the target memory directly final long sourcePointer = getByteBufferAddress(source) + sourceOffset; final long targetPointer = address + offset; if (targetPointer <= (f0 - numBytes)) { UNSAFE.copyMemory(null, sourcePointer, heapMemory, targetPointer, numBytes); source.position(sourceOffset + numBytes); } else if (address > f0) { throw new IllegalStateException("segment has been freed"); } else { throw new IndexOutOfBoundsException(); } } else if (source.hasArray()) { // move directly into the byte array put(offset, source.array(), sourceOffset + source.arrayOffset(), numBytes); // this must be done after the copy to ensure that the byte buffer is not // modified in case the call fails source.position(sourceOffset + numBytes); } else { // other types of byte buffers for (int i = 0; i < numBytes; i++) { put(offset++, source.get()); } } }
3.26
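A usage sketch for the bulk put from a ByteBuffer above, exercising the direct-buffer path. Only standard java.nio calls and the put method shown are used; MemorySegmentFactory.allocateUnpooledSegment is an assumption.

import java.nio.ByteBuffer;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class PutFromByteBufferExample {
    public static void main(String[] args) {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(16);

        // a direct source buffer; the heap-array and generic paths behave the same way
        ByteBuffer source = ByteBuffer.allocateDirect(8);
        source.putLong(123456789L);
        source.flip();

        // copy all 8 remaining bytes into the segment starting at offset 0;
        // the source position is advanced by 8 afterwards
        segment.put(0, source, 8);
        System.out.println(source.remaining()); // 0
    }
}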
flink_MemorySegment_copyTo_rdh
/** * Bulk copy method. Copies {@code numBytes} bytes from this memory segment, starting at * position {@code offset} to the target memory segment. The bytes will be put into the target * segment starting at position {@code targetOffset}. * * @param offset * The position where the bytes are started to be read from in this memory * segment. * @param target * The memory segment to copy the bytes to. * @param targetOffset * The position in the target memory segment to copy the chunk to. * @param numBytes * The number of bytes to copy. * @throws IndexOutOfBoundsException * If either of the offsets is invalid, or the source segment * does not contain the given number of bytes (starting from offset), or the target segment * does not have enough space for the bytes (counting from targetOffset). */ public void copyTo(int offset, MemorySegment target, int targetOffset, int numBytes) { final byte[] thisHeapRef = this.heapMemory; final byte[] otherHeapRef = target.heapMemory; final long thisPointer = this.address + offset; final long otherPointer = target.address + targetOffset; if (((((numBytes | offset) | targetOffset) >= 0) && (thisPointer <= (this.f0 - numBytes))) && (otherPointer <= (target.f0 - numBytes))) { UNSAFE.copyMemory(thisHeapRef, thisPointer, otherHeapRef, otherPointer, numBytes); } else if (this.address > this.f0) { throw new IllegalStateException("this memory segment has been freed."); } else if (target.address > target.f0) { throw new IllegalStateException("target memory segment has been freed."); } else { throw new IndexOutOfBoundsException(String.format("offset=%d, targetOffset=%d, numBytes=%d, address=%d, targetAddress=%d", offset, targetOffset, numBytes, this.address, target.address)); } }
3.26
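A usage sketch for copyTo between two segments. getInt is assumed to be the native-order counterpart of the putInt shown earlier in this section, and the factory method is likewise an assumption.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class CopyToExample {
    public static void main(String[] args) {
        MemorySegment source = MemorySegmentFactory.allocateUnpooledSegment(16);
        MemorySegment target = MemorySegmentFactory.allocateUnpooledSegment(16);

        source.putInt(0, 0xCAFEBABE);

        // copy 4 bytes from source offset 0 to target offset 8
        source.copyTo(0, target, 8, 4);

        System.out.println(Integer.toHexString(target.getInt(8))); // cafebabe
    }
}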
flink_MemorySegment_putFloat_rdh
/** * Writes the given single-precision float value (32bit, 4 bytes) to the given position in the * system's native byte order. This method offers the best speed for float writing and should be * used unless a specific byte order is required. In most cases, it suffices to know that the * byte order in which the value is written is the same as the one in which it is read (such as * transient storage in memory, or serialization for I/O and network), making this method the * preferable choice. * * @param index * The position at which the value will be written. * @param value * The float value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public void putFloat(int index, float value) { putInt(index, Float.floatToRawIntBits(value)); }
3.26
flink_MemorySegment_getCharBigEndian_rdh
/** * Reads a character value (16 bit, 2 bytes) from the given position, in big-endian byte order. * This method's speed depends on the system's native byte order, and it is possibly slower than * {@link #getChar(int)}. For most cases (such as transient storage in memory or serialization * for I/O and network), it suffices to know that the byte order in which the value is written * is the same as the one in which it is read, and {@link #getChar(int)} is the preferable * choice. * * @param index * The position from which the value will be read. * @return The character value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public char getCharBigEndian(int index) { if (LITTLE_ENDIAN) { return Character.reverseBytes(getChar(index)); } else { return getChar(index); }}
3.26
flink_MemorySegment_size_rdh
// ------------------------------------------------------------------------ /** * Gets the size of the memory segment, in bytes. * * @return The size of the memory segment. */ public int size() {return size; }
3.26
flink_MemorySegment_putCharBigEndian_rdh
/** * Writes the given character (16 bit, 2 bytes) to the given position in big-endian byte order. * This method's speed depends on the system's native byte order, and it is possibly slower than * {@link #putChar(int, char)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #putChar(int, char)} * is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The char value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public void putCharBigEndian(int index, char value) { if (LITTLE_ENDIAN) { putChar(index, Character.reverseBytes(value)); } else { putChar(index, value); } }
3.26
flink_MemorySegment_getBoolean_rdh
/** * Reads one byte at the given position and returns its boolean representation. * * @param index * The position from which the memory will be read. * @return The boolean value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 1. */ public boolean getBoolean(int index) { return get(index) != 0; }
3.26
flink_MemorySegment_putBoolean_rdh
/** * Writes one byte representing the given boolean value into this buffer at the given position. * * @param index * The position at which the memory will be written. * @param value * The boolean value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 1. */ public void putBoolean(int index, boolean value) { put(index, ((byte) (value ? 1 : 0))); }
3.26
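A tiny round-trip sketch for the boolean accessors above, showing that a boolean occupies exactly one byte. MemorySegmentFactory.allocateUnpooledSegment is an assumed factory.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class BooleanExample {
    public static void main(String[] args) {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(2);

        segment.putBoolean(0, true);
        segment.putBoolean(1, false);

        // a boolean is stored as a single byte: 1 for true, 0 for false
        System.out.println(segment.get(0));        // 1
        System.out.println(segment.getBoolean(1)); // false
    }
}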
flink_MemorySegment_putCharLittleEndian_rdh
/** * Writes the given character (16 bit, 2 bytes) to the given position in little-endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #putChar(int, char)}. For most cases (such as transient storage in memory * or serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #putChar(int, char)} * is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The char value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 2. */ public void putCharLittleEndian(int index, char value) { if (LITTLE_ENDIAN) { putChar(index, value); } else { putChar(index, Character.reverseBytes(value)); } }
3.26
flink_MemorySegment_putDouble_rdh
/** * Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position * in the system's native byte order. This method offers the best speed for double writing and * should be used unless a specific byte order is required. In most cases, it suffices to know * that the byte order in which the value is written is the same as the one in which it is read * (such as transient storage in memory, or serialization for I/O and network), making this * method the preferable choice. * * @param index * The position at which the memory will be written. * @param value * The double value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public void putDouble(int index, double value) { putLong(index, Double.doubleToRawLongBits(value)); }
3.26
flink_MemorySegment_getLongBigEndian_rdh
/** * Reads a long integer value (64bit, 8 bytes) from the given position, in big endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #getLong(int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #getLong(int)} is the * preferable choice. * * @param index * The position from which the value will be read. * @return The long value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public long getLongBigEndian(int index) { if (LITTLE_ENDIAN) { return Long.reverseBytes(getLong(index)); } else { return getLong(index); } }
3.26
flink_MemorySegment_equalTo_rdh
/** * Compares two memory segment regions for equality. * * @param seg2 * Segment to compare this segment with * @param offset1 * Offset in this segment at which to start comparing * @param offset2 * Offset in seg2 at which to start comparing * @param length * Length of the compared memory region * @return true if equal, false otherwise */ public boolean equalTo(MemorySegment seg2, int offset1, int offset2, int length) { int i = 0; // we assume unaligned accesses are supported. // Compare 8 bytes at a time. while (i <= (length - 8)) { if (getLong(offset1 + i) != seg2.getLong(offset2 + i)) { return false; } i += 8; } // cover the last (length % 8) elements. while (i < length) { if (get(offset1 + i) != seg2.get(offset2 + i)) { return false; } i += 1; } return true; }
3.26
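A usage sketch for equalTo, comparing regions of two wrapped byte arrays. MemorySegmentFactory.wrap is an assumed way to obtain segments over existing arrays.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class EqualToExample {
    public static void main(String[] args) {
        MemorySegment a = MemorySegmentFactory.wrap(new byte[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
        MemorySegment b = MemorySegmentFactory.wrap(new byte[] {5, 6, 7, 8, 9, 0, 0, 0, 0, 0});

        // compare a[5..9] with b[0..4]; the regions hold the same five bytes
        System.out.println(a.equalTo(b, 5, 0, 5)); // true

        // compare the first five bytes of both segments; they differ
        System.out.println(a.equalTo(b, 0, 0, 5)); // false
    }
}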
flink_MemorySegment_getDouble_rdh
/** * Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in * the system's native byte order. This method offers the best speed for double reading and * should be used unless a specific byte order is required. In most cases, it suffices to know * that the byte order in which the value is written is the same as the one in which it is read * (such as transient storage in memory, or serialization for I/O and network), making this * method the preferable choice. * * @param index * The position from which the value will be read. * @return The double value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public double getDouble(int index) { return Double.longBitsToDouble(getLong(index)); }
3.26
flink_MemorySegment_getOffHeapBuffer_rdh
/** * Returns the off-heap buffer backing this memory segment. * * @return underlying off-heap buffer * @throws IllegalStateException * if the memory segment does not represent an off-heap buffer */ public ByteBuffer getOffHeapBuffer() { if (offHeapBuffer != null) { return offHeapBuffer; } else { throw new IllegalStateException("Memory segment does not represent off-heap buffer"); } }
3.26
flink_MemorySegment_putFloatLittleEndian_rdh
/** * Writes the given single-precision float value (32bit, 4 bytes) to the given position in * little endian byte order. This method's speed depends on the system's native byte order, and * it is possibly slower than {@link #putFloat(int, float)}. For most cases (such as transient * storage in memory or serialization for I/O and network), it suffices to know that the byte * order in which the value is written is the same as the one in which it is read, and {@link #putFloat(int, float)} is the preferable choice. * * @param index * The position at which the value will be written. * @param value * The float value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public void putFloatLittleEndian(int index, float value) { putIntLittleEndian(index, Float.floatToRawIntBits(value)); }
3.26
flink_MemorySegment_putIntBigEndian_rdh
/** * Writes the given int value (32bit, 4 bytes) to the given position in big endian byte order. * This method's speed depends on the system's native byte order, and it is possibly slower than * {@link #putInt(int, int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #putInt(int, int)} is * the preferable choice. * * @param index * The position at which the value will be written. * @param value * The int value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public void putIntBigEndian(int index, int value) { if (LITTLE_ENDIAN) { putInt(index, Integer.reverseBytes(value)); } else { putInt(index, value); } }
3.26
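A small sketch contrasting the native-order putInt with the explicit big-endian variant above, as used when a fixed on-wire layout is required. MemorySegmentFactory.allocateUnpooledSegment is an assumed factory; the single-byte get accessor is the one referenced elsewhere in these snippets.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class IntByteOrderExample {
    public static void main(String[] args) {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(8);

        // native order: fastest, but the byte layout depends on the platform
        segment.putInt(0, 0x11223344);

        // explicit big-endian: fixed layout, e.g. for an externally defined format
        segment.putIntBigEndian(4, 0x11223344);

        // the big-endian layout is always 11 22 33 44, byte by byte
        System.out.printf("%02x %02x %02x %02x%n",
                segment.get(4), segment.get(5), segment.get(6), segment.get(7));
    }
}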
flink_MemorySegment_putIntLittleEndian_rdh
/** * Writes the given int value (32bit, 4 bytes) to the given position in little endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #putInt(int, int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #putInt(int, int)} is * the preferable choice. * * @param index * The position at which the value will be written. * @param value * The int value to be written. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 4. */ public void putIntLittleEndian(int index, int value) { if (LITTLE_ENDIAN) { putInt(index, value); } else { putInt(index, Integer.reverseBytes(value)); } }
3.26
flink_MemorySegment_getLongLittleEndian_rdh
/** * Reads a long integer value (64bit, 8 bytes) from the given position, in little endian byte * order. This method's speed depends on the system's native byte order, and it is possibly * slower than {@link #getLong(int)}. For most cases (such as transient storage in memory or * serialization for I/O and network), it suffices to know that the byte order in which the * value is written is the same as the one in which it is read, and {@link #getLong(int)} is the * preferable choice. * * @param index * The position from which the value will be read. * @return The long value at the given position. * @throws IndexOutOfBoundsException * Thrown, if the index is negative, or larger than the * segment size minus 8. */ public long getLongLittleEndian(int index) { if (LITTLE_ENDIAN) { return getLong(index); } else { return Long.reverseBytes(getLong(index)); } }
3.26
flink_InternalTimersSnapshotReaderWriters_getWriterForVersion_rdh
// ------------------------------------------------------------------------------- // Writers // - pre-versioned: Flink 1.4.0 // - v1: Flink 1.4.1 // - v2: Flink 1.8.0 // ------------------------------------------------------------------------------- public static <K, N> InternalTimersSnapshotWriter getWriterForVersion(int version, InternalTimersSnapshot<K, N> timersSnapshot, TypeSerializer<K> keySerializer, TypeSerializer<N> namespaceSerializer) { switch (version) { case NO_VERSION : case 1 : throw new IllegalStateException("Since Flink 1.17, the pre-versioned (<= Flink 1.4.0) and version 1 (< Flink 1.8.0) formats of " + "InternalTimersSnapshotWriter are no longer supported."); case InternalTimerServiceSerializationProxy.VERSION : return new InternalTimersSnapshotWriterV2<>(timersSnapshot, keySerializer, namespaceSerializer); default : // guard for future throw new IllegalStateException("Unrecognized internal timers snapshot writer version: " + version); } }
3.26
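A hedged sketch of how a caller might obtain the current-version writer from the switch above. The package locations of the imported classes are assumptions based on typical Flink layout and are not confirmed by these entries; only the getWriterForVersion call itself comes from the snippet.

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.streaming.api.operators.InternalTimerServiceSerializationProxy;
import org.apache.flink.streaming.api.operators.InternalTimersSnapshot;
import org.apache.flink.streaming.api.operators.InternalTimersSnapshotReaderWriters;
import org.apache.flink.streaming.api.operators.InternalTimersSnapshotReaderWriters.InternalTimersSnapshotWriter;

public class TimersSnapshotWriterExample {
    // returns the writer for the current snapshot format; older versions throw IllegalStateException
    public static <K, N> InternalTimersSnapshotWriter currentWriter(
            InternalTimersSnapshot<K, N> snapshot,
            TypeSerializer<K> keySerializer,
            TypeSerializer<N> namespaceSerializer) {
        return InternalTimersSnapshotReaderWriters.getWriterForVersion(
                InternalTimerServiceSerializationProxy.VERSION,
                snapshot,
                keySerializer,
                namespaceSerializer);
    }
}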