Dataset columns: name (string, length 12 to 178), code_snippet (string, length 8 to 36.5k), score (float64, 3.26 to 3.68)
flink_CliClient_printExecutionException_rdh
// --------------------------------------------------------------------------------------------
// Utils
// --------------------------------------------------------------------------------------------

private void printExecutionException(Throwable t) {
    final String errorMessage = CliStrings.MESSAGE_SQL_EXECUTION_ERROR;
    LOG.warn(errorMessage, t);
    boolean isVerbose = f0.getSessionConfig().get(SqlClientOptions.VERBOSE);
    terminal.writer().println(CliStrings.messageError(errorMessage, t, isVerbose).toAnsi());
    terminal.flush();
}
3.26
flink_CliClient_executeInitialization_rdh
/** * Initialize the Cli Client with the content. */ public boolean executeInitialization(String content) { try { OutputStream outputStream = new ByteArrayOutputStream(256); terminal = TerminalUtils.createDumbTerminal(outputStream); boolean v1 = executeFile(content, outputStream, ExecutionMode.INITIALIZATION); LOG.info(outputStream.toString()); return v1; } finally { closeTerminal(); } }
3.26
flink_CliClient_close_rdh
/** * Closes the CLI instance. */ public void close() { if (terminal != null) { closeTerminal(); } }
3.26
flink_CliClient_executeInteractive_rdh
// --------------------------------------------------------------------------------------------

/**
 * Executes statements from the user input and prints status information and/or errors on the
 * terminal.
 */
private void executeInteractive(LineReader inputLineReader) {
    // make space from previous output and test the writer
    terminal.writer().println();
    terminal.writer().flush();

    // print welcome
    terminal.writer().append(CliStrings.MESSAGE_WELCOME);

    LineReader lineReader =
            (inputLineReader == null)
                    ? createLineReader(terminal, ExecutionMode.INTERACTIVE_EXECUTION)
                    : inputLineReader;
    getAndExecuteStatements(lineReader, false);
}
3.26
flink_CliClient_executeFile_rdh
/**
 * Executes the content of a SQL file and prints status information and/or errors on the
 * terminal.
 *
 * @param content SQL file content
 */
private boolean executeFile(String content, OutputStream outputStream, ExecutionMode mode) {
    terminal.writer().println(CliStrings.messageInfo(CliStrings.MESSAGE_EXECUTE_FILE).toAnsi());

    // append line delimiter
    try (InputStream inputStream =
                    new ByteArrayInputStream(SqlMultiLineParser.formatSqlFile(content).getBytes());
            Terminal dumbTerminal = TerminalUtils.createDumbTerminal(inputStream, outputStream)) {
        LineReader lineReader = createLineReader(dumbTerminal, mode);
        return getAndExecuteStatements(lineReader, true);
    } catch (Throwable e) {
        printExecutionException(e);
        return false;
    }
}
3.26
flink_BashJavaUtils_getTmResourceParams_rdh
/** * Generate and print JVM parameters and dynamic configs of task executor resources. The last * two lines of the output should be JVM parameters and dynamic configs respectively. */ private static List<String> getTmResourceParams(Configuration configuration) { Configuration v5 = TaskExecutorProcessUtils.getConfigurationMapLegacyTaskManagerHeapSizeToConfigOption(configuration, TaskManagerOptions.TOTAL_FLINK_MEMORY); TaskExecutorProcessSpec taskExecutorProcessSpec = TaskExecutorProcessUtils.processSpecFromConfig(v5); logTaskExecutorConfiguration(taskExecutorProcessSpec); return Arrays.asList(ProcessMemoryUtils.generateJvmParametersStr(taskExecutorProcessSpec), TaskExecutorProcessUtils.generateDynamicConfigsStr(taskExecutorProcessSpec)); }
3.26
flink_BashJavaUtils_getJmResourceParams_rdh
/** * Generate and print JVM parameters of Flink Master resources as one line. */ @VisibleForTestingstatic List<String> getJmResourceParams(Configuration configuration) { JobManagerProcessSpec jobManagerProcessSpec = JobManagerProcessUtils.processSpecFromConfigWithNewOptionToInterpretLegacyHeap(configuration, JobManagerOptions.JVM_HEAP_MEMORY); logMasterConfiguration(jobManagerProcessSpec); return Arrays.asList(JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, configuration), JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec)); }
3.26
flink_StreamGraphHasherV2_traverseStreamGraphAndGenerateHashes_rdh
/**
 * Returns a map with a hash for each {@link StreamNode} of the {@link StreamGraph}. The hash is
 * used as the {@link JobVertexID} in order to identify nodes across job submissions if they
 * didn't change.
 *
 * <p>The complete {@link StreamGraph} is traversed. The hash is either computed from the
 * transformation's user-specified id (see {@link Transformation#getUid()}) or generated in a
 * deterministic way.
 *
 * <p>The generated hash is deterministic with respect to:
 *
 * <ul>
 *   <li>node-local properties (node ID),
 *   <li>chained output nodes, and
 *   <li>input node hashes
 * </ul>
 *
 * @return A map from {@link StreamNode#id} to hash as 16-byte array.
 */
@Override
public Map<Integer, byte[]> traverseStreamGraphAndGenerateHashes(StreamGraph streamGraph) {
    // The hash function used to generate the hash
    final HashFunction hashFunction = Hashing.murmur3_128(0);
    final Map<Integer, byte[]> hashes = new HashMap<>();

    Set<Integer> visited = new HashSet<>();
    Queue<StreamNode> remaining = new ArrayDeque<>();

    // We need to make the source order deterministic. The source IDs are
    // not returned in the same order, which means that submitting the same
    // program twice might result in a different traversal, which breaks the
    // deterministic hash assignment.
    List<Integer> v4 = new ArrayList<>();
    for (Integer sourceNodeId : streamGraph.getSourceIDs()) {
        v4.add(sourceNodeId);
    }
    Collections.sort(v4);

    //
    // Traverse the graph in a breadth-first manner. Keep in mind that
    // the graph is not a tree and multiple paths to nodes can exist.
    //

    // Start with source nodes
    for (Integer sourceNodeId : v4) {
        remaining.add(streamGraph.getStreamNode(sourceNodeId));
        visited.add(sourceNodeId);
    }

    StreamNode currentNode;
    while ((currentNode = remaining.poll()) != null) {
        // Generate the hash code. Because multiple paths exist to each
        // node, we might not have all required inputs available to
        // generate the hash code.
        if (generateNodeHash(
                currentNode, hashFunction, hashes, streamGraph.isChainingEnabled(), streamGraph)) {
            // Add the child nodes
            for (StreamEdge outEdge : currentNode.getOutEdges()) {
                StreamNode v9 = streamGraph.getTargetVertex(outEdge);

                if (!visited.contains(v9.getId())) {
                    remaining.add(v9);
                    visited.add(v9.getId());
                }
            }
        } else {
            // We will revisit this later.
            visited.remove(currentNode.getId());
        }
    }

    return hashes;
}
3.26
flink_StreamGraphHasherV2_generateUserSpecifiedHash_rdh
/** * Generates a hash from a user-specified ID. */ private byte[] generateUserSpecifiedHash(StreamNode node, Hasher hasher) { hasher.putString(node.getTransformationUID(), Charset.forName("UTF-8")); return hasher.hash().asBytes(); }
3.26
flink_StreamGraphHasherV2_generateDeterministicHash_rdh
/** * Generates a deterministic hash from node-local properties and input and output edges. */ private byte[] generateDeterministicHash(StreamNode node, Hasher hasher, Map<Integer, byte[]> hashes, boolean isChainingEnabled, StreamGraph streamGraph) { // Include stream node to hash. We use the current size of the computed // hashes as the ID. We cannot use the node's ID, because it is // assigned from a static counter. This will result in two identical // programs having different hashes. generateNodeLocalHash(hasher, hashes.size()); // Include chained nodes to hash for (StreamEdge outEdge : node.getOutEdges()) { if (isChainable(outEdge, isChainingEnabled, streamGraph)) { // Use the hash size again, because the nodes are chained to // this node. This does not add a hash for the chained nodes. generateNodeLocalHash(hasher, hashes.size()); } } byte[] hash = hasher.hash().asBytes(); // Make sure that all input nodes have their hash set before entering // this loop (calling this method). for (StreamEdge inEdge : node.getInEdges()) { byte[] v20 = hashes.get(inEdge.getSourceId()); // Sanity check if (v20 == null) { throw new IllegalStateException(((("Missing hash for input node " + streamGraph.getSourceVertex(inEdge)) + ". Cannot generate hash for ") + node) + "."); } for (int j = 0; j < hash.length; j++) { hash[j] = ((byte) ((hash[j] * 37) ^ v20[j])); } } if (LOG.isDebugEnabled()) { String udfClassName = ""; if (node.getOperatorFactory() instanceof UdfStreamOperatorFactory) { udfClassName = ((UdfStreamOperatorFactory) (node.getOperatorFactory())).getUserFunctionClassName(); } LOG.debug((((((((((((("Generated hash '" + byteToHexString(hash)) + "' for node ") + "'") + node.toString()) + "' {id: ") + node.getId()) + ", ") + "parallelism: ") + node.getParallelism()) + ", ") + "user function: ") + udfClassName) + "}"); } return hash; }
3.26
flink_StreamGraphHasherV2_generateNodeLocalHash_rdh
/** * Applies the {@link Hasher} to the {@link StreamNode} . The hasher encapsulates the current * state of the hash. * * <p>The specified ID is local to this node. We cannot use the {@link StreamNode#id}, because * it is incremented in a static counter. Therefore, the IDs for identical jobs will otherwise * be different. */ private void generateNodeLocalHash(Hasher hasher, int id) { // This resolves conflicts for otherwise identical source nodes. BUT // the generated hash codes depend on the ordering of the nodes in the // stream graph. hasher.putInt(id); }
3.26
flink_BaseHybridHashTable_close_rdh
/**
 * Closes the hash table. This effectively releases all internal structures and closes all open
 * files and removes them. The call to this method is valid both as a cleanup after the complete
 * inputs were properly processed, and as a cancellation call, which cleans up all resources
 * that are currently held by the hash join.
 */
public void close() {
    // make sure that we close only once
    if (!this.closed.compareAndSet(false, true)) {
        return;
    }

    // clear the current build side channel, if there is one
    if (this.currentSpilledBuildSide != null) {
        try {
            this.currentSpilledBuildSide.getChannel().closeAndDelete();
        } catch (Throwable t) {
            LOG.warn(
                    "Could not close and delete the temp file for the current spilled partition build side.",
                    t);
        }
    }

    // clear the current probe side channel, if there is one
    if (this.currentSpilledProbeSide != null) {
        try {
            this.currentSpilledProbeSide.getChannel().closeAndDelete();
        } catch (Throwable t) {
            LOG.warn(
                    "Could not close and delete the temp file for the current spilled partition probe side.",
                    t);
        }
    }

    // clear the memory in the partitions
    clearPartitions();

    // return the write-behind buffers
    for (int i = 0; i < this.buildSpillRetBufferNumbers; i++) {
        try {
            returnPage(this.buildSpillReturnBuffers.take());
        } catch (InterruptedException iex) {
            throw new RuntimeException("Hashtable closing was interrupted");
        }
    }
    this.buildSpillRetBufferNumbers = 0;
}
3.26
flink_BaseHybridHashTable_findSmallerPrime_rdh
/**
 * Finds the largest prime that is not greater than {@code num}, so that a prime number of
 * buckets can be used to avoid congruences between the partition hash and the bucket hash.
 */
private static int findSmallerPrime(int num) {
    for (; num > 1; num--) {
        if (isPrimeNumber(num)) {
            return num;
        }
    }
    return num;
}
3.26
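A minimal, self-contained sketch of how findSmallerPrime above behaves. The isPrimeNumber helper is only referenced in the snippet, so the trial-division version here is an assumption, and the demo class name is made up:

public class SmallerPrimeDemo {
    // assumed implementation of the isPrimeNumber helper referenced in the snippet
    static boolean isPrimeNumber(int n) {
        if (n < 2) return false;
        for (int i = 2; (long) i * i <= n; i++) {
            if (n % i == 0) return false;
        }
        return true;
    }

    // same logic as the findSmallerPrime snippet above
    static int findSmallerPrime(int num) {
        for (; num > 1; num--) {
            if (isPrimeNumber(num)) {
                return num;
            }
        }
        return num;
    }

    public static void main(String[] args) {
        // a requested bucket count of 100 is rounded down to the prime 97
        System.out.println(findSmallerPrime(100));
    }
}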
flink_BaseHybridHashTable_m0_rdh
/**
 * Bulk memory acquisition. NOTE: Failure to get memory will throw an exception.
 */
public MemorySegment[] m0(int bufferSize) {
    MemorySegment[] memorySegments = new MemorySegment[bufferSize];
    for (int i = 0; i < bufferSize; i++) {
        MemorySegment nextBuffer = getNextBuffer();
        if (nextBuffer == null) {
            throw new RuntimeException("Not enough buffers!");
        }
        memorySegments[i] = nextBuffer;
    }
    return memorySegments;
}
3.26
flink_BaseHybridHashTable_nextSegment_rdh
/** * This is the method called by the partitions to request memory to serialize records. It * automatically spills partitions, if memory runs out. * * @return The next available memory segment. */ @Override public MemorySegment nextSegment() { final MemorySegment seg = getNextBuffer(); if (seg != null) { return seg; } else { try { spillPartition(); } catch (IOException ioex) { throw new RuntimeException("Error spilling Hash Join Partition" + (ioex.getMessage() == null ? "." : ": " + ioex.getMessage()), ioex);} MemorySegment fromSpill = getNextBuffer(); if (fromSpill == null) { throw new RuntimeException("BUG in Hybrid Hash Join: Spilling did not free a buffer."); } else { return fromSpill;} } }
3.26
flink_BaseHybridHashTable_getNextBuffer_rdh
/** * Gets the next buffer to be used with the hash-table, either for an in-memory partition, or * for the table buckets. This method returns <tt>null</tt>, if no more buffer is available. * Spilling a partition may free new buffers then. * * @return The next buffer to be used by the hash-table, or null, if no buffer remains. */ public MemorySegment getNextBuffer() { // check if the pool directly offers memory MemorySegment segment = this.internalPool.nextSegment(); if (segment != null) { return segment; } // check if there are write behind buffers that actually are to be used for the hash table if (this.buildSpillRetBufferNumbers > 0) { // grab at least one, no matter what MemorySegment toReturn; try { toReturn = this.buildSpillReturnBuffers.take(); } catch (InterruptedException iex) { throw new RuntimeException("Hybrid Hash Join was interrupted while taking a buffer."); } this.buildSpillRetBufferNumbers--; // grab as many more buffers as are available directly returnSpillBuffers(); return toReturn; } else { return null; } }
3.26
flink_BaseHybridHashTable_releaseMemoryCacheForSMJ_rdh
/**
 * Because adaptive hash join was introduced, the cached memory segments should be released to
 * the {@link MemoryManager} before switching to sort merge join. Otherwise, opening the sort
 * merge join operator may fail because of insufficient memory.
 *
 * <p>Note: this method should only be invoked for sort merge join.
 */
public void releaseMemoryCacheForSMJ() {
    // return build spill buffer memory first
    returnSpillBuffers();
    freeCurrent();
}
3.26
flink_BaseHybridHashTable_maxInitBufferOfBucketArea_rdh
/**
 * Gives at most one-sixth of the total memory, divided evenly across partitions, to the bucket
 * areas.
 */
public int maxInitBufferOfBucketArea(int partitions) {
    return Math.max(1, ((totalNumBuffers - 2) / 6) / partitions);
}
3.26
flink_BaseHybridHashTable_ensureNumBuffersReturned_rdh
/** * This method makes sure that at least a certain number of memory segments is in the list of * free segments. Free memory can be in the list of free segments, or in the return-queue where * segments used to write behind are put. The number of segments that are in that return-queue, * but are actually reclaimable is tracked. This method makes sure at least a certain number of * buffers is reclaimed. * * @param minRequiredAvailable * The minimum number of buffers that needs to be reclaimed. */ public void ensureNumBuffersReturned(final int minRequiredAvailable) { if (minRequiredAvailable > (internalPool.freePages() + this.buildSpillRetBufferNumbers)) { throw new IllegalArgumentException("More buffers requested available than totally available."); } try { while (internalPool.freePages() < minRequiredAvailable) { returnPage(this.buildSpillReturnBuffers.take());this.buildSpillRetBufferNumbers--;} } catch (InterruptedException iex) { throw new RuntimeException("Hash Join was interrupted."); } }
3.26
flink_BaseHybridHashTable_hash_rdh
/** * The level parameter is needed so that we can have different hash functions when we * recursively apply the partitioning, so that the working set eventually fits into memory. */ public static int hash(int hashCode, int level) { final int rotation = level * 11; int code = Integer.rotateLeft(hashCode, rotation); return code >= 0 ? code : -(code + 1); }
3.26
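An illustrative, standalone sketch (not taken from Flink) of why the level parameter in the hash method above matters: the same key is assigned to different partitions at different recursion levels, so re-partitioning a spilled partition does not reproduce the original clustering. The demo class and the modulo-based partition assignment are assumptions for illustration:

public class LevelHashDemo {
    // same logic as the hash(hashCode, level) snippet above
    static int hash(int hashCode, int level) {
        final int rotation = level * 11;
        int code = Integer.rotateLeft(hashCode, rotation);
        return code >= 0 ? code : -(code + 1);
    }

    public static void main(String[] args) {
        int key = 0x5A5A5A5A;
        int numPartitions = 16;
        for (int level = 0; level < 4; level++) {
            // the same key lands in a different partition at each recursion level
            System.out.println("level " + level + " -> partition " + (hash(key, level) % numPartitions));
        }
    }
}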
flink_BaseHybridHashTable_partitionLevelHash_rdh
/**
 * Hashes again at the partition level, to avoid conflicts between the two hash layers.
 */
static int partitionLevelHash(int hash) {
    return hash ^ (hash >>> 16);
}
3.26
flink_BaseHybridHashTable_freeCurrent_rdh
/** * Free the memory not used. */ public void freeCurrent() { internalPool.cleanCache(); }
3.26
flink_BaseHybridHashTable_getPartitioningFanOutNoEstimates_rdh
/** * Gets the number of partitions to be used for an initial hash-table. */ private int getPartitioningFanOutNoEstimates() { return Math.max(11, findSmallerPrime(((int) (Math.min((buildRowCount * avgRecordLen) / (10 * segmentSize), MAX_NUM_PARTITIONS))))); }
3.26
flink_FineGrainedSlotManager_declareNeededResources_rdh
/** * DO NOT call this method directly. Use {@link #declareNeededResourcesWithDelay()} instead. */ private void declareNeededResources() {Map<InstanceID, WorkerResourceSpec> unWantedTaskManagers = taskManagerTracker.getUnWantedTaskManager(); Map<WorkerResourceSpec, Set<InstanceID>> unWantedTaskManagerBySpec = unWantedTaskManagers.entrySet().stream().collect(Collectors.groupingBy(Map.Entry::getValue, Collectors.mapping(Map.Entry::getKey, Collectors.toSet()))); // registered TaskManagers except unwanted worker. Stream<WorkerResourceSpec> registeredTaskManagerStream = taskManagerTracker.getRegisteredTaskManagers().stream().filter(t -> !unWantedTaskManagers.containsKey(t.getInstanceId())).map(t -> WorkerResourceSpec.fromTotalResourceProfile(t.getTotalResource(), t.getDefaultNumSlots())); // pending TaskManagers. Stream<WorkerResourceSpec> pendingTaskManagerStream = taskManagerTracker.getPendingTaskManagers().stream().map(t -> WorkerResourceSpec.fromTotalResourceProfile(t.getTotalResourceProfile(), t.getNumSlots())); Map<WorkerResourceSpec, Integer> requiredWorkers = Stream.concat(registeredTaskManagerStream, pendingTaskManagerStream).collect(Collectors.groupingBy(Function.identity(), Collectors.summingInt(e -> 1))); Set<WorkerResourceSpec> workerResourceSpecs = new HashSet<>(requiredWorkers.keySet()); workerResourceSpecs.addAll(unWantedTaskManagerBySpec.keySet()); List<ResourceDeclaration> resourceDeclarations = new ArrayList<>(); workerResourceSpecs.forEach(spec -> resourceDeclarations.add(new ResourceDeclaration(spec, requiredWorkers.getOrDefault(spec, 0), unWantedTaskManagerBySpec.getOrDefault(spec, Collections.emptySet())))); resourceAllocator.declareResourceNeeded(resourceDeclarations); }
3.26
flink_FineGrainedSlotManager_getNumberRegisteredSlots_rdh
// Legacy APIs // --------------------------------------------------------------------------------------------- @Override public int getNumberRegisteredSlots() { return taskManagerTracker.getNumberRegisteredSlots(); }
3.26
flink_FineGrainedSlotManager_freeSlot_rdh
/** * Free the given slot from the given allocation. If the slot is still allocated by the given * allocation id, then the slot will be freed. * * @param slotId * identifying the slot to free, will be ignored * @param allocationId * with which the slot is presumably allocated */ @Override public void freeSlot(SlotID slotId, AllocationID allocationId) { checkInit(); LOG.debug("Freeing slot {}.", allocationId); if (taskManagerTracker.getAllocatedOrPendingSlot(allocationId).isPresent()) { slotStatusSyncer.freeSlot(allocationId); checkResourceRequirementsWithDelay(); } else { LOG.debug("Trying to free a slot {} which has not been allocated. Ignoring this message.", allocationId); } }
3.26
flink_FineGrainedSlotManager_close_rdh
/** * Closes the slot manager. * * @throws Exception * if the close operation fails */ @Override public void close() throws Exception { LOG.info("Closing the slot manager."); m0(); }
3.26
flink_FineGrainedSlotManager_checkResourceRequirementsWithDelay_rdh
// --------------------------------------------------------------------------------------------- // Requirement matching // --------------------------------------------------------------------------------------------- /** * Depending on the implementation of {@link ResourceAllocationStrategy}, checking resource * requirements and potentially making a re-allocation can be heavy. In order to cover more * changes with each check, thus reduce the frequency of unnecessary re-allocations, the checks * are performed with a slight delay. */ private void checkResourceRequirementsWithDelay() { if (requirementsCheckDelay.toMillis() <= 0) { checkResourceRequirements(); } else if ((requirementsCheckFuture == null) || requirementsCheckFuture.isDone()) { requirementsCheckFuture = new CompletableFuture<>(); scheduledExecutor.schedule(() -> mainThreadExecutor.execute(() -> { checkResourceRequirements(); Preconditions.checkNotNull(requirementsCheckFuture).complete(null); }), requirementsCheckDelay.toMillis(), TimeUnit.MILLISECONDS); } } /** * DO NOT call this method directly. Use {@link #checkResourceRequirementsWithDelay()}
3.26
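The debounce pattern in checkResourceRequirementsWithDelay above can be isolated in a small sketch. The class below is hypothetical, not the actual FineGrainedSlotManager; it only mirrors the schedule-only-if-no-pending-check logic, using a single-threaded scheduler in place of the main-thread executor:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DelayedCheck {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final long delayMillis;
    private CompletableFuture<Void> pendingCheck;

    public DelayedCheck(long delayMillis) {
        this.delayMillis = delayMillis;
    }

    /** Runs the check immediately if no delay is configured, otherwise schedules at most one check per burst of triggers. */
    public synchronized void trigger(Runnable check) {
        if (delayMillis <= 0) {
            check.run();
        } else if (pendingCheck == null || pendingCheck.isDone()) {
            final CompletableFuture<Void> current = new CompletableFuture<>();
            pendingCheck = current;
            scheduler.schedule(
                    () -> {
                        check.run();
                        current.complete(null);
                    },
                    delayMillis,
                    TimeUnit.MILLISECONDS);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        DelayedCheck delayed = new DelayedCheck(100);
        for (int i = 0; i < 5; i++) {
            delayed.trigger(() -> System.out.println("resource check")); // runs once, not five times
        }
        Thread.sleep(300);
        delayed.scheduler.shutdown();
    }
}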
flink_FineGrainedSlotManager_checkClusterReconciliation_rdh
// ---------------------------------------------------------------------------------------------
// Internal periodic check methods
// ---------------------------------------------------------------------------------------------

private void checkClusterReconciliation() {
    if (checkResourcesNeedReconcile()) {
        // only declare when needed.
        m1();
    }
}
3.26
flink_FineGrainedSlotManager_reportSlotStatus_rdh
/** * Reports the current slot allocations for a task manager identified by the given instance id. * * @param instanceId * identifying the task manager for which to report the slot status * @param slotReport * containing the status for all of its slots * @return true if the slot status has been updated successfully, otherwise false */ @Override public boolean reportSlotStatus(InstanceID instanceId, SlotReport slotReport) { checkInit(); LOG.debug("Received slot report from instance {}: {}.", instanceId, slotReport); if (taskManagerTracker.getRegisteredTaskManager(instanceId).isPresent()) { if (!slotStatusSyncer.reportSlotStatus(instanceId, slotReport)) { checkResourceRequirementsWithDelay(); } return true;} else { LOG.debug("Received slot report for unknown task manager with instance id {}. Ignoring this report.", instanceId);return false; } }
3.26
flink_FineGrainedSlotManager_checkInit_rdh
// --------------------------------------------------------------------------------------------- // Internal utility methods // --------------------------------------------------------------------------------------------- private void checkInit() { Preconditions.checkState(started, "The slot manager has not been started."); Preconditions.checkNotNull(resourceManagerId); Preconditions.checkNotNull(mainThreadExecutor); Preconditions.checkNotNull(resourceAllocator); Preconditions.checkNotNull(resourceEventListener); }
3.26
flink_FineGrainedSlotManager_clearResourceRequirements_rdh
// --------------------------------------------------------------------------------------------- // Public API // --------------------------------------------------------------------------------------------- @Override public void clearResourceRequirements(JobID jobId) { maybeReclaimInactiveSlots(jobId); f2.remove(jobId); f0.notifyResourceRequirements(jobId, Collections.emptyList()); if (resourceAllocator.isSupported()) { taskManagerTracker.clearPendingAllocationsOfJob(jobId); checkResourcesNeedReconcile();m1(); } }
3.26
flink_FineGrainedSlotManager_start_rdh
// --------------------------------------------------------------------------------------------- // Component lifecycle methods // --------------------------------------------------------------------------------------------- /** * Starts the slot manager with the given leader id and resource manager actions. * * @param newResourceManagerId * to use for communication with the task managers * @param newMainThreadExecutor * to use to run code in the ResourceManager's main thread * @param newResourceAllocator * to use for resource (de-)allocations * @param newBlockedTaskManagerChecker * to query whether a task manager is blocked */ @Override public void start(ResourceManagerId newResourceManagerId, Executor newMainThreadExecutor, ResourceAllocator newResourceAllocator, ResourceEventListener newResourceEventListener, BlockedTaskManagerChecker newBlockedTaskManagerChecker) { LOG.info("Starting the slot manager.");resourceManagerId = Preconditions.checkNotNull(newResourceManagerId); mainThreadExecutor = Preconditions.checkNotNull(newMainThreadExecutor); resourceAllocator = Preconditions.checkNotNull(newResourceAllocator); resourceEventListener = Preconditions.checkNotNull(newResourceEventListener); slotStatusSyncer.initialize(taskManagerTracker, f0, resourceManagerId, mainThreadExecutor); blockedTaskManagerChecker = Preconditions.checkNotNull(newBlockedTaskManagerChecker); started = true; if (resourceAllocator.isSupported()) { clusterReconciliationCheck = scheduledExecutor.scheduleWithFixedDelay(() -> mainThreadExecutor.execute(this::checkClusterReconciliation), 0L, f1.toMilliseconds(), TimeUnit.MILLISECONDS); } registerSlotManagerMetrics(); }
3.26
flink_FineGrainedSlotManager_allocateTaskManagersAccordingTo_rdh
/** * Allocate pending task managers, returns the ids of pending task managers that can not be * allocated. */private Set<PendingTaskManagerId> allocateTaskManagersAccordingTo(List<PendingTaskManager> pendingTaskManagers) {Preconditions.checkState(resourceAllocator.isSupported()); final Set<PendingTaskManagerId> failedAllocations = new HashSet<>(); for (PendingTaskManager pendingTaskManager : pendingTaskManagers) { if (!allocateResource(pendingTaskManager)) { failedAllocations.add(pendingTaskManager.getPendingTaskManagerId()); } } return failedAllocations; }
3.26
flink_FileBasedStateOutputStream_write_rdh
// ------------------------------------------------------------------------ // I/O // ------------------------------------------------------------------------ @Override public final void write(int b) throws IOException { out.write(b); }
3.26
flink_FileBasedStateOutputStream_m0_rdh
// ------------------------------------------------------------------------ // Closing // ------------------------------------------------------------------------ public boolean m0() { return closed; }
3.26
flink_BinaryKVInMemorySortBuffer_getIterator_rdh
/** * Gets an iterator over all KV records in this buffer in their logical order. * * @return An iterator returning the records in their logical order. */ public final MutableObjectIterator<Tuple2<BinaryRowData, BinaryRowData>> getIterator() { return new MutableObjectIterator<Tuple2<BinaryRowData, BinaryRowData>>() { private final int size = size(); private int f0 = 0; private int currentSegment = 0; private int currentOffset = 0; private MemorySegment f1 = sortIndex.get(0); @Override public Tuple2<BinaryRowData, BinaryRowData> next(Tuple2<BinaryRowData, BinaryRowData> kv) { if (this.f0 < this.size) { this.f0++;if (this.currentOffset > lastIndexEntryOffset) { this.currentOffset = 0; this.f1 = sortIndex.get(++this.currentSegment); } long pointer = this.f1.getLong(this.currentOffset); this.currentOffset += indexEntrySize; try { return getRecordFromBuffer(kv.f0, kv.f1, pointer); } catch (IOException ioe) { throw new RuntimeException(ioe); } } else { return null; } } @Override public Tuple2<BinaryRowData, BinaryRowData> next() { throw new RuntimeException("Not support!"); } }; }
3.26
flink_ProjectableDecodingFormat_supportsNestedProjection_rdh
/** * Returns whether this format supports nested projection. */ default boolean supportsNestedProjection() { return false;}
3.26
flink_TypeInferenceOperandChecker_castTo_rdh
/** * Adopted from {@link org.apache.calcite.sql.validate.implicit.AbstractTypeCoercion}. */ private SqlNode castTo(SqlNode node, RelDataType type) { return SqlStdOperatorTable.CAST.createCall(SqlParserPos.ZERO, node, SqlTypeUtil.convertTypeToSpec(type).withNullable(type.isNullable())); }
3.26
flink_TypeInferenceOperandChecker_checkOperandTypesOrError_rdh
// -------------------------------------------------------------------------------------------- private boolean checkOperandTypesOrError(SqlCallBinding callBinding, CallContext callContext) { final CallContext adaptedCallContext; try { adaptedCallContext = adaptArguments(f0, callContext, null); } catch (ValidationException e) { throw createInvalidInputException(f0, callContext, e); } insertImplicitCasts(callBinding, adaptedCallContext.getArgumentDataTypes()); return true; }
3.26
flink_TypeInferenceOperandChecker_updateInferredType_rdh
/** * Adopted from {@link org.apache.calcite.sql.validate.implicit.AbstractTypeCoercion}. */ private void updateInferredType(SqlValidator validator, SqlNode node, RelDataType type) { validator.setValidatedNodeType(node, type); final SqlValidatorNamespace namespace = validator.getNamespace(node); if (namespace != null) { namespace.setType(type); } }
3.26
flink_StreamOperatorStateHandler_getPartitionedState_rdh
/** * Creates a partitioned state handle, using the state backend configured for this task. * * @throws IllegalStateException * Thrown, if the key/value state was already initialized. * @throws Exception * Thrown, if the state backend cannot create the key/value state. */ protected <S extends State, N> S getPartitionedState(N namespace, TypeSerializer<N> namespaceSerializer, StateDescriptor<S, ?> stateDescriptor) throws Exception { /* TODO: NOTE: This method does a lot of work caching / retrieving states just to update the namespace. This method should be removed for the sake of namespaces being lazily fetched from the keyed state backend, or being set on the state directly. */ if (keyedStateBackend != null) { return keyedStateBackend.getPartitionedState(namespace, namespaceSerializer, stateDescriptor); } else { throw new RuntimeException(("Cannot create partitioned state. The keyed state " + "backend has not been set. This indicates that the operator is not ") + "partitioned/keyed."); } }
3.26
flink_MurmurHashUtil_fmix_rdh
// Finalization mix - force all bits of a hash block to avalanche
private static int fmix(int h1, int length) {
    h1 ^= length;
    return fmix(h1);
}
3.26
flink_MurmurHashUtil_hashUnsafeBytes_rdh
/** * Hash unsafe bytes. * * @param base * base unsafe object * @param offset * offset for unsafe object * @param lengthInBytes * length in bytes * @return hash code */ public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes) { return hashUnsafeBytes(base, offset, lengthInBytes, DEFAULT_SEED); }
3.26
flink_MurmurHashUtil_hashUnsafeBytesByWords_rdh
/** * Hash unsafe bytes, length must be aligned to 4 bytes. * * @param base * base unsafe object * @param offset * offset for unsafe object * @param lengthInBytes * length in bytes * @return hash code */ public static int hashUnsafeBytesByWords(Object base, long offset, int lengthInBytes) { return hashUnsafeBytesByWords(base, offset, lengthInBytes, DEFAULT_SEED); }
3.26
flink_MurmurHashUtil_hashBytes_rdh
/** * Hash bytes in MemorySegment. * * @param segment * segment. * @param offset * offset for MemorySegment * @param lengthInBytes * length in MemorySegment * @return hash code */ public static int hashBytes(MemorySegment segment, int offset, int lengthInBytes) { return hashBytes(segment, offset, lengthInBytes, DEFAULT_SEED); }
3.26
flink_MurmurHashUtil_hashBytesByWords_rdh
/** * Hash bytes in MemorySegment, length must be aligned to 4 bytes. * * @param segment * segment. * @param offset * offset for MemorySegment * @param lengthInBytes * length in MemorySegment * @return hash code */ public static int hashBytesByWords(MemorySegment segment, int offset, int lengthInBytes) { return hashBytesByWords(segment, offset, lengthInBytes, DEFAULT_SEED); }
3.26
flink_OutputCollector_collect_rdh
/** * Collects a record and emits it to all writers. */@Override public void collect(T record) { if (record != null) { this.delegate.setInstance(record); try { for (RecordWriter<SerializationDelegate<T>> writer : writers) { writer.emit(this.delegate); } } catch (IOException e) { throw new RuntimeException("Emitting the record caused an I/O exception: " + e.getMessage(), e); } } else { throw new NullPointerException("The system does not support records that are null. " + "Null values are only supported as fields inside other objects."); } }
3.26
flink_StreamTaskStateInitializerImpl_streamOperatorStateContext_rdh
// ----------------------------------------------------------------------------------------------------------------- @Override public StreamOperatorStateContext streamOperatorStateContext(@Nonnull OperatorID operatorID, @Nonnull String operatorClassName, @Nonnull ProcessingTimeService processingTimeService, @Nonnull KeyContext keyContext, @Nullable TypeSerializer<?> keySerializer, @Nonnull CloseableRegistry streamTaskCloseableRegistry, @Nonnull MetricGroup metricGroup, double managedMemoryFraction, boolean isUsingCustomRawKeyedState) throws Exception { TaskInfo taskInfo = environment.getTaskInfo(); OperatorSubtaskDescriptionText operatorSubtaskDescription = new OperatorSubtaskDescriptionText(operatorID, operatorClassName, taskInfo.getIndexOfThisSubtask(), taskInfo.getNumberOfParallelSubtasks()); final String operatorIdentifierText = operatorSubtaskDescription.toString(); final PrioritizedOperatorSubtaskState prioritizedOperatorSubtaskStates = taskStateManager.prioritizedOperatorState(operatorID); CheckpointableKeyedStateBackend<?> keyedStatedBackend = null; OperatorStateBackend operatorStateBackend = null; CloseableIterable<KeyGroupStatePartitionStreamProvider> rawKeyedStateInputs = null;CloseableIterable<StatePartitionStreamProvider> rawOperatorStateInputs = null;InternalTimeServiceManager<?> v8; try { // -------------- Keyed State Backend -------------- keyedStatedBackend = keyedStatedBackend(keySerializer, operatorIdentifierText, prioritizedOperatorSubtaskStates, streamTaskCloseableRegistry, metricGroup, managedMemoryFraction); // -------------- Operator State Backend -------------- operatorStateBackend = operatorStateBackend(operatorIdentifierText, prioritizedOperatorSubtaskStates, streamTaskCloseableRegistry); // -------------- Raw State Streams -------------- rawKeyedStateInputs = rawKeyedStateInputs(prioritizedOperatorSubtaskStates.getPrioritizedRawKeyedState().iterator()); streamTaskCloseableRegistry.registerCloseable(rawKeyedStateInputs); rawOperatorStateInputs = rawOperatorStateInputs(prioritizedOperatorSubtaskStates.getPrioritizedRawOperatorState().iterator()); streamTaskCloseableRegistry.registerCloseable(rawOperatorStateInputs); // -------------- Internal Timer Service Manager -------------- if (keyedStatedBackend != null) { // if the operator indicates that it is using custom raw keyed state, // then whatever was written in the raw keyed state snapshot was NOT written // by the internal timer services (because there is only ever one user of raw keyed // state); // in this case, timers should not attempt to restore timers from the raw keyed // state. final Iterable<KeyGroupStatePartitionStreamProvider> restoredRawKeyedStateTimers = (prioritizedOperatorSubtaskStates.isRestored() && (!isUsingCustomRawKeyedState)) ? rawKeyedStateInputs : Collections.emptyList(); v8 = timeServiceManagerProvider.create(keyedStatedBackend, environment.getUserCodeClassLoader().asClassLoader(), keyContext, processingTimeService, restoredRawKeyedStateTimers, f0); } else { v8 = null; } // -------------- Preparing return value -------------- return new StreamOperatorStateContextImpl(prioritizedOperatorSubtaskStates.getRestoredCheckpointId(), operatorStateBackend, keyedStatedBackend, v8, rawOperatorStateInputs, rawKeyedStateInputs); } catch (Exception ex) { // cleanup if something went wrong before results got published. 
if (keyedStatedBackend != null) { if (streamTaskCloseableRegistry.unregisterCloseable(keyedStatedBackend)) { IOUtils.closeQuietly(keyedStatedBackend); } // release resource (e.g native resource) keyedStatedBackend.dispose(); } if (operatorStateBackend != null) { if (streamTaskCloseableRegistry.unregisterCloseable(operatorStateBackend)) { IOUtils.closeQuietly(operatorStateBackend); } operatorStateBackend.dispose(); } if (streamTaskCloseableRegistry.unregisterCloseable(rawKeyedStateInputs)) { IOUtils.closeQuietly(rawKeyedStateInputs); }if (streamTaskCloseableRegistry.unregisterCloseable(rawOperatorStateInputs)) { IOUtils.closeQuietly(rawOperatorStateInputs); } throw new Exception("Exception while creating StreamOperatorStateContext.", ex); } }
3.26
flink_TypeTransformation_transform_rdh
/** * Transforms the given data type to a different data type. * * <p>This method provides a {@link DataTypeFactory} if available. */default DataType transform(@Nullable DataTypeFactory factory, DataType typeToTransform) { return transform(typeToTransform); }
3.26
flink_SplitsChange_splits_rdh
/** * * @return the list of splits. */ public List<SplitT> splits() { return Collections.unmodifiableList(splits); }
3.26
flink_GSBlobIdentifier_fromBlobId_rdh
/** * Construct an abstract blob identifier from a Google BlobId. * * @param blobId * The Google BlobId * @return The abstract blob identifier */public static GSBlobIdentifier fromBlobId(BlobId blobId) { return new GSBlobIdentifier(blobId.getBucket(), blobId.getName()); }
3.26
flink_OrcLegacyTimestampColumnVector_fromTimestamp_rdh
// converting from/to Timestamp is copied from Hive 2.0.0 TimestampUtils
private static long fromTimestamp(Timestamp timestamp) {
    long v4 = timestamp.getTime();
    int nanos = timestamp.getNanos();
    return (v4 * 1000000) + (nanos % 1000000);
}
3.26
flink_OrcLegacyTimestampColumnVector_createFromConstant_rdh
// creates a Hive ColumnVector of constant timestamp value
public static ColumnVector createFromConstant(int batchSize, Object value) {
    LongColumnVector res = new LongColumnVector(batchSize);
    if (value == null) {
        res.noNulls = false;
        res.isNull[0] = true;
        res.isRepeating = true;
    } else {
        Timestamp timestamp =
                (value instanceof LocalDateTime)
                        ? Timestamp.valueOf((LocalDateTime) value)
                        : (Timestamp) value;
        res.fill(fromTimestamp(timestamp));
        res.isNull[0] = false;
    }
    return res;
}
3.26
flink_CompactFileUtils_doCompact_rdh
/** * Do Compaction: - Target file exists, do nothing. - Can do compaction: - Single file, do * atomic renaming, there are optimizations for FileSystem. - Multiple file, do reading and * writing. */ @Nullable public static <T> Path doCompact(FileSystem fileSystem, String partition, List<Path> paths, Path target, Configuration config, CompactReader.Factory<T> readerFactory, CompactWriter.Factory<T> writerFactory) throws IOException { if (paths.size() == 0) { return null; } Map<Path, Long> inputMap = new HashMap<>(); for (Path path : paths) { inputMap.put(path, fileSystem.getFileStatus(path).getLen()); } if (fileSystem.exists(target)) { return target; } checkExist(fileSystem, paths); long startMillis = System.currentTimeMillis(); boolean success = false; if (paths.size() == 1) { // optimizer for single file success = doSingleFileMove(fileSystem, paths.get(0), target); } if (!success) { doMultiFilesCompact(partition, paths, target, config, fileSystem, readerFactory, writerFactory); } Map<Path, Long> targetMap = new HashMap<>(); targetMap.put(target, fileSystem.getFileStatus(target).getLen()); double costSeconds = ((double) (System.currentTimeMillis() - startMillis)) / 1000; LOG.info("Compaction time cost is '{}S', output per file as following format: name=size(byte), target file is '{}', input files are '{}'", costSeconds, targetMap, inputMap); return target; }
3.26
flink_BatchExecOverAggregateBase_inferGroupMode_rdh
/** * Infer the over window mode based on given group info. */ protected OverWindowMode inferGroupMode(GroupSpec group) { AggregateCall aggCall = group.getAggCalls().get(0); if (aggCall.getAggregation().getKind() == NTILE) { return OverWindowMode.INSENSITIVE; } if (aggCall.getAggregation().allowsFraming()) { if (group.isRows()) { return OverWindowMode.ROW; } else { return OverWindowMode.RANGE; } } else if (aggCall.getAggregation() instanceof SqlLeadLagAggFunction) { return OverWindowMode.OFFSET; } else if (aggCall.getAggregation().getKind() == CUME_DIST) { // CUME_DIST is range mode (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), // because equal values in window partition should return same result. return OverWindowMode.RANGE; } else { return OverWindowMode.INSENSITIVE; } }
3.26
flink_FieldAccessor_getFieldType_rdh
/** * Gets the TypeInformation for the type of the field. Note: For an array of a primitive type, * it returns the corresponding basic type (Integer for int[]). */ @SuppressWarnings("unchecked") public TypeInformation<F> getFieldType() { return fieldType; }
3.26
flink_Tuple11_setFields_rdh
/** * Sets new values to all fields of the tuple. * * @param f0 * The value for field 0 * @param f1 * The value for field 1 * @param f2 * The value for field 2 * @param f3 * The value for field 3 * @param f4 * The value for field 4 * @param f5 * The value for field 5 * @param f6 * The value for field 6 * @param f7 * The value for field 7 * @param f8 * The value for field 8 * @param f9 * The value for field 9 * @param f10 * The value for field 10 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10) {this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; this.f8 = f8; this.f9 = f9; this.f10 = f10; }
3.26
flink_Tuple11_of_rdh
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10) { return new Tuple11<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10); }
3.26
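A short usage sketch of the Tuple11.of factory above; the field values are arbitrary and chosen only to show that the generic arguments are inferred from the call, unlike with the constructor:

import org.apache.flink.api.java.tuple.Tuple11;

public class Tuple11Example {
    public static void main(String[] args) {
        Tuple11<Integer, Long, String, Double, Boolean, Byte, Short, Float, Character, String, Integer> t =
                Tuple11.of(1, 2L, "three", 4.0, true, (byte) 6, (short) 7, 8.0f, '9', "ten", 11);
        System.out.println(t.f2); // three
        System.out.println(t);    // (1,2,three,4.0,true,6,7,8.0,9,ten,11)
    }
}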
flink_Tuple11_toString_rdh
// ------------------------------------------------------------------------------------------------- // standard utilities // ------------------------------------------------------------------------------------------------- /** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10), where the individual fields are the value returned by calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return ((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ")"; }
3.26
flink_Tuple11_copy_rdh
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked")public Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> copy() { return new Tuple11<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10); }
3.26
flink_FileWriter_m0_rdh
// --------------------------- Testing Methods ----------------------------- @VisibleForTesting Map<String, FileWriterBucket<IN>> m0() { return activeBuckets; }
3.26
flink_FileWriter_initializeState_rdh
/** * Initializes the state after recovery from a failure. * * <p>During this process: * * <ol> * <li>we set the initial value for part counter to the maximum value used before across all * tasks and buckets. This guarantees that we do not overwrite valid data, * <li>we commit any pending files for previous checkpoints (previous to the last successful * one from which we restore), * <li>we resume writing to the previous in-progress file of each bucket, and * <li>if we receive multiple states for the same bucket, we merge them. * </ol> * * @param bucketStates * the state holding recovered state about active buckets. * @throws IOException * if anything goes wrong during retrieving the state or * restoring/committing of any in-progress/pending part files */ public void initializeState(Collection<FileWriterBucketState> bucketStates) throws IOException { checkNotNull(bucketStates, "The retrieved state was null."); for (FileWriterBucketState state : bucketStates) {String bucketId = state.getBucketId(); if (LOG.isDebugEnabled()) { LOG.debug("Restoring: {}", state); } FileWriterBucket<IN> restoredBucket = bucketFactory.restoreBucket(bucketWriter, rollingPolicy, state, outputFileConfig); updateActiveBucketId(bucketId, restoredBucket); } registerNextBucketInspectionTimer(); }
3.26
flink_BaseTwoInputStreamOperatorWithStateRetention_onProcessingTime_rdh
/** * The users of this class are not allowed to use processing time timers. See class javadoc. */ @Override public final void onProcessingTime(InternalTimer<Object, VoidNamespace> timer) throws Exception { if (stateCleaningEnabled) { long v6 = timer.getTimestamp(); Long cleanupTime = latestRegisteredCleanupTimer.value(); if ((cleanupTime != null) && (cleanupTime == v6)) { cleanupState(cleanupTime); latestRegisteredCleanupTimer.clear(); } } }
3.26
flink_BaseTwoInputStreamOperatorWithStateRetention_registerProcessingCleanupTimer_rdh
/** * If the user has specified a {@code minRetentionTime} and {@code maxRetentionTime}, this * method registers a cleanup timer for {@code currentProcessingTime + minRetentionTime}. * * <p>When this timer fires, the {@link #cleanupState(long)} method is called. */ protected void registerProcessingCleanupTimer() throws IOException { if (stateCleaningEnabled) { long currentProcessingTime = timerService.currentProcessingTime(); Optional<Long> currentCleanupTime = Optional.ofNullable(latestRegisteredCleanupTimer.value()); if ((!currentCleanupTime.isPresent()) || ((currentProcessingTime + minRetentionTime) > currentCleanupTime.get())) { updateCleanupTimer(currentProcessingTime, currentCleanupTime); } } }
3.26
flink_HadoopConfigLoader_mirrorCertainHadoopConfig_rdh
// mirror certain keys to make use more uniform across implementations
// with different keys
private Configuration mirrorCertainHadoopConfig(Configuration hadoopConfig) {
    for (String[] mirrored : mirroredConfigKeys) {
        String value = hadoopConfig.get(mirrored[0], null);
        if (value != null) {
            hadoopConfig.set(mirrored[1], value);
        }
    }
    return hadoopConfig;
}
3.26
flink_HadoopConfigLoader_loadHadoopConfigFromFlink_rdh
// add additional config entries from the Flink config to the Hadoop config private Configuration loadHadoopConfigFromFlink() { Configuration hadoopConfig = new Configuration(); for (String key : flinkConfig.keySet()) { for (String prefix : flinkConfigPrefixes) { if (key.startsWith(prefix)) { String newKey = hadoopConfigPrefix + key.substring(prefix.length()); String newValue = m0(key, flinkConfig.getString(key, null)); hadoopConfig.set(newKey, newValue); LOG.debug("Adding Flink config entry for {} as {} to Hadoop config", key, newKey);} }} return hadoopConfig; }
3.26
flink_HadoopConfigLoader_getOrLoadHadoopConfig_rdh
/** * get the loaded Hadoop config (or fall back to one loaded from the classpath). */ public Configuration getOrLoadHadoopConfig() { Configuration hadoopConfig = this.hadoopConfig; if (hadoopConfig == null) { if (flinkConfig != null) { hadoopConfig = mirrorCertainHadoopConfig(loadHadoopConfigFromFlink()); } else { LOG.warn("Flink configuration is not set prior to loading this configuration." + " Cannot forward configuration keys from Flink configuration."); hadoopConfig = new Configuration(); } } this.hadoopConfig = hadoopConfig; return hadoopConfig; }
3.26
flink_AbstractIterativeTask_createWorksetUpdateOutputCollector_rdh
// ----------------------------------------------------------------------------------------------------------------- // Iteration State Update Handling // ----------------------------------------------------------------------------------------------------------------- /** * Creates a new {@link WorksetUpdateOutputCollector}. * * <p>This collector is used by {@link IterationIntermediateTask} or {@link IterationTailTask} * to update the workset. * * <p>If a non-null delegate is given, the new {@link Collector} will write to the solution set * and also call collect(T) of the delegate. * * @param delegate * null -OR- the delegate on which to call collect() by the newly created * collector * @return a new {@link WorksetUpdateOutputCollector} */ protected Collector<OT> createWorksetUpdateOutputCollector(Collector<OT> delegate) { DataOutputView outputView = worksetBackChannel.getWriteEnd();TypeSerializer<OT> serializer = getOutputSerializer(); return new WorksetUpdateOutputCollector<OT>(outputView, serializer, delegate); }
3.26
flink_AbstractIterativeTask_initialize_rdh
// -------------------------------------------------------------------------------------------- // Main life cycle methods that implement the iterative behavior // -------------------------------------------------------------------------------------------- @Override protected void initialize() throws Exception { super.initialize(); // check if the driver is resettable if (this.driver instanceof ResettableDriver) { final ResettableDriver<?, ?> resDriver = ((ResettableDriver<?, ?>) (this.driver)); // make sure that the according inputs are not reset for (int i = 0; i < resDriver.getNumberOfInputs(); i++) { if (resDriver.isInputResettable(i)) { excludeFromReset(i); } } } TaskConfig config = getLastTasksConfig(); isWorksetIteration = config.getIsWorksetIteration(); isWorksetUpdate = config.getIsWorksetUpdate(); isSolutionSetUpdate = config.getIsSolutionSetUpdate(); if (isWorksetUpdate) { worksetBackChannel = BlockingBackChannelBroker.instance().getAndRemove(brokerKey());if (isWorksetIteration) { f1 = getIterationAggregators().getAggregator(WorksetEmptyConvergenceCriterion.AGGREGATOR_NAME); if (f1 == null) { throw new RuntimeException("Missing workset elements count aggregator."); } } } }
3.26
flink_AbstractIterativeTask_getOutputSerializer_rdh
/** * * @return output serializer of this task */ private TypeSerializer<OT> getOutputSerializer() { TypeSerializerFactory<OT> serializerFactory; if ((serializerFactory = getLastTasksConfig().getOutputSerializer(getUserCodeClassLoader())) == null) { throw new RuntimeException("Missing output serializer for workset update."); } return serializerFactory.getSerializer(); }
3.26
flink_AbstractIterativeTask_inFirstIteration_rdh
// -------------------------------------------------------------------------------------------- // Utility Methods for Iteration Handling // -------------------------------------------------------------------------------------------- protected boolean inFirstIteration() { return this.superstepNum == 1; }
3.26
flink_AbstractIterativeTask_createSolutionSetUpdateOutputCollector_rdh
/** * Creates a new solution set update output collector. * * <p>This collector is used by {@link IterationIntermediateTask} or {@link IterationTailTask} * to update the solution set of workset iterations. Depending on the task configuration, either * a fast (non-probing) {@link org.apache.flink.runtime.iterative.io.SolutionSetFastUpdateOutputCollector} or normal * (re-probing) {@link SolutionSetUpdateOutputCollector} is created. * * <p>If a non-null delegate is given, the new {@link Collector} will write back to the solution * set and also call collect(T) of the delegate. * * @param delegate * null -OR- a delegate collector to be called by the newly created collector * @return a new {@link org.apache.flink.runtime.iterative.io.SolutionSetFastUpdateOutputCollector} or {@link SolutionSetUpdateOutputCollector} */ protected Collector<OT> createSolutionSetUpdateOutputCollector(Collector<OT> delegate) { Broker<Object> solutionSetBroker = SolutionSetBroker.instance(); Object ss = solutionSetBroker.get(brokerKey()); if (ss instanceof CompactingHashTable) { @SuppressWarnings("unchecked") CompactingHashTable<OT> solutionSet = ((CompactingHashTable<OT>) (ss)); return new SolutionSetUpdateOutputCollector<OT>(solutionSet, delegate); } else if (ss instanceof JoinHashMap) { @SuppressWarnings("unchecked") JoinHashMap<OT> map = ((JoinHashMap<OT>) (ss)); return new SolutionSetObjectsUpdateOutputCollector<OT>(map, delegate); } else {throw new RuntimeException("Unrecognized solution set handle: " + ss); }}
3.26
flink_RowPartitionComputer_restorePartValueFromType_rdh
/** * Restore partition value from string and type. This method is the opposite of method {@link #generatePartValues}. * * @param valStr * string partition value. * @param type * type of partition field. * @return partition value. */ public static Object restorePartValueFromType(String valStr, LogicalType type) { if (valStr == null) { return null; } LogicalTypeRoot typeRoot = type.getTypeRoot(); switch (typeRoot) { case CHAR : case VARCHAR : return valStr; case BOOLEAN : return Boolean.parseBoolean(valStr); case TINYINT : return Integer.valueOf(valStr).byteValue(); case SMALLINT : return Short.valueOf(valStr); case INTEGER : return Integer.valueOf(valStr); case BIGINT : return Long.valueOf(valStr); case FLOAT : return Float.valueOf(valStr); case DOUBLE : return Double.valueOf(valStr); case DATE : return LocalDate.parse(valStr); case TIMESTAMP_WITHOUT_TIME_ZONE : return LocalDateTime.parse(valStr); case DECIMAL : return new BigDecimal(valStr); default : throw new RuntimeException(String.format("Can not convert %s to type %s for partition value", valStr, type)); }}
3.26
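A hypothetical usage sketch of restorePartValueFromType above, turning partition path strings back into typed values. The import paths for RowPartitionComputer and the logical types are assumptions (they differ across Flink versions), but the calls mirror the switch in the snippet:

import org.apache.flink.connector.file.table.RowPartitionComputer; // package varies by Flink version
import org.apache.flink.table.types.logical.DateType;
import org.apache.flink.table.types.logical.IntType;

public class PartitionValueDemo {
    public static void main(String[] args) {
        // "dt=2024-01-31/bucket=42" style partition strings become typed Java values
        Object day = RowPartitionComputer.restorePartValueFromType("2024-01-31", new DateType());
        Object bucket = RowPartitionComputer.restorePartValueFromType("42", new IntType());
        System.out.println(day.getClass().getSimpleName() + " / " + bucket.getClass().getSimpleName());
        // prints: LocalDate / Integer
    }
}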
flink_MapViewSerializer_transformLegacySerializerSnapshot_rdh
/** * We need to override this as a {@link LegacySerializerSnapshotTransformer} because in Flink * 1.6.x and below, this serializer was incorrectly returning directly the snapshot of the * nested map serializer as its own snapshot. * * <p>This method transforms the incorrect map serializer snapshot to be a proper {@link MapViewSerializerSnapshot}. */ @Override public <U> TypeSerializerSnapshot<MapView<K, V>> transformLegacySerializerSnapshot(TypeSerializerSnapshot<U> legacySnapshot) { if (legacySnapshot instanceof MapViewSerializerSnapshot) { return ((TypeSerializerSnapshot<MapView<K, V>>) (legacySnapshot)); } else { throw new UnsupportedOperationException(legacySnapshot.getClass().getCanonicalName() + " is not supported."); } }
3.26
flink_FullCachingLookupProvider_of_rdh
/** * Build a {@link FullCachingLookupProvider} from the specified {@link ScanTableSource.ScanRuntimeProvider} and {@link CacheReloadTrigger}. */ static FullCachingLookupProvider of(ScanTableSource.ScanRuntimeProvider scanRuntimeProvider, CacheReloadTrigger cacheReloadTrigger) { return new FullCachingLookupProvider() { @Override public ScanRuntimeProvider getScanRuntimeProvider() { return scanRuntimeProvider; } @Override public CacheReloadTrigger getCacheReloadTrigger() { return cacheReloadTrigger; } @Override public LookupFunction createLookupFunction() { return null; } }; }
3.26
flink_MapMapConverter_toBinaryMapData_rdh
// -------------------------------------------------------------------------------------------- // Runtime helper methods // -------------------------------------------------------------------------------------------- private MapData toBinaryMapData(Map<K, V> external) { final int length = external.size(); keyConverter.allocateWriter(length); valueConverter.allocateWriter(length); int v8 = 0; for (Map.Entry<K, V> entry : external.entrySet()) { keyConverter.writeElement(v8, entry.getKey()); valueConverter.writeElement(v8, entry.getValue()); v8++; } return BinaryMapData.valueOf(keyConverter.completeWriter(), valueConverter.completeWriter()); }
3.26
flink_MapMapConverter_createForMapType_rdh
// -------------------------------------------------------------------------------------------- // Factory method // -------------------------------------------------------------------------------------------- public static MapMapConverter<?, ?> createForMapType(DataType dataType) { final DataType keyDataType = dataType.getChildren().get(0); final DataType valueDataType = dataType.getChildren().get(1); return new MapMapConverter<>(ArrayObjectArrayConverter.createForElement(keyDataType), ArrayObjectArrayConverter.createForElement(valueDataType)); }
3.26
flink_BooleanWriter_forRow_rdh
/** * {@link ArrowFieldWriter} for Boolean. */ @Internal
3.26
flink_RemoteInputChannel_setup_rdh
/** * Setup includes assigning exclusive buffers to this input channel, and this method should be * called only once after this input channel is created. */ @Override void setup() throws IOException { checkState(bufferManager.unsynchronizedGetAvailableExclusiveBuffers() == 0, "Bug in input channel setup logic: exclusive buffers have already been set for this input channel."); bufferManager.requestExclusiveBuffers(initialCredit); }
3.26
flink_RemoteInputChannel_notifyCreditAvailable_rdh
// ------------------------------------------------------------------------ // Credit-based // ------------------------------------------------------------------------ /** * Enqueue this input channel in the pipeline for notifying the producer of unannounced credit. */ private void notifyCreditAvailable() throws IOException { checkPartitionRequestQueueInitialized(); partitionRequestClient.notifyCreditAvailable(this); }
3.26
flink_RemoteInputChannel_sendTaskEvent_rdh
// ------------------------------------------------------------------------ // Task events // ------------------------------------------------------------------------ @Override void sendTaskEvent(TaskEvent event) throws IOException { checkState(!f1.get(), "Tried to send task event to producer after channel has been released."); checkPartitionRequestQueueInitialized(); partitionRequestClient.sendTaskEvent(partitionId, event, this); }
3.26
flink_RemoteInputChannel_increaseBackoff_rdh
/** * The remote task manager creates a partition request listener and returns {@link PartitionNotFoundException} until the listener times out, so the backoff should add the * timeout milliseconds if it exists. * * @return <code>true</code>, iff the operation was successful. Otherwise, <code>false</code>. */ @Override protected boolean increaseBackoff() { if (partitionRequestListenerTimeout > 0) { currentBackoff += partitionRequestListenerTimeout; return currentBackoff < (2 * maxBackoff); } // Backoff is disabled return false; }
3.26
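A small worked example of the backoff arithmetic above, using made-up values for partitionRequestListenerTimeout and maxBackoff (the fragment mirrors the field names in the snippet and is for illustration only):

// Assumed values, in milliseconds.
long partitionRequestListenerTimeout = 10_000L;
long maxBackoff = 30_000L;
long currentBackoff = 0L;
for (int attempt = 1; attempt <= 6; attempt++) {
    currentBackoff += partitionRequestListenerTimeout;
    boolean retry = currentBackoff < 2 * maxBackoff;
    System.out.println("attempt " + attempt + ": backoff=" + currentBackoff + " ms, retry=" + retry);
}
// Attempts 1-5 print retry=true; attempt 6 reaches 60_000 ms (= 2 * maxBackoff) and prints
// retry=false, at which point the channel fails the partition request instead of retrying.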
flink_RemoteInputChannel_checkpointStarted_rdh
/** * Spills all queued buffers on checkpoint start. If barrier has already been received (and * reordered), spill only the overtaken buffers. */ public void checkpointStarted(CheckpointBarrier barrier) throws CheckpointException { synchronized(receivedBuffers) { if (barrier.getId() < lastBarrierId) { throw new CheckpointException(String.format("Sequence number for checkpoint %d is not known (it was likely been overwritten by a newer checkpoint %d)", barrier.getId(), lastBarrierId), CheckpointFailureReason.CHECKPOINT_SUBSUMED);// currently, at most one active unaligned // checkpoint is possible } else if (barrier.getId() > lastBarrierId) {// This channel has received some obsolete barrier, older compared to the // checkpointId // which we are processing right now, and we should ignore that obsoleted checkpoint // barrier sequence number. resetLastBarrier(); } channelStatePersister.startPersisting(barrier.getId(), getInflightBuffersUnsafe(barrier.getId())); } }
3.26
flink_RemoteInputChannel_retriggerSubpartitionRequest_rdh
/** * Retriggers a remote subpartition request. */void retriggerSubpartitionRequest() throws IOException { checkPartitionRequestQueueInitialized();if (increaseBackoff()) { partitionRequestClient.requestSubpartition(partitionId, consumedSubpartitionIndex, this, 0); } else { failPartitionRequest(); } }
3.26
flink_RemoteInputChannel_getUnannouncedCredit_rdh
// ------------------------------------------------------------------------ // Network I/O notifications (called by network I/O thread) // ------------------------------------------------------------------------ /** * Gets the currently unannounced credit. * * @return Credit which was not announced to the sender yet. */ public int getUnannouncedCredit() { return unannouncedCredit.get(); }
3.26
flink_RemoteInputChannel_m0_rdh
/** * The unannounced credit is increased by the given amount and might notify increased credit to * the producer. */ @Override public void m0(int numAvailableBuffers) throws IOException { if ((numAvailableBuffers > 0) && (unannouncedCredit.getAndAdd(numAvailableBuffers) == 0)) { notifyCreditAvailable(); } }
3.26
flink_RemoteInputChannel_onSenderBacklog_rdh
/** * Receives the backlog from the producer's buffer response. If the number of available buffers * is less than backlog + initialCredit, it will request floating buffers from the buffer * manager, and then notify unannounced credits to the producer. * * @param backlog * The number of unsent buffers in the producer's sub partition. */ public void onSenderBacklog(int backlog) throws IOException { m0(bufferManager.requestFloatingBuffers(backlog + initialCredit)); }
3.26
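For illustration, the credit arithmetic implied by the javadoc above, with assumed numbers; this is not Flink-internal code:

// Assumed: the producer reports 5 unsent buffers, the channel owns 2 exclusive buffers.
int backlog = 5;
int initialCredit = 2;
int desiredBuffers = backlog + initialCredit; // the channel aims to hold 7 buffers in total
// bufferManager.requestFloatingBuffers(7) returns however many floating buffers the local pool
// can actually spare, e.g. 3; m0(3) then adds 3 to the unannounced credit and, if the previous
// value was 0, schedules a credit announcement to the producer.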
flink_RemoteInputChannel_requestBuffer_rdh
/** * Requests buffer from input channel directly for receiving network data. It should always * return an available buffer in credit-based mode unless the channel has been released. * * @return The available buffer. */ @Nullable public Buffer requestBuffer() { return bufferManager.requestBuffer(); }
3.26
flink_RemoteInputChannel_requestSubpartition_rdh
// ------------------------------------------------------------------------ // Consume // ------------------------------------------------------------------------ /** * Requests a remote subpartition. */@VisibleForTesting @Override public void requestSubpartition() throws IOException, InterruptedException { if (partitionRequestClient == null) { LOG.debug("{}: Requesting REMOTE subpartition {} of partition {}. {}", this, consumedSubpartitionIndex, partitionId, channelStatePersister); // Create a client and request the partition try { partitionRequestClient = connectionManager.createPartitionRequestClient(connectionId); } catch (IOException e) { // IOExceptions indicate that we could not open a connection to the remote // TaskExecutor throw new PartitionConnectionException(partitionId, e); } partitionRequestClient.requestSubpartition(partitionId, consumedSubpartitionIndex, this, 0); } }
3.26
flink_RemoteInputChannel_isReleased_rdh
// ------------------------------------------------------------------------ // Life cycle // ------------------------------------------------------------------------ @Override public boolean isReleased() { return f1.get(); }
3.26
flink_RemoteInputChannel_releaseAllResources_rdh
/** * Releases all exclusive and floating buffers, closes the partition request client. */ @Override void releaseAllResources() throws IOException { if (f1.compareAndSet(false, true)) { final ArrayDeque<Buffer> releasedBuffers; synchronized(receivedBuffers) { releasedBuffers = receivedBuffers.stream().map(sb -> sb.buffer).collect(Collectors.toCollection(ArrayDeque::new)); receivedBuffers.clear(); } bufferManager.releaseAllBuffers(releasedBuffers); // The released flag has to be set before closing the connection to ensure that // buffers received concurrently with closing are properly recycled. if (partitionRequestClient != null) { partitionRequestClient.close(this); } else { connectionManager.closeOpenChannelConnections(connectionId); } } }
3.26
flink_RemoteInputChannel_getInflightBuffersUnsafe_rdh
/** * Returns a list of buffers, checking the first n non-priority buffers, and skipping all * events. */ private List<Buffer> getInflightBuffersUnsafe(long checkpointId) { assert Thread.holdsLock(receivedBuffers);checkState((checkpointId == lastBarrierId) || (lastBarrierId == f0)); final List<Buffer> inflightBuffers = new ArrayList<>(); Iterator<SequenceBuffer> iterator = receivedBuffers.iterator(); // skip all priority events (only buffers are stored anyways) Iterators.advance(iterator, receivedBuffers.getNumPriorityElements()); while (iterator.hasNext()) { SequenceBuffer sequenceBuffer = iterator.next(); if (sequenceBuffer.buffer.isBuffer()) { if (m4(sequenceBuffer.sequenceNumber)) {inflightBuffers.add(sequenceBuffer.buffer.retainBuffer()); } else { break; } } } return inflightBuffers; }
3.26
flink_RemoteInputChannel_onBuffer_rdh
/** * Handles the input buffer. This method is taking over the ownership of the buffer and is fully * responsible for cleaning it up both on the happy path and in case of an error. */public void onBuffer(Buffer buffer, int sequenceNumber, int backlog) throws IOException { boolean recycleBuffer = true; try { if (expectedSequenceNumber != sequenceNumber) { onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber)); return; } if (buffer.getDataType().isBlockingUpstream()) { m3();checkArgument(backlog == 0, "Illegal number of backlog: %s, should be 0.", backlog); } final boolean wasEmpty; boolean firstPriorityEvent = false; synchronized(receivedBuffers) { NetworkActionsLogger.traceInput("RemoteInputChannel#onBuffer", buffer, inputGate.getOwningTaskName(), channelInfo, channelStatePersister, sequenceNumber); // Similar to notifyBufferAvailable(), make sure that we never add a buffer // after releaseAllResources() released all buffers from receivedBuffers // (see above for details). if (f1.get()) { return; } wasEmpty = receivedBuffers.isEmpty();SequenceBuffer sequenceBuffer = new SequenceBuffer(buffer, sequenceNumber); DataType dataType = buffer.getDataType(); if (dataType.hasPriority()) { firstPriorityEvent = addPriorityBuffer(sequenceBuffer); recycleBuffer = false; } else { receivedBuffers.add(sequenceBuffer); recycleBuffer = false; if (dataType.requiresAnnouncement()) { firstPriorityEvent = addPriorityBuffer(announce(sequenceBuffer)); } } totalQueueSizeInBytes += buffer.getSize(); final OptionalLong barrierId = channelStatePersister.checkForBarrier(sequenceBuffer.buffer); if (barrierId.isPresent() && (barrierId.getAsLong() > lastBarrierId)) { // checkpoint was not yet started by task thread, // so remember the numbers of buffers to spill for the time when // it will be started lastBarrierId = barrierId.getAsLong(); lastBarrierSequenceNumber = sequenceBuffer.sequenceNumber; }channelStatePersister.maybePersist(buffer); ++expectedSequenceNumber; } if (firstPriorityEvent) { notifyPriorityEvent(sequenceNumber); } if (wasEmpty) { notifyChannelNonEmpty(); } if (backlog >= 0) {onSenderBacklog(backlog); } } finally { if (recycleBuffer) { buffer.recycleBuffer(); } } }
3.26
flink_RemoteInputChannel_addPriorityBuffer_rdh
/** * * @return {@code true} if this was first priority buffer added. */ private boolean addPriorityBuffer(SequenceBuffer sequenceBuffer) { receivedBuffers.addPriorityElement(sequenceBuffer); return receivedBuffers.getNumPriorityElements() == 1;}
3.26
flink_RemoteInputChannel_getNumberOfQueuedBuffers_rdh
/** * Gets the current number of received buffers which have not been processed yet. * * @return Buffers queued for processing. */ public int getNumberOfQueuedBuffers() { synchronized(receivedBuffers) { return receivedBuffers.size(); } }
3.26
flink_RemoteInputChannel_getAndResetUnannouncedCredit_rdh
/** * Gets the unannounced credit and resets it to <tt>0</tt> atomically. * * @return Credit which was not announced to the sender yet. */public int getAndResetUnannouncedCredit() { return unannouncedCredit.getAndSet(0); }
3.26
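A hedged sketch of how the two credit accessors pair up on the announcement path; channel stands for a RemoteInputChannel, and sendAddCreditMessage is a placeholder for the real transport call, not a Flink API:

// Peek at the pending credit without consuming it, e.g. for metrics or logging.
int pending = channel.getUnannouncedCredit();

// When the announcement is actually written to the wire, consume the credit atomically so the
// same amount is never announced twice.
int creditToAnnounce = channel.getAndResetUnannouncedCredit();
if (creditToAnnounce > 0) {
    sendAddCreditMessage(channel, creditToAnnounce); // placeholder for the real transport call
}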
flink_StateTtlConfig_cleanupInRocksdbCompactFilter_rdh
/** * Cleanup expired state while Rocksdb compaction is running. * * <p>RocksDB compaction filter will query current timestamp, used to check expiration, from * Flink every time after processing {@code queryTimeAfterNumEntries} number of state * entries. Updating the timestamp more often can improve cleanup speed but it decreases * compaction performance because it uses JNI call from native code. * * <p>Periodic compaction could speed up expired state entries cleanup, especially for state * entries rarely accessed. Files older than this value will be picked up for compaction, * and re-written to the same level as they were before. It makes sure a file goes through * compaction filters periodically. * * @param queryTimeAfterNumEntries * number of state entries to process by compaction filter * before updating current timestamp * @param periodicCompactionTime * periodic compaction which could speed up expired state * cleanup. 0 means turning off periodic compaction. */ @Nonnull public Builder cleanupInRocksdbCompactFilter(long queryTimeAfterNumEntries, Time periodicCompactionTime) { strategies.put(CleanupStrategies.Strategies.ROCKSDB_COMPACTION_FILTER, new RocksdbCompactFilterCleanupStrategy(queryTimeAfterNumEntries, periodicCompactionTime)); return this; }
3.26
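A usage sketch for the builder method above; StateTtlConfig.newBuilder(...) and build() are the standard entry points, and the concrete numbers are examples only:

StateTtlConfig ttlConfig =
        StateTtlConfig.newBuilder(Time.days(7))
                .cleanupInRocksdbCompactFilter(
                        1000,          // query the current time after every 1000 processed entries
                        Time.days(1))  // periodically compact files older than 1 day
                .build();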
flink_StateTtlConfig_cleanupFullSnapshot_rdh
/** * Cleanup expired state in full snapshot on checkpoint. */ @Nonnull public Builder cleanupFullSnapshot() { strategies.put(CleanupStrategies.Strategies.FULL_STATE_SCAN_SNAPSHOT, EMPTY_STRATEGY); return this; } /** * Cleanup expired state incrementally, cleaning up local state. * * <p>Upon every state access this cleanup strategy checks a bunch of state keys for * expiration and cleans up expired ones. It keeps a lazy iterator through all keys with * relaxed consistency if backend supports it. This way all keys should be regularly checked * and cleaned eventually over time if any state is constantly being accessed. * * <p>Additionally to the incremental cleanup upon state access, it can also run per every * record. Caution: if there are a lot of registered states using this option, they all will * be iterated for every record to check if there is something to cleanup. * * <p>Note: if no access happens to this state or no records are processed in case of {@code runCleanupForEveryRecord}, expired state will persist. */ @Nonnull public Builder cleanupIncrementally(int cleanupSize, boolean runCleanupForEveryRecord) { strategies.put(CleanupStrategies.Strategies.INCREMENTAL_CLEANUP, new IncrementalCleanupStrategy(cleanupSize, runCleanupForEveryRecord)); return this; }
3.26
flink_StateTtlConfig_setUpdateType_rdh
/** * Sets the ttl update type. * * @param updateType * The ttl update type configures when to update last access timestamp * which prolongs state TTL. */ @Nonnull public Builder setUpdateType(UpdateType updateType) { this.updateType = updateType; return this; }
3.26
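A combined sketch of the builder calls from the last few entries (setUpdateType and cleanupFullSnapshot); UpdateType.OnCreateAndWrite is the standard enum value, and the TTL duration is an example:

StateTtlConfig ttlConfig =
        StateTtlConfig.newBuilder(Time.hours(1))
                .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite) // refresh TTL on creation and writes
                .cleanupFullSnapshot()                                     // drop expired entries in full snapshots
                .build();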