Columns: name (string, length 12–178), code_snippet (string, length 8–36.5k), score (float64, range 3.26–3.68)
flink_SavepointWriter_changeOperatorIdentifier_rdh
/** * Changes the identifier of an operator. * * <p>This method is comparatively cheap since it only modifies savepoint metadata without * reading the entire savepoint data. * * <p>Use-cases include, but are not limited to: * * <ul> * <li>assigning a UID to an operator that did not have a UID assigned before * <li>changing the UID of an operator * <li>swapping the states of 2 operators * </ul> * * <p>Identifier changes are applied after all other operations; in the following example the * savepoint will only contain UID_2. * * <pre> * SavepointWriter savepoint = ... * savepoint.withOperator(UID_1, ...) * savepoint.changeOperatorIdentifier(UID_1, UID_2) * savepoint.write(...) * </pre> * * <p>You cannot define a chain of changes; in the following example the savepoint will only * contain UID_2. * * <pre> * SavepointWriter savepoint = ... * savepoint.withOperator(UID_1, ...) * savepoint.changeOperatorIdentifier(UID_1, UID_2) * savepoint.changeOperatorIdentifier(UID_2, UID_3) * savepoint.write(...) * </pre> * * @param from * operator whose identifier should be changed * @param to * desired identifier * @return The modified savepoint. */ public SavepointWriter changeOperatorIdentifier(OperatorIdentifier from, OperatorIdentifier to) { this.uidTransformationMap.put(from, to); return this; }
3.26
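A usage sketch for the SavepointWriter snippets above. The fluent calls (changeOperatorIdentifier, write) follow the Javadoc; the factory method, state backend, paths, and UIDs are assumptions and vary across Flink versions — treat this as a sketch, not the definitive API.

```java
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.state.api.OperatorIdentifier;
import org.apache.flink.state.api.SavepointWriter;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RewriteSavepointExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Factory call, paths, and UIDs are illustrative assumptions (the signature differs between versions).
        SavepointWriter writer =
                SavepointWriter.fromExistingSavepoint(
                        env, "file:///tmp/savepoints/old", new HashMapStateBackend());

        writer
                // remap state stored under an auto-generated UID hash to an explicit UID;
                // per the Javadoc above, identifier changes are applied after all other operations
                .changeOperatorIdentifier(
                        OperatorIdentifier.forUidHash("2f4bc854a18755730e14a90e1d4d7c7d"),
                        OperatorIdentifier.forUid("my-operator-uid"))
                .write("file:///tmp/savepoints/new");

        env.execute("rewrite savepoint metadata");
    }
}
```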
flink_SavepointWriter_withConfiguration_rdh
/** * Sets a configuration that will be applied to the stream operators used to bootstrap a new * savepoint. * * @param option * metadata information * @param value * value to be stored * @param <T> * type of the value to be stored * @return The modified savepoint. */ public <T> SavepointWriter withConfiguration(ConfigOption<T> option, T value) { configuration.set(option, value); return this; }
3.26
flink_SavepointWriter_withOperator_rdh
/** * Adds a new operator to the savepoint. * * @param identifier * The identifier of the operator. * @param transformation * The operator to be included. * @return The modified savepoint. */ public <T> SavepointWriter withOperator(OperatorIdentifier identifier, StateBootstrapTransformation<T> transformation) { metadata.addOperator(identifier, transformation);return this; }
3.26
flink_PartitionedFileReader_getPriority_rdh
/** * Gets read priority of this file reader. Smaller value indicates higher priority. */ long getPriority() { return nextOffsetToRead; }
3.26
flink_PartitionedFileReader_readCurrentRegion_rdh
/** * Reads a buffer from the current region of the target {@link PartitionedFile} and moves the * read position forward. * * <p>Note: The caller is responsible for recycling the target buffer if any exception occurs. * * @param freeSegments * The free {@link MemorySegment}s to read data to. * @param recycler * The {@link BufferRecycler} which is responsible to recycle the target buffer. * @param consumer * The target {@link Buffer} stores the data read from file channel. * @return Whether the file reader has remaining data to read. */boolean readCurrentRegion(Queue<MemorySegment> freeSegments, BufferRecycler recycler, Consumer<Buffer> consumer) throws IOException { if (currentRegionRemainingBytes == 0) { return false; } checkArgument(!freeSegments.isEmpty(), "No buffer available for data reading."); dataFileChannel.position(nextOffsetToRead); BufferAndHeader partialBuffer = new BufferAndHeader(null, null);try { while ((!freeSegments.isEmpty()) && (currentRegionRemainingBytes > 0)) { MemorySegment segment = freeSegments.poll(); int numBytes = ((int) (Math.min(segment.size(), currentRegionRemainingBytes))); ByteBuffer byteBuffer = segment.wrap(0, numBytes); try { BufferReaderWriterUtil.readByteBufferFully(dataFileChannel, byteBuffer); byteBuffer.flip(); currentRegionRemainingBytes -= byteBuffer.remaining(); nextOffsetToRead += byteBuffer.remaining(); } catch (Throwable throwable) { freeSegments.add(segment); throw throwable; } NetworkBuffer buffer = new NetworkBuffer(segment, recycler); buffer.setSize(byteBuffer.remaining()); try { partialBuffer = processBuffer(byteBuffer, buffer, partialBuffer, consumer); } catch (Throwable throwable) { partialBuffer = new BufferAndHeader(null, null); throw throwable; } finally { buffer.recycleBuffer(); } } } finally { if (headerBuf.position() > 0) { nextOffsetToRead -= headerBuf.position(); currentRegionRemainingBytes += headerBuf.position(); headerBuf.clear(); } if (partialBuffer.header != null) { nextOffsetToRead -= HEADER_LENGTH; currentRegionRemainingBytes += HEADER_LENGTH; } if (partialBuffer.buffer != null) { nextOffsetToRead -= partialBuffer.buffer.readableBytes(); currentRegionRemainingBytes += partialBuffer.buffer.readableBytes(); partialBuffer.buffer.recycleBuffer();} } return hasRemaining(); }
3.26
flink_NetworkBufferPool_requestUnpooledMemorySegments_rdh
/** * Unpooled memory segments are requested directly from {@link NetworkBufferPool}, as opposed to * pooled segments, which are requested through {@link BufferPool} that was created from this * {@link NetworkBufferPool} (see {@link #createBufferPool}). They are used for example for * exclusive {@link RemoteInputChannel} credits, which are permanently assigned to that channel, * and never returned to any {@link BufferPool}. As opposed to pooled segments, when requested, * unpooled segments need to be accounted against {@link #numTotalRequiredBuffers}, which might * require redistribution of the segments. */ @Override public List<MemorySegment> requestUnpooledMemorySegments(int numberOfSegmentsToRequest) throws IOException { checkArgument(numberOfSegmentsToRequest >= 0, "Number of buffers to request must be non-negative."); synchronized(factoryLock) { if (isDestroyed) { throw new IllegalStateException("Network buffer pool has already been destroyed."); } if (numberOfSegmentsToRequest == 0) { return Collections.emptyList(); } tryRedistributeBuffers(numberOfSegmentsToRequest); } try { return internalRequestMemorySegments(numberOfSegmentsToRequest); } catch (IOException exception) { revertRequiredBuffers(numberOfSegmentsToRequest); ExceptionUtils.rethrowIOException(exception); return null; } }
3.26
flink_NetworkBufferPool_recyclePooledMemorySegment_rdh
/** * Corresponding to {@link #requestPooledMemorySegmentsBlocking} and {@link #requestPooledMemorySegment}, this method is for pooled memory segments recycling. */ public void recyclePooledMemorySegment(MemorySegment segment) { // Adds the segment back to the queue, which does not immediately free the memory // however, since this happens when references to the global pool are also released, // making the availableMemorySegments queue and its contained object reclaimable internalRecycleMemorySegments(Collections.singleton(checkNotNull(segment))); }
3.26
flink_NetworkBufferPool_createBufferPool_rdh
// ------------------------------------------------------------------------ // BufferPoolFactory // ------------------------------------------------------------------------ @Override public BufferPool createBufferPool(int numRequiredBuffers, int maxUsedBuffers) throws IOException { return internalCreateBufferPool(numRequiredBuffers, maxUsedBuffers, 0, Integer.MAX_VALUE, 0); }
3.26
flink_NetworkBufferPool_destroyAllBufferPools_rdh
/** * Destroys all buffer pools that allocate their buffers from this buffer pool (created via * {@link #createBufferPool(int, int)}). */ public void destroyAllBufferPools() { synchronized(factoryLock) { // create a copy to avoid concurrent modification exceptions LocalBufferPool[] poolsCopy = allBufferPools.toArray(new LocalBufferPool[allBufferPools.size()]); for (LocalBufferPool bufferPool : poolsCopy) { bufferPool.lazyDestroy(); } // some sanity checks if (((allBufferPools.size() > 0) || (numTotalRequiredBuffers > 0)) || (resizableBufferPools.size() > 0)) { throw new IllegalStateException("NetworkBufferPool is not empty after destroying all LocalBufferPools"); } } }
3.26
flink_NetworkBufferPool_getAvailableFuture_rdh
/** * Returns a future that is completed when there are free segments in this pool. */ @Override public CompletableFuture<?> getAvailableFuture() { return availabilityHelper.getAvailableFuture(); }
3.26
flink_NetworkBufferPool_redistributeBuffers_rdh
// Must be called from synchronized block private void redistributeBuffers() { assert Thread.holdsLock(factoryLock); if (resizableBufferPools.isEmpty()) { return; } // All buffers, which are not among the required ones final int numAvailableMemorySegment = totalNumberOfMemorySegments - numTotalRequiredBuffers; if (numAvailableMemorySegment == 0) {// in this case, we need to redistribute buffers so that every pool gets its minimum for (LocalBufferPool bufferPool : resizableBufferPools) { bufferPool.setNumBuffers(bufferPool.getNumberOfRequiredMemorySegments()); } return; } /* With buffer pools being potentially limited, let's distribute the available memory segments based on the capacity of each buffer pool, i.e. the maximum number of segments an unlimited buffer pool can take is numAvailableMemorySegment, for limited buffer pools it may be less. Based on this and the sum of all these values (totalCapacity), we build a ratio that we use to distribute the buffers. */ long v29 = 0;// long to avoid int overflow for (LocalBufferPool bufferPool : resizableBufferPools) { int excessMax = bufferPool.getMaxNumberOfMemorySegments() - bufferPool.getNumberOfRequiredMemorySegments();v29 += Math.min(numAvailableMemorySegment, excessMax); } // no capacity to receive additional buffers? if (v29 == 0) {return;// necessary to avoid div by zero when nothing to re-distribute } // since one of the arguments of 'min(a,b)' is a positive int, this is actually // guaranteed to be within the 'int' domain // (we use a checked downCast to handle possible bugs more gracefully). final int memorySegmentsToDistribute = MathUtils.checkedDownCast(Math.min(numAvailableMemorySegment, v29)); long totalPartsUsed = 0;// of totalCapacity int v34 = 0; for (LocalBufferPool bufferPool : resizableBufferPools) { int excessMax = bufferPool.getMaxNumberOfMemorySegments() - bufferPool.getNumberOfRequiredMemorySegments(); // shortcut if (excessMax == 0) { continue; } totalPartsUsed += Math.min(numAvailableMemorySegment, excessMax); // avoid remaining buffers by looking at the total capacity that should have been // re-distributed up until here // the downcast will always succeed, because both arguments of the subtraction are in // the 'int' domain final int mySize = MathUtils.checkedDownCast(((memorySegmentsToDistribute * totalPartsUsed) / v29) - v34); v34 += mySize; bufferPool.setNumBuffers(bufferPool.getNumberOfRequiredMemorySegments() + mySize); } assert totalPartsUsed == v29; assert v34 == memorySegmentsToDistribute; }
3.26
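The proportional scheme in the redistributeBuffers snippet above is easier to follow with concrete numbers. The sketch below is a standalone re-implementation of the distribution formula (not Flink code, and the pool capacities are made up): each resizable pool receives a share of the spare segments proportional to min(numAvailableMemorySegment, excessMax), and the running totalPartsUsed counter guarantees the shares add up exactly to memorySegmentsToDistribute.

```java
// Standalone sketch of the proportional distribution used in redistributeBuffers.
public class RedistributionSketch {
    public static void main(String[] args) {
        int numAvailable = 100;                             // spare segments beyond the required ones
        int[] excessMax = {30, Integer.MAX_VALUE - 1, 0};   // maxSegments - requiredSegments per pool

        long totalCapacity = 0;                             // long to avoid int overflow
        for (int excess : excessMax) {
            totalCapacity += Math.min(numAvailable, excess);
        }
        int toDistribute = (int) Math.min(numAvailable, totalCapacity);

        long totalPartsUsed = 0;
        int distributed = 0;
        for (int i = 0; i < excessMax.length; i++) {
            if (excessMax[i] == 0) {
                continue; // this pool cannot take extra buffers
            }
            totalPartsUsed += Math.min(numAvailable, excessMax[i]);
            // rounding errors are absorbed by later pools; the last receiving pool tops up the remainder
            int share = (int) ((toDistribute * totalPartsUsed) / totalCapacity) - distributed;
            distributed += share;
            System.out.println("pool " + i + " gets +" + share + " buffers");
        }
        // with the numbers above: pool 0 gets +23, pool 1 gets +77, pool 2 is skipped
    }
}
```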
flink_NetworkBufferPool_tryRedistributeBuffers_rdh
// Must be called from synchronized block private void tryRedistributeBuffers(int numberOfSegmentsToRequest) throws IOException { assert Thread.holdsLock(factoryLock); if ((numTotalRequiredBuffers + numberOfSegmentsToRequest) > totalNumberOfMemorySegments) { throw new IOException(String.format("Insufficient number of network buffers: " + "required %d, but only %d available. %s.", numberOfSegmentsToRequest, totalNumberOfMemorySegments - numTotalRequiredBuffers, getConfigDescription())); } this.numTotalRequiredBuffers += numberOfSegmentsToRequest; try { redistributeBuffers(); } catch (Throwable t) { this.numTotalRequiredBuffers -= numberOfSegmentsToRequest; redistributeBuffers(); ExceptionUtils.rethrow(t); } }
3.26
flink_MapStateDescriptor_getValueSerializer_rdh
/** * Gets the serializer for the values in the state. * * @return The serializer for the values in the state. */ public TypeSerializer<UV> getValueSerializer() { final TypeSerializer<Map<UK, UV>> rawSerializer = getSerializer(); if (!(rawSerializer instanceof MapSerializer)) { throw new IllegalStateException("Unexpected serializer type."); } return ((MapSerializer<UK, UV>) (rawSerializer)).getValueSerializer(); }
3.26
flink_MapStateDescriptor_getKeySerializer_rdh
/** * Gets the serializer for the keys in the state. * * @return The serializer for the keys in the state. */ public TypeSerializer<UK> getKeySerializer() {final TypeSerializer<Map<UK, UV>> rawSerializer = getSerializer(); if (!(rawSerializer instanceof MapSerializer)) {throw new IllegalStateException("Unexpected serializer type."); } return ((MapSerializer<UK, UV>) (rawSerializer)).getKeySerializer(); }
3.26
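A minimal sketch of how the two MapStateDescriptor accessors above are typically used. The descriptor name and types are illustrative; the explicit serializer initialization mirrors what the Flink runtime normally does before these getters are called.

```java
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.common.typeutils.TypeSerializer;

public class MapStateDescriptorExample {
    public static void main(String[] args) {
        MapStateDescriptor<String, Long> descriptor =
                new MapStateDescriptor<>("counts-per-key", Types.STRING, Types.LONG);

        // Lazily creates the underlying MapSerializer; normally the runtime does this.
        descriptor.initializeSerializerUnlessSet(new ExecutionConfig());

        // Both calls unwrap the MapSerializer, as the snippets above show.
        TypeSerializer<String> keySerializer = descriptor.getKeySerializer();
        TypeSerializer<Long> valueSerializer = descriptor.getValueSerializer();

        System.out.println(keySerializer + " / " + valueSerializer);
    }
}
```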
flink_RefCountedBufferingFileStream_openNew_rdh
// ------------------------- Factory Methods ------------------------- public static RefCountedBufferingFileStream openNew(final FunctionWithException<File, RefCountedFileWithStream, IOException> tmpFileProvider) throws IOException { return new RefCountedBufferingFileStream(tmpFileProvider.apply(null), BUFFER_SIZE); }
3.26
flink_JobConfUtils_getDefaultPartitionName_rdh
/** * Gets the {@link HiveConf.ConfVars#DEFAULTPARTITIONNAME} value from the {@link JobConf}. */ public static String getDefaultPartitionName(JobConf jobConf) { return jobConf.get(DEFAULTPARTITIONNAME.varname, DEFAULTPARTITIONNAME.defaultStrVal); }
3.26
flink_PythonConfigUtil_alignTransformation_rdh
/** * Configure the {@link AbstractExternalOneInputPythonFunctionOperator} to be chained with the * upstream/downstream operator by setting their parallelism, slot sharing group, co-location * group to be the same, and applying a {@link ForwardPartitioner}. 1. operator with name * "_keyed_stream_values_operator" should align with its downstream operator. 2. operator with * name "_stream_key_by_map_operator" should align with its upstream operator. */ private static void alignTransformation(Transformation<?> transformation) throws NoSuchFieldException, IllegalAccessException { String transformName = transformation.getName(); if (transformation.getInputs().isEmpty()) { return; } Transformation<?> inputTransformation = transformation.getInputs().get(0); String inputTransformName = inputTransformation.getName(); if (inputTransformName.equals(KEYED_STREAM_VALUE_OPERATOR_NAME)) { chainTransformation(inputTransformation, transformation); configForwardPartitioner(inputTransformation, transformation); }if (transformName.equals(STREAM_KEY_BY_MAP_OPERATOR_NAME) || transformName.equals(STREAM_PARTITION_CUSTOM_MAP_OPERATOR_NAME)) { chainTransformation(transformation, inputTransformation); configForwardPartitioner(inputTransformation, transformation); } }
3.26
flink_PythonConfigUtil_processSideOutput_rdh
/** * Process {@link SideOutputTransformation}s, set the {@link OutputTag}s into the Python * corresponding operator to make it aware of the {@link OutputTag}s. */ private static void processSideOutput(List<Transformation<?>> transformations) {final Set<Transformation<?>> visitedTransforms = Sets.newIdentityHashSet();final Queue<Transformation<?>> queue = Queues.newArrayDeque(transformations); while (!queue.isEmpty()) { Transformation<?> transform = queue.poll(); visitedTransforms.add(transform); if (transform instanceof SideOutputTransformation) { final SideOutputTransformation<?> sideTransform = ((SideOutputTransformation<?>) (transform)); final Transformation<?> upTransform = Iterables.getOnlyElement(sideTransform.getInputs()); if (PythonConfigUtil.isPythonDataStreamOperator(upTransform)) { final DataStreamPythonFunctionOperator<?> upOperator = ((DataStreamPythonFunctionOperator<?>) (((SimpleOperatorFactory<?>) (getOperatorFactory(upTransform))).getOperator())); upOperator.addSideOutputTags(Collections.singletonList(sideTransform.getOutputTag())); } } for (Transformation<?> upTransform : transform.getInputs()) { if (!visitedTransforms.contains(upTransform)) { queue.add(upTransform); } } } }
3.26
flink_PythonConfigUtil_extractPythonConfiguration_rdh
/** * Extracts the configuration that is used in the Python operators. */ public static Configuration extractPythonConfiguration(List<Tuple2<String, DistributedCache.DistributedCacheEntry>> cachedFiles, ReadableConfig config) { final Configuration pythonDependencyConfig = PythonDependencyUtils.configurePythonDependencies(cachedFiles, config); final PythonConfig pythonConfig = new PythonConfig(config, pythonDependencyConfig); return pythonConfig.toConfiguration(); }
3.26
flink_DataStreamUtils_collectWithClient_rdh
/** * Starts the execution of the program and returns an iterator to read the result of the given * data stream, plus a {@link JobClient} to interact with the application execution. * * @deprecated Please use {@link DataStream#executeAndCollect()}. */ @Deprecated public static <OUT> ClientAndIterator<OUT> collectWithClient(DataStream<OUT> stream, String jobExecutionName) throws Exception { return stream.executeAndCollectWithClient(jobExecutionName); }
3.26
flink_DataStreamUtils_collectUnboundedStream_rdh
/** * Triggers execution of the DataStream application and collects the given number of records * from the stream. After the records are received, the execution is canceled. * * @deprecated Please use {@link DataStream#executeAndCollect()}. */ @Deprecated public static <E> List<E> collectUnboundedStream(DataStream<E> stream, int numElements, String jobName) throws Exception { final ClientAndIterator<E> clientAndIterator = collectWithClient(stream, jobName); final List<E> result = collectRecordsFromUnboundedStream(clientAndIterator, numElements); // cancel the job now that we have received enough elements clientAndIterator.client.cancel().get(); return result; }
3.26
flink_DataStreamUtils_collectRecordsFromUnboundedStream_rdh
/** * * @deprecated Please use {@link DataStream#executeAndCollect()}. */ @Deprecatedpublic static <E> List<E> collectRecordsFromUnboundedStream(final ClientAndIterator<E> client, final int numElements) { checkNotNull(client, "client"); checkArgument(numElements > 0, "numElement must be > 0"); final ArrayList<E> result = new ArrayList<>(numElements); final Iterator<E> iterator = client.iterator; while (iterator.hasNext()) { result.add(iterator.next()); if (result.size() == numElements) { return result; } } throw new IllegalArgumentException(String.format("The stream ended before reaching the requested %d records. Only %d records were received.", numElements, result.size())); }
3.26
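The deprecated collectors above all point to DataStream#executeAndCollect. A short sketch of that replacement; the bounded source and the limit of 10 elements are illustrative.

```java
import java.util.List;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ExecuteAndCollectExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Long> stream = env.fromSequence(0, 999);

        // Collects at most 10 elements and then shuts the job down, replacing the
        // collectWithClient / collectRecordsFromUnboundedStream pattern above.
        List<Long> firstTen = stream.executeAndCollect(10);
        System.out.println(firstTen);
    }
}
```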
flink_DataStreamUtils_m0_rdh
// ------------------------------------------------------------------------ // Deriving a KeyedStream from a stream already partitioned by key // without a shuffle // ------------------------------------------------------------------------ /** * Reinterprets the given {@link DataStream} as a {@link KeyedStream}, which extracts keys with * the given {@link KeySelector}. * * <p>IMPORTANT: For every partition of the base stream, the keys of events in the base stream * must be partitioned exactly in the same way as if it was created through a {@link DataStream#keyBy(KeySelector)}. * * @param stream * The data stream to reinterpret. For every partition, this stream must be * partitioned exactly in the same way as if it was created through a {@link DataStream#keyBy(KeySelector)}. * @param keySelector * Function that defines how keys are extracted from the data stream. * @param <T> * Type of events in the data stream. * @param <K> * Type of the extracted keys. * @return The reinterpretation of the {@link DataStream} as a {@link KeyedStream}. */ public static <T, K> KeyedStream<T, K> m0(DataStream<T> stream, KeySelector<T, K> keySelector) { return reinterpretAsKeyedStream(stream, keySelector, TypeExtractor.getKeySelectorTypes(keySelector, stream.getType())); }
3.26
flink_DataStreamUtils_collectBoundedStream_rdh
/** * Collects the contents of the given DataStream into a list, assuming that the stream is a bounded * stream. * * <p>This method blocks until the job execution is complete. By the time the method returns, * the job will have reached its FINISHED status. * * <p>Note that if the stream is unbounded, this method will never return and might fail with an * Out-of-Memory Error because it attempts to collect an infinite stream into a list. * * @throws Exception * Exceptions that occur during the execution are forwarded. * @deprecated Please use {@link DataStream#executeAndCollect()}. */ @Deprecated public static <E> List<E> collectBoundedStream(DataStream<E> stream, String jobName) throws Exception { final ArrayList<E> result = new ArrayList<>(); final Iterator<E> iter = collectWithClient(stream, jobName).iterator; while (iter.hasNext()) { result.add(iter.next()); } result.trimToSize(); return result; }
3.26
flink_DataStreamUtils_reinterpretAsKeyedStream_rdh
/** * Reinterprets the given {@link DataStream} as a {@link KeyedStream}, which extracts keys with * the given {@link KeySelector}. * * <p>IMPORTANT: For every partition of the base stream, the keys of events in the base stream * must be partitioned exactly in the same way as if it was created through a {@link DataStream#keyBy(KeySelector)}. * * @param stream * The data stream to reinterpret. For every partition, this stream must be * partitioned exactly in the same way as if it was created through a {@link DataStream#keyBy(KeySelector)}. * @param keySelector * Function that defines how keys are extracted from the data stream. * @param typeInfo * Explicit type information about the key type. * @param <T> * Type of events in the data stream. * @param <K> * Type of the extracted keys. * @return The reinterpretation of the {@link DataStream} as a {@link KeyedStream}. */ public static <T, K> KeyedStream<T, K> reinterpretAsKeyedStream(DataStream<T> stream, KeySelector<T, K> keySelector, TypeInformation<K> typeInfo) { PartitionTransformation<T> partitionTransformation = new PartitionTransformation<>(stream.getTransformation(), new ForwardPartitioner<>()); return new KeyedStream<>(stream, partitionTransformation, keySelector, typeInfo); }
3.26
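A sketch of reinterpretAsKeyedStream from the snippets above. The source, key type, and job name are illustrative; parallelism is pinned to 1 so the "already partitioned exactly as keyBy would partition it" precondition holds trivially. As the snippet shows, no shuffle is inserted because a ForwardPartitioner is used.

```java
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamUtils;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ReinterpretExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1); // a single partition trivially satisfies the keyBy-partitioning precondition

        DataStream<Tuple2<String, Integer>> prePartitioned =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("a", 2), Tuple2.of("b", 3));

        KeyedStream<Tuple2<String, Integer>, String> keyed =
                DataStreamUtils.reinterpretAsKeyedStream(
                        prePartitioned, value -> value.f0, Types.STRING);

        keyed.sum(1).print();
        env.execute("reinterpret-as-keyed-stream sketch");
    }
}
```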
flink_DataStreamUtils_collect_rdh
/** * Triggers the distributed execution of the streaming dataflow and returns an iterator over the * elements of the given DataStream. * * <p>The DataStream application is executed in the regular distributed manner on the target * environment, and the events from the stream are polled back to this application process and * thread through Flink's REST API. * * @deprecated Please use {@link DataStream#executeAndCollect()}. */ @Deprecated public static <OUT> Iterator<OUT> collect(DataStream<OUT> stream, String executionJobName) { try { return stream.executeAndCollect(executionJobName); } catch (Exception e) { // this "wrap as unchecked" step is here only to preserve the exception signature // backwards compatible. throw new RuntimeException("Failed to execute data stream", e); } }
3.26
flink_FsCheckpointMetadataOutputStream_write_rdh
// I/O // ------------------------------------------------------------------------ @Override public final void write(int b) throws IOException { outputStreamWrapper.getOutput().write(b); }
3.26
flink_FsCheckpointMetadataOutputStream_isClosed_rdh
// ------------------------------------------------------------------------ // Closing // ------------------------------------------------------------------------ public boolean isClosed() { return closed; }
3.26
flink_SuperstepBarrier_onEvent_rdh
/** * Barrier will release the waiting thread if an event occurs. */ @Override public void onEvent(TaskEvent event) { if (event instanceof TerminationEvent) { terminationSignaled = true; } else if (event instanceof AllWorkersDoneEvent) { AllWorkersDoneEvent wde = ((AllWorkersDoneEvent) (event)); aggregatorNames = wde.getAggregatorNames(); aggregates = wde.getAggregates(userCodeClassLoader); } else { throw new IllegalArgumentException("Unknown event type."); } latch.countDown(); }
3.26
flink_SuperstepBarrier_setup_rdh
/** * Sets up the barrier; has to be called at the beginning of each superstep. */ public void setup() { latch = new CountDownLatch(1); }
3.26
flink_SuperstepBarrier_waitForOtherWorkers_rdh
/** * Wait on the barrier. */ public void waitForOtherWorkers() throws InterruptedException { latch.await(); }
3.26
flink_HiveParserRexNodeConverter_convertIN_rdh
// converts IN for constant value list, RexSubQuery won't get here private RexNode convertIN(ExprNodeGenericFuncDesc func) throws SemanticException { List<RexNode> childRexNodes = new ArrayList<>(); for (ExprNodeDesc childExpr : func.getChildren()) {childRexNodes.add(convert(childExpr)); } if (funcConverter.hasOverloadedOp(HiveParserIN.INSTANCE, SqlFunctionCategory.USER_DEFINED_FUNCTION)) { return cluster.getRexBuilder().makeCall(HiveParserIN.INSTANCE, childRexNodes); } else { // hive module is not loaded, calcite converts IN using either OR or inline table // (LogicalValues), we do the same here but only support OR for now RexNode leftKey = childRexNodes.get(0); Preconditions.checkState(leftKey instanceof RexInputRef, "Expecting LHS key of IN to be a RexInputRef, actually got " + leftKey); final List<RexNode> comparisons = new ArrayList<>(); for (int i = 1; i < childRexNodes.size(); i++) { comparisons.add(cluster.getRexBuilder().makeCall(SqlStdOperatorTable.EQUALS, leftKey, childRexNodes.get(i))); } return RexUtil.composeDisjunction(cluster.getRexBuilder(), comparisons, true);} }
3.26
flink_PostVersionedIOReadableWritable_read_rdh
/** * We do not support reading from a {@link DataInputView}, because it does not support pushing * back already read bytes. */ @Override public final void read(DataInputView in) throws IOException { throw new UnsupportedOperationException("PostVersionedIOReadableWritable cannot read from a DataInputView."); }
3.26
flink_SimpleSplitAssigner_getNext_rdh
// ------------------------------------------------------------------------ @Override public Optional<FileSourceSplit> getNext(String hostname) { final int size = splits.size(); return size == 0 ? Optional.empty() : Optional.of(splits.remove(size - 1)); }
3.26
flink_SimpleSplitAssigner_toString_rdh
// ------------------------------------------------------------------------ @Override public String toString() { return "SimpleSplitAssigner " + splits; }
3.26
flink_SortMergeFullOuterJoinIterator_bufferRows2_rdh
/** * Buffers rows from iterator2 that have the same key. */ private void bufferRows2() throws IOException { BinaryRowData copy = key2.copy(); buffer2.reset(); do { buffer2.add(row2); } while (nextRow2() && (keyComparator.compare(key2, copy) == 0) ); buffer2.complete(); }
3.26
flink_SortMergeFullOuterJoinIterator_bufferRows1_rdh
/** * Buffers rows from iterator1 that have the same key. */ private void bufferRows1() throws IOException { BinaryRowData copy = key1.copy(); buffer1.reset(); do { buffer1.add(row1); } while (nextRow1() && (keyComparator.compare(key1, copy) == 0) ); buffer1.complete(); }
3.26
flink_KeyContextHandler_hasKeyContext_rdh
/** * Whether the {@link Input} has "KeyContext". If false, we can omit the call of {@link Input#setKeyContextElement} for each record. * * @return True if the {@link Input} has "KeyContext", false otherwise. */ default boolean hasKeyContext() { return hasKeyContext1(); }
3.26
flink_KeyContextHandler_hasKeyContext1_rdh
/** * Whether the first input of {@link StreamOperator} has "KeyContext". If false, we can omit the * call of {@link StreamOperator#setKeyContextElement1} for each record arrived on the first * input. * * @return True if the first input has "KeyContext", false otherwise. */ default boolean hasKeyContext1() { return true; } /** * Whether the second input of {@link StreamOperator} has "KeyContext". If false, we can omit * the call of {@link StreamOperator#setKeyContextElement1}
3.26
flink_EventAnnouncement_write_rdh
// ------------------------------------------------------------------------ // Serialization // ------------------------------------------------------------------------ // // These methods are inherited from the generic serialization of AbstractEvent // but would require the CheckpointBarrier to be mutable. Since all serialization // for events goes through the EventSerializer class, which has special serialization // for the CheckpointBarrier, we don't need these methods // @Override public void write(DataOutputView out) throws IOException { throw new UnsupportedOperationException("This method should never be called"); }
3.26
flink_EventAnnouncement_hashCode_rdh
// ------------------------------------------------------------------------ @Override public int hashCode() { return Objects.hash(announcedEvent, sequenceNumber); }
3.26
flink_FunctionTemplate_createResultTemplate_rdh
/** * Creates an instance of {@link FunctionResultTemplate} from a {@link DataTypeHint}. */ @Nullable static FunctionResultTemplate createResultTemplate(DataTypeFactory typeFactory, @Nullable DataTypeHint hint) { if (hint == null) { return null; } final DataTypeTemplate template; try { template = DataTypeTemplate.fromAnnotation(typeFactory, hint); } catch (Throwable t) { throw extractionError(t, "Error in data type hint annotation."); } if (template.dataType != null) { return FunctionResultTemplate.of(template.dataType); } throw extractionError("Data type hint does not specify a data type for use as function result."); }
3.26
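The kind of annotation that createResultTemplate consumes is a @DataTypeHint on a function's result. A small illustrative scalar function (the class name and logic are made up); loosely speaking, the hinted type ends up as the function's result template instead of being extracted by reflection.

```java
import java.math.BigDecimal;
import java.math.RoundingMode;
import org.apache.flink.table.annotation.DataTypeHint;
import org.apache.flink.table.functions.ScalarFunction;

// Illustrative UDF: the explicit hint fixes the result type to DECIMAL(12, 3).
public class TruncateTo3Decimals extends ScalarFunction {
    public @DataTypeHint("DECIMAL(12, 3)") BigDecimal eval(double value) {
        return BigDecimal.valueOf(value).setScale(3, RoundingMode.DOWN);
    }
}
```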
flink_FunctionTemplate_fromAnnotation_rdh
/** * Creates an instance using the given {@link ProcedureHint}. It resolves explicitly defined * data types. */ static FunctionTemplate fromAnnotation(DataTypeFactory typeFactory, ProcedureHint hint) { return new FunctionTemplate(createSignatureTemplate(typeFactory, defaultAsNull(hint, ProcedureHint::input), defaultAsNull(hint, ProcedureHint::argumentNames), hint.isVarArgs()), null, createResultTemplate(typeFactory, defaultAsNull(hint, ProcedureHint::output))); }
3.26
flink_JvmShutdownSafeguard_installAsShutdownHook_rdh
/** * Installs the safeguard shutdown hook. The maximum time that the JVM is allowed to spend on * shutdown before being killed is the given number of milliseconds. * * @param logger * The logger to log errors to. * @param delayMillis * The delay (in milliseconds) to wait after clean shutdown was started, * before forcibly terminating the JVM. */ public static void installAsShutdownHook(Logger logger, long delayMillis) { checkArgument(delayMillis >= 0, "delay must be >= 0"); // install the blocking shutdown hook Thread shutdownHook = new JvmShutdownSafeguard(delayMillis); ShutdownHookUtil.addShutdownHookThread(shutdownHook, JvmShutdownSafeguard.class.getSimpleName(), logger); }
3.26
flink_ScanReuser_applyPhysicalAndMetadataPushDown_rdh
/** * Generates sourceAbilitySpecs and newProducedType from the projected physical fields and metadata * keys. */ private static RowType applyPhysicalAndMetadataPushDown(DynamicTableSource source, RowType originType, List<SourceAbilitySpec> sourceAbilitySpecs, int[][] physicalAndMetaFields, int[][] projectedPhysicalFields, List<String> usedMetadataNames) { RowType newProducedType = originType; boolean supportsProjectPushDown = source instanceof SupportsProjectionPushDown; boolean supportsReadingMetadata = source instanceof SupportsReadingMetadata; if (supportsProjectPushDown || supportsReadingMetadata) { newProducedType = ((RowType) (Projection.of(physicalAndMetaFields).project(originType))); } if (supportsProjectPushDown) { sourceAbilitySpecs.add(new ProjectPushDownSpec(projectedPhysicalFields, newProducedType)); } if (supportsReadingMetadata) { sourceAbilitySpecs.add(new ReadingMetadataSpec(usedMetadataNames, newProducedType)); } return newProducedType; }
3.26
flink_ExecNodeUtil_setManagedMemoryWeight_rdh
/** * A utility class that helps translate {@link ExecNode} to {@link Transformation}. */ public class ExecNodeUtil { /** * Sets {@link Transformation#declareManagedMemoryUseCaseAtOperatorScope(ManagedMemoryUseCase, int)} * using the given bytes for {@link ManagedMemoryUseCase#OPERATOR}. */ public static <T> void setManagedMemoryWeight(Transformation<T> transformation, long memoryBytes) { if (memoryBytes > 0) { final int weightInMebibyte = Math.max(1, ((int) (memoryBytes >> 20))); final Optional<Integer> previousWeight = transformation.declareManagedMemoryUseCaseAtOperatorScope(ManagedMemoryUseCase.OPERATOR, weightInMebibyte); if (previousWeight.isPresent()) { throw new TableException("Managed memory weight has been set, this should not happen."); } } }
3.26
flink_ExecNodeUtil_createOneInputTransformation_rdh
/** * Create a {@link OneInputTransformation} with memoryBytes. */ public static <I, O> OneInputTransformation<I, O> createOneInputTransformation(Transformation<I> input, TransformationMetadata transformationMeta, StreamOperatorFactory<O> operatorFactory, TypeInformation<O> outputType, int parallelism, long memoryBytes, boolean parallelismConfigured) { OneInputTransformation<I, O> transformation = new OneInputTransformation<>(input, transformationMeta.getName(), operatorFactory, outputType, parallelism, parallelismConfigured); setManagedMemoryWeight(transformation, memoryBytes); transformationMeta.fill(transformation); return transformation; }
3.26
flink_ExecNodeUtil_m0_rdh
/** * Create a {@link TwoInputTransformation} with memoryBytes. */ public static <IN1, IN2, O> TwoInputTransformation<IN1, IN2, O> m0(Transformation<IN1> input1, Transformation<IN2> input2, TransformationMetadata transformationMeta, TwoInputStreamOperator<IN1, IN2, O> operator, TypeInformation<O> outputType, int parallelism, long memoryBytes) { return m0(input1, input2, transformationMeta, SimpleOperatorFactory.of(operator), outputType, parallelism, memoryBytes); }
3.26
flink_ExecNodeUtil_createTwoInputTransformation_rdh
/** * Create a {@link TwoInputTransformation} with memoryBytes. */ public static <I1, I2, O> TwoInputTransformation<I1, I2, O> createTwoInputTransformation(Transformation<I1> input1, Transformation<I2> input2, String name, String desc, StreamOperatorFactory<O> operatorFactory, TypeInformation<O> outputType, int parallelism, long memoryBytes, boolean parallelismConfigured) { return createTwoInputTransformation(input1, input2, new TransformationMetadata(name, desc), operatorFactory, outputType, parallelism, memoryBytes, parallelismConfigured); }
3.26
flink_ExecNodeUtil_makeLegacySourceTransformationsBounded_rdh
/** * The planner might have more information than expressed in legacy source transformations. This * enforces planner information about boundedness to the affected transformations. */ public static void makeLegacySourceTransformationsBounded(Transformation<?> transformation) { if (transformation instanceof LegacySourceTransformation) { ((LegacySourceTransformation<?>) (transformation)).setBoundedness(Boundedness.BOUNDED); }transformation.getInputs().forEach(ExecNodeUtil::makeLegacySourceTransformationsBounded); }
3.26
flink_ExecNodeUtil_getMultipleInputDescription_rdh
/** * Return description for multiple input node. */ public static String getMultipleInputDescription(ExecNode<?> rootNode, List<ExecNode<?>> inputNodes, List<InputProperty> inputProperties) { String members = ExecNodePlanDumper.treeToString(rootNode, inputNodes, true).replace("\n", "\\n"); StringBuilder sb = new StringBuilder(); sb.append("MultipleInput("); List<String> readOrders = inputProperties.stream().map(InputProperty::getPriority).map(Object::toString).collect(Collectors.toList()); boolean hasDiffReadOrder = readOrders.stream().distinct().count() > 1; if (hasDiffReadOrder) { sb.append("readOrder=[").append(String.join(",", readOrders)).append("], "); } sb.append("members=[\\n").append(members).append("]"); sb.append(")"); return sb.toString(); }
3.26
flink_RocksDBKeyedStateBackend_getInstanceBasePath_rdh
/** * Only visible for testing, DO NOT USE. */ File getInstanceBasePath() { return instanceBasePath; }
3.26
flink_RocksDBKeyedStateBackend_snapshot_rdh
/** * Triggers an asynchronous snapshot of the keyed state backend from RocksDB. This snapshot can * be canceled and is also stopped when the backend is closed through {@link #dispose()}. For * each backend, this method must always be called by the same thread. * * @param checkpointId * The Id of the checkpoint. * @param timestamp * The timestamp of the checkpoint. * @param streamFactory * The factory that we can use for writing our state to streams. * @param checkpointOptions * Options for how to perform this checkpoint. * @return Future to the state handle of the snapshot data. * @throws Exception * indicating a problem in the synchronous part of the checkpoint. */ @Nonnull@Override public RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot(final long checkpointId, final long timestamp, @Nonnull final CheckpointStreamFactory streamFactory, @Nonnull CheckpointOptions checkpointOptions) throws Exception { // flush everything into db before taking a snapshot writeBatchWrapper.flush(); return new SnapshotStrategyRunner<>(checkpointSnapshotStrategy.getDescription(), checkpointSnapshotStrategy, cancelStreamRegistry, ASYNCHRONOUS).snapshot(checkpointId, timestamp, streamFactory, checkpointOptions); }
3.26
flink_RocksDBKeyedStateBackend_tryRegisterKvStateInformation_rdh
/** * Registers a k/v state information, which includes its state id, type, RocksDB column family * handle, and serializers. * * <p>When restoring from a snapshot, we don’t restore the individual k/v states, just the * global RocksDB database and the list of k/v state information. When a k/v state is first * requested we check here whether we already have a registered entry for that and return it * (after some necessary state compatibility checks) or create a new one if it does not exist. */ private <N, S extends State, SV, SEV> Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> tryRegisterKvStateInformation(StateDescriptor<S, SV> stateDesc, TypeSerializer<N> namespaceSerializer, @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory, boolean allowFutureMetadataUpdates) throws Exception { RocksDbKvStateInfo v21 = kvStateInformation.get(stateDesc.getName());TypeSerializer<SV> stateSerializer = stateDesc.getSerializer(); RocksDbKvStateInfo newRocksStateInfo;RegisteredKeyValueStateBackendMetaInfo<N, SV> newMetaInfo; if (v21 != null) { @SuppressWarnings("unchecked") RegisteredKeyValueStateBackendMetaInfo<N, SV> castedMetaInfo = ((RegisteredKeyValueStateBackendMetaInfo<N, SV>) (v21.metaInfo)); newMetaInfo = updateRestoredStateMetaInfo(Tuple2.of(v21.columnFamilyHandle, castedMetaInfo), stateDesc, namespaceSerializer, stateSerializer); newMetaInfo = (allowFutureMetadataUpdates) ? newMetaInfo.withSerializerUpgradesAllowed() : newMetaInfo; newRocksStateInfo = new RocksDbKvStateInfo(v21.columnFamilyHandle, newMetaInfo); kvStateInformation.put(stateDesc.getName(), newRocksStateInfo); } else { newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(stateDesc.getType(), stateDesc.getName(), namespaceSerializer, stateSerializer, StateSnapshotTransformFactory.noTransform()); newMetaInfo = (allowFutureMetadataUpdates) ? newMetaInfo.withSerializerUpgradesAllowed() : newMetaInfo; newRocksStateInfo = RocksDBOperationUtils.createStateInfo(newMetaInfo, f2, columnFamilyOptionsFactory, ttlCompactFiltersManager, optionsContainer.getWriteBufferManagerCapacity()); RocksDBOperationUtils.registerKvStateInformation(this.kvStateInformation, this.nativeMetricMonitor, stateDesc.getName(), newRocksStateInfo);} StateSnapshotTransformFactory<SV> wrappedSnapshotTransformFactory = wrapStateSnapshotTransformFactory(stateDesc, snapshotTransformFactory, newMetaInfo.getStateSerializer()); newMetaInfo.updateSnapshotTransformFactory(wrappedSnapshotTransformFactory);return Tuple2.of(newRocksStateInfo.columnFamilyHandle, newMetaInfo); }
3.26
flink_RocksDBKeyedStateBackend_dispose_rdh
/** * Should only be called by one thread, and only after all accesses to the DB happened. */ @Override public void dispose() { if (this.disposed) { return; } super.dispose(); // This call will block until all clients that still acquire access to the RocksDB instance // have released it, // so that we cannot release the native resources while clients are still working with it in // parallel. rocksDBResourceGuard.close(); // IMPORTANT: null reference to signal potential async checkpoint workers that the db was // disposed, as // working on the disposed object results in SEGFAULTS. if (f2 != null) { IOUtils.closeQuietly(writeBatchWrapper); // Metric collection occurs on a background thread. When this method returns // it is guaranteed that thr RocksDB reference has been invalidated // and no more metric collection will be attempted against the database. if (nativeMetricMonitor != null) { nativeMetricMonitor.close(); } List<ColumnFamilyOptions> columnFamilyOptions = new ArrayList<>(kvStateInformation.values().size()); // RocksDB's native memory management requires that *all* CFs (including default) are // closed before the // DB is closed. See: // https://github.com/facebook/rocksdb/wiki/RocksJava-Basics#opening-a-database-with-column-families // Start with default CF ... RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(columnFamilyOptions, f1); IOUtils.closeQuietly(f1); // ... continue with the ones created by Flink... for (RocksDbKvStateInfo kvStateInfo : kvStateInformation.values()) { RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(columnFamilyOptions, kvStateInfo.columnFamilyHandle); IOUtils.closeQuietly(kvStateInfo.columnFamilyHandle); } // ... and finally close the DB instance ... IOUtils.closeQuietly(f2); columnFamilyOptions.forEach(IOUtils::closeQuietly); IOUtils.closeQuietly(optionsContainer); ttlCompactFiltersManager.disposeAndClearRegisteredCompactionFactories(); kvStateInformation.clear(); cleanInstanceBasePath(); } IOUtils.closeQuietly(checkpointSnapshotStrategy); this.disposed = true; }
3.26
flink_RocksDBKeyedStateBackend_getKeyGroupPrefixBytes_rdh
// ------------------------------------------------------------------------ // Getters and Setters // ------------------------------------------------------------------------ public int getKeyGroupPrefixBytes() { return keyGroupPrefixBytes; }
3.26
flink_RocksDBKeyedStateBackend_migrateStateValues_rdh
/** * Migrate only the state value, that is the "value" that is stored in RocksDB. We don't migrate * the key here, which is made up of key group, key, namespace and map key (in case of * MapState). */ @SuppressWarnings("unchecked") private <N, S extends State, SV> void migrateStateValues(StateDescriptor<S, SV> stateDesc, Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> stateMetaInfo) throws Exception { if (stateDesc.getType() == Type.MAP) { TypeSerializerSnapshot<SV> previousSerializerSnapshot = stateMetaInfo.f1.getPreviousStateSerializerSnapshot(); checkState(previousSerializerSnapshot != null, "the previous serializer snapshot should exist."); checkState(previousSerializerSnapshot instanceof MapSerializerSnapshot, "previous serializer snapshot should be a MapSerializerSnapshot."); TypeSerializer<SV> newSerializer = stateMetaInfo.f1.getStateSerializer(); checkState(newSerializer instanceof MapSerializer, "new serializer should be a MapSerializer."); MapSerializer<?, ?> mapSerializer = ((MapSerializer<?, ?>) (newSerializer)); MapSerializerSnapshot<?, ?> mapSerializerSnapshot = ((MapSerializerSnapshot<?, ?>) (previousSerializerSnapshot)); if (!checkMapStateKeySchemaCompatibility(mapSerializerSnapshot, mapSerializer)) { throw new StateMigrationException("The new serializer for a MapState requires state migration in order for the job to proceed, since the key schema has changed. However, migration for MapState currently only allows value schema evolutions."); } } LOG.info("Performing state migration for state {} because the state serializer's schema, i.e. serialization format, has changed.", stateDesc); // we need to get an actual state instance because migration is different // for different state types. For example, ListState needs to deal with // individual elements State state = createState(stateDesc, stateMetaInfo); if (!(state instanceof AbstractRocksDBState)) { throw new FlinkRuntimeException("State should be an AbstractRocksDBState but is " + state); } @SuppressWarnings("unchecked") AbstractRocksDBState<?, ?, SV> rocksDBState = ((AbstractRocksDBState<?, ?, SV>) (state)); Snapshot rocksDBSnapshot = f2.getSnapshot(); try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(f2, stateMetaInfo.f0, readOptions);RocksDBWriteBatchWrapper batchWriter = new RocksDBWriteBatchWrapper(f2, getWriteOptions(), getWriteBatchSize())) { iterator.seekToFirst(); DataInputDeserializer serializedValueInput = new DataInputDeserializer(); DataOutputSerializer migratedSerializedValueOutput = new DataOutputSerializer(512); while (iterator.isValid()) { serializedValueInput.setBuffer(iterator.value()); rocksDBState.migrateSerializedValue(serializedValueInput, migratedSerializedValueOutput, stateMetaInfo.f1.getPreviousStateSerializer(), stateMetaInfo.f1.getStateSerializer()); batchWriter.put(stateMetaInfo.f0, iterator.key(), migratedSerializedValueOutput.getCopyOfBuffer()); migratedSerializedValueOutput.clear(); iterator.next(); } } finally { f2.releaseSnapshot(rocksDBSnapshot); rocksDBSnapshot.close(); } }
3.26
flink_HiveParserSqlFunctionConverter_getName_rdh
// TODO: this is not valid. Function names for built-in UDFs are specified in // FunctionRegistry, and only happen to match annotations. For user UDFs, the // name is what user specifies at creation time (annotation can be absent, // different, or duplicate some other function). private static String getName(GenericUDF hiveUDF) { String udfName = null; if (hiveUDF instanceof GenericUDFBridge) { udfName = hiveUDF.getUdfName(); } else { Class<? extends GenericUDF> udfClass = hiveUDF.getClass(); Description udfAnnotation = udfClass.getAnnotation(Description.class); if (udfAnnotation != null) { udfName = udfAnnotation.name(); if (udfName != null) { String[] aliases = udfName.split(","); if (aliases.length > 0) { udfName = aliases[0]; } } } if ((udfName == null) || udfName.isEmpty()) { udfName = hiveUDF.getClass().getName(); int indx = udfName.lastIndexOf("."); if (indx >= 0) { indx += 1; udfName = udfName.substring(indx); } } } return udfName; }
3.26
flink_SortMergeResultPartitionReadScheduler_release_rdh
/** * Releases this read scheduler and returns a {@link CompletableFuture} which will be completed * when all resources are released. */ CompletableFuture<?> release() {List<SortMergeSubpartitionReader> pendingReaders; synchronized(lock) { if (isReleased) { return releaseFuture; } isReleased = true; failedReaders.addAll(allReaders); pendingReaders = new ArrayList<>(allReaders); mayNotifyReleased(); } failSubpartitionReaders(pendingReaders, new IllegalStateException("Result partition has been already released.")); return releaseFuture; }
3.26
flink_HiveParserASTNode_setUnknownTokenBoundaries_rdh
/** * For every node in this subtree, make sure its start/stop tokens are set. Walks depth first, * visiting bottom up. Only updates nodes with at least one token index < 0. * * <p>In contrast to the method in the parent class, this method is iterative. */ @Override public void setUnknownTokenBoundaries() { Deque<HiveParserASTNode> stack1 = new ArrayDeque<HiveParserASTNode>(); Deque<HiveParserASTNode> stack2 = new ArrayDeque<HiveParserASTNode>(); stack1.push(this); while (!stack1.isEmpty()) { HiveParserASTNode next = stack1.pop(); stack2.push(next); if (next.children != null) { for (int i = next.children.size() - 1; i >= 0; i--) { stack1.push(((HiveParserASTNode) (next.children.get(i)))); } } } while (!stack2.isEmpty()) { HiveParserASTNode next = stack2.pop(); if (next.children == null) { if ((next.startIndex < 0) || (next.stopIndex < 0)) { next.startIndex = next.stopIndex = next.token.getTokenIndex(); } } else if ((next.startIndex >= 0) && (next.stopIndex >= 0)) { continue; } else if (next.children.size() > 0) { HiveParserASTNode firstChild = ((HiveParserASTNode) (next.children.get(0))); HiveParserASTNode lastChild = ((HiveParserASTNode) (next.children.get(next.children.size() - 1))); next.startIndex = firstChild.getTokenStartIndex(); next.stopIndex = lastChild.getTokenStopIndex(); } } }
3.26
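The setUnknownTokenBoundaries snippet above uses the classic two-stack trick to visit children before parents without recursion. A generic, self-contained sketch of that traversal pattern (the Node type is illustrative, not Hive's AST):

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class TwoStackBottomUp {
    static class Node {
        final String label;
        final List<Node> children = new ArrayList<>();
        Node(String label) { this.label = label; }
    }

    public static void main(String[] args) {
        Node root = new Node("root");
        Node a = new Node("a");
        Node b = new Node("b");
        root.children.add(a);
        root.children.add(b);
        a.children.add(new Node("a1"));

        // Popping stack1 yields a pre-order visit; stack2 reverses it, so every node
        // is handled only after all of its descendants (the property the snippet relies on).
        Deque<Node> stack1 = new ArrayDeque<>();
        Deque<Node> stack2 = new ArrayDeque<>();
        stack1.push(root);
        while (!stack1.isEmpty()) {
            Node next = stack1.pop();
            stack2.push(next);
            for (int i = next.children.size() - 1; i >= 0; i--) {
                stack1.push(next.children.get(i));
            }
        }
        while (!stack2.isEmpty()) {
            System.out.println(stack2.pop().label); // children print before their parents
        }
    }
}
```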
flink_UserDefinedFunctionHelper_validateImplementationMethod_rdh
/** * Validates an implementation method such as {@code eval()} or {@code accumulate()}. */ private static void validateImplementationMethod(Class<? extends UserDefinedFunction> clazz, boolean rejectStatic, boolean isOptional, String... methodNameOptions) { final Set<String> nameSet = new HashSet<>(Arrays.asList(methodNameOptions)); final List<Method> methods = getAllDeclaredMethods(clazz); boolean found = false; for (Method method : methods) { if (!nameSet.contains(method.getName())) { continue; } found = true; final int modifier = method.getModifiers(); if (!Modifier.isPublic(modifier)) { throw new ValidationException(String.format("Method '%s' of function class '%s' is not public.", method.getName(), clazz.getName())); } if (Modifier.isAbstract(modifier)) { throw new ValidationException(String.format("Method '%s' of function class '%s' must not be abstract.", method.getName(), clazz.getName())); } if (rejectStatic && Modifier.isStatic(modifier)) { throw new ValidationException(String.format("Method '%s' of function class '%s' must not be static.", method.getName(), clazz.getName())); } } if ((!found) && (!isOptional)) { throw new ValidationException(String.format("Function class '%s' does not implement a method named %s.", clazz.getName(), nameSet.stream().map(s -> ("'" + s) + "'").collect(Collectors.joining(" or "))));} }
3.26
flink_UserDefinedFunctionHelper_validateClass_rdh
/** * Validates a {@link UserDefinedFunction} class for usage in the API. */ private static void validateClass(Class<? extends UserDefinedFunction> functionClass, boolean requiresDefaultConstructor) { if (TableFunction.class.isAssignableFrom(functionClass)) { validateNotSingleton(functionClass); } validateInstantiation(functionClass, requiresDefaultConstructor); validateImplementationMethods(functionClass); }
3.26
flink_UserDefinedFunctionHelper_isClassNameSerializable_rdh
/** * Returns whether a {@link UserDefinedFunction} can be easily serialized and identified by only * a fully qualified class name. It must have a default constructor and no serializable fields. * * <p>Other properties (such as checks for abstract classes) are validated at the entry points * of the API, see {@link #prepareInstance(ReadableConfig, UserDefinedFunction)}. */ public static boolean isClassNameSerializable(UserDefinedFunction function) { final Class<?> functionClass = function.getClass(); if (!InstantiationUtil.hasPublicNullaryConstructor(functionClass)) { // function must be parameterized return false; } Class<?> currentClass = functionClass; while (!currentClass.equals(UserDefinedFunction.class)) { for (Field field : currentClass.getDeclaredFields()) { if ((!Modifier.isTransient(field.getModifiers())) && (!Modifier.isStatic(field.getModifiers()))) { // function seems to be stateful return false; } } currentClass = currentClass.getSuperclass(); } return true; }
3.26
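Two illustrative functions (both names and bodies are made up) that show what isClassNameSerializable checks: a UDF with a default constructor and no non-transient, non-static instance fields can be restored from its class name alone, while a parameterized one cannot.

```java
import org.apache.flink.table.functions.ScalarFunction;
import org.apache.flink.table.functions.UserDefinedFunctionHelper;

public class ClassNameSerializableExample {

    /** Stateless: implicit default constructor, no instance fields. */
    public static class UpperCase extends ScalarFunction {
        public String eval(String s) {
            return s == null ? null : s.toUpperCase();
        }
    }

    /** Parameterized: no default constructor and an instance field, so the class name alone is not enough. */
    public static class Prefixer extends ScalarFunction {
        private final String prefix;
        public Prefixer(String prefix) {
            this.prefix = prefix;
        }
        public String eval(String s) {
            return prefix + s;
        }
    }

    public static void main(String[] args) {
        System.out.println(UserDefinedFunctionHelper.isClassNameSerializable(new UpperCase()));    // true
        System.out.println(UserDefinedFunctionHelper.isClassNameSerializable(new Prefixer("p-"))); // false
    }
}
```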
flink_UserDefinedFunctionHelper_validateImplementationMethods_rdh
/** * Validates the implementation methods such as {@link #SCALAR_EVAL} or {@link #AGGREGATE_ACCUMULATE} depending on the {@link UserDefinedFunction} subclass. * * <p>This method must be kept in sync with the code generation requirements and the individual * docs of each function. */ private static void validateImplementationMethods(Class<? extends UserDefinedFunction> functionClass) {if (ScalarFunction.class.isAssignableFrom(functionClass)) { validateImplementationMethod(functionClass, false, false, SCALAR_EVAL); } else if (TableFunction.class.isAssignableFrom(functionClass)) { validateImplementationMethod(functionClass, true, false, TABLE_EVAL); } else if (AsyncTableFunction.class.isAssignableFrom(functionClass)) { validateImplementationMethod(functionClass, true, false, ASYNC_TABLE_EVAL); } else if (AggregateFunction.class.isAssignableFrom(functionClass)) { validateImplementationMethod(functionClass, true, false, AGGREGATE_ACCUMULATE); validateImplementationMethod(functionClass, true, true, AGGREGATE_RETRACT); validateImplementationMethod(functionClass, true, true, AGGREGATE_MERGE); } else if (TableAggregateFunction.class.isAssignableFrom(functionClass)) { validateImplementationMethod(functionClass, true, false, TABLE_AGGREGATE_ACCUMULATE); validateImplementationMethod(functionClass, true, true, TABLE_AGGREGATE_RETRACT); validateImplementationMethod(functionClass, true, true, TABLE_AGGREGATE_MERGE); validateImplementationMethod(functionClass, true, false, TABLE_AGGREGATE_EMIT, TABLE_AGGREGATE_EMIT_RETRACT); } }
3.26
flink_UserDefinedFunctionHelper_getAccumulatorTypeOfAggregateFunction_rdh
/** * Tries to infer the TypeInformation of an AggregateFunction's accumulator type. * * @param aggregateFunction * The AggregateFunction for which the accumulator type is inferred. * @param scalaType * The implicitly inferred type of the accumulator type. * @return The inferred accumulator type of the AggregateFunction. */ public static <T, ACC> TypeInformation<ACC> getAccumulatorTypeOfAggregateFunction(ImperativeAggregateFunction<T, ACC> aggregateFunction, TypeInformation<ACC> scalaType) { TypeInformation<ACC> userProvidedType = aggregateFunction.getAccumulatorType(); if (userProvidedType != null) { return userProvidedType; } else if (scalaType != null) { return scalaType; } else {return TypeExtractor.createTypeInfo(aggregateFunction, ImperativeAggregateFunction.class, aggregateFunction.getClass(), 1); } }
3.26
flink_UserDefinedFunctionHelper_createSpecializedFunction_rdh
/** * Creates the runtime implementation of a {@link FunctionDefinition} as an instance of {@link UserDefinedFunction}. * * @see SpecializedFunction */ public static UserDefinedFunction createSpecializedFunction(String functionName, FunctionDefinition definition, CallContext callContext, ClassLoader builtInClassLoader, @Nullable ReadableConfig configuration, @Nullable ExpressionEvaluatorFactory evaluatorFactory) { if (definition instanceof SpecializedFunction) { final SpecializedFunction specialized = ((SpecializedFunction) (definition)); final SpecializedContext specializedContext = new SpecializedContext() { @Overridepublic CallContext getCallContext() { return callContext; } @Override public ReadableConfig getConfiguration() { if (configuration == null) { throw new TableException("Access to configuration is currently not supported for all kinds of calls."); } return configuration; } @Override public ClassLoader getBuiltInClassLoader() {return builtInClassLoader;} @Override public ExpressionEvaluator createEvaluator(Expression expression, DataType outputDataType, DataTypes... args) { if (evaluatorFactory == null) { throw new TableException("Access to expression evaluation is currently not supported " + "for all kinds of calls."); } return evaluatorFactory.createEvaluator(expression, outputDataType, args); } @Overridepublic ExpressionEvaluator createEvaluator(String sqlExpression, DataType outputDataType, DataTypes... args) {if (evaluatorFactory == null) { throw new TableException("Access to expression evaluation is currently not supported " + "for all kinds of calls."); } return evaluatorFactory.createEvaluator(sqlExpression, outputDataType, args); } @Override public ExpressionEvaluator createEvaluator(BuiltInFunctionDefinition function, DataType outputDataType, DataType... args) { if (evaluatorFactory == null) { throw new TableException("Access to expression evaluation is currently not supported " + "for all kinds of calls."); } return evaluatorFactory.createEvaluator(function, outputDataType, args); }}; final UserDefinedFunction udf = specialized.specialize(specializedContext); checkState(udf.getKind() == definition.getKind(), "Function kind must not change during specialization."); return udf; } else if (definition instanceof UserDefinedFunction) {return ((UserDefinedFunction) (definition)); } else { throw new TableException(String.format("Could not find a runtime implementation for function definition '%s'.", functionName)); } }
3.26
flink_UserDefinedFunctionHelper_validateInstantiation_rdh
/** * Checks if a user-defined function can be easily instantiated. */ private static void validateInstantiation(Class<?> clazz, boolean requiresDefaultConstructor) { if (!InstantiationUtil.isPublic(clazz)) { throw new ValidationException(String.format("Function class '%s' is not public.", clazz.getName())); } else if (!InstantiationUtil.isProperClass(clazz)) { throw new ValidationException(String.format("Function class '%s' is not a proper class. It is either abstract, an interface, or a primitive type.", clazz.getName())); } else if (requiresDefaultConstructor && (!InstantiationUtil.hasPublicNullaryConstructor(clazz))) { throw new ValidationException(String.format("Function class '%s' must have a public default constructor.", clazz.getName())); } }
3.26
flink_UserDefinedFunctionHelper_validateNotSingleton_rdh
/** * Check whether this is a Scala object. Using Scala objects can lead to concurrency issues, * e.g., due to a shared collector. */ private static void validateNotSingleton(Class<?> clazz) { if (Arrays.stream(clazz.getFields()).anyMatch(f -> f.getName().equals("MODULE$"))) { throw new ValidationException(String.format("Function implemented by class %s is a Scala object. This is forbidden because of concurrency" + " problems when using them.", clazz.getName())); } }
3.26
flink_UserDefinedFunctionHelper_cleanFunction_rdh
/**
 * Modifies a function instance by removing any reference to outer classes. This enables
 * non-static inner function classes.
 */
private static void cleanFunction(ReadableConfig config, UserDefinedFunction function) {
    final ClosureCleanerLevel level = config.get(PipelineOptions.CLOSURE_CLEANER_LEVEL);
    try {
        ClosureCleaner.clean(function, level, true);
    } catch (Throwable t) {
        throw new ValidationException(
                String.format(
                        "Function class '%s' is not serializable. Make sure that the class is self-contained "
                                + "(i.e. no references to outer classes) and all inner fields are serializable as well.",
                        function.getClass()),
                t);
    }
}
3.26
flink_UserDefinedFunctionHelper_validateClassForRuntime_rdh
/**
 * Validates a {@link UserDefinedFunction} class for usage in the runtime.
 *
 * <p>Note: This is for the final validation when actual {@link DataType}s for arguments and
 * result are known.
 */
public static void validateClassForRuntime(
        Class<? extends UserDefinedFunction> functionClass,
        String methodName,
        Class<?>[] argumentClasses,
        Class<?> outputClass,
        String functionName) {
    final List<Method> methods = ExtractionUtils.collectMethods(functionClass, methodName);
    // verifies regular JVM calling semantics
    final boolean isMatching =
            methods.stream()
                    .anyMatch(
                            method ->
                                    ExtractionUtils.isInvokable(method, argumentClasses)
                                            && ExtractionUtils.isAssignable(
                                                    outputClass, method.getReturnType(), true));
    if (!isMatching) {
        throw new ValidationException(
                String.format(
                        "Could not find an implementation method '%s' in class '%s' for function '%s' that "
                                + "matches the following signature:\n%s",
                        methodName,
                        functionClass.getName(),
                        functionName,
                        ExtractionUtils.createMethodSignatureString(
                                methodName, argumentClasses, outputClass)));
    }
}
3.26
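A minimal call sketch for validateClassForRuntime above; MyScalarFunction, its eval signature, and the function name are hypothetical and only illustrate the argument shape.

// Hypothetical check: does MyScalarFunction#eval(Integer) returning String satisfy
// regular JVM calling semantics? Throws ValidationException if no matching method exists.
UserDefinedFunctionHelper.validateClassForRuntime(
        MyScalarFunction.class,
        "eval",
        new Class<?>[] {Integer.class},
        String.class,
        "my_scalar_function");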
flink_UserDefinedFunctionHelper_getReturnTypeOfAggregateFunction_rdh
/**
 * Tries to infer the TypeInformation of an AggregateFunction's result type.
 *
 * @param aggregateFunction The AggregateFunction for which the result type is inferred.
 * @return The inferred result type of the AggregateFunction.
 */
public static <T, ACC> TypeInformation<T> getReturnTypeOfAggregateFunction(
        ImperativeAggregateFunction<T, ACC> aggregateFunction) {
    return m0(aggregateFunction, null);
}
3.26
flink_UserDefinedFunctionHelper_prepareInstance_rdh
/**
 * Prepares a {@link UserDefinedFunction} instance for usage in the API.
 */
public static void prepareInstance(ReadableConfig config, UserDefinedFunction function) {
    validateClass(function.getClass(), false);
    cleanFunction(config, function);
}
3.26
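A short usage sketch for prepareInstance, assuming a hypothetical MyScalarFunction and Flink's Configuration as the ReadableConfig implementation.

// Sketch only: MyScalarFunction is a hypothetical scalar function class.
ReadableConfig config = new Configuration();
// Validates the class and runs the closure cleaner so that references to
// enclosing scopes are removed before the instance is used in the API.
UserDefinedFunctionHelper.prepareInstance(config, new MyScalarFunction());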
flink_UserDefinedFunctionHelper_m0_rdh
/**
 * Tries to infer the TypeInformation of an AggregateFunction's result type.
 *
 * @param aggregateFunction The AggregateFunction for which the result type is inferred.
 * @param scalaType The implicitly inferred type of the result type.
 * @return The inferred result type of the AggregateFunction.
 */
public static <T, ACC> TypeInformation<T> m0(
        ImperativeAggregateFunction<T, ACC> aggregateFunction, TypeInformation<T> scalaType) {
    TypeInformation<T> userProvidedType = aggregateFunction.getResultType();
    if (userProvidedType != null) {
        return userProvidedType;
    } else if (scalaType != null) {
        return scalaType;
    } else {
        return TypeExtractor.createTypeInfo(
                aggregateFunction,
                ImperativeAggregateFunction.class,
                aggregateFunction.getClass(),
                0);
    }
}
3.26
flink_UserDefinedFunctionHelper_instantiateFunction_rdh
/**
 * Instantiates a {@link UserDefinedFunction} assuming a JVM function with default constructor.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public static UserDefinedFunction instantiateFunction(Class<?> functionClass) {
    if (!UserDefinedFunction.class.isAssignableFrom(functionClass)) {
        throw new ValidationException(
                String.format(
                        "Function '%s' does not extend from '%s'.",
                        functionClass.getName(), UserDefinedFunction.class.getName()));
    }
    validateClass((Class) functionClass, true);
    try {
        return (UserDefinedFunction) functionClass.newInstance();
    } catch (Exception e) {
        throw new ValidationException(
                String.format(
                        "Cannot instantiate user-defined function class '%s'.",
                        functionClass.getName()),
                e);
    }
}
3.26
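The snippet above expects a public class with a public default constructor; a hedged usage sketch with a hypothetical function class:

// MyScalarFunction is hypothetical; instantiation fails with a ValidationException if the
// class does not extend UserDefinedFunction, is abstract, or lacks a public default constructor.
UserDefinedFunction udf = UserDefinedFunctionHelper.instantiateFunction(MyScalarFunction.class);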
flink_UserDefinedFunctionHelper_getReturnTypeOfTableFunction_rdh
/**
 * Tries to infer the TypeInformation of a TableFunction's result type.
 *
 * @param tableFunction The TableFunction for which the result type is inferred.
 * @param scalaType The implicitly inferred type of the result type.
 * @return The inferred result type of the TableFunction.
 */
public static <T> TypeInformation<T> getReturnTypeOfTableFunction(
        TableFunction<T> tableFunction, TypeInformation<T> scalaType) {
    TypeInformation<T> userProvidedType = tableFunction.getResultType();
    if (userProvidedType != null) {
        return userProvidedType;
    } else if (scalaType != null) {
        return scalaType;
    } else {
        return TypeExtractor.createTypeInfo(
                tableFunction, TableFunction.class, tableFunction.getClass(), 0);
    }
}
3.26
flink_UserDefinedFunctionHelper_generateInlineFunctionName_rdh
/**
 * Name for anonymous, inline functions.
 */
public static String generateInlineFunctionName(UserDefinedFunction function) {
    // use "*...*" to indicate anonymous function similar to types at other locations
    return String.format("*%s*", function.functionIdentifier());
}
3.26
flink_FlinkMatchers_futureFailedWith_rdh
// ------------------------------------------------------------------------
//  factories
// ------------------------------------------------------------------------

/**
 * Checks whether a {@link CompletableFuture} has already completed exceptionally with a
 * specific exception type.
 */
public static <T, E extends Throwable> FutureFailedMatcher<T> futureFailedWith(
        Class<E> exceptionType) {
    Objects.requireNonNull(exceptionType, "exceptionType should not be null");
    return new FutureFailedMatcher<>(exceptionType);
}
3.26
flink_FlinkMatchers_findThrowable_rdh
// copied from flink-core to not mess up the dependency design too much, just for a little
// utility method
private static Optional<Throwable> findThrowable(
        Throwable throwable, Predicate<Throwable> predicate) {
    if (throwable == null || predicate == null) {
        return Optional.empty();
    }

    Throwable t = throwable;
    while (t != null) {
        if (predicate.test(t)) {
            return Optional.of(t);
        } else {
            t = t.getCause();
        }
    }
    return Optional.empty();
}
3.26
flink_FlinkMatchers_m0_rdh
/**
 * Checks for a {@link Throwable} that contains the expected error message.
 */
public static Matcher<Throwable> m0(String errorMessage) {
    return new ContainsMessageMatcher(errorMessage);
}
3.26
flink_FlinkMatchers_futureWillCompleteExceptionally_rdh
/**
 * Checks whether a {@link CompletableFuture} will complete exceptionally within a certain time.
 */
public static <T> FutureWillFailMatcher<T> futureWillCompleteExceptionally(Duration timeout) {
    return futureWillCompleteExceptionally(Throwable.class, timeout);
}
3.26
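A hedged test sketch for the matcher above; the future is a stand-in, and the FlinkMatchers import is omitted because its package depends on which Flink test-utils module provides it.

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

import static org.hamcrest.MatcherAssert.assertThat;

class FutureMatcherExample {
    // Sketch: an already-failed future should be reported as completing exceptionally
    // well within the 10-second timeout.
    void example() {
        CompletableFuture<Void> future = new CompletableFuture<>();
        future.completeExceptionally(new RuntimeException("boom"));
        assertThat(future, FlinkMatchers.futureWillCompleteExceptionally(Duration.ofSeconds(10)));
    }
}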
flink_FlinkMatchers_containsCause_rdh
/**
 * Checks for a {@link Throwable} that matches by class.
 */
public static Matcher<Throwable> containsCause(Class<? extends Throwable> failureCause) {
    return new ContainsCauseMatcher(failureCause);
}
3.26
flink_DriverUtils_isNullOrWhitespaceOnly_rdh
/**
 * Checks if the string is null, empty, or contains only whitespace characters. A whitespace
 * character is defined via {@link Character#isWhitespace(char)}.
 *
 * @param str The string to check
 * @return True, if the string is null or blank, false otherwise.
 */
public static boolean isNullOrWhitespaceOnly(String str) {
    if (str == null || str.length() == 0) {
        return true;
    }

    final int len = str.length();
    for (int i = 0; i < len; i++) {
        if (!Character.isWhitespace(str.charAt(i))) {
            return false;
        }
    }
    return true;
}
3.26
flink_DriverUtils_checkArgument_rdh
/**
 * Checks the given boolean condition, and throws an {@code IllegalArgumentException} if the
 * condition is not met (evaluates to {@code false}). The exception will have the given error
 * message.
 *
 * @param condition The condition to check
 * @param errorMessage The message for the {@code IllegalArgumentException} that is thrown if
 *     the check fails.
 * @throws IllegalArgumentException Thrown, if the condition is violated.
 */
public static void checkArgument(boolean condition, @Nullable Object errorMessage) {
    if (!condition) {
        throw new IllegalArgumentException(String.valueOf(errorMessage));
    }
}
3.26
flink_DriverUtils_fromProperties_rdh
/**
 * Generate map from given properties.
 *
 * @param properties the given properties
 * @return the result map
 */
public static Map<String, String> fromProperties(Properties properties) {
    Map<String, String> map = new HashMap<>();
    Enumeration<?> e = properties.propertyNames();
    while (e.hasMoreElements()) {
        String key = (String) e.nextElement();
        map.put(key, properties.getProperty(key));
    }
    return map;
}
3.26
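A small sketch of fromProperties in a JDBC-driver context; the property keys and values are placeholders.

import java.util.Map;
import java.util.Properties;

class FromPropertiesExample {
    // Sketch: converts (placeholder) connection properties into a String-to-String map.
    static Map<String, String> example() {
        Properties props = new Properties();
        props.setProperty("user", "flink");
        props.setProperty("password", "secret");
        return DriverUtils.fromProperties(props); // {user=flink, password=secret}
    }
}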
flink_DriverUtils_checkNotNull_rdh
/**
 * Ensures that the given object reference is not null. Upon violation, a {@code
 * NullPointerException} with the given message is thrown.
 *
 * @param reference The object reference
 * @param errorMessage The message for the {@code NullPointerException} that is thrown if the
 *     check fails.
 * @return The object reference itself (generically typed).
 * @throws NullPointerException Thrown, if the passed reference was null.
 */
public static <T> T checkNotNull(@Nullable T reference, @Nullable String errorMessage) {
    if (reference == null) {
        throw new NullPointerException(String.valueOf(errorMessage));
    }
    return reference;
}
3.26
flink_PartitionWriter_createNewOutputFormat_rdh
/**
 * Create a new output format with path, configure it and open it.
 */
OutputFormat<T> createNewOutputFormat(Path path) throws IOException {
    OutputFormat<T> format = factory.createOutputFormat(path);
    format.configure(conf);
    // Here we just think of it as a single file format, so there can only be a single task.
    format.open(0, 1);
    return format;
}
3.26
flink_CsvOutputFormat_setInputType_rdh
/**
 * The purpose of this method is solely to check whether the data type to be processed is in
 * fact a tuple type.
 */
@Override
public void setInputType(TypeInformation<?> type, ExecutionConfig executionConfig) {
    if (!type.isTupleType()) {
        throw new InvalidProgramException(
                "The " + CsvOutputFormat.class.getSimpleName()
                        + " can only be used to write tuple data sets.");
    }
}
3.26
flink_CsvOutputFormat_setCharsetName_rdh
/**
 * Sets the charset with which the CSV strings are written to the file. If not specified, the
 * output format uses the system's default character encoding.
 *
 * @param charsetName The name of charset to use for encoding the output.
 */
public void setCharsetName(String charsetName) {
    this.charsetName = charsetName;
}
3.26
flink_CsvOutputFormat_toString_rdh
// --------------------------------------------------------------------------------------------

@Override
public String toString() {
    return "CsvOutputFormat (path: " + this.getOutputFilePath()
            + ", delimiter: " + this.fieldDelimiter + ")";
}
3.26
flink_CsvOutputFormat_open_rdh
// --------------------------------------------------------------------------------------------

@Override
public void open(int taskNumber, int numTasks) throws IOException {
    super.open(taskNumber, numTasks);
    this.wrt =
            this.charsetName == null
                    ? new OutputStreamWriter(new BufferedOutputStream(this.stream, 4096))
                    : new OutputStreamWriter(
                            new BufferedOutputStream(this.stream, 4096), this.charsetName);
}
3.26
flink_CsvOutputFormat_setAllowNullValues_rdh
/**
 * Configures the format to either allow null values (writing an empty field), or to throw an
 * exception when encountering a null field.
 *
 * <p>By default, null values are disallowed.
 *
 * @param allowNulls Flag to indicate whether the output format should accept null values.
 */
public void setAllowNullValues(boolean allowNulls) {
    this.allowNullValues = allowNulls;
}
3.26
flink_CsvOutputFormat_setQuoteStrings_rdh
/**
 * Configures whether the output format should quote string values. String values are fields of
 * type {@link java.lang.String} and {@link org.apache.flink.types.StringValue}, as well as all
 * subclasses of the latter.
 *
 * <p>By default, strings are not quoted.
 *
 * @param quoteStrings Flag indicating whether string fields should be quoted.
 */
public void setQuoteStrings(boolean quoteStrings) {
    this.quoteStrings = quoteStrings;
}
3.26
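A configuration sketch that combines the CsvOutputFormat setters above; the output path and tuple type are placeholders, and the single-argument Path constructor is assumed.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.core.fs.Path;

class CsvOutputFormatExample {
    // Sketch: quotes string fields, writes empty fields for nulls, and encodes the output
    // as UTF-8. Path and tuple type are placeholders.
    static CsvOutputFormat<Tuple2<String, Integer>> example() {
        CsvOutputFormat<Tuple2<String, Integer>> format =
                new CsvOutputFormat<>(new Path("file:///tmp/output.csv"));
        format.setQuoteStrings(true);
        format.setAllowNullValues(true);
        format.setCharsetName("UTF-8");
        return format;
    }
}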
flink_EventTimeSessionWindows_mergeWindows_rdh
/**
 * Merge overlapping {@link TimeWindow}s.
 */
@Override
public void mergeWindows(
        Collection<TimeWindow> windows, MergingWindowAssigner.MergeCallback<TimeWindow> c) {
    TimeWindow.mergeWindows(windows, c);
}
3.26
flink_EventTimeSessionWindows_withDynamicGap_rdh
/**
 * Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
 * based on the element timestamp.
 *
 * @param sessionWindowTimeGapExtractor The extractor to use to extract the time gap from the
 *     input elements
 * @return The policy.
 */
@PublicEvolving
public static <T> DynamicEventTimeSessionWindows<T> withDynamicGap(
        SessionWindowTimeGapExtractor<T> sessionWindowTimeGapExtractor) {
    return new DynamicEventTimeSessionWindows<>(sessionWindowTimeGapExtractor);
}
3.26
flink_EventTimeSessionWindows_withGap_rdh
/**
 * Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
 * based on the element timestamp.
 *
 * @param size The session timeout, i.e. the time gap between sessions
 * @return The policy.
 */
public static EventTimeSessionWindows withGap(Time size) {
    return new EventTimeSessionWindows(size.toMilliseconds());
}
3.26
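A hedged pipeline sketch for the assigner above; the event type, key field, and reduce function are placeholders, and timestamps/watermarks are assumed to be assigned upstream.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

class SessionWindowExample {
    /** Hypothetical event type, used only for illustration. */
    public static class MyEvent {
        public String userId;
        public long value;
    }

    // Sketch: groups events per user into sessions that close after 30 minutes of
    // event-time inactivity.
    static void example(DataStream<MyEvent> events) {
        events.keyBy(e -> e.userId)
                .window(EventTimeSessionWindows.withGap(Time.minutes(30)))
                .reduce((a, b) -> a); // placeholder reduce function
    }
}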
flink_JaasModule_generateDefaultConfigFile_rdh
/**
 * Generate the default JAAS config file.
 */
private static File generateDefaultConfigFile(String workingDir) {
    checkArgument(workingDir != null, "working directory should not be null.");
    final File jaasConfFile;
    try {
        Path path = Paths.get(workingDir);
        if (Files.notExists(path)) {
            // We intentionally favored Path.toRealPath over Files.readSymbolicLinks as the
            // latter one might return a relative path if the symbolic link refers to it.
            // Path.toRealPath resolves the relative path instead.
            Path parent = path.getParent().toRealPath();
            Path resolvedPath = Paths.get(parent.toString(), path.getFileName().toString());
            path = Files.createDirectories(resolvedPath);
        }
        Path jaasConfPath = Files.createTempFile(path, "jaas-", ".conf");
        try (InputStream resourceStream =
                JaasModule.class.getClassLoader().getResourceAsStream(JAAS_CONF_RESOURCE_NAME)) {
            Files.copy(resourceStream, jaasConfPath, StandardCopyOption.REPLACE_EXISTING);
        }
        jaasConfFile = new File(workingDir, jaasConfPath.getFileName().toString());
        jaasConfFile.deleteOnExit();
    } catch (IOException e) {
        throw new RuntimeException("unable to generate a JAAS configuration file", e);
    }
    return jaasConfFile;
}
3.26
flink_PythonFunction_takesRowAsInput_rdh
/**
 * Returns whether the Python function takes a row as input instead of each column of a row.
 */
default boolean takesRowAsInput() {
    return false;
}
3.26
flink_PythonFunction_getPythonFunctionKind_rdh
/**
 * Returns the kind of the user-defined python function.
 */
default PythonFunctionKind getPythonFunctionKind() {
    return PythonFunctionKind.GENERAL;
}
3.26
flink_QueryableStateClient_getKvState_rdh
/**
 * Returns a future holding the serialized request result.
 *
 * @param jobId JobID of the job the queryable state belongs to
 * @param queryableStateName Name under which the state is queryable
 * @param keyHashCode Integer hash code of the key (result of a call to {@link Object#hashCode()})
 * @param serializedKeyAndNamespace Serialized key and namespace to query KvState instance with
 * @return Future holding the serialized result
 */
private CompletableFuture<KvStateResponse> getKvState(
        final JobID jobId,
        final String queryableStateName,
        final int keyHashCode,
        final byte[] serializedKeyAndNamespace) {
    LOG.debug("Sending State Request to {}.", remoteAddress);
    try {
        KvStateRequest request =
                new KvStateRequest(
                        jobId, queryableStateName, keyHashCode, serializedKeyAndNamespace);
        return client.sendRequest(remoteAddress, request);
    } catch (Exception e) {
        LOG.error("Unable to send KVStateRequest: ", e);
        return FutureUtils.completedExceptionally(e);
    }
}
3.26
flink_QueryableStateClient_shutdownAndWait_rdh
/**
 * Shuts down the client and waits until shutdown is completed.
 *
 * <p>If an exception is thrown, a warning is logged containing the exception message.
 */
public void shutdownAndWait() {
    try {
        client.shutdown().get();
        LOG.info("The Queryable State Client was shutdown successfully.");
    } catch (Exception e) {
        LOG.warn("The Queryable State Client shutdown failed: ", e);
    }
}
3.26
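A hedged lifecycle sketch for the client above; host and port are placeholders (9069 is commonly used as the default proxy port, but verify against your setup).

class QueryableStateClientExample {
    // Sketch: create a client against a (placeholder) state proxy and make sure it is
    // always shut down, even if a query fails.
    static void example() throws Exception {
        QueryableStateClient client = new QueryableStateClient("localhost", 9069);
        try {
            // ... issue queries via the public getKvState(...) overloads here ...
        } finally {
            client.shutdownAndWait();
        }
    }
}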