name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
flink_StringValueComparator_supportsSerializationWithKeyNormalization_rdh
// -------------------------------------------------------------------------------------------- // unsupported normalization // -------------------------------------------------------------------------------------------- @Override public boolean supportsSerializationWithKeyNormalization() { return false;}
3.26
flink_TimestampedInputSplit_resetSplitState_rdh
/** * Sets the state of the split to {@code null}. */ default void resetSplitState() { this.setSplitState(null); }
3.26
flink_NettyMessage_allocateBuffer_rdh
/** * Allocates a new buffer and adds some header information for the frame decoder. * * <p>If the <tt>contentLength</tt> is unknown, you must write the actual length after adding * the contents as an integer to position <tt>0</tt>! * * @param allocator * byte buffer allocator to use * @param id * {@link NettyMessage} subclass ID * @param messageHeaderLength * additional header length that should be part of the allocated * buffer and is written outside of this method * @param contentLength * content length (or <tt>-1</tt> if unknown) * @param allocateForContent * whether to make room for the actual content in the buffer * (<tt>true</tt>) or whether to only return a buffer with the header information * (<tt>false</tt>) * @return a newly allocated direct buffer with header data written for {@link NettyMessageEncoder} */ private static ByteBuf allocateBuffer(ByteBufAllocator allocator, byte id, int messageHeaderLength, int contentLength, boolean allocateForContent) { checkArgument(contentLength <= (Integer.MAX_VALUE - FRAME_HEADER_LENGTH)); final ByteBuf buffer; if (!allocateForContent) { buffer = allocator.directBuffer(FRAME_HEADER_LENGTH + messageHeaderLength); } else if (contentLength != (-1)) { buffer = allocator.directBuffer((FRAME_HEADER_LENGTH + messageHeaderLength) + contentLength); } else { // content length unknown -> start with the default initial size (rather than // FRAME_HEADER_LENGTH only): buffer = allocator.directBuffer(); } buffer.writeInt((FRAME_HEADER_LENGTH + messageHeaderLength) + contentLength);// may be updated later, e.g. if contentLength == -1 buffer.writeInt(MAGIC_NUMBER); buffer.writeByte(id); return buffer; }
3.26
flink_NettyMessage_writeToChannel_rdh
// ------------------------------------------------------------------------ void writeToChannel(ChannelOutboundInvoker out, ChannelPromise promise, ByteBufAllocator allocator, Consumer<ByteBuf> consumer, byte id, int length) throws IOException { ByteBuf byteBuf = null; try { byteBuf = allocateBuffer(allocator, id, length); consumer.accept(byteBuf); out.write(byteBuf, promise); } catch (Throwable t) { handleException(byteBuf, null, t); } }
3.26
flink_NettyMessage_write_rdh
// -------------------------------------------------------------------- // Serialization // -------------------------------------------------------------------- @Override void write(ChannelOutboundInvoker out, ChannelPromise promise, ByteBufAllocator allocator) throws IOException {ByteBuf headerBuf = null; try { // in order to forward the buffer to netty, it needs an allocator set buffer.setAllocator(allocator); headerBuf = fillHeader(allocator); out.write(headerBuf); if (buffer instanceof FileRegionBuffer) { out.write(buffer, promise); } else { out.write(buffer.asByteBuf(), promise); }} catch (Throwable t) { handleException(headerBuf, buffer, t); } }
3.26
flink_SlotPool_requestNewAllocatedBatchSlot_rdh
/** * Requests the allocation of a new batch slot from the resource manager. Unlike the normal * slot, a batch slot will only time out if the slot pool does not contain a suitable slot. * Moreover, it won't react to failure signals from the resource manager. * * @param slotRequestId * identifying the requested slot * @param resourceProfile * resource profile that specifies the resource requirements for the * requested batch slot * @return a future which is completed with the newly allocated batch slot */ default CompletableFuture<PhysicalSlot> requestNewAllocatedBatchSlot(SlotRequestId slotRequestId, ResourceProfile resourceProfile) { return requestNewAllocatedBatchSlot(slotRequestId, resourceProfile, Collections.emptyList()); }
3.26
flink_SlotPool_requestNewAllocatedSlot_rdh
/** * Request the allocation of a new slot from the resource manager. This method will not return a * slot from the already available slots from the pool, but instead will add a new slot to that * pool that is immediately allocated and returned. * * @param slotRequestId * identifying the requested slot * @param resourceProfile * resource profile that specifies the resource requirements for the * requested slot * @param timeout * timeout for the allocation procedure * @return a newly allocated slot that was previously not available. */default CompletableFuture<PhysicalSlot> requestNewAllocatedSlot(SlotRequestId slotRequestId, ResourceProfile resourceProfile, @Nullable Time timeout) {return requestNewAllocatedSlot(slotRequestId, resourceProfile, Collections.emptyList(), timeout); }
3.26
flink_Tuple20_of_rdh
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19) { return new Tuple20<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19); }
3.26
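The static factory above mirrors the smaller tuple classes; a minimal usage sketch (the field values are arbitrary illustration, not taken from the snippet) could look like this:

import org.apache.flink.api.java.tuple.Tuple20;

public class Tuple20Example {
    public static void main(String[] args) {
        // Type arguments are inferred by the compiler, unlike with the explicit constructor.
        Tuple20<Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer,
                Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer> t =
                Tuple20.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19);
        System.out.println(t.f0 + " .. " + t.f19); // prints "0 .. 19"
    }
}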
flink_Tuple20_equals_rdh
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o * the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Tuple20)) { return false; } @SuppressWarnings("rawtypes") Tuple20 tuple = ((Tuple20) (o)); if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false;} if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) { return false; } if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) { return false; } if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) { return false; } if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) { return false; } if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) { return false; } if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {return false; } if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) { return false; } if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) { return false; } if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) { return false; } if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) { return false; } if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) { return false; } if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) { return false; } if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) { return false; } if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) { return false; } if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) { return false; } if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) { return false; } if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) { return false; } if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) { return false; } if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) { return false; } return true; }
3.26
flink_Tuple20_setFields_rdh
/** * Sets new values to all fields of the tuple. * * @param f0 * The value for field 0 * @param f1 * The value for field 1 * @param f2 * The value for field 2 * @param f3 * The value for field 3 * @param f4 * The value for field 4 * @param f5 * The value for field 5 * @param f6 * The value for field 6 * @param f7 * The value for field 7 * @param f8 * The value for field 8 * @param f9 * The value for field 9 * @param f10 * The value for field 10 * @param f11 * The value for field 11 * @param f12 * The value for field 12 * @param f13 * The value for field 13 * @param f14 * The value for field 14 * @param f15 * The value for field 15 * @param f16 * The value for field 16 * @param f17 * The value for field 17 * @param f18 * The value for field 18 * @param f19 * The value for field 19 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19) { this.f0 = f0; this.f1 = f1;this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; this.f8 = f8; this.f9 = f9; this.f10 = f10;this.f11 = f11; this.f12 = f12; this.f13 = f13; this.f14 = f14; this.f15 = f15; this.f16 = f16; this.f17 = f17; this.f18 = f18; this.f19 = f19; }
3.26
flink_Tuple20_copy_rdh
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> copy() { return new Tuple20<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16, this.f17, this.f18, this.f19); }
3.26
flink_Tuple20_toString_rdh
// ------------------------------------------------------------------------------------------------- // standard utilities // ------------------------------------------------------------------------------------------------- /** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19), where the individual fields are the * value returned by calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return ((((((((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ",") + StringUtils.arrayAwareToString(this.f16)) + ",") + StringUtils.arrayAwareToString(this.f17)) + ",") + StringUtils.arrayAwareToString(this.f18)) + ",") + StringUtils.arrayAwareToString(this.f19)) + ")"; }
3.26
flink_ParquetAvroWriters_forSpecificRecord_rdh
/** * Creates a ParquetWriterFactory for an Avro specific type. The Parquet writers will use the * schema of that specific type to build and write the columnar data. * * @param type * The class of the type to write. */ public static <T extends SpecificRecordBase> ParquetWriterFactory<T> forSpecificRecord(Class<T> type) { return AvroParquetWriters.forSpecificRecord(type); }
3.26
flink_ParquetAvroWriters_forReflectRecord_rdh
/** * Creates a ParquetWriterFactory for the given type. The Parquet writers will use Avro to * reflectively create a schema for the type and use that schema to write the columnar data. * * @param type * The class of the type to write. */ public static <T> ParquetWriterFactory<T> forReflectRecord(Class<T> type) { return AvroParquetWriters.forReflectRecord(type); }
3.26
flink_ParquetAvroWriters_forGenericRecord_rdh
/** * Creates a ParquetWriterFactory that accepts and writes Avro generic types. The Parquet * writers will use the given schema to build and write the columnar data. * * @param schema * The schema of the generic type. */ public static ParquetWriterFactory<GenericRecord> forGenericRecord(Schema schema) { return AvroParquetWriters.forGenericRecord(schema); }
3.26
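These factories are typically handed to a bulk-format file sink. A hedged sketch, assuming the Avro and FileSink dependencies are available; the schema argument and the output path are placeholders, not values from the snippets above:

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.avro.ParquetAvroWriters;
import org.apache.flink.streaming.api.datastream.DataStream;

public class ParquetSinkSketch {
    // Wires a DataStream<GenericRecord> to a Parquet bulk sink built from the factory above.
    static void addSink(DataStream<GenericRecord> records, Schema schema) {
        FileSink<GenericRecord> sink = FileSink
                .forBulkFormat(new Path("/tmp/parquet-out"), ParquetAvroWriters.forGenericRecord(schema))
                .build();
        records.sinkTo(sink);
    }
}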
flink_RuntimeOpenApiSpecGenerator_main_rdh
/** * Generates the Runtime REST API OpenAPI spec. * * @param args * args[0] contains the directory into which the generated files are placed * @throws IOException * if any file operation failed */ public static void main(String[] args) throws IOException, ConfigurationException { String outputDirectory = args[0]; for (final RuntimeRestAPIVersion apiVersion : RuntimeRestAPIVersion.values()) { if (apiVersion == RuntimeRestAPIVersion.V0) { // this version exists only for testing purposes continue; } createDocumentationFile("Flink JobManager REST API", new DocumentingDispatcherRestEndpoint(), apiVersion, Paths.get(outputDirectory, ("rest_" + apiVersion.getURLVersionPrefix()) + "_dispatcher.yml")); } }
3.26
flink_LimitedConnectionsFileSystem_getNumberOfOpenInputStreams_rdh
/** * Gets the number of currently open input streams. */ public int getNumberOfOpenInputStreams() { return f0; }
3.26
flink_LimitedConnectionsFileSystem_getTotalNumberOfOpenStreams_rdh
/** * Gets the total number of open streams (input plus output). */ public int getTotalNumberOfOpenStreams() { lock.lock(); try { return numReservedOutputStreams + f0; } finally { lock.unlock(); } }
3.26
flink_LimitedConnectionsFileSystem_getKind_rdh
// ------------------------------------------------------------------------ // other delegating file system methods // ------------------------------------------------------------------------ @Override public FileSystemKind getKind() {return originalFs.getKind(); }
3.26
flink_LimitedConnectionsFileSystem_createStream_rdh
// ------------------------------------------------------------------------ private <T extends StreamWithTimeout> T createStream(final SupplierWithException<T, IOException> streamOpener, final HashSet<T> openStreams, final boolean output) throws IOException { final int outputLimit = (output && (maxNumOpenOutputStreams > 0)) ? maxNumOpenOutputStreams : Integer.MAX_VALUE; final int inputLimit = ((!output) && (maxNumOpenInputStreams > 0)) ? maxNumOpenInputStreams : Integer.MAX_VALUE; final int totalLimit = (maxNumOpenStreamsTotal > 0) ? maxNumOpenStreamsTotal : Integer.MAX_VALUE; final int outputCredit = (output) ? 1 : 0; final int inputCredit = (output) ? 0 : 1; // because waiting for availability may take long, we need to be interruptible here // and handle interrupted exceptions as I/O errors // even though the code is written to make sure the lock is held for a short time only, // making the lock acquisition interruptible helps to guard against the cases where // a supposedly fast operation (like 'getPos()' on a stream) actually takes long. try { lock.lockInterruptibly(); try { // some integrity checks assert openOutputStreams.size() <= numReservedOutputStreams; assert openInputStreams.size() <= f0; // wait until there are few enough streams so we can open another waitForAvailability(totalLimit, outputLimit, inputLimit); // We do not open the stream here in the locked scope because opening a stream // could take a while. Holding the lock during that operation would block all // concurrent // attempts to try and open a stream, effectively serializing all calls to open the // streams. numReservedOutputStreams += outputCredit; f0 += inputCredit; } finally { lock.unlock(); } } catch (InterruptedException e) { // restore interruption flag Thread.currentThread().interrupt(); throw new IOException("interrupted before opening stream"); } // open the stream outside the lock. boolean success = false; try { final T out = streamOpener.get(); // add the stream to the set, need to re-acquire the lock lock.lock(); try { openStreams.add(out); } finally { lock.unlock(); } // good, can now return cleanly success = true; return out; } finally { if (!success) { // remove the reserved credit // we must open this non-interruptibly, because this must succeed! lock.lock(); try { numReservedOutputStreams -= outputCredit; f0 -= inputCredit; available.signalAll(); } finally { lock.unlock(); } } } }
3.26
flink_LimitedConnectionsFileSystem_getStreamInactivityTimeout_rdh
/** * Gets the milliseconds that a stream may spend not writing any bytes before it is closed as * inactive. */ public long getStreamInactivityTimeout() { return streamInactivityTimeoutNanos / 1000000; }
3.26
flink_LimitedConnectionsFileSystem_getLastCheckTimestampNanos_rdh
/** * Gets the timestamp when the last inactivity evaluation was made. */ public long getLastCheckTimestampNanos() { return lastCheckTimestampNanos; }
3.26
flink_LimitedConnectionsFileSystem_getStreamOpenTimeout_rdh
/** * Gets the number of milliseconds that opening a stream may wait for availability in the * connection pool. */ public long getStreamOpenTimeout() { return streamOpenTimeoutNanos / 1000000; }
3.26
flink_LimitedConnectionsFileSystem_getMaxNumOpenStreamsTotal_rdh
/** * Gets the maximum number of concurrently open streams (input + output). */ public int getMaxNumOpenStreamsTotal() { return maxNumOpenStreamsTotal; }
3.26
flink_LimitedConnectionsFileSystem_fromConfig_rdh
// -------------------------------------------------------------------- /** * Parses and returns the settings for connection limiting, for the file system with the * given file system scheme. * * @param config * The configuration to check. * @param fsScheme * The file system scheme. * @return The parsed configuration, or null, if no connection limiting is configured. */ @Nullable public static ConnectionLimitingSettings fromConfig(Configuration config, String fsScheme) { checkNotNull(fsScheme, "fsScheme"); checkNotNull(config, "config"); final ConfigOption<Integer> totalLimitOption = CoreOptions.fileSystemConnectionLimit(fsScheme); final ConfigOption<Integer> limitInOption = CoreOptions.fileSystemConnectionLimitIn(fsScheme); final ConfigOption<Integer> limitOutOption = CoreOptions.fileSystemConnectionLimitOut(fsScheme); final int totalLimit = config.getInteger(totalLimitOption); final int limitIn = config.getInteger(limitInOption); final int limitOut = config.getInteger(limitOutOption); checkLimit(totalLimit, totalLimitOption); checkLimit(limitIn, limitInOption); checkLimit(limitOut, limitOutOption); // create the settings only, if at least one limit is configured if (((totalLimit <= 0) && (limitIn <= 0)) && (limitOut <= 0)) { // no limit configured return null;} else { final ConfigOption<Long> openTimeoutOption = CoreOptions.fileSystemConnectionLimitTimeout(fsScheme); final ConfigOption<Long> inactivityTimeoutOption = CoreOptions.fileSystemConnectionLimitStreamInactivityTimeout(fsScheme); final long openTimeout = config.getLong(openTimeoutOption);final long inactivityTimeout = config.getLong(inactivityTimeoutOption); checkTimeout(openTimeout, openTimeoutOption);checkTimeout(inactivityTimeout, inactivityTimeoutOption); return new ConnectionLimitingSettings(totalLimit == (-1) ? 0 : totalLimit, limitIn == (-1) ? 0 : limitIn, limitOut == (-1) ? 0 : limitOut, openTimeout, inactivityTimeout); } }
3.26
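The options read by fromConfig correspond to the per-scheme connection-limit settings in CoreOptions. A minimal sketch, assuming the standard "fs.<scheme>.limit.*" key pattern (the scheme and numbers are illustrative):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.LimitedConnectionsFileSystem.ConnectionLimitingSettings;

public class ConnectionLimitSketch {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // Limit concurrent S3 connections; keys follow the fs.<scheme>.limit.* pattern
        // assumed from CoreOptions.
        config.setInteger("fs.s3.limit.total", 50);
        config.setInteger("fs.s3.limit.input", 40);

        ConnectionLimitingSettings settings = ConnectionLimitingSettings.fromConfig(config, "s3");
        // fromConfig returns null when no limit is configured for the scheme.
        System.out.println(settings != null);
    }
}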
flink_LimitedConnectionsFileSystem_read_rdh
// --- FSDataInputStream API implementation @Override public int read() throws IOException { try { return originalStream.read(); } catch (IOException e) { handleIOException(e); return 0;// silence the compiler } }
3.26
flink_LimitedConnectionsFileSystem_getMaxNumOpenOutputStreams_rdh
// ------------------------------------------------------------------------ /** * Gets the maximum number of concurrently open output streams. */ public int getMaxNumOpenOutputStreams() { return maxNumOpenOutputStreams;}
3.26
flink_LimitedConnectionsFileSystem_write_rdh
// --- FSDataOutputStream API implementation @Override public void write(int b) throws IOException { try {originalStream.write(b); } catch (IOException e) { handleIOException(e); } }
3.26
flink_LimitedConnectionsFileSystem_create_rdh
// input & output stream opening methods // ------------------------------------------------------------------------ @Override public FSDataOutputStream create(Path f, WriteMode overwriteMode) throws IOException { return createOutputStream(() -> originalFs.create(f, overwriteMode)); }
3.26
flink_LimitedConnectionsFileSystem_getNumberOfOpenOutputStreams_rdh
/** * Gets the number of currently open output streams. */ public int getNumberOfOpenOutputStreams() {lock.lock(); try { return numReservedOutputStreams; } finally { lock.unlock(); } }
3.26
flink_LimitedConnectionsFileSystem_checkNewBytesAndMark_rdh
/** * Checks whether there were new bytes since the last time this method was invoked. This * also sets the given timestamp, to be read via {@link #getLastCheckTimestampNanos()}. * * @return True, if there were new bytes, false if not. */ public boolean checkNewBytesAndMark(long timestamp) throws IOException { // remember the time when checked lastCheckTimestampNanos = timestamp; final long bytesNow = stream.getPos(); if (bytesNow > lastCheckBytes) { lastCheckBytes = bytesNow; return true; } else { return false; } }
3.26
flink_LimitedConnectionsFileSystem_unregisterInputStream_rdh
/** * Atomically removes the given input stream from the set of currently open input streams, and * signals that a new stream can now be opened. */ void unregisterInputStream(InStream stream) { lock.lock();try { // only decrement if we actually remove the stream if (openInputStreams.remove(stream)) { f0--; available.signalAll(); } } finally { lock.unlock(); } }
3.26
flink_LimitedConnectionsFileSystem_getMaxNumOpenInputStreams_rdh
/** * Gets the maximum number of concurrently open input streams. */ public int getMaxNumOpenInputStreams() { return maxNumOpenInputStreams; }
3.26
flink_OrcSplitReader_reachedEnd_rdh
/** * Method used to check if the end of the input is reached. * * @return True if the end is reached, otherwise false. * @throws IOException * Thrown, if an I/O error occurred. */ public boolean reachedEnd() throws IOException { return !ensureBatch(); }
3.26
flink_OrcSplitReader_seekToRow_rdh
/** * Seek to a particular row number. */ public void seekToRow(long rowCount) throws IOException { orcRowsReader.seekToRow(rowCount); }
3.26
flink_OrcSplitReader_ensureBatch_rdh
/** * Checks if there is at least one row left in the batch to return. If no more rows are * available, it reads another batch of rows. * * @return Returns true if there is one more row to return, false otherwise. * @throws IOException * thrown if an exception happens while reading a batch. */ private boolean ensureBatch() throws IOException { if (nextRow >= rowsInBatch) { // Try to read the next batch of rows from the ORC file. boolean moreRows = shim.nextBatch(orcRowsReader, rowBatchWrapper.getBatch()); if (moreRows) { // No more rows available in the Rows array. nextRow = 0; // Load the data into the Rows array. rowsInBatch = fillRows(); } return moreRows; } // there is at least one Row left in the Rows array. return true; }
3.26
flink_BeamOperatorStateStore_getListState_rdh
/** * Currently list state and union-list state are not supported. */ @Override public ListState<byte[]> getListState(BeamFnApi.StateRequest request) throws Exception { throw new RuntimeException("Operator list state is still not supported"); }
3.26
flink_BeamOperatorStateStore_getMapState_rdh
/** * Returns a {@link BroadcastState} wrapped in {@link MapState} interface. */ @Override public MapState<ByteArrayWrapper, byte[]> getMapState(BeamFnApi.StateRequest request) throws Exception { if (!request.getStateKey().hasMultimapKeysSideInput()) { throw new RuntimeException("Unsupported broadcast state request: " + request); } BeamFnApi.StateKey.MultimapKeysSideInput mapUserState = request.getStateKey().getMultimapKeysSideInput(); // Retrieve state descriptor byte[] data = Base64.getDecoder().decode(mapUserState.getSideInputId()); FlinkFnApi.StateDescriptor stateDescriptor = FlinkFnApi.StateDescriptor.parseFrom(data); String stateName = PYTHON_STATE_PREFIX + stateDescriptor.getStateName(); StateDescriptor cachedStateDescriptor = stateDescriptorCache.get(stateName); MapStateDescriptor<ByteArrayWrapper, byte[]> v5; if (cachedStateDescriptor instanceof MapStateDescriptor) {v5 = ((MapStateDescriptor<ByteArrayWrapper, byte[]>) (cachedStateDescriptor)); } else if (cachedStateDescriptor == null) { v5 = new MapStateDescriptor<>(stateName, ByteArrayWrapperSerializer.INSTANCE, valueSerializer); if (stateDescriptor.hasStateTtlConfig()) { FlinkFnApi.StateDescriptor.StateTTLConfig stateTtlConfigProto = stateDescriptor.getStateTtlConfig(); StateTtlConfig stateTtlConfig = ProtoUtils.parseStateTtlConfigFromProto(stateTtlConfigProto);v5.enableTimeToLive(stateTtlConfig); } stateDescriptorCache.put(stateName, v5); } else { throw new RuntimeException(String.format("State name corrupt detected: " + "'%s' is used both as MAP state and '%s' state at the same time.", stateName, cachedStateDescriptor.getType())); } // Currently, operator state is only supported to be used as broadcast state in PyFlink final BroadcastState<ByteArrayWrapper, byte[]> broadcastState = operatorStateBackend.getBroadcastState(v5); return new MapState<ByteArrayWrapper, byte[]>() { @Override public byte[] get(ByteArrayWrapper key) throws Exception { return broadcastState.get(key); } @Override public void put(ByteArrayWrapper key, byte[] value) throws Exception {broadcastState.put(key, value); } @Override public void putAll(Map<ByteArrayWrapper, byte[]> map) throws Exception {broadcastState.putAll(map); } @Override public void remove(ByteArrayWrapper key) throws Exception { broadcastState.remove(key); } @Override public boolean contains(ByteArrayWrapper key) throws Exception { return broadcastState.contains(key); } @Override public Iterable<Map.Entry<ByteArrayWrapper, byte[]>> entries() throws Exception { return broadcastState.entries(); } @Override public Iterable<ByteArrayWrapper> keys() throws Exception { final Iterator<Map.Entry<ByteArrayWrapper, byte[]>> iterator = iterator(); return () -> new Iterator<ByteArrayWrapper>() { @Override public boolean hasNext() { return iterator.hasNext(); } @Override public ByteArrayWrapper next() { return iterator.next().getKey(); } }; } @Override public Iterable<byte[]> values() throws Exception { final Iterator<Map.Entry<ByteArrayWrapper, byte[]>> iterator = iterator(); return () -> new Iterator<byte[]>() { @Override public boolean hasNext() { return iterator.hasNext(); } @Override public byte[] next() { return iterator.next().getValue(); } }; } @Override public Iterator<Map.Entry<ByteArrayWrapper, byte[]>> iterator() throws Exception { return broadcastState.entries().iterator(); } @Override public boolean isEmpty() throws Exception { return iterator().hasNext(); } @Override public void clear() { broadcastState.clear(); } }; }
3.26
flink_MinWithRetractAggFunction_getArgumentDataTypes_rdh
// -------------------------------------------------------------------------------------------- // Planning // -------------------------------------------------------------------------------------------- @Override public List<DataType> getArgumentDataTypes() { return Collections.singletonList(f1); }
3.26
flink_ExternalizedSnapshotLocation_validatePath_rdh
/** * Checks the validity of the path's scheme and path. * * @param path * The path to check. * @return The URI as a Path. * @throws IllegalArgumentException * Thrown, if the URI misses scheme or path. */ private static Path validatePath(Path path) { if (path == null) { return null; } Optional.ofNullable(path.toUri().getScheme()).orElseThrow(() -> new IllegalArgumentException("The scheme (hdfs://, file://, etc) is null. " + "Please specify the file system scheme explicitly in the URI.")); Optional.ofNullable(path.getPath()).orElseThrow(() -> new IllegalArgumentException("The path to store the checkpoint data in is null. " + "Please specify a directory path for the checkpoint data.")); Optional.ofNullable(path.getParent()).orElseThrow(() -> new IllegalArgumentException("Cannot use the root directory for checkpoints.")); return path; }
3.26
flink_AbstractStreamOperatorFactory_getMailboxExecutor_rdh
/** * Provides the mailbox executor iff this factory implements {@link YieldingOperatorFactory}. */ protected MailboxExecutor getMailboxExecutor() { return checkNotNull(mailboxExecutor, "Factory does not implement %s", YieldingOperatorFactory.class); }
3.26
flink_Slide_over_rdh
/** * Creates a sliding window. Sliding windows have a fixed size and slide by a specified slide * interval. If the slide interval is smaller than the window size, sliding windows are * overlapping. Thus, an element can be assigned to multiple windows. * * <p>For example, a sliding window of size 15 minutes with 5 minutes sliding interval groups * elements of 15 minutes and evaluates every five minutes. Each element is contained in three * consecutive window evaluations. * * @param size * the size of the window as time or row-count interval * @return a partially specified sliding window */ public static SlideWithSize over(Expression size) { return new SlideWithSize(size); }
3.26
flink_YearMonthIntervalPeriodConverter_create_rdh
// Factory method // -------------------------------------------------------------------------------------------- public static YearMonthIntervalPeriodConverter create(DataType dataType) { return create(((YearMonthIntervalType) (dataType.getLogicalType()))); }
3.26
flink_RocksDBStateBackend_setNumberOfTransferingThreads_rdh
/** * * @deprecated Typo in method name. Use {@link #setNumberOfTransferThreads(int)} instead. */ @Deprecated public void setNumberOfTransferingThreads(int numberOfTransferingThreads) { setNumberOfTransferThreads(numberOfTransferingThreads); }
3.26
flink_RocksDBStateBackend_ensureRocksDBIsLoaded_rdh
// ------------------------------------------------------------------------ // static library loading utilities // ------------------------------------------------------------------------ @VisibleForTesting static void ensureRocksDBIsLoaded(String tempDirectory) throws IOException { EmbeddedRocksDBStateBackend.ensureRocksDBIsLoaded(tempDirectory); }
3.26
flink_RocksDBStateBackend_createOptionsAndResourceContainer_rdh
// ------------------------------------------------------------------------ // utilities // ------------------------------------------------------------------------ @VisibleForTesting RocksDBResourceContainer createOptionsAndResourceContainer() { return rocksDBStateBackend.createOptionsAndResourceContainer(null); }
3.26
flink_RocksDBStateBackend_getWriteBatchSize_rdh
/** * Gets the max batch size that will be used in {@link RocksDBWriteBatchWrapper}. */ public long getWriteBatchSize() { return rocksDBStateBackend.getWriteBatchSize(); }
3.26
flink_RocksDBStateBackend_resolveCheckpoint_rdh
// Checkpoint initialization and persistent storage // ------------------------------------------------------------------------ @Override public CompletedCheckpointStorageLocation resolveCheckpoint(String pointer) throws IOException { return ((CheckpointStorage) (checkpointStreamBackend)).resolveCheckpoint(pointer); }
3.26
flink_RocksDBStateBackend_getDbStoragePaths_rdh
/** * Gets the configured local DB storage paths, or null, if none were configured. * * <p>Under these directories on the TaskManager, RocksDB stores its SST files and metadata * files. These directories do not need to be persistent, they can be ephemeral, meaning that * they are lost on a machine failure, because state in RocksDB is persisted in checkpoints. * * <p>If nothing is configured, these directories default to the TaskManager's local temporary * file directories. */ public String[] getDbStoragePaths() { return rocksDBStateBackend.getDbStoragePaths(); }
3.26
flink_RocksDBStateBackend_setRocksDBOptions_rdh
/** * Sets {@link org.rocksdb.Options} for the RocksDB instances. Because the options are not * serializable and hold native code references, they must be specified through a factory. * * <p>The options created by the factory here are applied on top of the pre-defined options * profile selected via {@link #setPredefinedOptions(PredefinedOptions)}. If the pre-defined * options profile is the default ({@link PredefinedOptions#DEFAULT}), then the factory fully * controls the RocksDB options. * * @param optionsFactory * The options factory that lazily creates the RocksDB options. */ public void setRocksDBOptions(RocksDBOptionsFactory optionsFactory) { rocksDBStateBackend.setRocksDBOptions(optionsFactory); }
3.26
flink_RocksDBStateBackend_getCheckpointBackend_rdh
// ------------------------------------------------------------------------ // State backend methods // ------------------------------------------------------------------------ /** * Gets the state backend that this RocksDB state backend uses to persist its bytes to. * * <p>This RocksDB state backend only implements the RocksDB specific parts, it relies on the * 'CheckpointBackend' to persist the checkpoint and savepoint bytes streams. */ public StateBackend getCheckpointBackend() { return checkpointStreamBackend; }
3.26
flink_RocksDBStateBackend_getNumberOfTransferThreads_rdh
/** * Gets the number of threads used to transfer files while snapshotting/restoring. */public int getNumberOfTransferThreads() { return rocksDBStateBackend.getNumberOfTransferThreads(); }
3.26
flink_RocksDBStateBackend_isIncrementalCheckpointsEnabled_rdh
/** * Gets whether incremental checkpoints are enabled for this state backend. */ public boolean isIncrementalCheckpointsEnabled() { return rocksDBStateBackend.isIncrementalCheckpointsEnabled(); }
3.26
flink_RocksDBStateBackend_createKeyedStateBackend_rdh
// ------------------------------------------------------------------------ // State holding data structures // ------------------------------------------------------------------------ @Override public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(Environment env, JobID jobID, String operatorIdentifier, TypeSerializer<K> keySerializer, int numberOfKeyGroups, KeyGroupRange keyGroupRange, TaskKvStateRegistry kvStateRegistry, TtlTimeProvider ttlTimeProvider, MetricGroup metricGroup, @Nonnull Collection<KeyedStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry) throws IOException { return rocksDBStateBackend.createKeyedStateBackend(env, jobID, operatorIdentifier, keySerializer, numberOfKeyGroups, keyGroupRange, kvStateRegistry, ttlTimeProvider, metricGroup, stateHandles, cancelStreamRegistry); }
3.26
flink_RocksDBStateBackend_setDbStoragePath_rdh
// ------------------------------------------------------------------------ // Parameters // ------------------------------------------------------------------------ /** * Sets the path where the RocksDB local database files should be stored on the local file * system. Setting this path overrides the default behavior, where the files are stored across * the configured temp directories. * * <p>Passing {@code null} to this function restores the default behavior, where the configured * temp directories will be used. * * @param path * The path where the local RocksDB database files are stored. */ public void setDbStoragePath(String path) { setDbStoragePaths(path == null ? null : new String[]{ path }); }
3.26
flink_RocksDBStateBackend_setWriteBatchSize_rdh
/** * Sets the max batch size that will be used in {@link RocksDBWriteBatchWrapper}; a non-positive value * disables the memory size controller so that only the item count controller is used. * * @param writeBatchSize * The batch size to be used in {@link RocksDBWriteBatchWrapper}. */ public void setWriteBatchSize(long writeBatchSize) { rocksDBStateBackend.setWriteBatchSize(writeBatchSize); }
3.26
flink_RocksDBStateBackend_setNumberOfTransferThreads_rdh
/** * Sets the number of threads used to transfer files while snapshotting/restoring. * * @param numberOfTransferThreads * The number of threads used to transfer files while * snapshotting/restoring. */ public void setNumberOfTransferThreads(int numberOfTransferThreads) { rocksDBStateBackend.setNumberOfTransferThreads(numberOfTransferThreads); }
3.26
flink_RocksDBStateBackend_setDbStoragePaths_rdh
/** * Sets the directories in which the local RocksDB database puts its files (like SST and * metadata files). These directories do not need to be persistent, they can be ephemeral, * meaning that they are lost on a machine failure, because state in RocksDB is persisted in * checkpoints. * * <p>If nothing is configured, these directories default to the TaskManager's local temporary * file directories. * * <p>Each distinct state will be stored in one path, but when the state backend creates * multiple states, they will store their files on different paths. * * <p>Passing {@code null} to this function restores the default behavior, where the configured * temp directories will be used. * * @param paths * The paths across which the local RocksDB database files will be spread. */ public void setDbStoragePaths(String... paths) { rocksDBStateBackend.setDbStoragePaths(paths); }
3.26
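Taken together, the setters above are usually applied to a backend instance before handing it to the execution environment. A hedged configuration sketch for the deprecated RocksDBStateBackend shown in these snippets; the checkpoint URI, storage paths, and thread count are placeholders:

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RocksDBBackendSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Second constructor argument enables incremental checkpoints.
        RocksDBStateBackend backend =
                new RocksDBStateBackend("hdfs://namenode:8020/flink/checkpoints", true);
        backend.setDbStoragePaths("/data1/flink/rocksdb", "/data2/flink/rocksdb");
        backend.setNumberOfTransferThreads(4);

        env.setStateBackend(backend);
    }
}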
flink_RocksDBStateBackend_getRocksDBOptions_rdh
/** * Gets {@link org.rocksdb.Options} for the RocksDB instances. * * <p>The options created by the factory here are applied on top of the pre-defined options * profile selected via {@link #setPredefinedOptions(PredefinedOptions)}. If the pre-defined * options profile is the default ({@link PredefinedOptions#DEFAULT}), then the factory fully * controls the RocksDB options. */ @Nullablepublic RocksDBOptionsFactory getRocksDBOptions() { return rocksDBStateBackend.getRocksDBOptions(); }
3.26
flink_RocksDBStateBackend_configure_rdh
// ------------------------------------------------------------------------ // Reconfiguration // ------------------------------------------------------------------------ /** * Creates a copy of this state backend that uses the values defined in the configuration for * fields that were not yet specified in this state backend. * * @param config * The configuration. * @param classLoader * The class loader. * @return The re-configured variant of the state backend */ @Overridepublic RocksDBStateBackend configure(ReadableConfig config, ClassLoader classLoader) { return new RocksDBStateBackend(this, config, classLoader); }
3.26
flink_RocksDBStateBackend_setPredefinedOptions_rdh
// ------------------------------------------------------------------------ // Parametrize with RocksDB Options // ------------------------------------------------------------------------ /** * Sets the predefined options for RocksDB. * * <p>If user-configured options within {@link RocksDBConfigurableOptions} are set (through * flink-conf.yaml) or a user-defined options factory is set (via {@link #setRocksDBOptions(RocksDBOptionsFactory)}), then the options from the factory are applied on * top of the here specified predefined options and customized options. * * @param options * The options to set (must not be null). */ public void setPredefinedOptions(@Nonnull PredefinedOptions options) { rocksDBStateBackend.setPredefinedOptions(options); } /** * Gets the currently set predefined options for RocksDB. The default options (if nothing was * set via {@link #setPredefinedOptions(PredefinedOptions)}) are {@link PredefinedOptions#DEFAULT}. * * <p>If user-configured options within {@link RocksDBConfigurableOptions} are set (through * flink-conf.yaml) or a user-defined options factory is set (via {@link #setRocksDBOptions(RocksDBOptionsFactory)}
3.26
flink_RocksDBStateBackend_getNumberOfTransferingThreads_rdh
/** * * @deprecated Typo in method name. Use {@link #getNumberOfTransferThreads} instead. */@Deprecated public int getNumberOfTransferingThreads() { return getNumberOfTransferThreads(); }
3.26
flink_RocksDBStateBackend_setPriorityQueueStateType_rdh
/** * Sets the type of the priority queue state. It will fallback to the default value, if it is * not explicitly set. */ public void setPriorityQueueStateType(PriorityQueueStateType priorityQueueStateType) { rocksDBStateBackend.setPriorityQueueStateType(LegacyEnumBridge.convert(priorityQueueStateType)); }
3.26
flink_S3TestCredentials_credentialsAvailable_rdh
// ------------------------------------------------------------------------ /** * Checks whether S3 test credentials are available in the environment variables of this JVM. */private static boolean credentialsAvailable() {return (isNotEmpty(S3_TEST_BUCKET) && isNotEmpty(S3_TEST_ACCESS_KEY)) && isNotEmpty(S3_TEST_SECRET_KEY); }
3.26
flink_S3TestCredentials_assumeCredentialsAvailable_rdh
/** * Checks whether credentials are available in the environment variables of this JVM. If not, * throws an {@link AssumptionViolatedException} which causes JUnit tests to be skipped. */ public static void assumeCredentialsAvailable() { Assume.assumeTrue("No S3 credentials available in this test's environment", credentialsAvailable()); } /** * Gets the S3 Access Key. * * <p>This method throws an exception if the key is not available. Tests should use {@link #assumeCredentialsAvailable()}
3.26
flink_S3TestCredentials_getTestBucketUri_rdh
/** * Gets the URI for the path under which all tests should put their data. * * <p>This method throws an exception if the bucket was not configured. Tests should use {@link #assumeCredentialsAvailable()} to skip tests when credentials are not available. */ public static String getTestBucketUri() { return getTestBucketUriWithScheme("s3"); }
3.26
flink_S3TestCredentials_getS3SecretKey_rdh
/** * Gets the S3 Secret Key. * * <p>This method throws an exception if the key is not available. Tests should use {@link #assumeCredentialsAvailable()} to skip tests when credentials are not available. */ public static String getS3SecretKey() { if (S3_TEST_SECRET_KEY != null) { return S3_TEST_SECRET_KEY; } else { throw new IllegalStateException("S3 test secret key not available"); } }
3.26
flink_S3TestCredentials_getTestBucketUriWithScheme_rdh
/** * Gets the URI for the path under which all tests should put their data. * * <p>This method throws an exception if the bucket was not configured. Tests should use {@link #assumeCredentialsAvailable()} to skip tests when credentials are not available. */ public static String getTestBucketUriWithScheme(String scheme) { if (S3_TEST_BUCKET != null) { return ((scheme + "://") + S3_TEST_BUCKET) + "/temp/"; } else { throw new IllegalStateException("S3 test bucket not available"); } }
3.26
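In tests, these helpers are typically combined so that the whole test is skipped when no credentials are present. A minimal JUnit 4 style sketch; the import path of S3TestCredentials and the test class itself are assumptions for illustration:

import org.apache.flink.testutils.s3.S3TestCredentials;
import org.junit.BeforeClass;
import org.junit.Test;

public class S3SmokeTest {
    @BeforeClass
    public static void checkCredentials() {
        // Throws AssumptionViolatedException and skips the class when the
        // S3 environment variables are not set.
        S3TestCredentials.assumeCredentialsAvailable();
    }

    @Test
    public void bucketUriIsResolvable() {
        String uri = S3TestCredentials.getTestBucketUri();
        org.junit.Assert.assertTrue(uri.startsWith("s3://"));
    }
}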
flink_NestedRowData_setNullAt_rdh
/** * See {@link BinaryRowData#setNullAt(int)}. */ @Override public void setNullAt(int i) { assertIndexIsValid(i); BinarySegmentUtils.bitSet(segments, offset, i + 8); BinarySegmentUtils.setLong(segments, getFieldOffset(i), 0); }
3.26
flink_HadoopDelegationTokenConverter_serialize_rdh
/** * Serializes delegation tokens. */ public static byte[] serialize(Credentials credentials) throws IOException { try (DataOutputBuffer dob = new DataOutputBuffer()) { credentials.writeTokenStorageToStream(dob); return dob.getData(); } }
3.26
flink_HadoopDelegationTokenConverter_deserialize_rdh
/** * Deserializes delegation tokens. */ public static Credentials deserialize(byte[] credentialsBytes) throws IOException { try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(credentialsBytes))) { Credentials credentials = new Credentials(); credentials.readTokenStorageStream(dis); return credentials; } }
3.26
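The two helpers are inverses of each other, so a round trip should preserve the token storage. A small sketch, assuming Hadoop's Credentials class on the classpath and the import path of the converter (both are assumptions here):

import org.apache.flink.runtime.security.token.hadoop.HadoopDelegationTokenConverter;
import org.apache.hadoop.security.Credentials;

public class TokenRoundTripSketch {
    public static void main(String[] args) throws Exception {
        Credentials credentials = new Credentials();
        // Serialize and immediately deserialize; both sides should report the same
        // number of tokens (zero here, since none were added).
        byte[] bytes = HadoopDelegationTokenConverter.serialize(credentials);
        Credentials restored = HadoopDelegationTokenConverter.deserialize(bytes);
        System.out.println(credentials.numberOfTokens() == restored.numberOfTokens());
    }
}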
flink_TypeTransformations_timeToSqlTypes_rdh
/** * Returns a type transformation that transforms data type to a new data type whose conversion * class is {@link java.sql.Timestamp}/{@link java.sql.Time}/{@link java.sql.Date} if the * original data type is TIMESTAMP/TIME/DATE. */ public static TypeTransformation timeToSqlTypes() { Map<LogicalTypeRoot, Class<?>> conversions = new HashMap<>();conversions.put(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE, Timestamp.class); conversions.put(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE, Time.class); conversions.put(LogicalTypeRoot.DATE, Date.class); return new DataTypeConversionClassTransformation(conversions); }
3.26
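A transformation like this is applied to an existing DataType through a utility such as DataTypeUtils.transform; the utility call and the row layout below are assumptions for illustration, not part of the snippet above:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.inference.TypeTransformations;
import org.apache.flink.table.types.utils.DataTypeUtils;

public class TimeToSqlTypesSketch {
    public static void main(String[] args) {
        DataType rowType = DataTypes.ROW(
                DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)),
                DataTypes.FIELD("d", DataTypes.DATE()));

        // After the transformation, TIMESTAMP/TIME/DATE fields use the
        // java.sql.Timestamp/Time/Date conversion classes.
        DataType converted = DataTypeUtils.transform(rowType, TypeTransformations.timeToSqlTypes());
        System.out.println(converted);
    }
}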
flink_TypeTransformations_toNullable_rdh
/** * Returns a type transformation that transforms data type to nullable data type but keeps other * information unchanged. */ public static TypeTransformation toNullable() { return DataType::nullable; }
3.26
flink_TypeTransformations_legacyToNonLegacy_rdh
/** * Returns a type transformation that transforms LEGACY(...) type to a non-legacy type. */ public static TypeTransformation legacyToNonLegacy() { return LegacyToNonLegacyTransformation.INSTANCE; }
3.26
flink_WindowWordCount_main_rdh
// ************************************************************************* // PROGRAM // ************************************************************************* public static void main(String[] args) throws Exception { final CLI params = CLI.fromArgs(args); // Create the execution environment. This is the main entrypoint // to building a Flink application. final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // Apache Flink’s unified approach to stream and batch processing means that a DataStream // application executed over bounded input will produce the same final results regardless // of the configured execution mode. It is important to note what final means here: a job // executing in STREAMING mode might produce incremental updates (think upserts in // a database) while a BATCH job would only produce one final result at the end. The final // result will be the same if interpreted correctly, but getting there can be different. // // The “classic” execution behavior of the DataStream API is called STREAMING execution // mode. Applications should use streaming execution for unbounded jobs that require // continuous incremental processing and are expected to stay online indefinitely. // // By enabling BATCH execution, we allow Flink to apply additional optimizations that we // can only do when we know that our input is bounded. For example, different // join/aggregation strategies can be used, in addition to a different shuffle // implementation that allows more efficient task scheduling and failure recovery behavior. // // By setting the runtime mode to AUTOMATIC, Flink will choose BATCH if all sources // are bounded and otherwise STREAMING. env.setRuntimeMode(params.getExecutionMode()); // This optional step makes the input parameters // available in the Flink UI. env.getConfig().setGlobalJobParameters(params); DataStream<String> text; if (params.getInputs().isPresent()) { // Create a new file source that will read files from a given set of directories. // Each file will be processed as plain text and split based on newlines. FileSource.FileSourceBuilder<String> builder = FileSource.forRecordStreamFormat(new TextLineInputFormat(), params.getInputs().get()); // If a discovery interval is provided, the source will // continuously watch the given directories for new files. params.getDiscoveryInterval().ifPresent(builder::monitorContinuously); text = env.fromSource(builder.build(), WatermarkStrategy.noWatermarks(), "file-input"); } else { text = env.fromData(WordCountData.WORDS).name("in-memory-input"); } int v4 = params.getInt("window").orElse(250); int slideSize = params.getInt("slide").orElse(150); // The text lines read from the source are split into words // using a user-defined function. The tokenizer, implemented below, // will output each words as a (2-tuple) containing (word, 1) DataStream<Tuple2<String, Integer>> v6 = // For each key, we perform a simple sum of the "1" field, the count. // If the input data set is bounded, sum will output a final count for // each word. If it is unbounded, it will continuously output updates // each time it sees a new instance of each word in the stream. // create windows of windowSize records slided every slideSize records // keyBy groups tuples based on the "0" field, the word. // Using a keyBy allows performing aggregations and other // stateful transformations over data on a per-key basis. // This is similar to a GROUP BY clause in a SQL query. 
text.flatMap(new WordCount.Tokenizer()).name("tokenizer").keyBy(value -> value.f0).countWindow(v4, slideSize).sum(1).name("counter"); if (params.getOutput().isPresent()) { // Given an output directory, Flink will write the results to a file // using a simple string encoding. In a production environment, this might // be something more structured like CSV, Avro, JSON, or Parquet. v6.sinkTo(FileSink.<Tuple2<String, Integer>>forRowFormat(params.getOutput().get(), new SimpleStringEncoder<>()).withRollingPolicy(DefaultRollingPolicy.builder().withMaxPartSize(MemorySize.ofMebiBytes(1)).withRolloverInterval(Duration.ofSeconds(10)).build()).build()).name("file-sink");} else { v6.print().name("print-sink"); } // Apache Flink applications are composed lazily. Calling execute // submits the Job and begins processing. env.execute("WindowWordCount");}
3.26
flink_ClassLogicalTypeConverter_getDefaultExternalClassForType_rdh
/** * Gets the internal (SQL engine execution data formats) and default external class for {@link LogicalType}. */public class ClassLogicalTypeConverter { @Deprecated public static Class getDefaultExternalClassForType(LogicalType type) { return TypeConversions.fromLogicalToDataType(type).getConversionClass(); }
3.26
flink_DefaultDispatcherRunner_grantLeadership_rdh
// --------------------------------------------------------------- // Leader election // --------------------------------------------------------------- @Overridepublic void grantLeadership(UUID leaderSessionID) { runActionIfRunning(() -> { LOG.info("{} was granted leadership with leader id {}. Creating new {}.", getClass().getSimpleName(), leaderSessionID, DispatcherLeaderProcess.class.getSimpleName()); startNewDispatcherLeaderProcess(leaderSessionID); }); }
3.26
flink_MutableIOMetrics_addIOMetrics_rdh
/** * Adds the IO metrics for the given attempt to this object. If the {@link AccessExecution} is * in a terminal state the contained {@link IOMetrics} object is added. Otherwise the given * {@link MetricFetcher} is used to retrieve the required metrics. * * @param attempt * Attempt whose IO metrics should be added * @param fetcher * MetricFetcher to retrieve metrics for running jobs * @param jobID * JobID to which the attempt belongs * @param taskID * TaskID to which the attempt belongs */ public void addIOMetrics(AccessExecution attempt, @Nullable MetricFetcher fetcher, String jobID, String taskID) { if (attempt.getState().isTerminal()) { IOMetrics ioMetrics = attempt.getIOMetrics(); if (ioMetrics != null) {// execAttempt is already finished, use final metrics stored in // ExecutionGraph this.numBytesIn += ioMetrics.getNumBytesIn(); this.numBytesOut += ioMetrics.getNumBytesOut();this.numRecordsIn += ioMetrics.getNumRecordsIn(); this.numRecordsOut += ioMetrics.getNumRecordsOut(); this.accumulateBackPressuredTime += ioMetrics.getAccumulateBackPressuredTime(); this.accumulateIdleTime += ioMetrics.getAccumulateIdleTime(); if (Double.isNaN(ioMetrics.getAccumulateBusyTime())) { this.accumulateBusyTime = Double.NaN; } else { this.accumulateBusyTime += ioMetrics.getAccumulateBusyTime(); } } } else // execAttempt is still running, use MetricQueryService instead if (fetcher != null) { fetcher.update(); MetricStore.ComponentMetricStore metrics = fetcher.getMetricStore().getSubtaskAttemptMetricStore(jobID, taskID, attempt.getParallelSubtaskIndex(), attempt.getAttemptNumber()); if (metrics != null) { /** * We want to keep track of missing metrics to be able to make a difference * between 0 as a value and a missing value. In case a metric is missing for a * parallel instance of a task, we set the complete flag as false. */ if (metrics.getMetric(MetricNames.IO_NUM_BYTES_IN) == null) { this.numBytesInComplete = false; } else { this.numBytesIn += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_BYTES_IN)); } if (metrics.getMetric(MetricNames.IO_NUM_BYTES_OUT) == null) { this.numBytesOutComplete = false; } else { this.numBytesOut += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_BYTES_OUT)); } if (metrics.getMetric(MetricNames.IO_NUM_RECORDS_IN) == null) { this.numRecordsInComplete = false; } else { this.numRecordsIn += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_RECORDS_IN)); } if (metrics.getMetric(MetricNames.IO_NUM_RECORDS_OUT) == null) { this.numRecordsOutComplete = false; } else { this.numRecordsOut += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_RECORDS_OUT)); } if (metrics.getMetric(MetricNames.ACC_TASK_BACK_PRESSURED_TIME) != null) { this.accumulateBackPressuredTime += Long.parseLong(metrics.getMetric(MetricNames.ACC_TASK_BACK_PRESSURED_TIME)); } if (metrics.getMetric(MetricNames.ACC_TASK_IDLE_TIME) != null) { this.accumulateIdleTime += Long.parseLong(metrics.getMetric(MetricNames.ACC_TASK_IDLE_TIME)); } if (metrics.getMetric(MetricNames.ACC_TASK_BUSY_TIME) != null) { double busyTime = Double.parseDouble(metrics.getMetric(MetricNames.ACC_TASK_BUSY_TIME)); if (Double.isNaN(busyTime)) { this.accumulateBusyTime = Double.NaN; } else { this.accumulateBusyTime += busyTime; } } } else { this.numBytesInComplete = false; this.numBytesOutComplete = false; this.numRecordsInComplete = false; this.numRecordsOutComplete = false; } } }
3.26
flink_SourceFunctionProvider_of_rdh
/** * Helper method for creating a Source provider with a provided source parallelism. */ static SourceFunctionProvider of(SourceFunction<RowData> sourceFunction, boolean isBounded, @Nullable Integer sourceParallelism) { return new SourceFunctionProvider() { @Override public SourceFunction<RowData> createSourceFunction() { return sourceFunction; } @Override public boolean isBounded() { return isBounded; } @Override public Optional<Integer> m0() { return Optional.ofNullable(sourceParallelism); } }; }
3.26
flink_ReusingBlockResettableIterator_hasNext_rdh
// ------------------------------------------------------------------------ @Override public boolean hasNext() { try { if (this.nextElement == null) { if (this.readPhase) { // read phase, get next element from buffer T tmp = getNextRecord(this.reuseElement); if (tmp != null) { this.nextElement = tmp; return true; } else { return false; } } else if (this.input.hasNext()) { final T next = this.input.next(); if (writeNextRecord(next)) { this.nextElement = next; return true; } else { this.leftOverElement = next; return false; } } else { this.noMoreBlocks = true;return false; } } else { return true; } } catch (IOException ioex) { throw new RuntimeException("Error (de)serializing record in block resettable iterator.", ioex); } }
3.26
flink_SecurityUtils_install_rdh
/** * Installs a process-wide security configuration. * * <p>Applies the configuration using the available security modules (i.e. Hadoop, JAAS). */ public static void install(SecurityConfiguration config) throws Exception {// Install the security modules first before installing the security context installModules(config); installContext(config); }
3.26
flink_CsvRowSchemaConverter_convertType_rdh
/** * Convert {@link LogicalType} to {@link CsvSchema.ColumnType} based on Jackson's categories. */ private static ColumnType convertType(String fieldName, LogicalType type) { if (STRING_TYPE_ROOTS.contains(type.getTypeRoot())) { return ColumnType.STRING; } else if (NUMBER_TYPE_ROOTS.contains(type.getTypeRoot())) { return ColumnType.NUMBER; } else if (f0.contains(type.getTypeRoot())) { return ColumnType.BOOLEAN; } else if (type.getTypeRoot() == LogicalTypeRoot.ARRAY) { validateNestedField(fieldName, ((ArrayType) (type)).getElementType()); return ColumnType.ARRAY; } else if (type.getTypeRoot() == LogicalTypeRoot.ROW) { RowType rowType = ((RowType) (type)); for (LogicalType fieldType : rowType.getChildren()) { validateNestedField(fieldName, fieldType); } return ColumnType.ARRAY; } else { throw new IllegalArgumentException(((("Unsupported type '" + type.asSummaryString()) + "' for field '") + fieldName) + "'."); } }
3.26
flink_CsvRowSchemaConverter_convert_rdh
/** * Convert {@link RowTypeInfo} to {@link CsvSchema}. */ public static CsvSchema convert(RowTypeInfo rowType) { final Builder builder = new CsvSchema.Builder(); final String[] fields = rowType.getFieldNames(); final TypeInformation<?>[] types = rowType.getFieldTypes(); for (int i = 0; i < rowType.getArity(); i++) { builder.addColumn(new Column(i, fields[i], convertType(fields[i], types[i]))); } return builder.build(); } /** * Convert {@link RowType} to {@link CsvSchema}
3.26
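A small sketch of converting a row type into a Jackson CsvSchema; the field names and types are illustrative:

// Sketch: build a CsvSchema for a two-column row; each column maps to STRING/NUMBER/BOOLEAN/ARRAY.
RowTypeInfo rowType = new RowTypeInfo(
        new TypeInformation<?>[] {Types.INT, Types.STRING},
        new String[] {"id", "name"});
CsvSchema schema = CsvRowSchemaConverter.convert(rowType);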
flink_HadoopOutputCollector_collect_rdh
/**
 * Use the wrapped Flink collector to collect a key-value pair for Flink.
 *
 * @param key the key to collect
 * @param val the value to collect
 * @throws IOException if an error occurs while collecting the key-value pair
 */
@Override
public void collect(final KEY key, final VALUE val) throws IOException {
    this.outTuple.f0 = key;
    this.outTuple.f1 = val;
    this.flinkCollector.collect(outTuple);
}
3.26
flink_HadoopOutputCollector_setFlinkCollector_rdh
/** * Set the wrapped Flink collector. * * @param flinkCollector * The wrapped Flink OutputCollector. */ public void setFlinkCollector(Collector<Tuple2<KEY, VALUE>> flinkCollector) { this.flinkCollector = flinkCollector; }
3.26
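A hedged sketch of how the wrapper is typically rewired inside a Flink function before a wrapped Hadoop mapper is invoked; the mapper, reporter, and Writable types are assumptions, not part of the snippets above:

// Sketch: reuse one wrapper per function instance and point it at the current Flink collector.
HadoopOutputCollector<Text, LongWritable> hadoopCollector = new HadoopOutputCollector<>();

public void flatMap(Tuple2<Text, LongWritable> value, Collector<Tuple2<Text, LongWritable>> out) throws Exception {
    hadoopCollector.setFlinkCollector(out);
    // The wrapped Hadoop mapper writes through the collector, which forwards to Flink.
    mapper.map(value.f0, value.f1, hadoopCollector, reporter); // mapper/reporter are placeholders
}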
flink_VertexInputInfoStore_put_rdh
/**
 * Put a {@link JobVertexInputInfo}.
 *
 * @param jobVertexId the job vertex id
 * @param resultId the intermediate result id
 * @param info the {@link JobVertexInputInfo} to put
 */
public void put(JobVertexID jobVertexId, IntermediateDataSetID resultId, JobVertexInputInfo info) {
    checkNotNull(jobVertexId);
    checkNotNull(resultId);
    checkNotNull(info);
    jobVertexInputInfos.compute(jobVertexId, (ignored, inputInfos) -> {
        if (inputInfos == null) {
            inputInfos = new HashMap<>();
        }
        inputInfos.putIfAbsent(resultId, info);
        return inputInfos;
    });
}
3.26
flink_LogicalTypeJsonSerializer_serializeTime_rdh
// -------------------------------------------------------------------------------------------- // Helper methods for some complex types // -------------------------------------------------------------------------------------------- private void serializeTime(LogicalType timeType, JsonGenerator jsonGenerator) throws IOException { switch (timeType.getTypeRoot()) { case TIME_WITHOUT_TIME_ZONE : jsonGenerator.writeNumberField(FIELD_NAME_PRECISION, ((TimeType) (timeType)).getPrecision()); break; case TIMESTAMP_WITHOUT_TIME_ZONE : jsonGenerator.writeNumberField(FIELD_NAME_PRECISION, ((TimestampType) (timeType)).getPrecision()); break; case TIMESTAMP_WITH_TIME_ZONE : jsonGenerator.writeNumberField(FIELD_NAME_PRECISION, ((ZonedTimestampType) (timeType)).getPrecision()); break; case TIMESTAMP_WITH_LOCAL_TIME_ZONE : jsonGenerator.writeNumberField(FIELD_NAME_PRECISION, ((LocalZonedTimestampType) (timeType)).getPrecision()); break; default : throw new TableException("Time or time stamp type root expected."); } }
3.26
flink_SupportsFilterPushDown_of_rdh
/** * Constructs a filter push-down result. * * <p>See the documentation of {@link SupportsFilterPushDown} for more information. * * @param acceptedFilters * filters that are consumed by the source but may be applied on a * best effort basis * @param remainingFilters * filters that a subsequent filter operation still needs to perform * during runtime */ public static Result of(List<ResolvedExpression> acceptedFilters, List<ResolvedExpression> remainingFilters) { return new Result(acceptedFilters, remainingFilters); }
3.26
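A hedged sketch of a source that accepts every pushed filter on a best-effort basis while still asking the planner to apply them at runtime; pushedFilters is a hypothetical field on the table source:

// Sketch: conservative push-down; accepted filters are also returned as remaining filters.
@Override
public Result applyFilters(List<ResolvedExpression> filters) {
    this.pushedFilters = new ArrayList<>(filters); // remembered for the scan, hypothetical field
    return Result.of(new ArrayList<>(filters), new ArrayList<>(filters));
}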
flink_CsvCommons_validateFormatOptions_rdh
// ------------------------------------------------------------------------ // Validation // ------------------------------------------------------------------------ static void validateFormatOptions(ReadableConfig tableOptions) { final boolean hasQuoteCharacter = tableOptions.getOptional(QUOTE_CHARACTER).isPresent(); final boolean isDisabledQuoteCharacter = tableOptions.get(DISABLE_QUOTE_CHARACTER); if (isDisabledQuoteCharacter && hasQuoteCharacter) { throw new ValidationException("Format cannot define a quote character and disabled quote character at the same time."); } // Validate the option value must be a single char. validateCharacterVal(tableOptions, FIELD_DELIMITER, true); validateCharacterVal(tableOptions, ARRAY_ELEMENT_DELIMITER); validateCharacterVal(tableOptions, QUOTE_CHARACTER); validateCharacterVal(tableOptions, ESCAPE_CHARACTER); }
3.26
flink_CsvCommons_validateCharacterVal_rdh
/**
 * Validates that the value of the option {@code option} is a single character.
 *
 * @param tableOptions the table options
 * @param option the config option
 * @param unescape whether to unescape the option value
 */
private static void validateCharacterVal(ReadableConfig tableOptions, ConfigOption<String> option, boolean unescape) {
    if (!tableOptions.getOptional(option).isPresent()) {
        return;
    }
    final String value = unescape
            ? StringEscapeUtils.unescapeJava(tableOptions.get(option))
            : tableOptions.get(option);
    if (value.length() != 1) {
        throw new ValidationException(
                String.format(
                        "Option '%s.%s' must be a string with single character, but was: %s",
                        IDENTIFIER, option.key(), tableOptions.get(option)));
    }
}
3.26
flink_Types_EITHER_rdh
/**
 * Returns type information for Flink's {@link org.apache.flink.types.Either} type. Null values
 * are not supported.
 *
 * <p>The Either type can hold a value of one of two possible types.
 *
 * <p>Example use: <code>Types.EITHER(Types.VOID, Types.INT)</code>
 *
 * @param leftType type information of the left side / {@link org.apache.flink.types.Either.Left}
 * @param rightType type information of the right side / {@link org.apache.flink.types.Either.Right}
 */
public static <L, R> TypeInformation<Either<L, R>> EITHER(TypeInformation<L> leftType, TypeInformation<R> rightType) {
    return new EitherTypeInfo<>(leftType, rightType);
}
3.26
flink_Types_PRIMITIVE_ARRAY_rdh
/** * Returns type information for Java arrays of primitive type (such as <code>byte[]</code>). The * array must not be null. * * @param elementType * element type of the array (e.g. Types.BOOLEAN, Types.INT, Types.DOUBLE) */ public static TypeInformation<?> PRIMITIVE_ARRAY(TypeInformation<?> elementType) { if (elementType == BOOLEAN) { return PrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO; } else if (elementType == BYTE) { return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO; } else if (elementType == SHORT) { return PrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO; } else if (elementType == INT) { return PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO; } else if (elementType == LONG) { return PrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO; } else if (elementType == FLOAT) { return PrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO; } else if (elementType == DOUBLE) { return PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO; } else if (elementType == CHAR) { return PrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO; } throw new IllegalArgumentException("Invalid element type for a primitive array."); }
3.26
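A short usage sketch; because the method returns a wildcard type, an unchecked cast is needed when a concretely typed TypeInformation is required, e.g. for a state descriptor:

// Sketch: type information for a long[] payload used in keyed state.
@SuppressWarnings("unchecked")
TypeInformation<long[]> longArrayType = (TypeInformation<long[]>) Types.PRIMITIVE_ARRAY(Types.LONG);
ValueStateDescriptor<long[]> histogram = new ValueStateDescriptor<>("histogram", longArrayType);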
flink_Types_TUPLE_rdh
/**
 * Returns type information for typed subclasses of Flink's {@link org.apache.flink.api.java.tuple.Tuple}.
 * Typed subclasses are classes that extend {@link org.apache.flink.api.java.tuple.Tuple0} through
 * {@link org.apache.flink.api.java.tuple.Tuple25} to provide types for all fields and may add
 * additional getters and setters for better readability. Additional member fields must not be
 * added. A tuple must not be null.
 *
 * <p>A tuple is a fixed-length composite type for storing multiple values in a deterministic
 * field order. Fields of a tuple are typed. Tuples are the most efficient composite type; a
 * tuple does not support null-valued fields unless the type of the field supports nullability.
 *
 * <p>The generic types for all fields of the tuple can be defined in a hierarchy of subclasses.
 *
 * <p>If Flink's type analyzer is unable to extract tuple type information with type information
 * for all fields, an {@link org.apache.flink.api.common.functions.InvalidTypesException} is thrown.
 *
 * <p>Example use:
 *
 * <pre>{@code
 * class MyTuple extends Tuple2<Integer, String> {
 *
 *     public int getId() { return f0; }
 *
 *     public String getName() { return f1; }
 * }
 *
 * Types.TUPLE(MyTuple.class)
 * }</pre>
 *
 * @param tupleSubclass A subclass of {@link org.apache.flink.api.java.tuple.Tuple0} through
 *     {@link org.apache.flink.api.java.tuple.Tuple25} that defines all field types and does not
 *     add any additional fields
 */
public static <T extends Tuple> TypeInformation<T> TUPLE(Class<T> tupleSubclass) {
    final TypeInformation<T> ti = TypeExtractor.createTypeInfo(tupleSubclass);
    if (ti instanceof TupleTypeInfo) {
        return ti;
    }
    throw new InvalidTypesException("Tuple type expected but was: " + ti);
}
3.26
flink_Types_POJO_rdh
/**
 * Returns type information for a POJO (Plain Old Java Object) and allows to specify all fields
 * manually.
 *
 * <p>A type is considered a Flink POJO type if it fulfills the conditions below.
 *
 * <ul>
 *   <li>It is a public class and standalone (not a non-static inner class).
 *   <li>It has a public no-argument constructor.
 *   <li>All non-static, non-transient fields in the class (and all superclasses) are either
 *       public (and non-final) or have a public getter and a setter method that follows the
 *       Java beans naming conventions for getters and setters.
 *   <li>It is a fixed-length, null-aware composite type with non-deterministic field order.
 *       Every field can be null independent of the field's type.
 * </ul>
 *
 * <p>The generic types for all fields of the POJO can be defined in a hierarchy of subclasses.
 *
 * <p>If Flink's type analyzer is unable to extract a POJO field, an
 * {@link org.apache.flink.api.common.functions.InvalidTypesException} is thrown.
 *
 * <p><strong>Note:</strong> In most cases the type information of fields can be determined
 * automatically; in that case we recommend using {@link Types#POJO(Class)}.
 *
 * @param pojoClass POJO class
 * @param fields map of fields that map a name to type information. The map key is the name of
 *     the field and the value is its type.
 */
public static <T> TypeInformation<T> POJO(Class<T> pojoClass, Map<String, TypeInformation<?>> fields) {
    final List<PojoField> pojoFields = new ArrayList<>(fields.size());
    for (Map.Entry<String, TypeInformation<?>> field : fields.entrySet()) {
        final Field declaredField = TypeExtractor.getDeclaredField(pojoClass, field.getKey());
        if (declaredField == null) {
            throw new InvalidTypesException("Field '" + field.getKey() + "' could not be accessed.");
        }
        pojoFields.add(new PojoField(declaredField, field.getValue()));
    }
    return new PojoTypeInfo<>(pojoClass, pojoFields);
}
3.26
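A hedged sketch that declares POJO type information explicitly; WordCount is a placeholder class with a public String field "word" and a public int field "count":

// Sketch: manually list the POJO fields instead of relying on automatic extraction.
Map<String, TypeInformation<?>> fields = new HashMap<>();
fields.put("word", Types.STRING);
fields.put("count", Types.INT);
TypeInformation<WordCount> wordCountType = Types.POJO(WordCount.class, fields);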
flink_Types_ROW_NAMED_rdh
/** * Returns type information for {@link org.apache.flink.types.Row} with fields of the given * types and with given names. A row must not be null. * * <p>A row is a fixed-length, null-aware composite type for storing multiple values in a * deterministic field order. Every field can be null independent of the field's type. The type * of row fields cannot be automatically inferred; therefore, it is required to provide type * information whenever a row is used. * * <p>The schema of rows can have up to <code>Integer.MAX_VALUE</code> fields, however, all row * instances must strictly adhere to the schema defined by the type info. * * <p>Example use: {@code ROW_NAMED(new String[]{"name", "number"}, Types.STRING, Types.INT)}. * * @param fieldNames * array of field names * @param types * array of field types */ public static TypeInformation<Row> ROW_NAMED(String[] fieldNames, TypeInformation<?>... types) { return new RowTypeInfo(types, fieldNames); }
3.26
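A short hedged sketch showing the named-row variant used as a type hint; "input" stands for an existing DataStream of a placeholder Order type with fields "id" and "amount":

// Sketch: field names make the resulting row addressable by name in downstream operators.
DataStream<Row> namedRows = input
        .map(order -> Row.of(order.id, order.amount))
        .returns(Types.ROW_NAMED(new String[] {"id", "amount"}, Types.LONG, Types.DOUBLE));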
flink_Types_OBJECT_ARRAY_rdh
/** * Returns type information for Java arrays of object types (such as <code>String[]</code>, * <code>Integer[]</code>). The array itself must not be null. Null values for elements are * supported. * * @param elementType * element type of the array */@SuppressWarnings("unchecked") public static <E> TypeInformation<E[]> OBJECT_ARRAY(TypeInformation<E> elementType) { if (elementType == Types.STRING) { return ((TypeInformation) (BasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO)); } return ObjectArrayTypeInfo.getInfoFor(elementType); }
3.26
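A short usage sketch; since the elements are object types, null elements are allowed:

// Sketch: boxed integer array and string array type information.
TypeInformation<Integer[]> boxedInts = Types.OBJECT_ARRAY(Types.INT);
TypeInformation<String[]> names = Types.OBJECT_ARRAY(Types.STRING); // resolves to the built-in String[] info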
flink_Types_VALUE_rdh
/**
 * Returns type information for Flink value types (classes that implement
 * {@link org.apache.flink.types.Value}). Built-in value types do not support null values (except
 * for {@link org.apache.flink.types.StringValue}).
 *
 * <p>Value types describe their serialization and deserialization manually, instead of going
 * through a general-purpose serialization framework. A value type is reasonable when general
 * purpose serialization would be highly inefficient. The wrapped value can be altered, allowing
 * programmers to reuse objects and take pressure off the garbage collector.
 *
 * <p>Flink provides built-in value types for all Java primitive types (such as
 * {@link org.apache.flink.types.BooleanValue}, {@link org.apache.flink.types.IntValue}) as well as
 * {@link org.apache.flink.types.StringValue}, {@link org.apache.flink.types.NullValue},
 * {@link org.apache.flink.types.ListValue}, and {@link org.apache.flink.types.MapValue}.
 *
 * @param valueType class that implements {@link org.apache.flink.types.Value}
 */
public static <V extends Value> TypeInformation<V> VALUE(Class<V> valueType) {
    return new ValueTypeInfo<>(valueType);
}
3.26
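A one-line usage sketch with a built-in value type:

// Sketch: type information for Flink's mutable IntValue wrapper.
TypeInformation<IntValue> intValueType = Types.VALUE(IntValue.class);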
flink_Types_MAP_rdh
/** * Returns type information for a Java {@link java.util.Map}. A map must not be null. Null * values in keys are not supported. An entry's value can be null. * * <p>By default, maps are untyped and treated as a generic type in Flink; therefore, it is * useful to pass type information whenever a map is used. * * <p><strong>Note:</strong> Flink does not preserve the concrete {@link Map} type. It converts * a map into {@link HashMap} when copying or deserializing. * * @param keyType * type information for the map's keys * @param valueType * type information for the map's values */ public static <K, V> TypeInformation<Map<K, V>> MAP(TypeInformation<K> keyType, TypeInformation<V> valueType) { return new MapTypeInfo<>(keyType, valueType); }
3.26
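A short usage sketch; such type information is typically handed to a state descriptor or a returns() hint:

// Sketch: a map from String keys to nullable Long values.
TypeInformation<Map<String, Long>> countsType = Types.MAP(Types.STRING, Types.LONG);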
flink_Types_GENERIC_rdh
/** * Returns generic type information for any Java object. The serialization logic will use the * general purpose serializer Kryo. * * <p>Generic types are black-boxes for Flink, but allow any object and null values in fields. * * <p>By default, serialization of this type is not very efficient. Please read the * documentation about how to improve efficiency (namely by pre-registering classes). * * @param genericClass * any Java class */ public static <T> TypeInformation<T> GENERIC(Class<T> genericClass) { return new GenericTypeInfo<>(genericClass);}
3.26
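A hedged sketch of falling back to the generic (Kryo) path for a class Flink cannot analyze; LegacyEvent is a placeholder and "env" stands for an existing execution environment:

// Sketch: treat LegacyEvent as a black box serialized by Kryo.
TypeInformation<LegacyEvent> eventType = Types.GENERIC(LegacyEvent.class);
// Pre-registering the class with Kryo can shrink the serialized form.
env.getConfig().registerKryoType(LegacyEvent.class);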
flink_Types_ROW_rdh
// CHECKSTYLE.OFF: MethodName /** * Returns type information for {@link org.apache.flink.types.Row} with fields of the given * types. A row itself must not be null. * * <p>A row is a fixed-length, null-aware composite type for storing multiple values in a * deterministic field order. Every field can be null regardless of the field's type. The type * of row fields cannot be automatically inferred; therefore, it is required to provide type * information whenever a row is produced. * * <p>The schema of rows can have up to <code>Integer.MAX_VALUE</code> fields, however, all row * instances must strictly adhere to the schema defined by the type info. * * <p>This method generates type information with fields of the given types; the fields have the * default names (f0, f1, f2 ..). * * @param types * The types of the row fields, e.g., Types.STRING, Types.INT */public static TypeInformation<Row> ROW(TypeInformation<?>... types) { return new RowTypeInfo(types); }
3.26
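A short hedged sketch; because row field types cannot be inferred, the row type is declared on the operator that produces the rows. "input" stands for an existing DataStream of a placeholder Order type:

// Sketch: declare the result type of a lambda that emits rows with default field names f0, f1.
DataStream<Row> rows = input
        .map(order -> Row.of(order.id, order.amount))
        .returns(Types.ROW(Types.LONG, Types.DOUBLE));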