name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, range 3.26 to 3.68
flink_OutputFormatBase_postOpen_rdh
/** * Initialize the OutputFormat. This method is called at the end of {@link OutputFormatBase#open(int, int)}. */ protected void postOpen() { }
3.26
flink_TimestampedValue_getTimestamp_rdh
/** * * @return The timestamp associated with this stream value in milliseconds. */ public long getTimestamp() { if (hasTimestamp) { return timestamp; } else { throw new IllegalStateException("Record has no timestamp. Is the time characteristic set to 'ProcessingTime', or " + "did you forget to call 'DataStream.assignTimestampsAndWatermarks(...)'?"); } }
3.26
flink_TimestampedValue_getValue_rdh
/** * * @return The value wrapped in this {@link TimestampedValue}. */ public T getValue() { return value; }
3.26
flink_TimestampedValue_from_rdh
/** * Creates a TimestampedValue from given {@link StreamRecord}. * * @param streamRecord * The StreamRecord object from which TimestampedValue is to be created. */ public static <T> TimestampedValue<T> from(StreamRecord<T> streamRecord) { if (streamRecord.hasTimestamp()) { return new TimestampedValue<>(streamRecord.getValue(), streamRecord.getTimestamp()); } else { return new TimestampedValue<>(streamRecord.getValue()); } }
3.26
flink_TimestampedValue_hasTimestamp_rdh
/** * Checks whether this record has a timestamp. * * @return True if the record has a timestamp, false if not. */ public boolean hasTimestamp() { return hasTimestamp; }
3.26
flink_TimestampedValue_m0_rdh
/** * Creates a {@link StreamRecord} from this TimestampedValue. */ public StreamRecord<T> m0() { StreamRecord<T> streamRecord = new StreamRecord<>(value); if (hasTimestamp) { streamRecord.setTimestamp(timestamp); } return streamRecord; }
3.26
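For context on the three TimestampedValue snippets above, a minimal round-trip sketch (plain Java; the imports are assumptions about package locations, and m0() is simply the obfuscated name of the TimestampedValue-to-StreamRecord conversion shown above):

    import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
    // TimestampedValue import assumed; its package differs across Flink versions

    public class TimestampedValueRoundTrip {
        public static void main(String[] args) {
            StreamRecord<String> record = new StreamRecord<>("hello", 42L); // value + timestamp
            TimestampedValue<String> value = TimestampedValue.from(record);

            System.out.println(value.getValue());      // hello
            System.out.println(value.hasTimestamp());  // true
            System.out.println(value.getTimestamp());  // 42

            StreamRecord<String> back = value.m0();    // convert back, re-attaching the timestamp
            System.out.println(back.getTimestamp());   // 42
        }
    }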
flink_CloseableRegistry_unregisterAndCloseAll_rdh
/** * Unregisters all given {@link Closeable} objects from this registry and closes all objects * that were actually registered. Suppresses (and collects) all exceptions that happen * during closing and throws only after all {@link Closeable} objects have been processed. * * @param toUnregisterAndClose * closeables to unregister and close. * @throws IOException * collects all exceptions encountered during closing of the given objects. */ public void unregisterAndCloseAll(Closeable... toUnregisterAndClose) throws IOException { IOException suppressed = null; for (Closeable closeable : toUnregisterAndClose) { if (unregisterCloseable(closeable)) { try { closeable.close(); } catch (IOException ex) { suppressed = ExceptionUtils.firstOrSuppressed(ex, suppressed); } } } if (suppressed != null) { throw suppressed; } }
3.26
flink_CloseableRegistry_doClose_rdh
/** * This implementation does not propagate any exception during closing, for backward compatibility. */ @Override public void doClose(List<Closeable> toClose) throws IOException { IOUtils.closeAllQuietly(reverse(toClose)); }
3.26
flink_FileSourceSplitSerializer_getVersion_rdh
// ------------------------------------------------------------------------ @Override public int getVersion() { return VERSION; }
3.26
flink_SegmentsUtil_getFloat_rdh
/** * get float from segments. * * @param segments * target segments. * @param offset * value offset. */ public static float getFloat(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 4)) { return segments[0].getFloat(offset); } else { return getFloatMultiSegments(segments, offset); } }
3.26
flink_SegmentsUtil_getByte_rdh
/** * get byte from segments. * * @param segments * target segments. * @param offset * value offset. */ public static byte getByte(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 1)) { return segments[0].get(offset); } else { return getByteMultiSegments(segments, offset); } }
3.26
flink_SegmentsUtil_hashByWords_rdh
/** * hash segments to int, numBytes must be aligned to 4 bytes. * * @param segments * Source segments. * @param offset * Source segments offset. * @param numBytes * the number bytes to hash. */ public static int hashByWords(MemorySegment[] segments, int offset, int numBytes) { if (inFirstSegment(segments, offset, numBytes)) { return MurmurHashUtil.hashBytesByWords(segments[0], offset, numBytes); } else { return hashMultiSegByWords(segments, offset, numBytes); } }
3.26
flink_SegmentsUtil_getShort_rdh
/** * get short from segments. * * @param segments * target segments. * @param offset * value offset. */ public static short getShort(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 2)) { return segments[0].getShort(offset); } else { return getShortMultiSegments(segments, offset); } }
3.26
flink_SegmentsUtil_copyFromBytes_rdh
/** * Copy target segments from source byte[]. * * @param segments * target segments. * @param offset * target segments offset. * @param bytes * source byte[]. * @param bytesOffset * source byte[] offset. * @param numBytes * the number bytes to copy. */ public static void copyFromBytes(MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) { if (segments.length == 1) { segments[0].put(offset, bytes, bytesOffset, numBytes); } else { copyMultiSegmentsFromBytes(segments, offset, bytes, bytesOffset, numBytes); } }
3.26
flink_SegmentsUtil_copyToView_rdh
/** * Copy bytes of segments to output view. Note: this only copies the data; it does not include the * length. * * @param segments * source segments * @param offset * offset for segments * @param sizeInBytes * size in bytes * @param target * target output view */ public static void copyToView(MemorySegment[] segments, int offset, int sizeInBytes, DataOutputView target) throws IOException { for (MemorySegment sourceSegment : segments) { int curSegRemain = sourceSegment.size() - offset; if (curSegRemain > 0) { int copySize = Math.min(curSegRemain, sizeInBytes); byte[] bytes = allocateReuseBytes(copySize); sourceSegment.get(offset, bytes, 0, copySize); target.write(bytes, 0, copySize); sizeInBytes -= copySize; offset = 0; } else { offset -= sourceSegment.size(); } if (sizeInBytes == 0) { return; } } if (sizeInBytes != 0) { throw new RuntimeException(("Copy did not finish, this should be a bug, " + "the remaining length is: ") + sizeInBytes); } }
3.26
flink_SegmentsUtil_setDouble_rdh
/** * set double from segments. * * @param segments * target segments. * @param offset * value offset. */ public static void setDouble(MemorySegment[] segments, int offset, double value) { if (inFirstSegment(segments, offset, 8)) { segments[0].putDouble(offset, value);} else { setDoubleMultiSegments(segments, offset, value);} }
3.26
flink_SegmentsUtil_getBytes_rdh
/** * The returned bytes may not be a copy; if a copy is required, use copyTo. */ public static byte[] getBytes(MemorySegment[] segments, int baseOffset, int sizeInBytes) { // avoid copy if `base` is `byte[]` if (segments.length == 1) { byte[] heapMemory = segments[0].getHeapMemory(); if (((baseOffset == 0) && (heapMemory != null)) && (heapMemory.length == sizeInBytes)) { return heapMemory; } else { byte[] bytes = new byte[sizeInBytes]; segments[0].get(baseOffset, bytes, 0, sizeInBytes); return bytes; } } else { byte[] bytes = new byte[sizeInBytes]; copyMultiSegmentsToBytes(segments, baseOffset, bytes, 0, sizeInBytes); return bytes; } }
3.26
flink_SegmentsUtil_find_rdh
/** * Finds segments2 within segments1. * * @param segments1 * the segments to search in. * @param segments2 * the sub-segments to search for. * @return The offset at which segments2 was found, or -1 if not found. */ public static int find(MemorySegment[] segments1, int offset1, int numBytes1, MemorySegment[] segments2, int offset2, int numBytes2) { if (numBytes2 == 0) { // quick way 1. return offset1; } if (inFirstSegment(segments1, offset1, numBytes1) && inFirstSegment(segments2, offset2, numBytes2)) { byte first = segments2[0].get(offset2); int end = (numBytes1 - numBytes2) + offset1; for (int i = offset1; i <= end; i++) { // quick way 2: equal first byte. if ((segments1[0].get(i) == first) && segments1[0].equalTo(segments2[0], i, offset2, numBytes2)) { return i; } } return -1; } else { return findInMultiSegments(segments1, offset1, numBytes1, segments2, offset2, numBytes2); } }
3.26
flink_SegmentsUtil_getInt_rdh
/** * get int from segments. * * @param segments * target segments. * @param offset * value offset. */ public static int getInt(MemorySegment[] segments, int offset) {if (inFirstSegment(segments, offset, 4)) { return segments[0].getInt(offset); } else { return getIntMultiSegments(segments, offset); } }
3.26
flink_SegmentsUtil_bitSet_rdh
/** * set bit from segments. * * @param segments * target segments. * @param baseOffset * bits base offset. * @param index * bit index from base offset. */ public static void bitSet(MemorySegment[] segments, int baseOffset, int index) { if (segments.length == 1) { int offset = baseOffset + byteIndex(index); MemorySegment segment = segments[0]; byte current = segment.get(offset); current |= 1 << (index & BIT_BYTE_INDEX_MASK); segment.put(offset, current); } else { bitSetMultiSegments(segments, baseOffset, index);} }
3.26
flink_SegmentsUtil_copyToUnsafe_rdh
/** * Copy segments to target unsafe pointer. * * @param segments * Source segments. * @param offset * The position where the bytes are started to be read from these memory segments. * @param target * The unsafe memory to copy the bytes to. * @param pointer * The position in the target unsafe memory to copy the chunk to. * @param numBytes * the number bytes to copy. */ public static void copyToUnsafe(MemorySegment[] segments, int offset, Object target, int pointer, int numBytes) {if (inFirstSegment(segments, offset, numBytes)) { segments[0].copyToUnsafe(offset, target, pointer, numBytes); } else { copyMultiSegmentsToUnsafe(segments, offset, target, pointer, numBytes); } }
3.26
flink_SegmentsUtil_copyToBytes_rdh
/** * Copy segments to target byte[]. * * @param segments * Source segments. * @param offset * Source segments offset. * @param bytes * target byte[]. * @param bytesOffset * target byte[] offset. * @param numBytes * the number bytes to copy. */ public static byte[] copyToBytes(MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) { if (inFirstSegment(segments, offset, numBytes)) { segments[0].get(offset, bytes, bytesOffset, numBytes); } else { copyMultiSegmentsToBytes(segments, offset, bytes, bytesOffset, numBytes); } return bytes; }
3.26
flink_SegmentsUtil_setByte_rdh
/** * set byte from segments. * * @param segments * target segments. * @param offset * value offset. */ public static void setByte(MemorySegment[] segments, int offset, byte value) { if (inFirstSegment(segments, offset, 1)) { segments[0].put(offset, value); } else { setByteMultiSegments(segments, offset, value); } }
3.26
flink_SegmentsUtil_bitGet_rdh
/** * read bit from segments. * * @param segments * target segments. * @param baseOffset * bits base offset. * @param index * bit index from base offset. */public static boolean bitGet(MemorySegment[] segments, int baseOffset, int index) { int offset = baseOffset + byteIndex(index); byte current = getByte(segments, offset); return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0; }
3.26
flink_SegmentsUtil_setFloat_rdh
/** * set float from segments. * * @param segments * target segments. * @param offset * value offset. */ public static void setFloat(MemorySegment[] segments, int offset, float value) { if (inFirstSegment(segments, offset, 4)) { segments[0].putFloat(offset, value); } else {setFloatMultiSegments(segments, offset, value); } }
3.26
flink_SegmentsUtil_hash_rdh
/** * hash segments to int. * * @param segments * Source segments. * @param offset * Source segments offset. * @param numBytes * the number bytes to hash. */ public static int hash(MemorySegment[] segments, int offset, int numBytes) { if (inFirstSegment(segments, offset, numBytes)) {return MurmurHashUtil.hashBytes(segments[0], offset, numBytes); } else { return hashMultiSeg(segments, offset, numBytes); } }
3.26
flink_SegmentsUtil_getLong_rdh
/** * get long from segments. * * @param segments * target segments. * @param offset * value offset. */ public static long getLong(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 8)) { return segments[0].getLong(offset); } else { return getLongMultiSegments(segments, offset); } }
3.26
flink_SegmentsUtil_m0_rdh
/** * Checks two memory segment regions for equality. * * @param segments1 * Segments 1 * @param offset1 * Offset of segments1 at which to start comparing * @param segments2 * Segments 2 * @param offset2 * Offset of segments2 at which to start comparing * @param len * Length of the compared memory region * @return true if equal, false otherwise */ public static boolean m0(MemorySegment[] segments1, int offset1, MemorySegment[] segments2, int offset2, int len) { if (inFirstSegment(segments1, offset1, len) && inFirstSegment(segments2, offset2, len)) { return segments1[0].equalTo(segments2[0], offset1, offset2, len); } else { return equalsMultiSegments(segments1, offset1, segments2, offset2, len); } }
3.26
flink_SegmentsUtil_getBoolean_rdh
/** * get boolean from segments. * * @param segments * target segments. * @param offset * value offset. */ public static boolean getBoolean(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 1)) { return segments[0].getBoolean(offset); } else { return getBooleanMultiSegments(segments, offset); } }
3.26
flink_SegmentsUtil_getDouble_rdh
/** * get double from segments. * * @param segments * target segments. * @param offset * value offset. */public static double getDouble(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 8)) { return segments[0].getDouble(offset); } else { return getDoubleMultiSegments(segments, offset); } }
3.26
flink_SegmentsUtil_setShort_rdh
/** * set short from segments. * * @param segments * target segments. * @param offset * value offset. */ public static void setShort(MemorySegment[] segments, int offset, short value) { if (inFirstSegment(segments, offset, 2)) { segments[0].putShort(offset, value); } else { setShortMultiSegments(segments, offset, value); } }
3.26
flink_SegmentsUtil_m1_rdh
/** * read bit. * * @param segment * target segment. * @param baseOffset * bits base offset. * @param index * bit index from base offset. */ public static boolean m1(MemorySegment segment, int baseOffset, int index) { int offset = baseOffset + byteIndex(index); byte current = segment.get(offset); return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0; }
3.26
flink_SegmentsUtil_allocateReuseBytes_rdh
/** * Allocates bytes that are only for temporary use; the result should not be stored anywhere else. * A {@link ThreadLocal} is used to reuse bytes and avoid the overhead of byte[] allocation and GC. * * <p>If a method can only accept a byte[] instead of a MemorySegment[] parameter, we can allocate * reusable bytes here, copy the MemorySegment data into the byte[], and then call the method, e.g. * for String deserialization. */ public static byte[] allocateReuseBytes(int length) { byte[] bytes = BYTES_LOCAL.get(); if (bytes == null) { if (length <= MAX_BYTES_LENGTH) { bytes = new byte[MAX_BYTES_LENGTH]; BYTES_LOCAL.set(bytes); } else { bytes = new byte[length]; } } else if (bytes.length < length) { bytes = new byte[length]; } return bytes; }
3.26
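As a hedged illustration of the reuse pattern the Javadoc above describes (String deserialization being the named use case), the sketch below copies segment bytes into the thread-local scratch buffer and decodes them. SegmentsUtil and MemorySegmentFactory are the classes appearing in these snippets; their exact packages vary across Flink versions, so the imports are assumptions.

    import java.nio.charset.StandardCharsets;
    import org.apache.flink.core.memory.MemorySegment;
    import org.apache.flink.core.memory.MemorySegmentFactory;

    public class ReuseBytesSketch {
        public static void main(String[] args) {
            // a UTF-8 payload stored across two small heap segments
            byte[] payload = "segments".getBytes(StandardCharsets.UTF_8);
            MemorySegment[] segments = {
                MemorySegmentFactory.wrap(new byte[5]), MemorySegmentFactory.wrap(new byte[5])
            };
            SegmentsUtil.copyFromBytes(segments, 0, payload, 0, payload.length);

            // borrow the thread-local scratch buffer instead of allocating a fresh byte[]
            byte[] reuse = SegmentsUtil.allocateReuseBytes(payload.length);
            SegmentsUtil.copyToBytes(segments, 0, reuse, 0, payload.length);
            System.out.println(new String(reuse, 0, payload.length, StandardCharsets.UTF_8)); // segments
        }
    }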
flink_SegmentsUtil_setLong_rdh
/** * set long from segments. * * @param segments * target segments. * @param offset * value offset. */public static void setLong(MemorySegment[] segments, int offset, long value) { if (inFirstSegment(segments, offset, 8)) { segments[0].putLong(offset, value); } else { setLongMultiSegments(segments, offset, value); }}
3.26
flink_SegmentsUtil_setInt_rdh
/** * set int from segments. * * @param segments * target segments. * @param offset * value offset. */ public static void setInt(MemorySegment[] segments, int offset, int value) { if (inFirstSegment(segments, offset, 4)) { segments[0].putInt(offset, value); } else { setIntMultiSegments(segments, offset, value); } }
3.26
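To make the fast-path/slow-path split concrete: with two 8-byte segments, an int written at offset 2 fits entirely in the first segment, while one written at offset 6 straddles the boundary and goes through the *MultiSegments variants (the inFirstSegment check used by all of these helpers appears two rows below). Same import assumptions as in the previous sketch.

    import org.apache.flink.core.memory.MemorySegment;
    import org.apache.flink.core.memory.MemorySegmentFactory;

    public class CrossSegmentInts {
        public static void main(String[] args) {
            MemorySegment[] segments = {
                MemorySegmentFactory.wrap(new byte[8]), MemorySegmentFactory.wrap(new byte[8])
            };
            SegmentsUtil.setInt(segments, 2, 123);               // 2 + 4 <= 8: single-segment fast path
            SegmentsUtil.setInt(segments, 6, Integer.MIN_VALUE); // 6 + 4 > 8: spans both segments

            System.out.println(SegmentsUtil.getInt(segments, 2)); // 123
            System.out.println(SegmentsUtil.getInt(segments, 6)); // -2147483648
        }
    }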
flink_SegmentsUtil_inFirstSegment_rdh
/** * Returns whether the requested range lies entirely within the first MemorySegment, in which case * the quick single-segment path can be used. */ private static boolean inFirstSegment(MemorySegment[] segments, int offset, int numBytes) { return (numBytes + offset) <= segments[0].size(); }
3.26
flink_SegmentsUtil_bitUnSet_rdh
/** * unset bit from segments. * * @param segments * target segments. * @param baseOffset * bits base offset. * @param index * bit index from base offset. */ public static void bitUnSet(MemorySegment[] segments, int baseOffset, int index) {if (segments.length == 1) { MemorySegment segment = segments[0]; int offset = baseOffset + byteIndex(index); byte current = segment.get(offset); current &= ~(1 << (index & BIT_BYTE_INDEX_MASK)); segment.put(offset, current); } else { bitUnSetMultiSegments(segments, baseOffset, index); } }
3.26
flink_SegmentsUtil_setBoolean_rdh
/** * set boolean from segments. * * @param segments * target segments. * @param offset * value offset. */ public static void setBoolean(MemorySegment[] segments, int offset, boolean value) { if (inFirstSegment(segments, offset, 1)) { segments[0].putBoolean(offset, value); } else { setBooleanMultiSegments(segments, offset, value); } }
3.26
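A similar sketch for the bit helpers (bitSet, bitGet, bitUnSet): index addresses a single bit relative to baseOffset, with byteIndex picking the byte and BIT_BYTE_INDEX_MASK picking the bit inside it, e.g. for a null bitmap. The usage and imports are assumptions, as above.

    import org.apache.flink.core.memory.MemorySegment;
    import org.apache.flink.core.memory.MemorySegmentFactory;

    public class BitmapSketch {
        public static void main(String[] args) {
            MemorySegment[] segments = { MemorySegmentFactory.wrap(new byte[4]) };

            SegmentsUtil.bitSet(segments, 0, 3);   // set bit 3 (byte 0, bit 3)
            SegmentsUtil.bitSet(segments, 0, 17);  // set bit 17 (byte 2, bit 1)

            System.out.println(SegmentsUtil.bitGet(segments, 0, 3));   // true
            System.out.println(SegmentsUtil.bitGet(segments, 0, 17));  // true
            System.out.println(SegmentsUtil.bitGet(segments, 0, 4));   // false

            SegmentsUtil.bitUnSet(segments, 0, 3); // clear bit 3 again
            System.out.println(SegmentsUtil.bitGet(segments, 0, 3));   // false
        }
    }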
flink_PartitionOperator_getCustomPartitioner_rdh
// -------------------------------------------------------------------------------------------- // Properties // -------------------------------------------------------------------------------------------- /** * Gets the custom partitioner from this partitioning. * * @return The custom partitioner. */ @Internalpublic Partitioner<?> getCustomPartitioner() { return customPartitioner; }
3.26
flink_PartitionOperator_translateToDataFlow_rdh
// -------------------------------------------------------------------------------------------- // Translation // -------------------------------------------------------------------------------------------- protected SingleInputOperator<?, T, ?> translateToDataFlow(Operator<T> input) { String name = "Partition at " + partitionLocationName; // distinguish between partition types if (pMethod == PartitionMethod.REBALANCE) { UnaryOperatorInformation<T, T> operatorInfo = new UnaryOperatorInformation<>(getType(), getType()); PartitionOperatorBase<T> rebalancedInput = new PartitionOperatorBase<>(operatorInfo, pMethod, name); rebalancedInput.setInput(input); rebalancedInput.setParallelism(getParallelism()); return rebalancedInput; } else if (((pMethod == PartitionMethod.HASH) || (pMethod == PartitionMethod.CUSTOM)) || (pMethod == PartitionMethod.RANGE)) { if (pKeys instanceof Keys.ExpressionKeys) { int[] logicalKeyPositions = pKeys.computeLogicalKeyPositions(); UnaryOperatorInformation<T, T> operatorInfo = new UnaryOperatorInformation<>(getType(), getType()); PartitionOperatorBase<T> v5 = new PartitionOperatorBase<>(operatorInfo, pMethod, logicalKeyPositions, name); v5.setInput(input); v5.setParallelism(getParallelism()); v5.setDistribution(f0); v5.setCustomPartitioner(customPartitioner);v5.setOrdering(computeOrdering(pKeys, orders)); return v5; } else if (pKeys instanceof Keys.SelectorFunctionKeys) { @SuppressWarnings("unchecked") Keys.SelectorFunctionKeys<T, ?> selectorKeys = ((Keys.SelectorFunctionKeys<T, ?>) (pKeys)); return translateSelectorFunctionPartitioner(selectorKeys, pMethod, name, input, getParallelism(), customPartitioner, orders); } else { throw new UnsupportedOperationException("Unrecognized key type."); } } else { throw new UnsupportedOperationException("Unsupported partitioning method: " + pMethod.name()); } }
3.26
flink_PartitionOperator_withOrders_rdh
/** * Sets the order of keys for range partitioning. NOTE: Only valid for {@link PartitionMethod#RANGE}. * * @param orders * array of orders for each specified partition key * @return The PartitionOperator with properly set orders for the given keys */ @PublicEvolving public PartitionOperator<T> withOrders(Order... orders) { Preconditions.checkState(pMethod == PartitionMethod.RANGE, "Orders cannot be applied for %s partition " + "method", pMethod); Preconditions.checkArgument(pKeys.getOriginalKeyFieldTypes().length == orders.length, "The number of key " + "fields and orders should be the same."); this.orders = orders; return this; }
3.26
flink_SkipListKeyComparator_compareNamespaceAndNode_rdh
/** * Compares the namespace in the memory segment with the namespace in the node. Returns a * negative integer, zero, or a positive integer as the given namespace is less than, equal to, or * greater than the namespace stored in the node. * * @param namespaceSegment * memory segment to store the namespace. * @param namespaceOffset * offset of namespace in the memory segment. * @param namespaceLen * length of namespace. * @param nodeSegment * memory segment to store the node key. * @param nodeKeyOffset * offset of node key in the memory segment. * @return An integer result of the comparison. */ static int compareNamespaceAndNode(MemorySegment namespaceSegment, int namespaceOffset, int namespaceLen, MemorySegment nodeSegment, int nodeKeyOffset) { int nodeNamespaceLen = nodeSegment.getInt(nodeKeyOffset); return namespaceSegment.compare(nodeSegment, namespaceOffset, nodeKeyOffset + Integer.BYTES, namespaceLen, nodeNamespaceLen); }
3.26
flink_SkipListKeyComparator_compareTo_rdh
/** * Compares for order. Returns a negative integer, zero, or a positive integer as the first node * is less than, equal to, or greater than the second. * * @param left * left skip list key's ByteBuffer * @param leftOffset * left skip list key's ByteBuffer's offset * @param right * right skip list key's ByteBuffer * @param rightOffset * right skip list key's ByteBuffer's offset * @return An integer result of the comparison. */ static int compareTo(MemorySegment left, int leftOffset, MemorySegment right, int rightOffset) { // compare namespace int leftNamespaceLen = left.getInt(leftOffset); int rightNamespaceLen = right.getInt(rightOffset); int c = left.compare(right, leftOffset + Integer.BYTES, rightOffset + Integer.BYTES, leftNamespaceLen, rightNamespaceLen); if (c != 0) { return c; } // compare key int leftKeyOffset = (leftOffset + Integer.BYTES) + leftNamespaceLen; int rightKeyOffset = (rightOffset + Integer.BYTES) + rightNamespaceLen; int leftKeyLen = left.getInt(leftKeyOffset); int rightKeyLen = right.getInt(rightKeyOffset); return left.compare(right, leftKeyOffset + Integer.BYTES, rightKeyOffset + Integer.BYTES, leftKeyLen, rightKeyLen); }
3.26
flink_RocksDBProperty_getConfigKey_rdh
/** * * @return key for enabling metric using {@link org.apache.flink.configuration.Configuration}. */ public String getConfigKey() { return String.format(CONFIG_KEY_FORMAT, property); }
3.26
flink_RocksDBProperty_getRocksDBProperty_rdh
/** * * @return property string that can be used to query {@link RocksDB#getLongProperty(ColumnFamilyHandle, String)}. */ public String getRocksDBProperty() { return String.format(ROCKS_DB_PROPERTY_FORMAT, property); }
3.26
flink_BinaryInputFormat_getCurrentState_rdh
// -------------------------------------------------------------------------------------------- // Checkpointing // -------------------------------------------------------------------------------------------- @PublicEvolving @Override public Tuple2<Long, Long> getCurrentState() throws IOException { if (this.blockBasedInput == null) { throw new RuntimeException("You must have forgotten to call open() on your input format."); } return // the last read index in the block // the number of records read new Tuple2<>(this.blockBasedInput.getCurrBlockPos(), this.readRecords); }
3.26
flink_BinaryInputFormat_createStatistics_rdh
/** * Fill in the statistics. The last modification time and the total input size are prefilled. * * @param files * The files that are associated with this block input format. * @param stats * The pre-filled statistics. */ protected SequentialStatistics createStatistics(List<FileStatus> files, FileBaseStatistics stats) throws IOException { if (files.isEmpty()) { return null; } BlockInfo blockInfo = new BlockInfo(); long totalCount = 0; for (FileStatus file : files) { // invalid file if (file.getLen() < blockInfo.getInfoSize()) { continue; } FileSystem fs = file.getPath().getFileSystem(); try (FSDataInputStream fdis = fs.open(file.getPath(), blockInfo.getInfoSize())) { fdis.seek(file.getLen() - blockInfo.getInfoSize()); blockInfo.read(new DataInputViewStreamWrapper(fdis)); totalCount += blockInfo.getAccumulatedRecordCount(); } } final float avgWidth = (totalCount == 0) ? 0 : ((float) (stats.getTotalInputSize())) / totalCount; return new SequentialStatistics(stats.getLastModificationTime(), stats.getTotalInputSize(), avgWidth, totalCount); }
3.26
flink_ReusingBuildFirstReOpenableHashJoinIterator_reopenProbe_rdh
/** * Set new input for probe side * * @throws IOException */ public void reopenProbe(MutableObjectIterator<V2> probeInput) throws IOException { reopenHashTable.reopenProbe(probeInput); }
3.26
flink_TimeWindow_getEnd_rdh
/** * Gets the end timestamp of this window. The end timestamp is exclusive, meaning it is the * first timestamp that does not belong to this window any more. * * @return The exclusive end timestamp of this window. */ public long getEnd() { return f0; }
3.26
flink_TimeWindow_snapshotConfiguration_rdh
// ------------------------------------------------------------------------ @Override public TypeSerializerSnapshot<TimeWindow> snapshotConfiguration() { return new TimeWindow.Serializer.TimeWindowSerializerSnapshot(); }
3.26
flink_TimeWindow_modInverse_rdh
/** * Compute the inverse of (odd) x mod 2^32. */ private int modInverse(int x) { // Cube gives inverse mod 2^4, as x^4 == 1 (mod 2^4) for all odd x. int v1 = (x * x) * x; // Newton iteration doubles correct bits at each step. v1 *= 2 - (x * v1); v1 *= 2 - (x * v1); v1 *= 2 - (x * v1); return v1;}
3.26
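The modInverse comment above relies on two facts: x^4 == 1 (mod 2^4) for every odd x, and each Newton step y ← y * (2 - x * y) doubles the number of correct low-order bits (4 → 8 → 16 → 32). A standalone check, plain Java and independent of Flink, that the result really is the multiplicative inverse modulo 2^32:

    public class ModInverseCheck {
        // same iteration as TimeWindow.modInverse; only valid for odd x
        static int modInverse(int x) {
            int y = x * x * x; // correct to 4 bits
            y *= 2 - x * y;    // 8 bits
            y *= 2 - x * y;    // 16 bits
            y *= 2 - x * y;    // 32 bits
            return y;
        }

        public static void main(String[] args) {
            for (int x : new int[] {1, 3, 2147483647, -12345}) {
                // int multiplication wraps mod 2^32, so the product must be exactly 1
                System.out.println(x + " * " + modInverse(x) + " = " + (x * modInverse(x)));
            }
        }
    }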
flink_TimeWindow_intersects_rdh
/** * Returns {@code true} if this window intersects the given window. */ public boolean intersects(TimeWindow other) { return (this.start <= other.f0) && (this.f0 >= other.start); }
3.26
flink_TimeWindow_maxTimestamp_rdh
/** * Gets the largest timestamp that still belongs to this window. * * <p>This timestamp is identical to {@code getEnd() - 1}. * * @return The largest timestamp that still belongs to this window. * @see #getEnd() */ @Override public long maxTimestamp() { return f0 - 1; }
3.26
flink_TimeWindow_cover_rdh
/** * Returns the minimal window that covers both this window and the given window. */ public TimeWindow cover(TimeWindow other) { return new TimeWindow(Math.min(start, other.start), Math.max(f0, other.f0)); }
3.26
flink_TimeWindow_m0_rdh
/** * Gets the starting timestamp of the window. This is the first timestamp that belongs to this * window. * * @return The starting timestamp of this window. */public long m0() { return start; }
3.26
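Tying the TimeWindow accessors together (the obfuscated f0 is the end timestamp and m0() the start accessor; the TimeWindow(start, end) constructor is the one used inside cover(), and the import location is an assumption):

    public class TimeWindowSketch {
        public static void main(String[] args) {
            TimeWindow a = new TimeWindow(0, 10); // covers [0, 10), so maxTimestamp() is 9
            TimeWindow b = new TimeWindow(5, 15); // covers [5, 15)

            System.out.println(a.maxTimestamp()); // 9, i.e. getEnd() - 1
            System.out.println(a.intersects(b));  // true, the ranges overlap

            TimeWindow c = a.cover(b);            // minimal covering window [0, 15)
            System.out.println(c.m0() + ".." + c.getEnd()); // 0..15
        }
    }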
flink_OperatorTransformation_bootstrapWith_rdh
/** * Create a new {@link OneInputStateTransformation} from a {@link DataStream}. * * @param stream * A data stream of elements. * @param <T> * The type of the input. * @return A {@link OneInputStateTransformation}. */ public static <T> OneInputStateTransformation<T> bootstrapWith(DataStream<T> stream) { return new OneInputStateTransformation<>(stream); }
3.26
flink_CumulativeWindowAssigner_of_rdh
// ------------------------------------------------------------------------ // Utilities // ------------------------------------------------------------------------ /** * Creates a new {@link CumulativeWindowAssigner} that assigns elements to cumulative time * windows based on the element timestamp. * * @param maxSize * The max size of the generated windows. * @param step * The step interval by which the size of the generated windows increases. * @return The time policy. */ public static CumulativeWindowAssigner of(Duration maxSize, Duration step) { return new CumulativeWindowAssigner(maxSize.toMillis(), step.toMillis(), 0, true); }
3.26
flink_ReflectionUtil_getFullTemplateType_rdh
/** * Extract the full type information from the given type. * * @param type * to be analyzed * @return Full type information describing the given type */ public static FullTypeInfo getFullTemplateType(Type type) { if (type instanceof ParameterizedType) { ParameterizedType parameterizedType = ((ParameterizedType) (type)); FullTypeInfo[] templateTypeInfos = new FullTypeInfo[parameterizedType.getActualTypeArguments().length]; for (int i = 0; i < parameterizedType.getActualTypeArguments().length; i++) { templateTypeInfos[i] = getFullTemplateType(parameterizedType.getActualTypeArguments()[i]); } return new FullTypeInfo(((Class<?>) (parameterizedType.getRawType())), templateTypeInfos); } else { return new FullTypeInfo(((Class<?>) (type)), null); } }
3.26
flink_MessageParameters_resolveUrl_rdh
/** * Resolves the given URL (e.g "jobs/:jobid") using the given path/query parameters. * * <p>This method will fail with an {@link IllegalStateException} if any mandatory parameter was * not resolved. * * <p>Unresolved optional parameters will be ignored. * * @param genericUrl * URL to resolve * @param parameters * message parameters parameters * @return resolved url, e.g "/jobs/1234?state=running" * @throws IllegalStateException * if any mandatory parameter was not resolved */ public static String resolveUrl(String genericUrl, MessageParameters parameters) { Preconditions.checkState(parameters.isResolved(), "Not all mandatory message parameters were resolved."); StringBuilder path = new StringBuilder(genericUrl); StringBuilder v1 = new StringBuilder(); for (MessageParameter<?> pathParameter : parameters.getPathParameters()) { if (pathParameter.isResolved()) { int start = path.indexOf(':' + pathParameter.getKey()); final String pathValue = Preconditions.checkNotNull(pathParameter.getValueAsString()); // only replace path parameters if they are present if (start != (-1)) { path.replace(start, (start + pathParameter.getKey().length()) + 1, pathValue); } } } boolean isFirstQueryParameter = true; for (MessageQueryParameter<?> queryParameter : parameters.getQueryParameters()) { if (queryParameter.isResolved()) { if (isFirstQueryParameter) { v1.append('?'); isFirstQueryParameter = false; } else { v1.append('&'); } v1.append(queryParameter.getKey()); v1.append('='); v1.append(queryParameter.getValueAsString()); } }path.append(v1); return path.toString(); }
3.26
flink_MessageParameters_isResolved_rdh
/** * Returns whether all mandatory parameters have been resolved. * * @return true, if all mandatory parameters have been resolved, false otherwise */ public final boolean isResolved() { return getPathParameters().stream().filter(MessageParameter::isMandatory).allMatch(MessageParameter::isResolved) && getQueryParameters().stream().filter(MessageParameter::isMandatory).allMatch(MessageParameter::isResolved); }
3.26
flink_StreamTableEnvironment_create_rdh
/** * Creates a table environment that is the entry point and central context for creating Table * and SQL API programs that integrate with the Java-specific {@link DataStream} API. * * <p>It is unified for bounded and unbounded data processing. * * <p>A stream table environment is responsible for: * * <ul> * <li>Convert a {@link DataStream} into {@link Table} and vice-versa. * <li>Connecting to external systems. * <li>Registering and retrieving {@link Table}s and other meta objects from a catalog. * <li>Executing SQL statements. * <li>Offering further configuration options. * </ul> * * <p>Note: If you don't intend to use the {@link DataStream} API, {@link TableEnvironment} is * meant for pure table programs. * * @param executionEnvironment * The Java {@link StreamExecutionEnvironment} of the {@link TableEnvironment}. * @param settings * The environment settings used to instantiate the {@link TableEnvironment}. */ static StreamTableEnvironment create(StreamExecutionEnvironment executionEnvironment, EnvironmentSettings settings) { return StreamTableEnvironmentImpl.create(executionEnvironment, settings); } /** * Registers a {@link TableFunction} under a unique name in the TableEnvironment's catalog. * Registered functions can be referenced in Table API and SQL queries. * * @param name * The name under which the function is registered. * @param tableFunction * The TableFunction to register. * @param <T> * The type of the output row. * @deprecated Use {@link #createTemporarySystemFunction(String, UserDefinedFunction)} instead. Please note that the new method also uses the new type system and reflective extraction logic. It might be necessary to update the function implementation as well. See the documentation of {@link TableFunction}
3.26
flink_SqlNodeConverters_convertSqlNode_rdh
/** * Convert the given validated SqlNode into Operation if there is a registered converter for the * node. */ @SuppressWarnings({ "unchecked", "rawtypes" }) public static Optional<Operation> convertSqlNode(SqlNode validatedSqlNode, ConvertContext context) { // match by class first SqlNodeConverter classConverter = CLASS_CONVERTERS.get(validatedSqlNode.getClass()); if (classConverter != null) { return Optional.of(classConverter.convertSqlNode(validatedSqlNode, context)); } // match by kind if no matching items in class converters SqlNodeConverter sqlKindConverter = f0.get(validatedSqlNode.getKind()); if (sqlKindConverter != null) { return Optional.of(sqlKindConverter.convertSqlNode(validatedSqlNode, context)); } else { return Optional.empty(); } }
3.26
flink_FileChannelMemoryMappedBoundedData_createWithRegionSize_rdh
/** * Creates a new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given * path. Each mapped region (= ByteBuffer) will be of the given size. */ public static FileChannelMemoryMappedBoundedData createWithRegionSize(Path memMappedFilePath, int regionSize) throws IOException { checkNotNull(memMappedFilePath, "memMappedFilePath"); checkArgument(regionSize > 0, "region size must be > 0"); final FileChannel fileChannel = FileChannel.open(memMappedFilePath, StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); return new FileChannelMemoryMappedBoundedData(memMappedFilePath, fileChannel, regionSize); }
3.26
flink_FileChannelMemoryMappedBoundedData_finishWrite_rdh
/** * Finishes the current region and prevents further writes. After calling this method, further * calls to {@link #writeBuffer(Buffer)} will fail. */ @Override public void finishWrite() throws IOException { m0(); fileChannel.close(); }
3.26
flink_FileChannelMemoryMappedBoundedData_create_rdh
// ------------------------------------------------------------------------ // Factories // ------------------------------------------------------------------------ /** * Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given * path. */ public static FileChannelMemoryMappedBoundedData create(Path memMappedFilePath) throws IOException { return createWithRegionSize(memMappedFilePath, Integer.MAX_VALUE); }
3.26
flink_FileChannelMemoryMappedBoundedData_close_rdh
/** * Closes the file and unmaps all memory mapped regions. After calling this method, access to * any ByteBuffer obtained from this instance will cause a segmentation fault. */ public void close() throws IOException { IOUtils.closeQuietly(fileChannel); for (ByteBuffer bb : memoryMappedRegions) { PlatformDependent.freeDirectBuffer(bb); } memoryMappedRegions.clear(); // To make this compatible with all versions of Windows, we must wait with // deleting the file until it is unmapped. // See also // https://stackoverflow.com/questions/11099295/file-flag-delete-on-close-and-memory-mapped-files/51649618#51649618 Files.delete(filePath); }
3.26
flink_SqlLikeUtils_sqlToRegexLike_rdh
/** * Translates a SQL LIKE pattern to Java regex pattern. */ static String sqlToRegexLike(String sqlPattern, char escapeChar) { int i; final int v9 = sqlPattern.length(); final StringBuilder javaPattern = new StringBuilder(v9 + v9); for (i = 0; i < v9; i++) { char c = sqlPattern.charAt(i); if (JAVA_REGEX_SPECIALS.indexOf(c) >= 0) { javaPattern.append('\\'); }if (c == escapeChar) { if (i == (sqlPattern.length() - 1)) { throw invalidEscapeSequence(sqlPattern, i); } char nextChar = sqlPattern.charAt(i + 1);if (((nextChar == '_') || (nextChar == '%')) || (nextChar == escapeChar)) { javaPattern.append(nextChar); i++; } else { throw invalidEscapeSequence(sqlPattern, i); } } else if (c == '_') { javaPattern.append('.'); } else if (c == '%') { javaPattern.append("(?s:.*)"); } else { javaPattern.append(c); } } return javaPattern.toString(); }
3.26
flink_SqlLikeUtils_like_rdh
/** * SQL {@code LIKE} function with escape. */ public static boolean like(String s, String pattern, String escape) { final String regex = sqlToRegexLike(pattern, escape); return Pattern.matches(regex, s); }
3.26
flink_SqlLikeUtils_m0_rdh
/** * Translates a SQL SIMILAR pattern to Java regex pattern, with optional escape string. */public static String m0(String sqlPattern, CharSequence escapeStr) { final char escapeChar; if (escapeStr != null) { if (escapeStr.length() != 1) { throw invalidEscapeCharacter(escapeStr.toString()); } escapeChar = escapeStr.charAt(0); } else { escapeChar = 0; } return sqlToRegexSimilar(sqlPattern, escapeChar); }
3.26
flink_SqlLikeUtils_sqlToRegexSimilar_rdh
/** * Translates SQL SIMILAR pattern to Java regex pattern. */ public static String sqlToRegexSimilar(String sqlPattern, char escapeChar) { similarEscapeRuleChecking(sqlPattern, escapeChar); boolean insideCharacterEnumeration = false; final StringBuilder javaPattern = new StringBuilder(sqlPattern.length() * 2); final int len = sqlPattern.length(); for (int i = 0; i < len; i++) { char c = sqlPattern.charAt(i); if (c == escapeChar) { if (i == (len - 1)) { // It should never reach here after the escape rule // checking. throw invalidEscapeSequence(sqlPattern, i); } char nextChar = sqlPattern.charAt(i + 1); if (SQL_SIMILAR_SPECIALS.indexOf(nextChar) >= 0) { // special character, use \ to replace the escape char. if (JAVA_REGEX_SPECIALS.indexOf(nextChar) >= 0) { javaPattern.append('\\'); } javaPattern.append(nextChar); } else if (nextChar == escapeChar) { javaPattern.append(nextChar); } else { // It should never reach here after the escape rule // checking. throw invalidEscapeSequence(sqlPattern, i); } i++;// we already process the next char. } else { switch (c) { case '_' : javaPattern.append('.'); break; case '%' :javaPattern.append("(?s:.*)"); break; case '[' : javaPattern.append('['); insideCharacterEnumeration = true; i = sqlSimilarRewriteCharEnumeration(sqlPattern, javaPattern, i, escapeChar); break; case ']' : if (!insideCharacterEnumeration) { throw invalidRegularExpression(sqlPattern, i); } insideCharacterEnumeration = false; javaPattern.append(']'); break; case '\\' : javaPattern.append("\\\\"); break; case '$' : // $ is special character in java regex, but regular in // SQL regex. javaPattern.append("\\$"); break; default : javaPattern.append(c); } } } if (insideCharacterEnumeration) { throw invalidRegularExpression(sqlPattern, len); } return javaPattern.toString();}
3.26
flink_SqlLikeUtils_similar_rdh
/** * SQL {@code SIMILAR} function with escape. */ public static boolean similar(String s, String pattern, String escape) {final String regex = m0(pattern, escape); return Pattern.matches(regex, s); }
3.26
flink_SqlLikeUtils_ilike_rdh
/** * SQL {@code ILIKE} function with escape. */ public static boolean ilike(String s, String patternStr, String escape) { final String regex = sqlToRegexLike(patternStr, escape); Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); Matcher matcher = pattern.matcher(s); return matcher.matches(); }
3.26
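A hedged usage sketch for the LIKE/SIMILAR helpers above, showing how '_' and '%' translate and how an escape character suppresses them ('#' here is an arbitrary escape chosen for the example; SqlLikeUtils is a Flink-internal class whose package is assumed):

    public class LikeSketch {
        public static void main(String[] args) {
            System.out.println(SqlLikeUtils.like("flink", "fl_nk", "#"));       // true: '_' matches one char
            System.out.println(SqlLikeUtils.like("flink", "fl%", "#"));         // true: '%' matches any run
            System.out.println(SqlLikeUtils.like("100%", "100#%", "#"));        // true: '#%' is a literal '%'
            System.out.println(SqlLikeUtils.like("100x", "100#%", "#"));        // false
            System.out.println(SqlLikeUtils.ilike("FLINK", "fl%", "#"));        // true: case-insensitive
            System.out.println(SqlLikeUtils.similar("abcccd", "ab(c)*d", "#")); // true: SIMILAR TO regex
        }
    }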
flink_FixedLengthRecordSorter_isEmpty_rdh
/** * Checks whether the buffer is empty. * * @return True, if no record is contained, false otherwise. */ @Override public boolean isEmpty() { return this.numRecords == 0; }
3.26
flink_FixedLengthRecordSorter_writeToOutput_rdh
/** * Writes a subset of the records in this buffer in their logical order to the given output. * * @param output * The output view to write the records to. * @param start * The logical start position of the subset. * @param num * The number of elements to write. * @throws IOException * Thrown, if an I/O exception occurred writing to the output view. */ @Override public void writeToOutput(final ChannelWriterOutputView output, final int start, int num) throws IOException { final TypeComparator<T> comparator = this.comparator; final TypeSerializer<T> serializer = this.serializer; T record = this.recordInstance; final SingleSegmentInputView inView = this.inView; final int recordsPerSegment = this.recordsPerSegment; int currentMemSeg = start / recordsPerSegment; int offset = (start % recordsPerSegment) * this.recordSize; while (num > 0) { final MemorySegment currentIndexSegment = this.sortBuffer.get(currentMemSeg++); inView.set(currentIndexSegment, offset); // check whether we have a full or partially full segment if ((num >= recordsPerSegment) && (offset == 0)) { // full segment for (int numInMemSeg = 0; numInMemSeg < recordsPerSegment; numInMemSeg++) { record = comparator.readWithKeyDenormalization(record, inView); serializer.serialize(record, output);} num -= recordsPerSegment; } else { // partially filled segment for (; (num > 0) && (offset <= this.lastEntryOffset); num-- , offset += this.recordSize) { record = comparator.readWithKeyDenormalization(record, inView); serializer.serialize(record, output); } } offset = 0; } }
3.26
flink_FixedLengthRecordSorter_getRecord_rdh
// ------------------------------------------------------------------------- // Retrieving and Writing // ------------------------------------------------------------------------- @Override public T getRecord(int logicalPosition) throws IOException { return getRecord(f0.createInstance(), logicalPosition); }
3.26
flink_FixedLengthRecordSorter_compare_rdh
// ------------------------------------------------------------------------- // Sorting // ------------------------------------------------------------------------- @Override public int compare(int i, int j) { final int segmentNumberI = i / this.recordsPerSegment; final int segmentOffsetI = (i % this.recordsPerSegment) * this.recordSize; final int segmentNumberJ = j / this.recordsPerSegment;final int segmentOffsetJ = (j % this.recordsPerSegment) * this.recordSize; return compare(segmentNumberI, segmentOffsetI, segmentNumberJ, segmentOffsetJ); }
3.26
flink_FixedLengthRecordSorter_getIterator_rdh
/** * Gets an iterator over all records in this buffer in their logical order. * * @return An iterator returning the records in their logical order. */ @Override public final MutableObjectIterator<T> getIterator() { final SingleSegmentInputView startIn = new SingleSegmentInputView(this.recordsPerSegment * this.recordSize);startIn.set(this.sortBuffer.get(0), 0);return new MutableObjectIterator<T>() {private final SingleSegmentInputView in = startIn; private final TypeComparator<T> comp = comparator; private final int numTotal = size(); private final int numPerSegment = recordsPerSegment; private int currentTotal = 0; private int currentInSegment = 0; private int currentSegmentIndex = 0; @Override public T next(T reuse) { if (this.currentTotal < this.numTotal) { if (this.currentInSegment >= this.numPerSegment) {this.currentInSegment = 0; this.currentSegmentIndex++; this.in.set(sortBuffer.get(this.currentSegmentIndex), 0);} this.currentTotal++; this.currentInSegment++; try { return this.comp.readWithKeyDenormalization(reuse, this.in); } catch (IOException ioe) { throw new RuntimeException(ioe); } } else { return null; } } @Override public T next() { if (this.currentTotal < this.numTotal) { if (this.currentInSegment >= this.numPerSegment) { this.currentInSegment = 0; this.currentSegmentIndex++; this.in.set(sortBuffer.get(this.currentSegmentIndex), 0); } this.currentTotal++; this.currentInSegment++; try { return this.comp.readWithKeyDenormalization(f0.createInstance(), this.in); } catch (IOException ioe) { throw new RuntimeException(ioe); } } else { return null; } } }; }
3.26
flink_FixedLengthRecordSorter_write_rdh
/** * Writes a given record to this sort buffer. The written record will be appended and take the * last logical position. * * @param record * The record to be written. * @return True, if the record was successfully written, false, if the sort buffer was full. * @throws IOException * Thrown, if an error occurred while serializing the record into the * buffers. */ @Override public boolean write(T record) throws IOException { // check whether we need a new memory segment for the sort index if (this.currentSortBufferOffset > this.lastEntryOffset) { if (memoryAvailable()) { this.currentSortBufferSegment = nextMemorySegment(); this.sortBuffer.add(this.currentSortBufferSegment); this.outView.set(this.currentSortBufferSegment); this.currentSortBufferOffset = 0; this.sortBufferBytes += this.segmentSize; } else { return false; } } // serialize the record into the data buffers try { this.comparator.writeWithKeyNormalization(record, this.outView);this.numRecords++; this.currentSortBufferOffset += this.recordSize;return true; } catch (EOFException eofex) { throw new IOException("Error: Serialization consumes more bytes than announced by the serializer."); } }
3.26
flink_FixedLengthRecordSorter_memoryAvailable_rdh
// ------------------------------------------------------------------------ // Access Utilities // ------------------------------------------------------------------------ private boolean memoryAvailable() { return !this.freeMemory.isEmpty(); }
3.26
flink_SlotPoolService_castInto_rdh
/** * Tries to cast this slot pool service into the given clazz. * * @param clazz * to cast the slot pool service into * @param <T> * type of clazz * @return {@link Optional#of} the target type if it can be cast; otherwise {@link Optional#empty()} */ default <T> Optional<T> castInto(Class<T> clazz) { if (clazz.isAssignableFrom(this.getClass())) { return Optional.of(clazz.cast(this)); } else { return Optional.empty(); } }
3.26
flink_SlotPoolService_notifyNotEnoughResourcesAvailable_rdh
/** * Notifies that not enough resources are available to fulfill the resource requirements. * * @param acquiredResources * the resources that have been acquired */ default void notifyNotEnoughResourcesAvailable(Collection<ResourceRequirement> acquiredResources) { }
3.26
flink_Trigger_canMerge_rdh
/** * Returns true if this trigger supports merging of trigger state and can therefore be used with * a {@link org.apache.flink.streaming.api.windowing.assigners.MergingWindowAssigner}. * * <p>If this returns {@code true} you must properly implement {@link #onMerge(Window, * OnMergeContext)} */ public boolean canMerge() { return false; } /** * Called when several windows have been merged into one window by the {@link org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}
3.26
flink_NonReusingKeyGroupedIterator_nextKey_rdh
/** * Moves the iterator to the next key. This method may skip any values that have not yet been * returned by the iterator created by the {@link #getValues()} method. Hence, if called * multiple times it "removes" key groups. * * @return true, if the input iterator has an other group of records with the same key. */ public boolean nextKey() throws IOException { if (lookahead != null) { // common case: whole value-iterator was consumed and a new key group is available. this.comparator.setReference(this.lookahead); this.valuesIterator.next = this.lookahead; this.lastKeyRecord = this.lookahead; this.lookahead = null; this.valuesIterator.iteratorAvailable = true; return true; } // first element, empty/done, or the values iterator was not entirely consumed if (this.done) { return false; } if (this.valuesIterator != null) { // values was not entirely consumed. move to the next key // Required if user code / reduce() method did not read the whole value iterator. E next; while (true) { if ((next = this.iterator.next()) != null) { if (!this.comparator.equalToReference(next)) { // the keys do not match, so we have a new group. store the current key this.comparator.setReference(next); this.valuesIterator.next = next; this.lastKeyRecord = next; this.valuesIterator.iteratorAvailable = true; return true; } } else {// input exhausted this.valuesIterator.next = null; this.valuesIterator = null; this.lastKeyRecord = null; this.done = true; return false; } } } else { // first element // get the next element E first = this.iterator.next(); if (first != null) { this.comparator.setReference(first); this.valuesIterator = new ValuesIterator(first); this.lastKeyRecord = first; return true; } else { // empty input, set everything null this.done = true; return false; } } }
3.26
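The nextKey() contract above is usually driven by a loop like the one sketched below; getValues() is the per-key iterator the Javadoc refers to. This is an illustrative shape only (the generic record type and the way the grouped iterator is obtained are assumptions):

    import java.io.IOException;
    import java.util.Iterator;

    public class GroupedIterationSketch {
        // process records group by group; any values not consumed for the current key
        // are skipped when nextKey() advances to the next group
        static <E> void forEachGroup(NonReusingKeyGroupedIterator<E> groups) throws IOException {
            while (groups.nextKey()) {
                for (Iterator<E> values = groups.getValues(); values.hasNext(); ) {
                    E record = values.next();
                    // ... per-group logic ...
                }
            }
        }
    }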
flink_JobMasterId_generate_rdh
/** * Generates a new random JobMasterId. */ public static JobMasterId generate() { return new JobMasterId(); }
3.26
flink_JobMasterId_toUUID_rdh
/** * Creates a UUID with the bits from this JobMasterId. */ public UUID toUUID() { return new UUID(getUpperPart(), getLowerPart()); }
3.26
flink_JobMasterId_fromUuidOrNull_rdh
/** * If the given uuid is null, this returns null, otherwise a JobMasterId that corresponds to the * UUID, via {@link #JobMasterId(UUID)}. */ public static JobMasterId fromUuidOrNull(@Nullable UUID uuid) { return uuid == null ? null : new JobMasterId(uuid); }
3.26
flink_TableConfigValidation_validateTimeZone_rdh
/** * Validates user configured time zone. */ public static void validateTimeZone(String zone) { boolean isValid; try { // We enforce a zone string that is compatible with both java.util.TimeZone and // java.time.ZoneId to avoid bugs. // In general, advertising either TZDB ID, GMT+xx:xx, or UTC is the best we can do. isValid = TimeZone.getTimeZone(zone).toZoneId().equals(ZoneId.of(zone)); } catch (Exception e) { isValid = false; } if (!isValid) { throw new ValidationException((("Invalid time zone. The value should be a Time Zone Database (TZDB) ID " + "such as 'America/Los_Angeles' to include daylight saving time. Fixed ") + "offsets are supported using 'GMT-03:00' or 'GMT+03:00'. Or use 'UTC' ") + "without time zone and daylight saving time."); } }
3.26
flink_ScopeFormats_fromConfig_rdh
// ------------------------------------------------------------------------ // Parsing from Config // ------------------------------------------------------------------------ /** * Creates the scope formats as defined in the given configuration. * * @param config * The configuration that defines the formats * @return The ScopeFormats parsed from the configuration */ public static ScopeFormats fromConfig(Configuration config) { String jmFormat = config.getString(MetricOptions.SCOPE_NAMING_JM); String jmJobFormat = config.getString(MetricOptions.SCOPE_NAMING_JM_JOB); String tmFormat = config.getString(MetricOptions.SCOPE_NAMING_TM);String tmJobFormat = config.getString(MetricOptions.SCOPE_NAMING_TM_JOB); String taskFormat = config.getString(MetricOptions.SCOPE_NAMING_TASK); String operatorFormat = config.getString(MetricOptions.SCOPE_NAMING_OPERATOR); String jmOperatorFormat = config.getString(MetricOptions.SCOPE_NAMING_JM_OPERATOR); return new ScopeFormats(jmFormat, jmJobFormat, tmFormat, tmJobFormat, taskFormat, operatorFormat, jmOperatorFormat); }
3.26
flink_ScopeFormats_getJobManagerFormat_rdh
// ------------------------------------------------------------------------ // Accessors // ------------------------------------------------------------------------ public JobManagerScopeFormat getJobManagerFormat() { return this.jobManagerFormat; }
3.26
flink_ZooKeeperStateHandleStore_m0_rdh
/** * Releases the lock from the node under the given ZooKeeper path. If no lock exists, then * nothing happens. * * @param pathInZooKeeper * Path describing the ZooKeeper node * @throws Exception * if the delete operation of the lock node fails */ @Override public void m0(String pathInZooKeeper) throws Exception { final String path = normalizePath(pathInZooKeeper); final String lockPath = getInstanceLockPath(path); try {deleteIfExists(lockPath); } catch (Exception e) { throw new Exception(("Could not release the lock: " + lockPath) + '.', e); } }
3.26
flink_ZooKeeperStateHandleStore_clearEntries_rdh
/** * Recursively deletes all children. * * @throws Exception * ZK errors */ @Override public void clearEntries() throws Exception { final String path = "/" + client.getNamespace(); LOG.info("Removing {} from ZooKeeper", path); ZKPaths.deleteChildren(client.getZookeeperClient().getZooKeeper(), path, true); }
3.26
flink_ZooKeeperStateHandleStore_getAndLock_rdh
/** * Gets the {@link RetrievableStateHandle} stored in the given ZooKeeper node and locks it. A * locked node cannot be removed by another {@link ZooKeeperStateHandleStore} instance as long * as this instance remains connected to ZooKeeper. * * @param pathInZooKeeper * Path to the ZooKeeper node which contains the state handle * @return The retrieved state handle from the specified ZooKeeper node * @throws IOException * Thrown if the method failed to deserialize the stored state handle * @throws Exception * Thrown if a ZooKeeper operation failed */ @Override public RetrievableStateHandle<T> getAndLock(String pathInZooKeeper) throws Exception {return get(pathInZooKeeper, true); }
3.26
flink_ZooKeeperStateHandleStore_normalizePath_rdh
/** * Makes sure that every path starts with a "/". * * @param path * Path to normalize * @return Normalized path such that it starts with a "/" */ private static String normalizePath(String path) { if (path.startsWith("/")) { return path; } else { return '/' + path; } }
3.26
flink_ZooKeeperStateHandleStore_setStateHandle_rdh
// this method is provided for the sole purpose of easier testing @VisibleForTesting protected void setStateHandle(String path, byte[] serializedStateHandle, int expectedVersion) throws Exception { // Replace state handle in ZooKeeper. We use idempotent set here to avoid a scenario, where // we retry an update, because we didn't receive a proper acknowledgement due to temporary // connection loss. Without idempotent flag this would result in a BadVersionException, // because the version on server no longer matches our expected version. With this flag, // when curator receives BadVersionException internally, it checks whether the content on // the server matches our intended update and its version is our expectedVersion + 1. client.setData().idempotent().withVersion(expectedVersion).forPath(path, serializedStateHandle); }
3.26
flink_ZooKeeperStateHandleStore_getRootLockPath_rdh
/** * Returns the sub-path for lock nodes of the corresponding node (referred to through the passed * {@code rootPath}). The returned sub-path collects the lock nodes for the {@code rootPath}'s * node. The {@code rootPath} is marked for deletion if the sub-path for lock nodes is deleted. */ @VisibleForTesting static String getRootLockPath(String rootPath) { return rootPath + "/locks"; }
3.26
flink_ZooKeeperStateHandleStore_addAndLock_rdh
/** * Creates a state handle, stores it in ZooKeeper and locks it. A locked node cannot be removed * by another {@link ZooKeeperStateHandleStore} instance as long as this instance remains * connected to ZooKeeper. * * <p><strong>Important</strong>: This will <em>not</em> store the actual state in ZooKeeper, * but create a state handle and store it in ZooKeeper. This level of indirection makes sure * that data in ZooKeeper is small. * * <p>The operation will fail if there is already a node under the given path. * * @param pathInZooKeeper * Destination path in ZooKeeper (expected to *not* exist yet) * @param state * State to be added * @return The Created {@link RetrievableStateHandle}. * @throws PossibleInconsistentStateException * if the write-to-ZooKeeper operation failed. This * indicates that it's not clear whether the new state was successfully written to ZooKeeper * or not. Proper error handling has to be applied on the caller's side. * @throws Exception * If a ZooKeeper or state handle operation fails */ @Override public RetrievableStateHandle<T> addAndLock(String pathInZooKeeper, T state) throws PossibleInconsistentStateException, Exception { checkNotNull(pathInZooKeeper, "Path in ZooKeeper"); checkNotNull(state, "State"); final String path = normalizePath(pathInZooKeeper); final Optional<Stat> maybeStat = getStat(path); if (maybeStat.isPresent()) { if (isNotMarkedForDeletion(maybeStat.get())) { throw new AlreadyExistException(String.format("ZooKeeper node %s already exists.", path)); } Preconditions.checkState(releaseAndTryRemove(path), "The state is marked for deletion and, therefore, should be deletable."); } final RetrievableStateHandle<T> storeHandle = storage.store(state); final byte[] serializedStoreHandle = serializeOrDiscard(storeHandle); try {writeStoreHandleTransactionally(path, serializedStoreHandle); return storeHandle;} catch (KeeperException.NodeExistsException e) { // Transactions are not idempotent in the curator version we're currently using, so it // is actually possible that we've re-tried a transaction that has already succeeded. // We've ensured that the node hasn't been present prior executing the transaction, so // we can assume that this is a result of the retry mechanism. return storeHandle; } catch (Exception e) { if (indicatesPossiblyInconsistentState(e)) { throw new PossibleInconsistentStateException(e);} // In case of any other failure, discard the state and rethrow the exception. storeHandle.discardState(); throw e; } }
3.26
flink_ZooKeeperStateHandleStore_getAllAndLock_rdh
/** * Gets all available state handles from ZooKeeper and locks the respective state nodes. * * <p>If there is a concurrent modification, the operation is retried until it succeeds. * * @return All state handles from ZooKeeper. * @throws Exception * If a ZooKeeper or state handle operation fails */ @Override public List<Tuple2<RetrievableStateHandle<T>, String>> getAllAndLock() throws Exception { return getAllAndLock(parentNodePath -> client.getChildren().forPath(parentNodePath)); }
3.26
flink_ZooKeeperStateHandleStore_get_rdh
// --------------------------------------------------------------------------------------------------------- // Private methods // --------------------------------------------------------------------------------------------------------- /** * Gets a state handle from ZooKeeper and optionally locks it. * * @param pathInZooKeeper * Path in ZooKeeper to get the state handle from * @param lock * True if we should lock the node; otherwise false * @return The state handle * @throws IOException * Thrown if the method failed to deserialize the stored state handle * @throws Exception * Thrown if a ZooKeeper operation failed */ private RetrievableStateHandle<T> get(String pathInZooKeeper, boolean lock) throws Exception { checkNotNull(pathInZooKeeper, "Path in ZooKeeper"); final String v32 = normalizePath(pathInZooKeeper); if (lock) { // try to lock the node try { client.create().withMode(CreateMode.EPHEMERAL).forPath(getInstanceLockPath(v32)); } catch (KeeperException.NodeExistsException ignored) { // we have already created the lock } catch (KeeperException.NoNodeException ex) { // We could run into this exception because the parent node does not exist when we // are trying to lock. // We wrap the exception here so that it could be caught in DefaultJobGraphStore throw new NotExistException(("ZooKeeper node " + v32) + " does not exist.", ex); } } boolean success = false; try { byte[] data = client.getData().forPath(v32);RetrievableStateHandle<T> retrievableStateHandle = deserialize(data); success = true; return retrievableStateHandle; } catch (KeeperException.NoNodeException ex) { // We wrap the exception here so that it could be caught in DefaultJobGraphStore throw new NotExistException(("ZooKeeper node " + v32) + " does not exist.", ex); } catch (IOException | ClassNotFoundException e) { throw new IOException(("Failed to deserialize state handle from ZooKeeper data from " + v32) + '.', e); } finally { if ((!success) && lock) { // release the lock m0(v32); } } }
3.26
flink_ZooKeeperStateHandleStore_releaseAll_rdh
/** * Releases all lock nodes of this ZooKeeperStateHandleStore. * * @throws Exception * if the delete operation of a lock file fails */ @Override public void releaseAll() throws Exception { Collection<String> children = getAllHandles(); Exception exception = null; for (String child : children) { try { m0(child); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } }if (exception != null) { throw new Exception("Could not properly release all state nodes.", exception); } }
3.26
flink_ZooKeeperStateHandleStore_getInstanceLockPath_rdh
/** * Returns the path for the lock node relative to the given path. * * @param rootPath * Root path under which the lock node shall be created * @return Path for the lock node */ @VisibleForTesting String getInstanceLockPath(String rootPath) { return (getRootLockPath(rootPath) + '/') + lockNode; }
3.26