name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_ModifyKindSet_intersect_rdh | /**
* Returns a new ModifyKindSet with all kinds set in both this set and in another set.
*/
public ModifyKindSet intersect(ModifyKindSet other) {
Builder builder = new Builder();
for (ModifyKind kind : other.m0()) {
if (this.contains(kind)) {
builder.addContainedKind(kind);
}
}
return builder.build();
} | 3.26 |
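A minimal usage sketch of the builder and intersect methods above. The identifiers in this snippet look machine-renamed, so it is assumed that `m0()` is the accessor returning the contained kinds and that INSERT/UPDATE/DELETE are the available ModifyKind values.

    // Hypothetical usage sketch; builds two sets and keeps only the kinds present in both.
    ModifyKindSet sourceChanges = ModifyKindSet.newBuilder()
            .addContainedKind(ModifyKind.INSERT)
            .addContainedKind(ModifyKind.UPDATE)
            .build();
    ModifyKindSet sinkChanges = ModifyKindSet.newBuilder()
            .addContainedKind(ModifyKind.UPDATE)
            .addContainedKind(ModifyKind.DELETE)
            .build();
    ModifyKindSet common = sourceChanges.intersect(sinkChanges); // contains only UPDATE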
flink_ModifyKindSet_newBuilder_rdh | /**
* Builder for configuring and creating instances of {@link ModifyKindSet}.
*/
public static Builder newBuilder() {
return new Builder();
} | 3.26 |
flink_DeserializationSchema_deserialize_rdh | /**
* Deserializes the byte message.
*
 * <p>Can output multiple records through the {@link Collector}. Note that the number and size of
 * the produced records should be relatively small. Depending on the source implementation,
 * records can be buffered in memory, or collecting records might delay emitting the checkpoint
 * barrier.
*
* @param message
* The message, as a byte array.
* @param out
* The collector to put the resulting messages.
*/
@PublicEvolving
default void deserialize(byte[] message, Collector<T> out) throws IOException {
T deserialize = deserialize(message);
if (deserialize != null) {
out.collect(deserialize);
}
} | 3.26 |
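For illustration, a minimal schema that only implements the single-record method and relies on the default collector-based variant above. This is a sketch, not a class shipped with Flink.

    // Sketch: decodes each message as a UTF-8 string; the default
    // deserialize(byte[], Collector) forwards the non-null result to the collector.
    public class Utf8StringSchema implements DeserializationSchema<String> {
        @Override
        public String deserialize(byte[] message) {
            return new String(message, java.nio.charset.StandardCharsets.UTF_8);
        }

        @Override
        public boolean isEndOfStream(String nextElement) {
            return false;
        }

        @Override
        public TypeInformation<String> getProducedType() {
            return Types.STRING;
        }
    }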
flink_DeserializationSchema_open_rdh | /**
 * Initialization method for the schema. It is called before the actual working methods {@link #deserialize} and is thus suitable for one-time setup work.
 *
 * <p>The provided {@link InitializationContext} can be used to access additional features such
 * as registering user metrics.
*
* @param context
* Contextual information that can be used during initialization.
*/
@PublicEvolving
default void open(InitializationContext context) throws Exception {
} | 3.26 |
flink_BinaryStringDataUtil_toBoolean_rdh | /**
* Parse a {@link StringData} to boolean.
*/
public static boolean toBoolean(BinaryStringData str) throws TableException {
BinaryStringData lowerCase = str.toLowerCase();
if (TRUE_STRINGS.contains(lowerCase)) {
return true;
}
if (FALSE_STRINGS.contains(lowerCase)) {
return false;
}
throw new TableException("Cannot parse '" + str + "' as BOOLEAN.");
} | 3.26 |
flink_BinaryStringDataUtil_toLong_rdh | /**
* Parses this BinaryStringData to Long.
*
 * <p>Note that in this method we accumulate the result in negative form and convert it to
 * positive form at the end if the string does not start with '-'. This is because the absolute
 * value of the minimum is larger than the maximum, e.g. Long.MAX_VALUE is '9223372036854775807'
 * while Long.MIN_VALUE is '-9223372036854775808'.
*
* <p>This code is mostly copied from LazyLong.parseLong in Hive.
 */
public static long toLong(BinaryStringData str) throws NumberFormatException {
int sizeInBytes = str.getSizeInBytes();
byte[] tmpBytes = getTmpBytes(str, sizeInBytes);
if (sizeInBytes == 0) {
throw numberFormatExceptionFor(str, "Input is empty.");
}
int i = 0;
byte b = tmpBytes[i];
final boolean negative = b == '-';
if (negative || (b == '+')) {
i++;
if (sizeInBytes == 1) {
throw numberFormatExceptionFor(str, "Input has only positive or negative symbol.");
}
}
long result = 0;
final byte separator = '.';
final int radix = 10;
final long stopValue = Long.MIN_VALUE / radix;
while (i < sizeInBytes) {
b = tmpBytes[i];
i++;
if (b == separator) {
// We allow decimals and will return a truncated integral in that case.
// Therefore we won't throw an exception here (checking the fractional
// part happens below.)
break;
}
int digit;
if ((b >= '0') && (b <= '9')) {
digit = b - '0';
} else {
throw numberFormatExceptionFor(str, "Invalid character found.");
}
// We are going to process the new digit and accumulate the result. However, before
// doing this, if the result is already smaller than the
// stopValue(Long.MIN_VALUE / radix), then result * 10 will definitely be smaller
// than minValue, and we can stop.
if (result < stopValue) {
throw numberFormatExceptionFor(str, "Overflow.");
}
result = (result * radix) - digit;
// Since the previous result is less than or equal to
// stopValue(Long.MIN_VALUE / radix), we can just use `result > 0` to check overflow.
// If result overflows, we should stop.
if (result > 0) {
throw numberFormatExceptionFor(str, "Overflow.");
}
}
// This is the case when we've encountered a decimal separator. The fractional
// part will not change the number, but we will verify that the fractional part
// is well formed.
while (i < sizeInBytes) {
byte currentByte = tmpBytes[i];
if ((currentByte < '0') || (currentByte > '9')) {
throw numberFormatExceptionFor(str, "Invalid character found.");
}
i++;
}
if (!negative) {
result = -result;
if (result < 0) {
throw numberFormatExceptionFor(str, "Overflow.");
}
}
return result;
} | 3.26 |
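The negative-accumulation trick described in the comment can be shown in isolation with plain Java longs. This is a standalone sketch, independent of the Flink classes above.

    // Accumulate in the negative domain so that "-9223372036854775808" never overflows:
    // every intermediate value stays within [Long.MIN_VALUE, 0].
    long result = 0;
    for (char c : "9223372036854775808".toCharArray()) {
        result = result * 10 - (c - '0');
    }
    // result == Long.MIN_VALUE here; for a positive input the caller negates the result
    // and treats a still-negative value as an overflow.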
flink_BinaryStringDataUtil_keyValue_rdh | /**
 * Parses the target string as a key-value string and returns the value that matches the key
 * name. Returns null if any argument is null. Examples: keyvalue('k1=v1;k2=v2', ';', '=', 'k2')
 * = 'v2'; keyvalue('k1:v1,k2:v2', ',', ':', 'k3') = NULL.
*
 * @param split1
 * separator between key-value pairs.
 * @param split2
 * separator between key and value.
 * @param keyName
 * name of the key whose value should be returned.
 * @return target value.
*/
public static BinaryStringData keyValue(BinaryStringData str, byte split1, byte split2, BinaryStringData keyName) {
str.ensureMaterialized();
if ((keyName == null) || (keyName.getSizeInBytes() == 0)) {
return null;
}
if (str.inFirstSegment() && keyName.inFirstSegment()) {
// position in byte
int byteIdx = 0;
// position of last split1
int lastSplit1Idx = -1;
while (byteIdx < str.getSizeInBytes()) {
// If the next split1 is found in str, process the current key-value pair
if (str.getSegments()[0].get(str.getOffset() + byteIdx) == split1) {
int currentKeyIdx = lastSplit1Idx + 1;
// If key of current kv is keyName, return the value directly
BinaryStringData value = findValueOfKey(str, split2, keyName, currentKeyIdx, byteIdx);
if (value != null) {
return value;
}
lastSplit1Idx = byteIdx;
}
byteIdx++;
}
// process the tail when the string does not end with split1
int currentKeyIdx = lastSplit1Idx + 1;
return findValueOfKey(str, split2, keyName, currentKeyIdx, str.getSizeInBytes());
} else {
return keyValueSlow(str, split1, split2, keyName);
}
} | 3.26 |
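A usage sketch of the key-value lookup above, with the separators passed as single bytes as in the signature; the input values are illustrative.

    // Extract the value of "k2" from a 'k1=v1;k2=v2' style string.
    BinaryStringData input = BinaryStringData.fromString("k1=v1;k2=v2");
    BinaryStringData value = BinaryStringDataUtil.keyValue(
            input, (byte) ';', (byte) '=', BinaryStringData.fromString("k2"));
    // value is "v2"; a missing key such as "k3" yields null.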
flink_BinaryStringDataUtil_toDecimal_rdh | /**
* Parses a {@link BinaryStringData} to {@link DecimalData}.
*
* @return DecimalData value if the parsing was successful.
*/
public static DecimalData toDecimal(BinaryStringData str, int precision, int scale) throws NumberFormatException {
str.ensureMaterialized();
DecimalData data;
if (DecimalDataUtils.isByteArrayDecimal(precision) || DecimalDataUtils.isByteArrayDecimal(str.getSizeInBytes())) {
data = toBigPrecisionDecimal(str, precision, scale);
} else {
int sizeInBytes = str.getSizeInBytes();
data = toDecimalFromBytes(precision, scale, getTmpBytes(str, sizeInBytes), 0, sizeInBytes);
}
if (data == null) {
throw numberFormatExceptionFor(str, "Overflow.");
}
return data;
} | 3.26 |
flink_BinaryStringDataUtil_concat_rdh | /**
* Concatenates input strings together into a single string. Returns NULL if any argument is
* NULL.
*/
public static BinaryStringData concat(BinaryStringData... inputs) {
return concat(Arrays.asList(inputs));
} | 3.26 |
flink_BinaryStringDataUtil_splitByWholeSeparatorPreserveAllTokens_rdh | /**
* Splits the provided text into an array, separator string specified.
*
* <p>The separator is not included in the returned String array. Adjacent separators are
* treated as separators for empty tokens.
*
* <p>A {@code null} separator splits on whitespace.
*
* <pre>
* "".splitByWholeSeparatorPreserveAllTokens(*) = []
* "ab de fg".splitByWholeSeparatorPreserveAllTokens(null) = ["ab", "de", "fg"]
* "ab de fg".splitByWholeSeparatorPreserveAllTokens(null) = ["ab", "", "", "de", "fg"]
* "ab:cd:ef".splitByWholeSeparatorPreserveAllTokens(":") = ["ab", "cd", "ef"]
* "ab-!-cd-!-ef".splitByWholeSeparatorPreserveAllTokens("-!-") = ["ab", "cd", "ef"]
* </pre>
*
* <p>Note: returned binary strings reuse memory segments from the input str.
*
* @param separator
* String containing the String to be used as a delimiter, {@code null} splits
* on whitespace
* @return an array of parsed Strings, {@code null} if null String was input
*/
public static BinaryStringData[] splitByWholeSeparatorPreserveAllTokens(BinaryStringData str, BinaryStringData separator) {
str.ensureMaterialized();
final int v1 = str.getSizeInBytes();
MemorySegment[] segments = str.getSegments();
int offset = str.getOffset();
if (v1 == 0) {
return EMPTY_STRING_ARRAY;
}
if ((separator == null) || EMPTY_UTF8.equals(separator)) {
// Split on whitespace.
return splitByWholeSeparatorPreserveAllTokens(str, fromString(" "));
}
separator.ensureMaterialized();
int sepSize = separator.getSizeInBytes();
MemorySegment[] sepSegs = separator.getSegments();
int sepOffset = separator.getOffset();
final ArrayList<BinaryStringData> substrings = new ArrayList<>();
int beg = 0;
int end = 0;
while (end < v1) {
end = SegmentsUtil.find(segments, offset + beg, v1 - beg, sepSegs, sepOffset, sepSize) - offset;
if (end > (-1)) {
if (end > beg) {
// The following is OK, because String.substring( beg, end ) excludes
// the character at the position 'end'.
substrings.add(fromAddress(segments, offset + beg, end - beg));
// Set the starting point for the next search.
// The following is equivalent to beg = end + (separatorLength - 1) + 1,
// which is the right calculation:
beg = end + sepSize;
} else {
// We found a consecutive occurrence of the separator.
substrings.add(EMPTY_UTF8);
beg = end + sepSize;
}
} else {
// String.substring( beg ) goes from 'beg' to the end of the String.
substrings.add(fromAddress(segments, offset + beg, v1 - beg));
end = v1;
}
}
return substrings.toArray(new BinaryStringData[0]);
} | 3.26 |
flink_BinaryStringDataUtil_concatWs_rdh | /**
 * Concatenates input strings together into a single string using the separator. Returns NULL if
 * the separator is NULL.
 *
 * <p>Note: CONCAT_WS() does not skip any empty strings, but it does skip any NULL values after
 * the separator argument. For example, concat_ws(",", "a", null, "c") would yield "a,c".
*/
public static BinaryStringData concatWs(BinaryStringData separator, BinaryStringData... inputs) {
return concatWs(separator, Arrays.asList(inputs));
} | 3.26 |
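A small sketch showing the NULL-skipping behavior described above.

    // NULL inputs after the separator are skipped, empty strings are kept.
    BinaryStringData joined = BinaryStringDataUtil.concatWs(
            BinaryStringData.fromString(","),
            BinaryStringData.fromString("a"),
            null,
            BinaryStringData.fromString(""),
            BinaryStringData.fromString("c"));
    // joined is "a,,c"; a NULL separator would make the whole result NULL.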
flink_BinaryStringDataUtil_reverse_rdh | /**
 * Reverses the characters of the current string.
 *
 * @return a new string whose character order is the reverse of the current string.
*/
public static BinaryStringData reverse(BinaryStringData str) {
str.ensureMaterialized();
if (str.inFirstSegment()) {
byte[] result = new byte[str.getSizeInBytes()];
// position in byte
int byteIdx = 0;
while (byteIdx < str.getSizeInBytes()) {
int charBytes = numBytesForFirstByte(str.getByteOneSegment(byteIdx));
str.getSegments()[0].get(str.getOffset() + byteIdx, result, (result.length - byteIdx) - charBytes, charBytes);
byteIdx += charBytes;
}
return BinaryStringData.fromBytes(result);
} else {
return reverseMultiSegs(str);
}
} | 3.26 |
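A short sketch of the per-character reversal; because `numBytesForFirstByte` is consulted, multi-byte UTF-8 characters are moved as whole units rather than reversed byte by byte.

    // Multi-byte characters stay intact in the reversed result.
    BinaryStringData reversed = BinaryStringDataUtil.reverse(BinaryStringData.fromString("héllo"));
    // reversed is "olléh"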
flink_BinaryStringDataUtil_hash_rdh | /**
 * Calculates the hash value of the given bytes using {@link MessageDigest}.
*/
public static BinaryStringData hash(byte[] bytes, MessageDigest md) {
return fromString(EncodingUtils.hex(md.digest(bytes)));
}
/**
 * Calculates the hash value of a given string using {@link MessageDigest} | 3.26 |
flink_BinaryStringDataUtil_trim_rdh | /**
 * Walks each character of the current string from both ends and removes it if it is contained
 * in the trim string. Returns the new substring with the trim characters removed from both ends.
 *
 * @param trimStr
 * the trim string
 * @return a substring with the trim characters removed from both ends.
*/
public static BinaryStringData trim(BinaryStringData str, BinaryStringData trimStr) {
if (trimStr == null) {
return null;
}
return trimRight(trimLeft(str, trimStr), trimStr);
} | 3.26 |
flink_BinaryStringDataUtil_toTimestamp_rdh | /**
* Used by {@code CAST(x as TIMESTAMP_LTZ)}.
*/
public static TimestampData toTimestamp(BinaryStringData input, int precision, TimeZone timeZone) throws DateTimeException {
return DateTimeUtils.parseTimestampData(input.toString(), precision, timeZone);
} | 3.26 |
flink_CatalogTableStatistics_copy_rdh | /**
* Create a deep copy of "this" instance.
*
* @return a deep copy
*/
public CatalogTableStatistics copy() {
return new CatalogTableStatistics(this.rowCount, this.fileCount, this.totalSize, this.rawDataSize, new HashMap<>(this.properties));
} | 3.26 |
flink_CatalogTableStatistics_getRowCount_rdh | /**
* The number of rows.
*/
public long getRowCount() {
return this.rowCount;
} | 3.26 |
flink_TableOperatorWrapperGenerator_calculateManagedMemoryFraction_rdh | /**
 * Calculates the managed memory fraction for each operator wrapper.
*/
private void calculateManagedMemoryFraction() {
for (Map.Entry<Transformation<?>, TableOperatorWrapper<?>> entry : visitedTransforms.entrySet()) {
double fraction = 0;
if (managedMemoryWeight != 0) {
fraction = (entry.getKey().getManagedMemoryOperatorScopeUseCaseWeights().getOrDefault(ManagedMemoryUseCase.OPERATOR, 0) * 1.0) / this.managedMemoryWeight;
}
entry.getValue().setManagedMemoryFraction(fraction);
}
} | 3.26 |
flink_SecurityContextFactory_isCompatibleWith_rdh | /**
* A factory for a {@link SecurityContext}.
*
* <p>There can only be one security context installed in each secure runtime.
*/
@FunctionalInterface
public interface SecurityContextFactory {
/**
* Check if this factory is compatible with the security configuration.
*
 * <p>A specific implementation must override this to provide a compatibility check; by default
 * it will always return {@code false}.
*
* @param securityConfig
* security configurations.
* @return {@code true} if factory is compatible with the configuration.
*/
default boolean isCompatibleWith(final SecurityConfiguration securityConfig) {
return false;
} | 3.26 |
flink_SnapshotDirectory_cleanup_rdh | /**
 * Calling this method will attempt to delete the underlying snapshot directory recursively, if the
* state is "ongoing". In this case, the state will be set to "deleted" as a result of this
* call.
*
* @return <code>true</code> if delete is successful, <code>false</code> otherwise.
* @throws IOException
* if an exception happens during the delete.
*/
public boolean cleanup() throws IOException {
if (state.compareAndSet(State.ONGOING, State.DELETED)) {
FileUtils.deleteDirectory(directory.toFile());
}
return true;
} | 3.26 |
flink_SnapshotDirectory_listDirectory_rdh | /**
* List the files in the snapshot directory.
*
* @return the files in the snapshot directory.
* @throws IOException
* if there is a problem creating the file statuses.
*/
public Path[] listDirectory() throws IOException {
return FileUtils.listDirectory(directory);
} | 3.26 |
flink_SnapshotDirectory_isSnapshotCompleted_rdh | /**
* Returns <code>true</code> if the snapshot is marked as completed.
*/
public boolean isSnapshotCompleted() {
return State.COMPLETED == state.get();
}
/**
* Calling this method completes the snapshot for this snapshot directory, if possible, and
* creates a corresponding {@link DirectoryStateHandle} that points to the snapshot directory.
* Calling this method can change the lifecycle state from ONGOING to COMPLETED if the directory
 * should no longer be deleted in {@link #cleanup()} | 3.26 |
flink_SnapshotDirectory_temporary_rdh | /**
* Creates a local temporary snapshot directory for the given path. This will always return
* "null" as result of {@link #completeSnapshotAndGetHandle()} and always attempt to delete the
* underlying directory in {@link #cleanup()}.
*/
public static SnapshotDirectory temporary(@Nonnull File directory) throws IOException {
return new TemporarySnapshotDirectory(directory);
} | 3.26 |
flink_SnapshotDirectory_permanent_rdh | /**
* Creates a permanent snapshot directory for the given path, which will not delete the
* underlying directory in {@link #cleanup()} after {@link #completeSnapshotAndGetHandle()} was
* called.
*/
public static SnapshotDirectory permanent(@Nonnull Path directory) throws IOException {
return new PermanentSnapshotDirectory(directory);
} | 3.26 |
flink_ThreadBase_run_rdh | /**
* Implements exception handling and delegates to go().
*/
public void run() {
try {
go();
} catch (Throwable t) {
m1(new IOException("Thread '" + getName() + "' terminated due to an exception: " + t.getMessage(), t));
}
} | 3.26 |
flink_ThreadBase_uncaughtException_rdh | /* (non-Javadoc)
@see java.lang.Thread.UncaughtExceptionHandler#uncaughtException(java.lang.Thread, java.lang.Throwable)
*/
@Override
public void uncaughtException(Thread t, Throwable e) {
m1(new IOException((("Thread '" + t.getName()) + "' terminated due to an uncaught exception: ") + e.getMessage(), e));
} | 3.26 |
flink_ThreadBase_m1_rdh | /**
* Internally handles an exception and makes sure that this method returns without a problem.
*
* @param ioex
* The exception to handle.
*/
protected final void m1(IOException ioex) {
if (!m0()) {
// discard any exception that occurs after the thread is killed.
return;
}
if (this.exceptionHandler != null) {
try {
this.exceptionHandler.handleException(ioex);
} catch (Throwable ignored) {
}
}
} | 3.26 |
flink_AbstractParameterTool_getDouble_rdh | /**
 * Returns the Double value for the given key. If the key does not exist, it will return the
 * given default value. The method fails if the value is not a Double.
*/
public double getDouble(String key, double defaultValue) {
addToDefaults(key, Double.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Double.valueOf(value);
}} | 3.26 |
flink_AbstractParameterTool_m0_rdh | /**
 * Returns the Boolean value for the given key. If the key does not exist, it will return the
 * given default value. The method returns whether the string value equals "true", ignoring
 * case.
*/
public boolean m0(String key, boolean defaultValue) {
addToDefaults(key, Boolean.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Boolean.valueOf(value);
}
} | 3.26 |
flink_AbstractParameterTool_getByte_rdh | /**
 * Returns the Byte value for the given key. If the key does not exist, it will return the
 * given default value. The method fails if the value is not a Byte.
*/
public byte getByte(String key, byte defaultValue) {
addToDefaults(key, Byte.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Byte.valueOf(value);
}
} | 3.26 |
flink_AbstractParameterTool_getBoolean_rdh | // -------------- BOOLEAN
/**
* Returns the Boolean value for the given key. The method fails if the key does not exist.
 */
public boolean getBoolean(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Boolean.valueOf(value);
} | 3.26 |
flink_AbstractParameterTool_getShort_rdh | /**
 * Returns the Short value for the given key. If the key does not exist, it will return the
 * given default value. The method fails if the value is not a Short.
*/
public short getShort(String key, short defaultValue) {
addToDefaults(key, Short.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Short.valueOf(value);
}} | 3.26 |
flink_AbstractParameterTool_getFloat_rdh | /**
 * Returns the Float value for the given key. If the key does not exist, it will return the
 * given default value. The method fails if the value is not a Float.
*/
public float getFloat(String key, float defaultValue) {
addToDefaults(key, Float.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Float.valueOf(value);
}
} | 3.26 |
flink_AbstractParameterTool_addToDefaults_rdh | // --------------- Internals
protected void addToDefaults(String key, String value) {
final String currentValue = defaultData.get(key);
if (currentValue == null) {
if (value == null) {
value = DEFAULT_UNDEFINED;
}
defaultData.put(key, value);
} else if (currentValue.equals(DEFAULT_UNDEFINED) && (value != null)) {
// there is already an entry for this key; if it is the undefined placeholder,
// update the key with the better default value
defaultData.put(key, value);
}
} | 3.26 |
flink_AbstractParameterTool_getRequired_rdh | /**
* Returns the String value for the given key. If the key does not exist it will throw a {@link RuntimeException}.
*/
public String getRequired(String key) {
addToDefaults(key, null);
String value = get(key);
if (value == null) {
throw new RuntimeException(("No data for required key '" + key) + "'");
}
return value;
} | 3.26 |
flink_AbstractParameterTool_getLong_rdh | /**
 * Returns the Long value for the given key. If the key does not exist, it will return the
 * given default value. The method fails if the value is not a Long.
 */
public long getLong(String key, long defaultValue) {
addToDefaults(key, Long.toString(defaultValue));
String v5 = get(key);
if (v5 == null) {
return defaultValue;
}
return Long.parseLong(v5);
} | 3.26 |
flink_AbstractParameterTool_get_rdh | /**
* Returns the String value for the given key. If the key does not exist it will return the
* given default value.
*/
public String get(String key, String defaultValue) {
addToDefaults(key, defaultValue);
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return value;
}
} | 3.26 |
flink_AbstractParameterTool_getUnrequestedParameters_rdh | /**
* Returns the set of parameter names which have not been requested with {@link #has(String)} or
* one of the {@code get} methods. Access to the map returned by {@link #toMap()} is not
* tracked.
*/
@PublicEvolving
public Set<String> getUnrequestedParameters() {
return Collections.unmodifiableSet(unrequestedParameters);
} | 3.26 |
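A usage sketch with the concrete ParameterTool subclass of this abstract class; the argument names and defaults are illustrative.

    // Typed getters fall back to the supplied defaults; unread parameters can be inspected.
    ParameterTool params = ParameterTool.fromArgs(new String[] {"--host", "localhost"});
    String host = params.get("host", "127.0.0.1");          // "localhost"
    int port = params.getInt("port", 8080);                  // 8080, key is absent
    Set<String> unused = params.getUnrequestedParameters();  // keys never read so far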
flink_FlatMapOperatorBase_executeOnCollections_rdh | // ------------------------------------------------------------------------
@Override
protected List<OUT> executeOnCollections(List<IN> input, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
FlatMapFunction<IN, OUT> function = userFunction.getUserCodeObject();
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
ArrayList<OUT> result = new ArrayList<OUT>(input.size());
TypeSerializer<IN> inSerializer = getOperatorInfo().getInputType().createSerializer(executionConfig);
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
CopyingListCollector<OUT> resultCollector = new CopyingListCollector<OUT>(result, outSerializer);
for (IN element : input) {
IN inCopy = inSerializer.copy(element);
function.flatMap(inCopy, resultCollector);
}
FunctionUtils.closeFunction(function);
return result;
} | 3.26 |
flink_ArrowSourceFunction_loadBatch_rdh | /**
* Load the specified batch of data to process.
*/
private ArrowRecordBatch loadBatch(int nextIndexOfArrowDataToProcess) throws IOException {
ByteArrayInputStream bais = new ByteArrayInputStream(arrowData[nextIndexOfArrowDataToProcess]);
return MessageSerializer.deserializeRecordBatch(new ReadChannel(Channels.newChannel(bais)), allocator);
} | 3.26 |
flink_MetricConfig_getLong_rdh | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key
* the hashtable key.
* @param defaultValue
* a default value.
* @return the value in this property list with the specified key value parsed as a long.
*/
public long getLong(String key, long defaultValue) {
String argument = getProperty(key, null);
return argument == null ? defaultValue : Long.parseLong(argument);
} | 3.26 |
flink_MetricConfig_getInteger_rdh | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key
* the hashtable key.
* @param defaultValue
* a default value.
* @return the value in this property list with the specified key value parsed as an int.
*/
public int getInteger(String key, int defaultValue) {
String argument = getProperty(key, null);
return argument == null ? defaultValue : Integer.parseInt(argument);
} | 3.26 |
flink_MetricConfig_getFloat_rdh | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key
* the hashtable key.
* @param defaultValue
* a default value.
* @return the value in this property list with the specified key value parsed as a float.
*/
public float getFloat(String key, float defaultValue) {
String argument = getProperty(key, null);
return argument == null ? defaultValue : Float.parseFloat(argument);
} | 3.26 |
flink_MetricConfig_getBoolean_rdh | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key
* the hashtable key.
* @param defaultValue
* a default value.
* @return the value in this property list with the specified key value parsed as a boolean.
*/
public boolean getBoolean(String key, boolean defaultValue) {
String v4 = getProperty(key, null);
return v4 == null ? defaultValue : Boolean.parseBoolean(v4);
} | 3.26 |
flink_MetricConfig_getDouble_rdh | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key
* the hashtable key.
* @param defaultValue
* a default value.
* @return the value in this property list with the specified key value parsed as a double.
*/
public double getDouble(String key, double defaultValue) {
String argument = getProperty(key, null);
return argument == null ? defaultValue : Double.parseDouble(argument);
} | 3.26 |
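For context, a sketch of how a custom metric reporter might read its options through these getters in its open() method. MyReporter and its option keys are hypothetical, not part of Flink.

    // Hypothetical reporter: MetricConfig extends java.util.Properties, and the typed
    // getters parse reporter options with a fallback default.
    public class MyReporter implements MetricReporter {
        private int port;
        private boolean useHttps;

        @Override
        public void open(MetricConfig config) {
            port = config.getInteger("port", 9999);      // 9999 if the option is unset
            useHttps = config.getBoolean("https", false);
        }

        @Override
        public void close() {}

        @Override
        public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {}

        @Override
        public void notifyOfRemovedMetric(Metric metric, String metricName, MetricGroup group) {}
    }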
flink_FormatDescriptor_option_rdh | /**
* Sets the given option on the format.
*
* <p>Note that format options must not be prefixed with the format identifier itself here.
* For example,
*
* <pre>{@code FormatDescriptor.forFormat("json")
* .option("ignore-parse-errors", "true")
* .build();}</pre>
*
* <p>will automatically be converted into its prefixed form:
*
* <pre>{@code 'format' = 'json'
* 'json.ignore-parse-errors' = 'true'}</pre>
*/
public Builder option(String key, String value) {
Preconditions.checkNotNull(key, "Key must not be null.");
Preconditions.checkNotNull(value, "Value must not be null.");
options.put(key, value);
return this;
} | 3.26 |
flink_FormatDescriptor_build_rdh | /**
* Returns an immutable instance of {@link FormatDescriptor}.
*/
public FormatDescriptor build() {
return new FormatDescriptor(format, options);} | 3.26 |
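For context, the built FormatDescriptor is typically attached to a TableDescriptor; a sketch of that wiring, with connector name, schema, and options chosen only for illustration.

    // The format options are prefixed automatically, as described in option() above.
    TableDescriptor table = TableDescriptor.forConnector("kafka")
            .schema(Schema.newBuilder().column("user_id", DataTypes.BIGINT()).build())
            .format(FormatDescriptor.forFormat("json")
                    .option("ignore-parse-errors", "true")
                    .build())
            .option("topic", "users")
            .build();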
flink_FormatDescriptor_m1_rdh | // ---------------------------------------------------------------------------------------------
@Override
public String m1() {
return String.format("%s[%s]", format, f0); } | 3.26 |
flink_PushWatermarkIntoTableSourceScanRuleBase_getNewScan_rdh | /**
* It uses the input watermark expression to generate the {@link WatermarkGeneratorSupplier}.
* After the {@link WatermarkStrategy} is pushed into the scan, it will build a new scan.
 * However, when {@link FlinkLogicalWatermarkAssigner} is the parent of the {@link FlinkLogicalTableSourceScan}, it should modify the rowtime type to keep the type of the plan
 * consistent. In other cases, it just keeps the data type of the scan the same as before and
 * leaves the work to the projection rewrite.
*
 * <p>NOTES: the row type of the scan is not always the same as that of the watermark assigner,
 * because the scan will not add the rowtime column into the row when the watermark assigner is
 * pushed into the scan. In some cases, a query may have computed columns defined on the rowtime
 * column. Modifying the type of the rowtime column (with its time attribute) would also
 * influence the type of the computed column. Therefore, if the watermark assigner is not the
 * parent of the scan, the type of the scan is set as before and the work is left to the projection.
*/
protected FlinkLogicalTableSourceScan getNewScan(FlinkLogicalWatermarkAssigner watermarkAssigner, RexNode watermarkExpr, FlinkLogicalTableSourceScan scan, TableConfig tableConfig, boolean useWatermarkAssignerRowType) {
final TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
final DynamicTableSource newDynamicTableSource = tableSourceTable.tableSource().copy();
final boolean isSourceWatermark = (newDynamicTableSource instanceof SupportsSourceWatermark) && hasSourceWatermarkDeclaration(watermarkExpr);
final RelDataType newType;
if (useWatermarkAssignerRowType) {
// project is trivial and set rowtime type in scan
newType = watermarkAssigner.getRowType();
} else {
// project add/delete columns and set the rowtime column type in project
newType = scan.getRowType();
}
final RowType producedType = ((RowType) (FlinkTypeFactory.toLogicalType(newType)));
final SourceAbilityContext abilityContext = SourceAbilityContext.from(scan);
final SourceAbilitySpec abilitySpec;
if (isSourceWatermark) {
final SourceWatermarkSpec sourceWatermarkSpec = new SourceWatermarkSpec(true, producedType);
sourceWatermarkSpec.apply(newDynamicTableSource, abilityContext);
abilitySpec = sourceWatermarkSpec;
} else {
final Duration globalIdleTimeout = tableConfig.get(ExecutionConfigOptions.TABLE_EXEC_SOURCE_IDLE_TIMEOUT);
final long globalIdleTimeoutMillis;
if ((!globalIdleTimeout.isZero()) && (!globalIdleTimeout.isNegative())) {
globalIdleTimeoutMillis = globalIdleTimeout.toMillis();
} else {
globalIdleTimeoutMillis = -1L;
}
Optional<RelHint> optionsHintOptional = scan.getHints().stream().filter(relHint -> relHint.hintName.equalsIgnoreCase(FlinkHints.HINT_NAME_OPTIONS)).findFirst();
Configuration hintOptions = optionsHintOptional.map(relHint -> Configuration.fromMap(relHint.kvOptions)).orElseGet(Configuration::new);
RelOptTable v12 = scan.getTable();
Configuration tableOptions = Optional.of(v12).filter(TableSourceTable.class::isInstance).map(t -> {
Map<String, String> tableConfigs = ((TableSourceTable) (t)).contextResolvedTable().getResolvedTable().getOptions();
return Configuration.fromMap(tableConfigs);
}).orElseGet(Configuration::new);
WatermarkParams watermarkParams = parseWatermarkParams(hintOptions, tableOptions);
final WatermarkPushDownSpec watermarkPushDownSpec = new WatermarkPushDownSpec(watermarkExpr, globalIdleTimeoutMillis, producedType, watermarkParams);
watermarkPushDownSpec.apply(newDynamicTableSource, abilityContext);
abilitySpec = watermarkPushDownSpec;
}
TableSourceTable v17 = tableSourceTable.copy(newDynamicTableSource, newType, new SourceAbilitySpec[]{ abilitySpec });
return FlinkLogicalTableSourceScan.create(scan.getCluster(), scan.getHints(), v17);
} | 3.26 |
flink_RexNodeJsonSerializer_serializeSqlOperator_rdh | // --------------------------------------------------------------------------------------------
/**
* Logic shared with {@link AggregateCallJsonSerializer}.
*/
static void serializeSqlOperator(SqlOperator operator, JsonGenerator gen, SerializerProvider serializerProvider, boolean serializeCatalogObjects) throws IOException {
if (operator.getSyntax() != SqlSyntax.FUNCTION) {
gen.writeStringField(FIELD_NAME_SYNTAX, calciteToSerializable(operator.getSyntax()).getValue());
}
if (operator instanceof BridgingSqlFunction) {
final BridgingSqlFunction function = ((BridgingSqlFunction) (operator));
serializeBridgingSqlFunction(function.getName(), function.getResolvedFunction(), gen, serializerProvider, serializeCatalogObjects);
} else if (operator instanceof BridgingSqlAggFunction) {
final BridgingSqlAggFunction function = ((BridgingSqlAggFunction) (operator));
serializeBridgingSqlFunction(function.getName(), function.getResolvedFunction(), gen, serializerProvider, serializeCatalogObjects);
} else if (((operator instanceof ScalarSqlFunction) || (operator instanceof TableSqlFunction)) || (operator instanceof AggSqlFunction)) {
throw legacyException(operator.toString());
} else if (operator.getName().isEmpty()) {
gen.writeStringField(FIELD_NAME_SQL_KIND, operator.getKind().name());
} else {
// We assume that all regular SqlOperators are internal. Only the function definitions
// stack is exposed to the user and can thus be external.
gen.writeStringField(FIELD_NAME_INTERNAL_NAME, BuiltInSqlOperator.toQualifiedName(operator));
}} | 3.26 |
flink_OperationManager_m4_rdh | // -------------------------------------------------------------------------------------------
@VisibleForTesting
public int m4() {
return submittedOperations.size();
} | 3.26 |
flink_OperationManager_m0_rdh | /**
* Get the results of the operation.
*
* @param operationHandle
* identifies the {@link Operation}.
* @param token
* identifies which batch of data to fetch.
* @param maxRows
* the maximum number of rows to fetch.
* @return ResultSet contains the results.
*/
public ResultSet m0(OperationHandle operationHandle, long token, int maxRows) {
return getOperation(operationHandle).fetchResults(token, maxRows);
} | 3.26 |
flink_OperationManager_close_rdh | /**
* Closes the {@link OperationManager} and all operations.
*/
public void close() {
stateLock.writeLock().lock();
Exception closeException = null;
try {
isRunning = false;
IOUtils.closeAll(submittedOperations.values(), Throwable.class);
}
catch (Exception e) {
closeException = e;
} finally {
submittedOperations.clear();
stateLock.writeLock().unlock();
}
// wait until all operations are closed
try {
operationLock.acquire();
} catch (Exception e) {
LOG.error("Failed to wait all operation closed.", e);
} finally {
operationLock.release();
}
LOG.debug("Closes the Operation Manager.");
if (closeException != null) {
throw new SqlExecutionException("Failed to close the OperationManager.", closeException);
}
} | 3.26 |
flink_OperationManager_getOperationInfo_rdh | /**
* Get the {@link OperationInfo} of the operation.
*
* @param operationHandle
* identifies the {@link Operation}.
*/
public OperationInfo getOperationInfo(OperationHandle operationHandle) {
return getOperation(operationHandle).getOperationInfo();
} | 3.26 |
flink_OperationManager_getOperationResultSchema_rdh | /**
* Get the {@link ResolvedSchema} of the operation.
*
* @param operationHandle
* identifies the {@link Operation}.
*/
public ResolvedSchema getOperationResultSchema(OperationHandle operationHandle) throws Exception {
return getOperation(operationHandle).getResultSchema();
} | 3.26 |
flink_OperationManager_cancelOperation_rdh | /**
* Cancel the execution of the operation.
*
* @param operationHandle
* identifies the {@link Operation}.
*/
public void cancelOperation(OperationHandle operationHandle) {
getOperation(operationHandle).cancel();
}
/**
* Close the operation and release all resources used by the {@link Operation}.
*
* @param operationHandle
* identifies the {@link Operation} | 3.26 |
flink_OperationManager_submitOperation_rdh | /**
 * Submit the operation to the {@link OperationManager}. The {@link OperationManager} manages the
 * lifecycle of the {@link Operation}, including registering resources, firing the execution, and
 * so on.
*
* @param fetcherSupplier
* offer the fetcher to get the results.
* @return OperationHandle to fetch the results or check the status.
*/
public OperationHandle submitOperation(Function<OperationHandle, ResultFetcher> fetcherSupplier) {
OperationHandle handle = OperationHandle.create();
Operation v4 = new Operation(handle, () -> fetcherSupplier.apply(handle));
submitOperationInternal(handle, v4);
return handle;
} | 3.26 |
flink_LocalSlicingWindowAggOperator_computeMemorySize_rdh | /**
 * Computes the memory size from the memory fraction.
*/
private long computeMemorySize() {
final Environment environment = getContainingTask().getEnvironment();
return environment.getMemoryManager().computeMemorySize(getOperatorConfig().getManagedMemoryFractionOperatorUseCaseOfSlot(ManagedMemoryUseCase.OPERATOR, environment.getTaskManagerInfo().getConfiguration(), environment.getUserCodeClassLoader().asClassLoader()));
} | 3.26 |
flink_ColumnReferenceFinder_findReferencedColumn_rdh | /**
* Find referenced column names that derive the computed column.
*
* @param columnName
* the name of the column
* @param schema
* the schema contains the computed column definition
* @return the referenced column names
*/
public static Set<String> findReferencedColumn(String columnName, ResolvedSchema schema) {
Column column = schema.getColumn(columnName).orElseThrow(() -> new ValidationException(String.format("The input column %s doesn't exist in the schema.", columnName)));
if (!(column instanceof Column.ComputedColumn)) {
return Collections.emptySet();
}
// the input ref index is based on a projection of non-computed columns
ColumnReferenceVisitor visitor = new ColumnReferenceVisitor(schema.getColumns().stream().filter(c -> !(c instanceof Column.ComputedColumn)).map(Column::getName).collect(Collectors.toList()));
return visitor.visit(((Column.ComputedColumn) (column)).getExpression());
} | 3.26 |
flink_ColumnReferenceFinder_findWatermarkReferencedColumn_rdh | /**
* Find referenced column names that derive the watermark expression.
*
* @param schema
 * the resolved schema that contains the watermark expression.
* @return the referenced column names
*/
public static Set<String> findWatermarkReferencedColumn(ResolvedSchema schema) {
ColumnReferenceVisitor visitor = new ColumnReferenceVisitor(schema.getColumnNames());
return schema.getWatermarkSpecs().stream().flatMap(spec -> Stream.concat(visitor.visit(spec.getWatermarkExpression()).stream(), Stream.of(spec.getRowtimeAttribute()))).collect(Collectors.toSet());
} | 3.26 |
flink_OperatingSystemRestriction_restrictTo_rdh | /**
* Restricts the execution to the given set of operating systems.
*
* @param reason
* reason for the restriction
* @param operatingSystems
* allowed operating systems
* @throws AssumptionViolatedException
* if this method is called on a forbidden operating system
*/
public static void restrictTo(final String reason, final OperatingSystem... operatingSystems) throws AssumptionViolatedException {
final EnumSet<OperatingSystem> allowed = EnumSet.copyOf(Arrays.asList(operatingSystems));
Assume.assumeTrue(reason, allowed.contains(OperatingSystem.getCurrentOperatingSystem()));
} | 3.26 |
flink_OperatingSystemRestriction_forbid_rdh | /**
* Forbids the execution on the given set of operating systems.
*
* @param reason
* reason for the restriction
* @param forbiddenSystems
* forbidden operating systems
* @throws AssumptionViolatedException
* if this method is called on a forbidden operating system
*/
public static void forbid(final String reason, final OperatingSystem... forbiddenSystems) throws AssumptionViolatedException {
final OperatingSystem os = OperatingSystem.getCurrentOperatingSystem();
for (final OperatingSystem forbiddenSystem : forbiddenSystems) {
Assume.assumeTrue(reason, os != forbiddenSystem);
}
} | 3.26 |
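A usage sketch in a JUnit 4 test class; the reason text is illustrative.

    // Skips all tests of the class when running on Windows.
    @BeforeClass
    public static void checkOperatingSystem() {
        OperatingSystemRestriction.forbid(
                "Symbolic links are not reliably supported on Windows.",
                OperatingSystem.WINDOWS);
    }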
flink_TwoStageOptimizedWindowAggregateRule_isInputSatisfyRequiredDistribution_rdh | // ------------------------------------------------------------------------------------------
private boolean isInputSatisfyRequiredDistribution(RelNode input, int[] keys) {
FlinkRelDistribution requiredDistribution = createDistribution(keys);
FlinkRelDistribution inputDistribution = input.getTraitSet().getTrait(FlinkRelDistributionTraitDef.INSTANCE());
return inputDistribution.satisfies(requiredDistribution);
} | 3.26 |
flink_Operator_setParallelism_rdh | /**
* Sets the parallelism for this operator. The parallelism must be 1 or more.
*
* @param parallelism
* The parallelism for this operator. A value equal to {@link ExecutionConfig#PARALLELISM_DEFAULT} will use the system default.
* @return The operator with set parallelism.
*/
public O setParallelism(int parallelism) {
OperatorValidationUtils.validateParallelism(parallelism);
this.parallelism = parallelism;
@SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_Operator_getMinResources_rdh | /**
* Returns the minimum resource of this operator. If no minimum resource has been set, it
* returns the default empty resource.
*
* @return The minimum resource of this operator.
 */
public ResourceSpec getMinResources() {
return this.minResources;
} | 3.26 |
flink_Operator_setResources_rdh | /**
* Sets the resources for this operator. This overrides the default minimum and preferred
* resources.
*
* @param resources
* The resources for this operator.
* @return The operator with set minimum and preferred resources.
*/
private O setResources(ResourceSpec resources) {
OperatorValidationUtils.validateResources(resources);
this.minResources = resources;
this.preferredResources = resources;
@SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_Operator_name_rdh | /**
* Sets the name of this operator. This overrides the default name, which is either a generated
* description of the operation (such as for example "Aggregate(1:SUM, 2:MIN)") or the name the
* user-defined function or input/output format executed by the operator.
*
* @param newName
* The name for this operator.
* @return The operator with a new name.
*/
public O name(String newName) {
this.name = newName;
@SuppressWarnings("unchecked")
O returnType = ((O) (this));
return returnType;
} | 3.26 |
flink_Operator_getParallelism_rdh | /**
* Returns the parallelism of this operator.
*
* @return The parallelism of this operator.
*/
public int getParallelism() {
return this.parallelism;
} | 3.26 |
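A usage sketch of the fluent setters above on a DataSet API operator; the pipeline itself is illustrative.

    // Naming an operator and overriding its parallelism; both setters return the operator itself.
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Integer> doubled = env.fromElements(1, 2, 3)
            .map(value -> value * 2)
            .name("doubler")
            .setParallelism(2);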
flink_DataSourceNode_getOperator_rdh | /**
* Gets the contract object for this data source node.
*
* @return The contract.
*/
@Override
public GenericDataSourceBase<?, ?> getOperator() {
return ((GenericDataSourceBase<?, ?>) (super.getOperator()));
} | 3.26 |
flink_LongMaximum_toString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "LongMaximum " + this.max;
} | 3.26 |
flink_LongMaximum_add_rdh | // ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(long value) {
this.max = Math.max(this.max, value);
} | 3.26 |
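A usage sketch registering the accumulator in a rich function; the aggregated maximum is then available from the JobExecutionResult after the job finishes. The open(Configuration) signature is the classic one, deprecated in recent Flink versions in favor of open(OpenContext).

    // Each parallel instance feeds the accumulator; Flink merges them at the end of the job.
    public class MaxTracker extends RichMapFunction<Long, Long> {
        private final LongMaximum maxSeen = new LongMaximum();

        @Override
        public void open(Configuration parameters) {
            getRuntimeContext().addAccumulator("max-seen", maxSeen);
        }

        @Override
        public Long map(Long value) {
            maxSeen.add(value); // uses the primitive specialization above
            return value;
        }
    }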
flink_OperatorChain_createOperator_rdh | /**
* Create and return a single operator from the given {@param operatorConfig} that will be
* producing records to the {@param output}.
*/
private <OUT, OP extends StreamOperator<OUT>> OP createOperator(StreamTask<OUT, ?> containingTask, StreamConfig operatorConfig, ClassLoader userCodeClassloader, WatermarkGaugeExposingOutput<StreamRecord<OUT>> output, List<StreamOperatorWrapper<?, ?>> allOperatorWrappers, boolean isHead) {
// now create the operator and give it the output collector to write its output to
Tuple2<OP, Optional<ProcessingTimeService>> chainedOperatorAndTimeService = StreamOperatorFactoryUtil.createOperator(operatorConfig.getStreamOperatorFactory(userCodeClassloader), containingTask, operatorConfig, output, operatorEventDispatcher);
OP chainedOperator = chainedOperatorAndTimeService.f0;
allOperatorWrappers.add(createOperatorWrapper(chainedOperator, containingTask, operatorConfig, chainedOperatorAndTimeService.f1, isHead));
chainedOperator.getMetricGroup().gauge(MetricNames.IO_CURRENT_OUTPUT_WATERMARK, output.getWatermarkGauge()::getValue);
return chainedOperator;
} | 3.26 |
flink_OperatorChain_closeAllOperators_rdh | /**
* Execute {@link StreamOperator#close()} of each operator in the chain of this {@link StreamTask}. Closing happens from <b>tail to head</b> operator in the chain.
*/
public void closeAllOperators() throws Exception {
isClosed = true;
} | 3.26 |
flink_OperatorChain_flushOutputs_rdh | /**
* This method should be called before finishing the record emission, to make sure any data that
* is still buffered will be sent. It also ensures that all data sending related exceptions are
* recognized.
*
* @throws IOException
* Thrown, if the buffered data cannot be pushed into the output streams.
*/
public void flushOutputs() throws IOException {
for (RecordWriterOutput<?> streamOutput : getStreamOutputs()) {
streamOutput.flush();
}
} | 3.26 |
flink_OperatorChain_close_rdh | /**
* This method releases all resources of the record writer output. It stops the output flushing
* thread (if there is one) and releases all buffers currently held by the output serializers.
*
* <p>This method should never fail.
*/
public void close() throws IOException {
closer.close();
} | 3.26 |
flink_OperatorChain_getOperatorRecordsOutCounter_rdh | /**
* Get the numRecordsOut counter for the operator represented by the given config. And re-use
* the operator-level counter for the task-level numRecordsOut counter if this operator is at
* the end of the operator chain.
*
* <p>Return null if we should not use the numRecordsOut counter to track the records emitted by
* this operator.
*/
@Nullable
private Counter getOperatorRecordsOutCounter(StreamTask<?, ?> containingTask, StreamConfig operatorConfig) {
ClassLoader userCodeClassloader = containingTask.getUserCodeClassLoader();
Class<StreamOperatorFactory<?>> streamOperatorFactoryClass = operatorConfig.getStreamOperatorFactoryClass(userCodeClassloader);
// Do not use the numRecordsOut counter on output if this operator is SinkWriterOperator.
//
// Metric "numRecordsOut" is defined as the total number of records written to the
// external system in FLIP-33, but this metric is occupied in AbstractStreamOperator as the
// number of records sent to downstream operators, which is number of Committable batches
// sent to SinkCommitter. So we skip registering this metric on output and leave this metric
// to sink writer implementations to report.
try {
Class<?> sinkWriterFactoryClass = userCodeClassloader.loadClass(SinkWriterOperatorFactory.class.getName());
if (sinkWriterFactoryClass.isAssignableFrom(streamOperatorFactoryClass)) {
return null;
}
} catch (ClassNotFoundException e) {
throw new StreamTaskException("Could not load SinkWriterOperatorFactory class from userCodeClassloader.", e);
}
InternalOperatorMetricGroup operatorMetricGroup = containingTask.getEnvironment().getMetricGroup().getOrAddOperator(operatorConfig.getOperatorID(), operatorConfig.getOperatorName());
return operatorMetricGroup.getIOMetricGroup().getNumRecordsOutCounter();
} | 3.26 |
flink_OperatorChain_createOperatorChain_rdh | /**
* Recursively create chain of operators that starts from the given {@param operatorConfig}.
* Operators are created tail to head and wrapped into an {@link WatermarkGaugeExposingOutput}.
*/
private <IN, OUT> WatermarkGaugeExposingOutput<StreamRecord<IN>> createOperatorChain(StreamTask<OUT, ?> containingTask, StreamConfig prevOperatorConfig, StreamConfig operatorConfig, Map<Integer, StreamConfig> chainedConfigs, ClassLoader userCodeClassloader, Map<IntermediateDataSetID, RecordWriterOutput<?>> recordWriterOutputs, List<StreamOperatorWrapper<?, ?>> allOperatorWrappers, OutputTag<IN> outputTag, MailboxExecutorFactory mailboxExecutorFactory, boolean shouldAddMetricForPrevOperator) {
// create the output that the operator writes to first. this may recursively create more
// operators
WatermarkGaugeExposingOutput<StreamRecord<OUT>> chainedOperatorOutput = m4(containingTask, operatorConfig, chainedConfigs, userCodeClassloader, recordWriterOutputs, allOperatorWrappers, mailboxExecutorFactory, true);
OneInputStreamOperator<IN, OUT> chainedOperator = createOperator(containingTask, operatorConfig, userCodeClassloader, chainedOperatorOutput, allOperatorWrappers, false);
return wrapOperatorIntoOutput(chainedOperator, containingTask, prevOperatorConfig, operatorConfig, userCodeClassloader, outputTag, shouldAddMetricForPrevOperator);
} | 3.26 |
flink_OperatorChain_createChainOutputs_rdh | // ------------------------------------------------------------------------
// initialization utilities
// ------------------------------------------------------------------------
private void createChainOutputs(List<NonChainedOutput> outputsInOrder, RecordWriterDelegate<SerializationDelegate<StreamRecord<OUT>>> recordWriterDelegate, Map<Integer, StreamConfig> chainedConfigs, StreamTask<OUT, OP> containingTask, Map<IntermediateDataSetID, RecordWriterOutput<?>> recordWriterOutputs) {
for (int i = 0; i < outputsInOrder.size(); ++i) {
NonChainedOutput output = outputsInOrder.get(i);
RecordWriterOutput<?> recordWriterOutput = createStreamOutput(recordWriterDelegate.getRecordWriter(i), output, chainedConfigs.get(output.getSourceNodeId()), containingTask.getEnvironment());
this.streamOutputs[i] = recordWriterOutput;
recordWriterOutputs.put(output.getDataSetId(), recordWriterOutput);
}
} | 3.26 |
flink_OperatorChain_getAllOperators_rdh | /**
* Returns an {@link Iterable} which traverses all operators in forward or reverse topological
* order.
*/
protected Iterable<StreamOperatorWrapper<?, ?>> getAllOperators(boolean reverse) {
return reverse ? new StreamOperatorWrapper.ReadIterator(tailOperatorWrapper, true) : new StreamOperatorWrapper.ReadIterator(mainOperatorWrapper, false);
} | 3.26 |
flink_SystemClock_absoluteTimeMillis_rdh | // ------------------------------------------------------------------------
@Override
public long absoluteTimeMillis() {
return System.currentTimeMillis();
} | 3.26 |
flink_SharedObjects_create_rdh | /**
* Creates a new instance. Usually that should be done inside a JUnit test class as an
* instance-field annotated with {@link org.junit.Rule}.
*/
public static SharedObjects create() {
return new SharedObjects(LAST_ID.getAndIncrement());
} | 3.26 |
flink_SharedObjects_add_rdh | /**
* Adds a new object to this {@code SharedObjects}. Although not necessary, it is recommended to
* only access the object through the returned {@link SharedReference}.
*/
public <T> SharedReference<T> add(T object) {
SharedReference<T> tag = new DefaultTag<>(id, objects.size());
objects.put(tag, object);
return tag;
} | 3.26 |
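A usage sketch in a JUnit 4 test; the shared reference lets functions running in the same JVM hand data back to the test without serializing the object into the job graph. Class and variable names are illustrative.

    // The rule isolates objects per test; access them only through the SharedReference.
    public class MySinkTest {
        @Rule
        public final SharedObjects sharedObjects = SharedObjects.create();

        @Test
        public void testCollectsValues() throws Exception {
            SharedReference<Queue<Long>> collected =
                    sharedObjects.add(new ConcurrentLinkedQueue<>());
            // inside a sink or map function executed locally: collected.get().add(value);
            // after env.execute(): assert on collected.get()
        }
    }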
flink_DynamicTableFactory_getPrimaryKeyIndexes_rdh | /**
* Returns the primary key indexes, if any, otherwise returns an empty array. A factory can
* use it to compute the schema projection of the key fields with {@code Projection.of(ctx.getPrimaryKeyIndexes()).project(dataType)}.
*
* <p>Shortcut for {@code getCatalogTable().getResolvedSchema().getPrimaryKeyIndexes()}.
*
* @see ResolvedSchema#getPrimaryKeyIndexes()
*/
default int[] getPrimaryKeyIndexes() {
return m0().getResolvedSchema().getPrimaryKeyIndexes();
} | 3.26 |
flink_DynamicTableFactory_forwardOptions_rdh | /**
* Returns a set of {@link ConfigOption} that are directly forwarded to the runtime
* implementation but don't affect the final execution topology.
*
* <p>Options declared here can override options of the persisted plan during an enrichment
* phase. Since a restored topology is static, an implementer has to ensure that the declared
* options don't affect fundamental abilities such as {@link SupportsProjectionPushDown} or
* {@link SupportsFilterPushDown}.
*
* <p>For example, given a database connector, if an option defines the connection timeout,
* changing this value does not affect the pipeline topology and can be allowed. However, an
* option that defines whether the connector supports {@link SupportsReadingMetadata} or not is
* not allowed. The planner might not react to changed abilities anymore.
*
* @see DynamicTableFactory.Context#getEnrichmentOptions()
* @see TableFactoryHelper#getOptions()
* @see FormatFactory#forwardOptions()
*/
default Set<ConfigOption<?>> forwardOptions() {
return Collections.emptySet();
} | 3.26 |
flink_DynamicTableFactory_getEnrichmentOptions_rdh | /**
* Returns a map of options that can enrich the options of the original {@link #getCatalogTable()} during a plan restore.
*
* <p>If and only if {@code table.plan.restore.catalog-objects} is set to {@code ALL}, this
* method may return a non-empty {@link Map} of options retrieved from the {@link Catalog}.
*
* <p>Because only the {@link DynamicTableFactory} is able to decide which options are safe
* to be forwarded without affecting the original topology, enrichment options are exposed
* through this method. In general, it's highly recommended using the {@link FactoryUtil#createTableFactoryHelper(DynamicTableFactory, Context)} to merge the options
* and then get the result with {@link TableFactoryHelper#getOptions()}. The helper
* considers both {@link #forwardOptions()} and {@link FormatFactory#forwardOptions()}.
*
* <p>Since a restored topology is static, an implementer has to ensure that the declared
* options don't affect fundamental abilities. The planner might not react to changed
* abilities anymore.
*
* @see TableFactoryHelper
*/
default Map<String, String> getEnrichmentOptions() {
return Collections.emptyMap();
} | 3.26 |
flink_DynamicTableFactory_getPhysicalRowDataType_rdh | /**
* Returns the physical schema to use for encoding and decoding records. The returned row
* data type contains only physical columns. It does not include computed or metadata
* columns. A factory can use the returned data type to configure the table connector, and
* can manipulate it using the {@link DataType} static methods:
*
* <pre>{@code // Project some fields into a new data type
* DataType projectedDataType = Projection.of(projectedIndexes)
* .project(context.getPhysicalRowDataType());
*
* // Create key data type
* DataType keyDataType = Projection.of(context.getPrimaryKeyIndexes())
* .project(context.getPhysicalRowDataType());
*
* // Create a new data type filtering columns of the original data type
* DataType myOwnDataType = DataTypes.ROW(
* DataType.getFields(context.getPhysicalRowDataType())
* .stream()
* .filter(myFieldFilterPredicate)
* .toArray(DataTypes.Field[]::new))}</pre>
*
* <p>Shortcut for {@code getCatalogTable().getResolvedSchema().toPhysicalRowDataType()}.
*
* @see ResolvedSchema#toPhysicalRowDataType()
*/
default DataType getPhysicalRowDataType() {
return m0().getResolvedSchema().toPhysicalRowDataType();
} | 3.26 |
flink_IntMinimum_toString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "IntMinimum " + this.min;
} | 3.26 |
flink_IntMinimum_add_rdh | // ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(int value) {
this.min = Math.min(this.min, value);
} | 3.26 |
flink_TempBarrier_getIterator_rdh | /**
* This method resets the input!
*
* @see org.apache.flink.runtime.operators.util.CloseableInputProvider#getIterator()
*/
@Override
public MutableObjectIterator<T> getIterator() throws InterruptedException, IOException {
synchronized(this.lock) {
while ((this.exception == null) && (!this.writingDone)) {
this.lock.wait(5000);
}
}
if (this.exception != null) {
throw new RuntimeException("An error occurred creating the temp table.", this.exception);
} else if (this.writingDone) {
final DataInputView v0 = this.buffer.flip();
return new InputViewIterator<>(v0, this.serializer);
} else {
return null;
}
} | 3.26 |
flink_TempBarrier_startReading_rdh | // --------------------------------------------------------------------------------------------
public void startReading() {
this.tempWriter.start();
} | 3.26 |
flink_SplitAssignmentTracker_getAndRemoveUncheckpointedAssignment_rdh | /**
* This method is invoked when a source reader fails over. In this case, the source reader will
* restore its split assignment to the last successful checkpoint. Any split assignment to that
* source reader after the last successful checkpoint will be lost on the source reader side as
* if those splits were never assigned. To handle this case, the coordinator needs to find those
* splits and return them back to the SplitEnumerator for re-assignment.
*
* @param subtaskId
* the subtask id of the reader that failed over.
* @param restoredCheckpointId
* the ID of the checkpoint that the reader was restored to.
* @return A list of splits that needs to be added back to the {@link SplitEnumerator}.
*/
public List<SplitT> getAndRemoveUncheckpointedAssignment(int subtaskId, long restoredCheckpointId) {
final ArrayList<SplitT> splits = new ArrayList<>();
for (final Map.Entry<Long, Map<Integer, LinkedHashSet<SplitT>>> entry : assignmentsByCheckpointId.entrySet()) {
if (entry.getKey() > restoredCheckpointId) {
removeFromAssignment(subtaskId, entry.getValue(), splits);
}
}
removeFromAssignment(subtaskId, uncheckpointedAssignments, splits);
return splits;
} | 3.26 |
flink_SplitAssignmentTracker_onCheckpointComplete_rdh | /**
 * When a checkpoint has been successfully made, this method is invoked to clean up the
* assignment history before this successful checkpoint.
*
* @param checkpointId
* the id of the successful checkpoint.
*/
public void onCheckpointComplete(long checkpointId) {
assignmentsByCheckpointId.entrySet().removeIf(entry -> entry.getKey() <= checkpointId);
} | 3.26 |
flink_SplitAssignmentTracker_recordSplitAssignment_rdh | /**
* Record a new split assignment.
*
* @param splitsAssignment
* the new split assignment.
*/
public void recordSplitAssignment(SplitsAssignment<SplitT> splitsAssignment) {
addSplitAssignment(splitsAssignment, uncheckpointedAssignments);
} | 3.26 |
flink_SplitAssignmentTracker_onCheckpoint_rdh | /**
* Behavior of SplitAssignmentTracker on checkpoint. Tracker will mark uncheckpointed assignment
* as checkpointed with current checkpoint ID.
*
* @param checkpointId
* the id of the ongoing checkpoint
*/
public void onCheckpoint(long checkpointId) throws Exception {
// Include the uncheckpointed assignments to the snapshot.
assignmentsByCheckpointId.put(checkpointId, uncheckpointedAssignments);
uncheckpointedAssignments = new HashMap<>();
} | 3.26 |
flink_SplitAssignmentTracker_removeFromAssignment_rdh | // -------------- private helpers ---------------
private void removeFromAssignment(int subtaskId, Map<Integer, LinkedHashSet<SplitT>> assignments, List<SplitT> toPutBack) {
Set<SplitT> splitForSubtask = assignments.remove(subtaskId);
if (splitForSubtask != null) {
toPutBack.addAll(splitForSubtask);
}
} | 3.26 |
flink_SplitAssignmentTracker_assignmentsByCheckpointId_rdh | // ------------- Methods visible for testing ----------------
@VisibleForTesting
SortedMap<Long, Map<Integer, LinkedHashSet<SplitT>>> assignmentsByCheckpointId() {
return assignmentsByCheckpointId;
} | 3.26 |
flink_HiveDDLUtils_validateConstraint_rdh | // returns a constraint trait that requires VALIDATE
public static byte validateConstraint(byte trait) {
return ((byte) (trait | HIVE_CONSTRAINT_VALIDATE));
} | 3.26 |
flink_HiveDDLUtils_relyConstraint_rdh | // returns a constraint trait that requires RELY
public static byte relyConstraint(byte trait) {
return ((byte) (trait | HIVE_CONSTRAINT_RELY));
} | 3.26 |
flink_HiveDDLUtils_noValidateConstraint_rdh | // returns a constraint trait that doesn't require VALIDATE
public static byte noValidateConstraint(byte trait) {
return ((byte) (trait & (~HIVE_CONSTRAINT_VALIDATE)));
} | 3.26 |
flink_HiveDDLUtils_requireValidateConstraint_rdh | // returns whether a trait requires VALIDATE constraint
public static boolean requireValidateConstraint(byte trait) {
return (trait & HIVE_CONSTRAINT_VALIDATE) != 0;
} | 3.26 |
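A small sketch of how these bit-flag helpers compose, assuming the HIVE_CONSTRAINT_* constants are single-bit masks as the code above implies.

    // Constraint traits are composed and queried as bit flags.
    byte trait = 0;
    trait = HiveDDLUtils.relyConstraint(trait);        // set RELY
    trait = HiveDDLUtils.noValidateConstraint(trait);  // clear VALIDATE
    boolean needsValidate = HiveDDLUtils.requireValidateConstraint(trait); // false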