name | code_snippet | score
---|---|---|
flink_BinaryStringData_compareTo_rdh | /**
* Compares two strings lexicographically. Since UTF-8 uses groups of six bits, it is sometimes
* useful to use octal notation which uses 3-bit groups. With a calculator which can convert
* between hexadecimal and octal it can be easier to manually create or interpret UTF-8 compared
* with using binary. So we just compare the binary.
*/
@Override
public int compareTo(@Nonnull StringData o) {
// BinaryStringData is the only implementation of StringData
BinaryStringData other = ((BinaryStringData) (o));
if ((javaObject != null) && (other.javaObject != null)) {
return javaObject.compareTo(other.javaObject);
}
ensureMaterialized();
other.ensureMaterialized();
if ((segments.length == 1) && (other.binarySection.segments.length == 1)) {
int len = Math.min(binarySection.sizeInBytes, other.binarySection.sizeInBytes);
MemorySegment seg1 = binarySection.segments[0];
MemorySegment seg2 = other.binarySection.segments[0];
for (int i = 0; i < len; i++) {
int res = (seg1.get(binarySection.offset + i) & 0xff) - (seg2.get(other.binarySection.offset + i) & 0xff);
if (res != 0) {
return res;
}
}
return binarySection.sizeInBytes - other.binarySection.sizeInBytes;
}
// if there are multi segments.
return compareMultiSegments(other);
} | 3.26 |
flink_BinaryStringData_compareMultiSegments_rdh | /**
* Find the boundaries of segments, and then compare MemorySegment.
*/
private int compareMultiSegments(BinaryStringData other) {
if ((binarySection.sizeInBytes == 0) || (other.binarySection.sizeInBytes == 0)) {
return binarySection.sizeInBytes - other.binarySection.sizeInBytes;
}
int len = Math.min(binarySection.sizeInBytes, other.binarySection.sizeInBytes);
MemorySegment seg1 = binarySection.segments[0];
MemorySegment seg2 = other.binarySection.segments[0];
int segmentSize = binarySection.segments[0].size();
int otherSegmentSize = other.binarySection.segments[0].size();
int sizeOfFirst1 = segmentSize - binarySection.offset;
int sizeOfFirst2 = otherSegmentSize - other.binarySection.offset;
int varSegIndex1 = 1;
int varSegIndex2 = 1;
// find the first segment of this string.
while (sizeOfFirst1 <= 0) {
sizeOfFirst1 += segmentSize;
seg1 = binarySection.segments[varSegIndex1++];
}
while (sizeOfFirst2 <= 0) {
sizeOfFirst2 += otherSegmentSize;
seg2 = other.binarySection.segments[varSegIndex2++];
}
int offset1 = segmentSize - sizeOfFirst1;
int offset2 = otherSegmentSize - sizeOfFirst2;
int needCompare = Math.min(Math.min(sizeOfFirst1, sizeOfFirst2), len);
while (needCompare > 0) {
// compare in one segment.
for (int i = 0; i < needCompare; i++) {
int res = (seg1.get(offset1 + i) & 0xff) - (seg2.get(offset2 + i) & 0xff);
if (res != 0) {
return res;
}
}
if (needCompare == len) {
break;
}
len -= needCompare;
// next segment
if (sizeOfFirst1 < sizeOfFirst2) {
// I am smaller
seg1 = binarySection.segments[varSegIndex1++];
offset1 = 0;
offset2 += needCompare;
sizeOfFirst1 = segmentSize;
sizeOfFirst2 -= needCompare;
} else if (sizeOfFirst1 > sizeOfFirst2) {
// other is smaller
seg2 = other.binarySection.segments[varSegIndex2++];
offset2 = 0;
offset1 += needCompare;
sizeOfFirst2 = otherSegmentSize;
sizeOfFirst1 -= needCompare;
} else {
// same, should go ahead both.
seg1 = binarySection.segments[varSegIndex1++];
seg2 = other.binarySection.segments[varSegIndex2++];
offset1 = 0;
offset2 = 0;
sizeOfFirst1 = segmentSize;
sizeOfFirst2 = otherSegmentSize;
}
needCompare = Math.min(Math.min(sizeOfFirst1, sizeOfFirst2), len);
}
checkArgument(needCompare == len);
return binarySection.sizeInBytes - other.binarySection.sizeInBytes;
} | 3.26 |
flink_BinaryStringData_fromString_rdh | /**
* Creates a {@link BinaryStringData} instance from the given Java string.
*/
public static BinaryStringData fromString(String str) {
if (str == null) {
return null;
} else {
return new BinaryStringData(str);
}
} | 3.26 |
flink_BinaryStringData_toUpperCase_rdh | /**
* Converts all of the characters in this {@code BinaryStringData} to upper case.
*
* @return the {@code BinaryStringData}, converted to uppercase.
*/
public BinaryStringData toUpperCase() {
if (javaObject != null) {
return javaToUpperCase();
}
if (binarySection.sizeInBytes == 0) {
return EMPTY_UTF8;
}
int size = binarySection.segments[0].size();
BinaryStringData.SegmentAndOffset segmentAndOffset = startSegmentAndOffset(size);
byte[] bytes = new byte[binarySection.sizeInBytes];
bytes[0] = ((byte) (Character.toTitleCase(segmentAndOffset.value())));
for (int i = 0; i < binarySection.sizeInBytes; i++) {
byte b = segmentAndOffset.value();
if (numBytesForFirstByte(b) != 1) {
// fallback
return javaToUpperCase();
}
int upper = Character.toUpperCase(((int) (b)));
if (upper > 127) {
// fallback
return javaToUpperCase();
}
bytes[i] = ((byte) (upper));
segmentAndOffset.nextByte(size);
}
return fromBytes(bytes);
} | 3.26 |
flink_BinaryStringData_fromAddress_rdh | // ------------------------------------------------------------------------------------------
// Construction Utilities
// ------------------------------------------------------------------------------------------
/**
* Creates a {@link BinaryStringData} instance from the given address (base and offset) and
* length.
*/
public static BinaryStringData fromAddress(MemorySegment[] segments, int offset, int numBytes) {
return new BinaryStringData(segments, offset, numBytes);
} | 3.26 |
flink_BinaryStringData_contains_rdh | /**
* Returns true if and only if this BinaryStringData contains the specified sequence of bytes
* values.
*
* @param s
* the sequence to search for
* @return true if this BinaryStringData contains {@code s}, false otherwise
*/
public boolean contains(final BinaryStringData s) {
ensureMaterialized();
s.ensureMaterialized();
if (s.binarySection.sizeInBytes == 0) {
return true;
}
int v46 = BinarySegmentUtils.find(binarySection.segments, binarySection.offset, binarySection.sizeInBytes, s.binarySection.segments, s.binarySection.offset, s.binarySection.sizeInBytes);
return v46 != (-1);
}
/**
* Tests if this BinaryStringData starts with the specified prefix.
*
* @param prefix
* the prefix.
* @return {@code true} if the bytes represented by the argument is a prefix of the bytes
represented by this string; {@code false} otherwise. Note also that {@code true} will be
returned if the argument is an empty BinaryStringData or is equal to this {@code BinaryStringData} object as determined by the {@link #equals(Object)} | 3.26 |
flink_HadoopDataInputStream_forceSeek_rdh | /**
* Positions the stream to the given location. In contrast to {@link #seek(long)}, this method
* will always issue a "seek" command to the dfs and may not replace it by {@link #skip(long)}
* for small seeks.
*
* <p>Notice that the underlying DFS implementation can still decide to do skip instead of seek.
*
* @param seekPos
* the position to seek to.
* @throws IOException
*/
public void forceSeek(long seekPos) throws IOException {
fsDataInputStream.seek(seekPos);
} | 3.26 |
flink_HadoopDataInputStream_skipFully_rdh | /**
* Skips over a given amount of bytes in the stream.
*
* @param bytes
* the number of bytes to skip.
* @throws IOException
*/
public void skipFully(long bytes) throws IOException {
while (bytes > 0) {
bytes -= fsDataInputStream.skip(bytes);
}
} | 3.26 |
flink_HadoopDataInputStream_getHadoopInputStream_rdh | /**
* Gets the wrapped Hadoop input stream.
*
* @return The wrapped Hadoop input stream.
*/
public FSDataInputStream getHadoopInputStream() {
return fsDataInputStream;
} | 3.26 |
flink_StrategyUtils_findDataType_rdh | /**
* Finds a data type that is close to the given data type in terms of nullability and conversion
* class but of the given logical root.
*/
static Optional<DataType> findDataType(CallContext callContext, boolean throwOnFailure, DataType actualDataType, LogicalTypeRoot expectedRoot, @Nullable Boolean expectedNullability) {
final LogicalType actualType = actualDataType.getLogicalType();
return // check if type can be implicitly casted
// preserve bridging class if possible
// set nullability
Optional.ofNullable(findDataTypeOfRoot(actualDataType, expectedRoot)).map(newDataType -> {
if (Objects.equals(expectedNullability, Boolean.TRUE)) {
return newDataType.nullable();
} else if (Objects.equals(expectedNullability, Boolean.FALSE)) {
return newDataType.notNull();
} else if (actualType.isNullable()) {
return newDataType.nullable();
}
return newDataType.notNull();
}).map(newDataType -> {
final Class<?> clazz = actualDataType.getConversionClass();
final LogicalType newType = newDataType.getLogicalType();
if (newType.supportsOutputConversion(clazz)) {
return newDataType.bridgedTo(clazz);
}
return newDataType;
}).filter(newDataType -> {
if (supportsImplicitCast(actualType, newDataType.getLogicalType())) {
return true;
}
if (throwOnFailure) {
throw callContext.newValidationError("Unsupported argument type. Expected type root '%s' but actual type was '%s'.", expectedRoot, actualType);
}
return false;
});
} | 3.26 |
flink_StrategyUtils_findDataTypeOfRoot_rdh | /**
* Returns a data type for the given data type and expected root.
*
* <p>This method is aligned with {@link LogicalTypeCasts#supportsImplicitCast(LogicalType,
* LogicalType)}.
*
* <p>The "fallback" data type for each root represents the default data type for a NULL
* literal. NULL literals will receive the smallest precision possible for having little impact
* when finding a common type. The output of this method needs to be checked again if an
* implicit cast is supported.
*/
@Nullable
private static DataType findDataTypeOfRoot(DataType actualDataType, LogicalTypeRoot expectedRoot) {
final LogicalType actualType = actualDataType.getLogicalType();
if (actualType.is(expectedRoot)) {
return actualDataType;
}
switch (expectedRoot) {
case CHAR :
return DataTypes.CHAR(CharType.DEFAULT_LENGTH);
case VARCHAR :
if (actualType.is(CHAR)) {
return DataTypes.VARCHAR(getLength(actualType));
}
return DataTypes.VARCHAR(VarCharType.DEFAULT_LENGTH);
case BOOLEAN :
return DataTypes.BOOLEAN();
case BINARY :
return DataTypes.BINARY(BinaryType.DEFAULT_LENGTH);
case VARBINARY :
if (actualType.is(BINARY)) {
return DataTypes.VARBINARY(getLength(actualType));
}
return DataTypes.VARBINARY(VarBinaryType.DEFAULT_LENGTH);
case DECIMAL :
if (actualType.is(EXACT_NUMERIC)) {
return DataTypes.DECIMAL(getPrecision(actualType), getScale(actualType));
} else if (actualType.is(APPROXIMATE_NUMERIC)) {
final int precision = getPrecision(actualType);
// we don't know where the precision occurs (before or after the dot)
return DataTypes.DECIMAL(precision * 2, precision);
}
return DataTypes.DECIMAL(DecimalType.MIN_PRECISION, DecimalType.MIN_SCALE);
case TINYINT :
return DataTypes.TINYINT();
case SMALLINT :
return DataTypes.SMALLINT();
case INTEGER :
return DataTypes.INT();
case BIGINT :
return DataTypes.BIGINT();
case FLOAT :
return DataTypes.FLOAT();
case DOUBLE :
return DataTypes.DOUBLE();
case DATE :
return DataTypes.DATE();
case TIME_WITHOUT_TIME_ZONE :
if (actualType.is(TIMESTAMP_WITHOUT_TIME_ZONE)) {
return DataTypes.TIME(getPrecision(actualType));
}
return DataTypes.TIME();
case TIMESTAMP_WITHOUT_TIME_ZONE :
return DataTypes.TIMESTAMP();
case TIMESTAMP_WITH_TIME_ZONE :
return DataTypes.TIMESTAMP_WITH_TIME_ZONE();
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE();
case INTERVAL_YEAR_MONTH :
return DataTypes.INTERVAL(DataTypes.MONTH());
case INTERVAL_DAY_TIME :
return DataTypes.INTERVAL(DataTypes.SECOND());
case NULL :
return DataTypes.NULL();
case ARRAY :
case MULTISET :
case MAP :
case ROW :
case DISTINCT_TYPE :
case STRUCTURED_TYPE :
case RAW :
case SYMBOL :
case UNRESOLVED :
default :
return null;
}
} | 3.26 |
flink_InstantiationUtil_m0_rdh | /**
* Clones the given writable using the {@link IOReadableWritable serialization}.
*
* @param original
* Object to clone
* @param <T>
* Type of the object to clone
* @return Cloned object
* @throws IOException
* Thrown is the serialization fails.
*/
public static <T extends IOReadableWritable> T m0(T original) throws IOException {
if (original == null) {
return null;
}
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(baos)) {
original.write(out);
}
final ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
try (DataInputViewStreamWrapper in = new DataInputViewStreamWrapper(bais)) {
@SuppressWarnings("unchecked")
T copy = ((T) (instantiate(original.getClass())));
copy.read(in);
return copy;
}
} | 3.26 |
flink_InstantiationUtil_clone_rdh | /**
* Clones the given serializable object using Java serialization, using the given classloader to
* resolve the cloned classes.
*
* @param obj
* Object to clone
* @param classLoader
* The classloader to resolve the classes during deserialization.
* @param <T>
* Type of the object to clone
* @return Cloned object
* @throws IOException
* Thrown if the serialization or deserialization process fails.
* @throws ClassNotFoundException
* Thrown if any of the classes referenced by the object cannot
* be resolved during deserialization.
*/
public static <T extends Serializable> T clone(T obj, ClassLoader classLoader) throws IOException, ClassNotFoundException {
if (obj == null) {
return null;
} else {
final byte[] v30 = serializeObject(obj);
return deserializeObject(v30, classLoader);
}
} | 3.26 |
flink_InstantiationUtil_isProperClass_rdh | /**
* Checks, whether the class is a proper class, i.e. not abstract or an interface, and not a
* primitive type.
*
* @param clazz
* The class to check.
* @return True, if the class is a proper class, false otherwise.
*/
public static boolean isProperClass(Class<?> clazz) {
int mods = clazz.getModifiers();
return !((Modifier.isAbstract(mods) || Modifier.isInterface(mods)) || Modifier.isNative(mods));
} | 3.26 |
flink_InstantiationUtil_hasPublicNullaryConstructor_rdh | /**
* Checks, whether the given class has a public nullary constructor.
*
* @param clazz
* The class to check.
* @return True, if the class has a public nullary constructor, false if not.
*/
public static boolean hasPublicNullaryConstructor(Class<?> clazz) {
Constructor<?>[] constructors = clazz.getConstructors();
for (Constructor<?> constructor : constructors) {
if ((constructor.getParameterCount() == 0) && Modifier.isPublic(constructor.getModifiers())) {
return true;
}
}
return false;
} | 3.26 |
flink_InstantiationUtil_isNonStaticInnerClass_rdh | /**
* Checks, whether the class is an inner class that is not statically accessible. That is
* especially true for anonymous inner classes.
*
* @param clazz
* The class to check.
* @return True, if the class is a non-statically accessible inner class.
*/
public static boolean isNonStaticInnerClass(Class<?> clazz) {
return (clazz.getEnclosingClass() != null) && ((clazz.getDeclaringClass() == null) || (!Modifier.isStatic(clazz.getModifiers())));
} | 3.26 |
flink_InstantiationUtil_checkForInstantiation_rdh | /**
* Performs a standard check whether the class can be instantiated by {@code Class#newInstance()}.
*
* @param clazz
* The class to check.
* @throws RuntimeException
* Thrown, if the class cannot be instantiated by {@code Class#newInstance()}.
*/
public static void checkForInstantiation(Class<?> clazz) {
final String errorMessage = checkForInstantiationError(clazz);
if (errorMessage != null) {
throw new RuntimeException((("The class '" + clazz.getName()) + "' is not instantiable: ") + errorMessage);
}
} | 3.26 |
flink_InstantiationUtil_resolveClassByName_rdh | /**
* Loads a class by name from the given input stream and reflectively instantiates it.
*
* <p>This method will use {@link DataInputView#readUTF()} to read the class name, and then
* attempt to load the class from the given ClassLoader.
*
* <p>The resolved class is checked to be equal to or a subtype of the given supertype class.
*
* @param in
* The stream to read the class name from.
* @param cl
* The class loader to resolve the class.
* @param supertype
* A class that the resolved class must extend.
* @throws IOException
* Thrown, if the class name could not be read, the class could not be
* found, or the class is not a subtype of the given supertype class.
*/
public static <T> Class<T> resolveClassByName(DataInputView in, ClassLoader cl, Class<? super T> supertype) throws IOException {
final String className = in.readUTF();
final Class<?> rawClazz;
try {
rawClazz = Class.forName(className, false, cl);
} catch (ClassNotFoundException e) {
String error = ("Could not find class '" + className) + "' in classpath.";
if (className.contains("SerializerConfig")) {
error += ((" TypeSerializerConfigSnapshot and it's subclasses are not supported since Flink 1.17." + " If you are using built-in serializers, please first migrate to Flink 1.16.") + " If you are using custom serializers, please migrate them to") + " TypeSerializerSnapshot using Flink 1.16.";
}
throw new IOException(error, e);
}
if (!supertype.isAssignableFrom(rawClazz)) {
throw new IOException((("The class " + className) + " is not a subclass of ") + supertype.getName());
}
@SuppressWarnings("unchecked")
Class<T> clazz = ((Class<T>) (rawClazz));
return clazz;
} | 3.26 |
flink_InstantiationUtil_instantiate_rdh | /**
* Creates a new instance of the given class.
*
* @param <T>
* The generic type of the class.
* @param clazz
* The class to instantiate.
* @return An instance of the given class.
* @throws RuntimeException
* Thrown, if the class could not be instantiated. The exception
* contains a detailed message about the reason why the instantiation failed.
*/
public static <T> T instantiate(Class<T> clazz) {
if (clazz == null) {
throw new NullPointerException();
} // try to instantiate the class
try {
return clazz.newInstance();
} catch (InstantiationException | IllegalAccessException iex) {
// check for the common problem causes
checkForInstantiation(clazz);
// here we are, if non of the common causes was the problem. then the error was
// most likely an exception in the constructor or field initialization
throw new RuntimeException((("Could not instantiate type '" + clazz.getName()) + "' due to an unspecified exception: ") + iex.getMessage(), iex);
} catch (Throwable t) {
String message = t.getMessage();
throw new RuntimeException((("Could not instantiate type '" + clazz.getName()) + "' Most likely the constructor (or a member variable initialization) threw an exception") + (message == null ? "." : ": " + message),
t);}
} | 3.26 |
flink_InstantiationUtil_isPublic_rdh | /**
* Checks, whether the given class is public.
*
* @param clazz
* The class to check.
* @return True, if the class is public, false if not.
*/
public static boolean isPublic(Class<?> clazz) {
return Modifier.isPublic(clazz.getModifiers());
} | 3.26 |
flink_InstantiationUtil_cloneUnchecked_rdh | /**
* Unchecked equivalent of {@link #clone(Serializable)}.
*
* @param obj
* Object to clone
* @param <T>
* Type of the object to clone
* @return The cloned object
*/
public static <T extends Serializable> T cloneUnchecked(T obj) {
try {
return clone(obj, obj.getClass().getClassLoader());
} catch (IOException | ClassNotFoundException e) {
throw new RuntimeException(String.format("Unable to clone instance of %s.", obj.getClass().getName()), e);
}
} | 3.26 |
flink_StreamTaskSourceInput_checkpointStarted_rdh | /**
* This method is used with unaligned checkpoints to mark the arrival of a first {@link CheckpointBarrier}. For chained sources, there is no {@link CheckpointBarrier} per se flowing
* through the job graph. We can assume that an imaginary {@link CheckpointBarrier} was produced
* by the source, at any point of time of our choosing.
*
* <p>We are choosing to interpret it, that {@link CheckpointBarrier} for sources was received
* immediately as soon as we receive either checkpoint start RPC, or {@link CheckpointBarrier}
* from a network input. So that we can checkpoint state of the source and all of the other
* operators at the same time.
*
* <p>Also we are choosing to block the source, as a best effort optimisation as: - either there
* is no backpressure and the checkpoint "alignment" will happen very quickly anyway - or there
* is a backpressure, and it's better to prioritize processing data from the network to speed up
* checkpointing. From the cluster resource utilisation perspective, by blocking chained source
* doesn't block any resources from being used, as this task running the source has a backlog of
* buffered input data waiting to be processed.
*
* <p>However from the correctness point of view, {@link #checkpointStarted(CheckpointBarrier)}
* and {@link #checkpointStopped(long)} methods could be empty no-op.
*/
@Override
public void checkpointStarted(CheckpointBarrier barrier) {
blockConsumption(null);
} | 3.26 |
flink_ResultSubpartition_onConsumedSubpartition_rdh | /**
* Notifies the parent partition about a consumed {@link ResultSubpartitionView}.
*/
protected void onConsumedSubpartition() {
f0.onConsumedSubpartition(getSubPartitionIndex());
} | 3.26 |
flink_AbstractFileIOChannel_getChannelID_rdh | // --------------------------------------------------------------------------------------------
@Override
public final ID getChannelID() {
return this.id;
} | 3.26 |
flink_DeclarativeAggregateFunction_mergeOperand_rdh | /**
* Merge input of {@link #mergeExpressions()}, the input are AGG buffer generated by user
* definition.
*/
public final UnresolvedReferenceExpression mergeOperand(UnresolvedReferenceExpression aggBuffer) {
String name = String.valueOf(Arrays.asList(aggBufferAttributes()).indexOf(aggBuffer));
validateOperandName(name);
return unresolvedRef(name);
} | 3.26 |
flink_DeclarativeAggregateFunction_operand_rdh | /**
* Arg of accumulate and retract, the input value (usually obtained from a new arrived data).
*/
public final UnresolvedReferenceExpression operand(int i) {
String name = String.valueOf(i);
if (m1().contains(name)) {
throw new IllegalStateException(String.format("Agg buffer name(%s) should not same to operands.", name));
}
return unresolvedRef(name);
} | 3.26 |
flink_DeclarativeAggregateFunction_operands_rdh | /**
* Args of accumulate and retract, the input value (usually obtained from a new arrived data).
*/
public final UnresolvedReferenceExpression[] operands() {
int operandCount = operandCount();
Preconditions.checkState(operandCount >= 0, "inputCount must be greater than or equal to 0.");
UnresolvedReferenceExpression[] ret = new UnresolvedReferenceExpression[operandCount];
for (int i = 0; i < operandCount; i++) {
String name = String.valueOf(i);
validateOperandName(name);
ret[i] = unresolvedRef(name);
}
return ret;
} | 3.26 |
flink_DeclarativeAggregateFunction_mergeOperands_rdh | /**
* Merge inputs of {@link #mergeExpressions()}, these inputs are agg buffer generated by user
* definition.
*/
public final UnresolvedReferenceExpression[] mergeOperands() {
UnresolvedReferenceExpression[] aggBuffers = aggBufferAttributes();
UnresolvedReferenceExpression[] ret = new UnresolvedReferenceExpression[aggBuffers.length];
for (int i = 0; i < aggBuffers.length; i++) {
String name = String.valueOf(i);
validateOperandName(name);
ret[i] = unresolvedRef(name);
}
return ret;
} | 3.26 |
flink_AbstractPythonStreamGroupAggregateOperator_onProcessingTime_rdh | /**
* Invoked when a processing-time timer fires.
*/
@Override
public void onProcessingTime(InternalTimer<RowData, VoidNamespace> timer) throws Exception {
if (stateCleaningEnabled) {
RowData v0 = timer.getKey();
long timestamp = timer.getTimestamp();
reuseTimerRowData.setLong(2, timestamp);
reuseTimerRowData.setField(3, v0);
udfInputTypeSerializer.serialize(reuseTimerRowData, baosWrapper);
pythonFunctionRunner.process(baos.toByteArray());
baos.reset();
elementCount++;
}
} | 3.26 |
flink_AbstractPythonStreamGroupAggregateOperator_onEventTime_rdh | /**
* Invoked when an event-time timer fires.
*/
@Override
public void onEventTime(InternalTimer<RowData, VoidNamespace> timer) {
} | 3.26 |
flink_ZooKeeperCheckpointStoreUtil_nameToCheckpointID_rdh | /**
* Converts a path to the checkpoint id.
*
* @param path
* in ZooKeeper
* @return Checkpoint id parsed from the path
*/
@Override
public long nameToCheckpointID(String path) {
try {
String numberString;// check if we have a leading slash
if ('/' == path.charAt(0)) {
numberString = path.substring(1);
} else {
numberString = path;
}
return Long.parseLong(numberString);
} catch (NumberFormatException e) {
f0.warn("Could not parse checkpoint id from {}. This indicates that the " + "checkpoint id to path conversion has changed.", path);
return INVALID_CHECKPOINT_ID;
}
} | 3.26 |
flink_ZooKeeperCheckpointStoreUtil_checkpointIDToName_rdh | /**
* Convert a checkpoint id into a ZooKeeper path.
*
* @param checkpointId
* to convert to the path
* @return Path created from the given checkpoint id
*/
@Override
public String checkpointIDToName(long checkpointId) {
return String.format("/%019d", checkpointId);
} | 3.26 |
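The two ZooKeeperCheckpointStoreUtil snippets above are inverses of each other: a checkpoint id becomes a zero-padded 19-digit ZooKeeper node name, and the parse strips an optional leading slash. The sketch below is a standalone round-trip illustration of that logic (it re-implements the two methods locally rather than depending on the Flink class), just to make the path format concrete.

```java
public class CheckpointPathRoundTrip {
    // Mirrors checkpointIDToName: zero-padded 19-digit path with a leading slash.
    static String checkpointIDToName(long checkpointId) {
        return String.format("/%019d", checkpointId);
    }

    // Mirrors nameToCheckpointID: drop a leading slash, then parse the number.
    static long nameToCheckpointID(String path) {
        String numberString = path.charAt(0) == '/' ? path.substring(1) : path;
        return Long.parseLong(numberString);
    }

    public static void main(String[] args) {
        String name = checkpointIDToName(42L);
        System.out.println(name);                     // /0000000000000000042
        System.out.println(nameToCheckpointID(name)); // 42
    }
}
```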
flink_SourceReaderTestBase_testRead_rdh | /**
* Simply test the reader reads all the splits fine.
*/
@Test
void testRead() throws Exception {
try (SourceReader<Integer, SplitT> reader = createReader()) {
reader.addSplits(getSplits(numSplits, NUM_RECORDS_PER_SPLIT, Boundedness.BOUNDED));
ValidatingSourceOutput output = new ValidatingSourceOutput();
while (output.count < totalNumRecords) {
reader.pollNext(output);
}
output.validate();
}
} | 3.26 |
flink_DispatcherGateway_stopWithSavepointAndGetLocation_rdh | /**
* Stops the job with a savepoint, returning a future that completes with the savepoint location
* when the savepoint is completed.
*
* @param jobId
* the job id
* @param targetDirectory
* Target directory for the savepoint.
* @param savepointMode
* context of the savepoint operation
* @param timeout
* for the rpc call
* @return Future which is completed with the savepoint location once it is completed
*/
default CompletableFuture<String> stopWithSavepointAndGetLocation(JobID jobId, String targetDirectory, SavepointFormatType formatType, TriggerSavepointMode savepointMode, @RpcTimeout final Time timeout) {
throw new UnsupportedOperationException();
} | 3.26 |
flink_DispatcherGateway_triggerSavepointAndGetLocation_rdh | /**
* Triggers a savepoint with the given savepoint directory as a target, returning a future that
* completes with the savepoint location when it is complete.
*
* @param jobId
* the job id
* @param targetDirectory
* Target directory for the savepoint.
* @param formatType
* Binary format of the savepoint.
* @param savepointMode
* context of the savepoint operation
* @param timeout
* Timeout for the asynchronous operation
* @return Future which is completed once the operation is triggered successfully
*/
default CompletableFuture<String> triggerSavepointAndGetLocation(JobID jobId, String targetDirectory, SavepointFormatType formatType, TriggerSavepointMode savepointMode, @RpcTimeout Time timeout) {
throw new UnsupportedOperationException();
} | 3.26 |
flink_FileIOChannel_getPathFile_rdh | /**
* Returns the path to the underlying temporary file as a File.
*/
public File getPathFile() {
return path;
} | 3.26 |
flink_FileIOChannel_getPath_rdh | /**
* Returns the path to the underlying temporary file.
*/
public String getPath() {
return path.getAbsolutePath();
} | 3.26 |
flink_Tuple7_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6), where
* the individual fields are the value returned by calling {@link Object#toString} on that
* field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",")
+ StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ")";
} | 3.26 |
flink_Tuple7_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple7)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple7 tuple = ((Tuple7) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null)
{
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple7_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
} | 3.26 |
flink_Tuple7_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6> Tuple7<T0, T1, T2, T3, T4, T5, T6> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6) {
return new Tuple7<>(f0, f1, f2, f3, f4, f5, f6);
} | 3.26 |
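The `of` factory documented in the row above exists so that the compiler infers the tuple's type arguments. A minimal usage sketch follows; it assumes the class is the usual `org.apache.flink.api.java.tuple.Tuple7` from flink-core.

```java
import org.apache.flink.api.java.tuple.Tuple7;

public class Tuple7OfExample {
    public static void main(String[] args) {
        // The static factory lets the compiler infer all seven type arguments,
        // instead of spelling them out in a constructor call.
        Tuple7<Integer, Long, Double, String, Boolean, Short, Byte> t =
                Tuple7.of(1, 2L, 3.0, "four", true, (short) 6, (byte) 7);
        System.out.println(t); // (1,2,3.0,four,true,6,7)
    }
}
```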
flink_IntValue_compareTo_rdh | // --------------------------------------------------------------------------------------------
@Override
public int compareTo(IntValue o) {
final int other = o.value;
return this.value < other ? -1 : this.value > other ? 1 : 0;
} | 3.26 |
flink_IntValue_getValue_rdh | /**
* Returns the value of the encapsulated int.
*
* @return the value of the encapsulated int.
*/
public int getValue() {
return this.value;
} | 3.26 |
flink_IntValue_read_rdh | // --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
this.value = in.readInt();
} | 3.26 |
flink_IntValue_getMaxNormalizedKeyLen_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getMaxNormalizedKeyLen() {
return 4;
} | 3.26 |
flink_IntValue_getBinaryLength_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getBinaryLength() {
return 4;
} | 3.26 |
flink_IntValue_setValue_rdh | /**
* Sets the encapsulated int to the specified value.
*
* @param value
* the new value of the encapsulated int.
*/
public void setValue(int value) {
this.value = value;
} | 3.26 |
flink_ConnectedComponents_main_rdh | // *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String... args) throws Exception {
LOGGER.warn(DATASET_DEPRECATION_INFO);
// Checking input parameters
final ParameterTool params = ParameterTool.fromArgs(args);
// set up execution environment
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
final int maxIterations = params.getInt("iterations", 10);
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
// read vertex and edge data
DataSet<Long> vertices = getVertexDataSet(env, params);
DataSet<Tuple2<Long, Long>> edges = getEdgeDataSet(env, params).flatMap(new UndirectEdge());
// assign the initial components (equal to the vertex id)
DataSet<Tuple2<Long, Long>> verticesWithInitialId = vertices.map(new DuplicateValue<Long>());
// open a delta iteration
DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration = verticesWithInitialId.iterateDelta(verticesWithInitialId, maxIterations, 0);
// apply the step logic: join with the edges, select the minimum neighbor, update if the
// component of the candidate is smaller
DataSet<Tuple2<Long, Long>> changes = iteration.getWorkset().join(edges).where(0).equalTo(0).with(new NeighborWithComponentIDJoin()).groupBy(0).aggregate(Aggregations.MIN, 1).join(iteration.getSolutionSet()).where(0).equalTo(0).with(new ComponentIdFilter());
// close the delta iteration (delta and new workset are identical)
DataSet<Tuple2<Long, Long>> result = iteration.closeWith(changes, changes);
// emit result
if (params.has("output")) {
result.writeAsCsv(params.get("output"), "\n", " ");
// execute program
env.execute("Connected Components Example");
} else {
System.out.println("Printing result to stdout. Use --output to specify output path.");
result.print();
}
} | 3.26 |
flink_ConnectedComponents_getVertexDataSet_rdh | // *************************************************************************
// UTIL METHODS
// *************************************************************************
private static DataSet<Long> getVertexDataSet(ExecutionEnvironment env, ParameterTool params) {
if (params.has("vertices")) {
return env.readCsvFile(params.get("vertices")).types(Long.class).map(new MapFunction<Tuple1<Long>, Long>() {
public Long map(Tuple1<Long> value) {
return value.f0;
}
});
} else {
System.out.println("Executing Connected Components example with default vertices data set.");
System.out.println("Use --vertices to specify file input.");
return ConnectedComponentsData.getDefaultVertexDataSet(env);
}
} | 3.26 |
flink_DataSinkNode_computeUnclosedBranchStack_rdh | // --------------------------------------------------------------------------------------------
// Branch Handling
// --------------------------------------------------------------------------------------------
@Override
public void computeUnclosedBranchStack() {
if (this.openBranches != null) {
return;
}
// we need to track open branches even in the sinks, because they get "closed" when
// we build a single "root" for the data flow plan
addClosedBranches(getPredecessorNode().closedBranchingNodes);
this.openBranches = getPredecessorNode().getBranchesForParent(this.input);
} | 3.26 |
flink_DataSinkNode_getOutgoingConnections_rdh | /**
* Gets all outgoing connections, which is an empty set for the data sink.
*
* @return An empty list.
*/
@Override
public List<DagConnection> getOutgoingConnections() {
return Collections.emptyList();
} | 3.26 |
flink_DataSinkNode_getAlternativePlans_rdh | // --------------------------------------------------------------------------------------------
// Recursive Optimization
// --------------------------------------------------------------------------------------------
@Override
public List<PlanNode> getAlternativePlans(CostEstimator estimator) {
// check if we have a cached version
if (this.cachedPlans != null) {
return this.cachedPlans;
}
// calculate alternative sub-plans for predecessor
List<? extends PlanNode> subPlans = getPredecessorNode().getAlternativePlans(estimator);
List<PlanNode> outputPlans = new ArrayList<PlanNode>();
final int parallelism = getParallelism();
final int inDop = getPredecessorNode().getParallelism();
final ExecutionMode executionMode = this.input.getDataExchangeMode();
final boolean dopChange = parallelism != inDop;
final boolean breakPipeline = this.input.isBreakingPipeline();
InterestingProperties ips = this.input.getInterestingProperties();
for (PlanNode p : subPlans) {
for (RequestedGlobalProperties gp : ips.getGlobalProperties()) {
for (RequestedLocalProperties lp : ips.getLocalProperties()) {
Channel c = new Channel(p);
gp.parameterizeChannel(c, dopChange, executionMode, breakPipeline);
lp.parameterizeChannel(c);
c.setRequiredLocalProps(lp);
c.setRequiredGlobalProps(gp);
// no need to check whether the created properties meet what we need in case
// of ordering or global ordering, because the only interesting properties we
// have
// are what we require
outputPlans.add(new SinkPlanNode(this, ("DataSink (" + this.getOperator().getName()) + ")", c));
}
}
}
// cost and prune the plans
for (PlanNode node : outputPlans) {
estimator.costOperator(node);
}
prunePlanAlternatives(outputPlans);
this.cachedPlans = outputPlans;
return outputPlans;
} | 3.26 |
flink_DataSinkNode_getInputConnection_rdh | // --------------------------------------------------------------------------------------
/**
* Gets the input of the sink.
*
* @return The input connection.
*/
public DagConnection getInputConnection() {
return this.input;
} | 3.26 |
flink_DataSinkNode_getOperator_rdh | /**
* Gets the operator for which this optimizer sink node was created.
*
* @return The node's underlying operator.
*/
@Override
public GenericDataSinkBase<?> getOperator() {
return ((GenericDataSinkBase<?>) (super.getOperator()));
} | 3.26 |
flink_DataSinkNode_accept_rdh | // Miscellaneous
// --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<OptimizerNode> visitor) {
if (visitor.preVisit(this)) {
if (getPredecessorNode() != null) {
getPredecessorNode().accept(visitor);
} else {
throw new CompilerException();
}
visitor.postVisit(this);
}
} | 3.26 |
flink_DataSinkNode_computeOperatorSpecificDefaultEstimates_rdh | /**
* Computes the estimated outputs for the data sink. Since the sink does not modify anything, it
* simply copies the output estimates from its direct predecessor.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
this.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords();
this.estimatedOutputSize = getPredecessorNode().getEstimatedOutputSize();
} | 3.26 |
flink_StreamIterationHead_processInput_rdh | // ------------------------------------------------------------------------
@Override
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
StreamRecord<OUT> nextRecord = (shouldWait) ? dataChannel.poll(iterationWaitTime, TimeUnit.MILLISECONDS) : dataChannel.take();
if (nextRecord != null) {
for (RecordWriterOutput<OUT> output : streamOutputs) {
output.collect(nextRecord);
}
} else {
controller.suspendDefaultAction();
mailboxProcessor.suspend();
}
} | 3.26 |
flink_StreamIterationHead_createBrokerIdString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Creates the identification string with which head and tail task find the shared blocking
* queue for the back channel. The identification string is unique per parallel head/tail pair
* per iteration per job.
*
* @param jid
* The job ID.
* @param iterationID
* The id of the iteration in the job.
* @param subtaskIndex
* The parallel subtask number
* @return The identification string.
*/
public static String createBrokerIdString(JobID jid, String iterationID, int subtaskIndex) {
return (((jid + "-") + iterationID) + "-") + subtaskIndex;
} | 3.26 |
flink_Tuple18_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17), where the individual fields are the value
* returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",")
+ StringUtils.arrayAwareToString(this.f1)) +
",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") +
StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") +
StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ",") + StringUtils.arrayAwareToString(this.f16)) + ",") + StringUtils.arrayAwareToString(this.f17)) + ")";
} | 3.26 |
flink_Tuple18_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> Tuple18<T0, T1, T2, T3, T4,
T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
T15, T16, T17> of(T0 f0, T1 f1, T2
f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17) {
return new Tuple18<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9,
f10, f11, f12, f13, f14, f15, f16, f17);
} | 3.26 |
flink_Tuple18_m0_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
* @param f13
* The value for field 13
* @param f14
* The value for field 14
* @param f15
* The value for field 15
* @param f16
* The value for field 16
* @param f17
* The value for field 17
*/
public void m0(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
} | 3.26 |
flink_Tuple18_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple18)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple18 tuple = ((Tuple18) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
return true;
} | 3.26 |
flink_CompletedCheckpointStore_getLatestCheckpointId_rdh | /**
* Returns the id of the latest completed checkpoints.
*/
default long getLatestCheckpointId() {
try {
List<CompletedCheckpoint> allCheckpoints = getAllCheckpoints();
if (allCheckpoints.isEmpty()) {
return 0;
}
return allCheckpoints.get(allCheckpoints.size() - 1).getCheckpointID();
} catch (Throwable throwable) {
LOG.warn("Get the latest completed checkpoints failed", throwable);
return 0;
}
} | 3.26 |
flink_CompletedCheckpointStore_getLatestCheckpoint_rdh | /**
* Returns the latest {@link CompletedCheckpoint} instance or <code>null</code> if none was
* added.
*/
default CompletedCheckpoint getLatestCheckpoint() throws Exception {
List<CompletedCheckpoint> allCheckpoints = getAllCheckpoints();
if (allCheckpoints.isEmpty()) {
return null;
}
return allCheckpoints.get(allCheckpoints.size() - 1);
} | 3.26 |
flink_CatalogColumnStatistics_copy_rdh | /**
* Create a deep copy of "this" instance.
*
* @return a deep copy
*/
public CatalogColumnStatistics copy() {
Map<String, CatalogColumnStatisticsDataBase> copy = CollectionUtil.newHashMapWithExpectedSize(columnStatisticsData.size());
for (Map.Entry<String, CatalogColumnStatisticsDataBase> entry : columnStatisticsData.entrySet()) {
copy.put(entry.getKey(), entry.getValue().copy());
}
return new CatalogColumnStatistics(copy, new HashMap<>(this.properties));
} | 3.26 |
flink_ResourceCounter_getResourcesWithCount_rdh | /**
* Gets the stored resources and their counts. The counts are guaranteed to be positive (> 0).
*
* @return collection of {@link ResourceProfile} and count pairs
*/
public Collection<Map.Entry<ResourceProfile, Integer>> getResourcesWithCount() {
return resources.entrySet();
}
/**
* Checks whether resourceProfile is contained in this counter.
*
* @param resourceProfile
* resourceProfile to check whether it is contained
* @return {@code true} if the counter has a positive count for the given resourceProfile;
otherwise {@code false} | 3.26 |
flink_ResourceCounter_subtract_rdh | /**
* Subtracts decrement from the count of the given resourceProfile and returns the new value.
*
* @param resourceProfile
* resourceProfile from which to subtract decrement
* @param decrement
* decrement is the number by which to decrease resourceProfile
* @return new ResourceCounter containing the new value
*/
public ResourceCounter subtract(ResourceProfile resourceProfile, int decrement) {
final Map<ResourceProfile, Integer> newValues = new HashMap<>(resources);
final int newValue = resources.getOrDefault(resourceProfile, 0) - decrement;
updateNewValue(newValues, resourceProfile, newValue);
return new ResourceCounter(newValues);
} | 3.26 |
flink_ResourceCounter_withResources_rdh | /**
* Creates a resource counter with the specified set of resources.
*
* @param resources
* resources with which to initialize the resource counter
* @return ResourceCounter which contains the specified set of resources
*/
public static ResourceCounter withResources(Map<ResourceProfile, Integer> resources) {
return new ResourceCounter(new HashMap<>(resources));
} | 3.26 |
flink_ResourceCounter_withResource_rdh | /**
* Creates a resource counter with the given resourceProfile and its count.
*
* @param resourceProfile
* resourceProfile for the given count
* @param count
* count of the given resourceProfile
* @return ResourceCounter which contains the specified resourceProfile and its count
*/
public static ResourceCounter withResource(ResourceProfile resourceProfile, int count) {
Preconditions.checkArgument(count >= 0);
return count == 0 ? empty() : new ResourceCounter(Collections.singletonMap(resourceProfile, count));
} | 3.26 |
flink_ResourceCounter_isEmpty_rdh | /**
* Checks whether the resource counter is empty.
*
* @return {@code true} if the counter does not contain any counts; otherwise {@code false}
*/
public boolean isEmpty() {
return resources.isEmpty();
} | 3.26 |
flink_ResourceCounter_getTotalResourceCount_rdh | /**
* Computes the total number of resources in this counter.
*
* @return the total number of resources in this counter
*/
public int getTotalResourceCount() {
return resources.isEmpty() ? 0 : resources.values().stream().reduce(0, Integer::sum);
} | 3.26 |
flink_ResourceCounter_getTotalResource_rdh | /**
* Computes the total resources in this counter.
*
* @return the total resources in this counter
*/
public ResourceProfile getTotalResource() {
return resources.entrySet().stream().map(entry -> entry.getKey().multiply(entry.getValue())).reduce(ResourceProfile.ZERO, ResourceProfile::merge);
} | 3.26 |
flink_ResourceCounter_add_rdh | /**
* Adds increment to the count of resourceProfile and returns the new value.
*
* @param resourceProfile
* resourceProfile to which to add increment
* @param increment
* increment is the number by which to increase the resourceProfile
* @return new ResourceCounter containing the result of the addition
*/
public ResourceCounter add(ResourceProfile resourceProfile, int increment) {
final Map<ResourceProfile, Integer> newValues = new HashMap<>(resources);
final int newValue = resources.getOrDefault(resourceProfile, 0) + increment;
updateNewValue(newValues, resourceProfile, newValue);
return new ResourceCounter(newValues);
} | 3.26 |
flink_ResourceCounter_getResources_rdh | /**
* Gets all stored {@link ResourceProfile ResourceProfiles}.
*
* @return collection of stored {@link ResourceProfile ResourceProfiles}
*/
public Set<ResourceProfile> getResources() {
return resources.keySet();
} | 3.26 |
flink_ResourceCounter_empty_rdh | /**
* Creates an empty resource counter.
*
* @return empty resource counter
*/
public static ResourceCounter empty() {
return new ResourceCounter(Collections.emptyMap());
} | 3.26 |
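Several rows above describe ResourceCounter as an immutable counter of ResourceProfiles, where `add` and `subtract` return new instances. The sketch below pulls those documented methods together in one hypothetical usage; the package locations (`org.apache.flink.runtime.util.ResourceCounter`, `org.apache.flink.runtime.clusterframework.types.ResourceProfile`) and the use of `ResourceProfile.ANY` are assumptions for illustration, not taken from the rows themselves.

```java
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.util.ResourceCounter;

public class ResourceCounterExample {
    public static void main(String[] args) {
        // The counter is immutable: each add/subtract returns a new instance.
        ResourceCounter counter = ResourceCounter.withResource(ResourceProfile.ANY, 2);
        counter = counter.add(ResourceProfile.ANY, 3);
        counter = counter.subtract(ResourceProfile.ANY, 1);

        System.out.println(counter.getTotalResourceCount()); // 4
        System.out.println(counter.isEmpty());               // false
    }
}
```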
flink_HashBasedDataBuffer_append_rdh | /**
* Partial data of the target record can be written if this {@link HashBasedDataBuffer} is full.
* The remaining data of the target record will be written to the next data region (a new data
* buffer or this data buffer after reset).
*/
@Override
public boolean append(ByteBuffer source, int targetChannel, Buffer.DataType dataType) throws IOException {
checkArgument(source.hasRemaining(), "Cannot append empty data.");
checkState(!isFinished, "Sort buffer is already finished.");
checkState(!isReleased, "Sort buffer is already released.");
int totalBytes = source.remaining();
if (dataType.isBuffer()) {
writeRecord(source, targetChannel);
} else {
writeEvent(source, targetChannel, dataType);
}
if (source.hasRemaining()) {
return true;
}
++numTotalRecords;
numTotalBytes += totalBytes - source.remaining();
return false;
} | 3.26 |
flink_HiveGenericUDAF_createAccumulator_rdh | /**
* This is invoked without calling open(), so we need to call init() for
* getNewAggregationBuffer(). TODO: re-evaluate how this will fit into Flink's new type
* inference and udf system
*/
@Override
public AggregationBuffer createAccumulator() {
try {
if (!initialized) {
init();
}
return partialEvaluator.getNewAggregationBuffer();
} catch (Exception e) {
throw new FlinkHiveUDFException(String.format("Failed to create accumulator for %s", hiveFunctionWrapper.getUDFClassName()), e);
}
} | 3.26 |
flink_SharedReference_applySync_rdh | /**
* Executes the code on the referenced object in a synchronized fashion. Note that this method
* is prone to deadlock if multiple references are accessed in a synchronized fashion in a
* nested call-chain.
*/
default <R> R applySync(Function<T, R> function) {
T object = get();
synchronized(object) {
return function.apply(object);
}
} | 3.26 |
flink_SharedReference_consumeSync_rdh | /**
* Executes the code on the referenced object in a synchronized fashion. Note that this method
* is prone to deadlock if multiple references are accessed in a synchronized fashion in a
* nested call-chain.
*/
default void consumeSync(Consumer<T> consumer) {
T object = get();
synchronized(object) {
consumer.accept(object);
}
} | 3.26 |
flink_KafkaEventsGeneratorJob_rpsFromSleep_rdh | // Used for backwards compatibility to convert legacy 'sleep' parameter to records per second.
private static double rpsFromSleep(int sleep, int parallelism) {
return (1000.0 / sleep) * parallelism;
} | 3.26 |
flink_MemCheckpointStreamFactory_close_rdh | // --------------------------------------------------------------------
@Override
public void close() {
if (closed.compareAndSet(false, true)) {
m0();
}
} | 3.26 |
flink_MemCheckpointStreamFactory_closeAndGetBytes_rdh | /**
* Closes the stream and returns the byte array containing the stream's data.
*
* @return The byte array containing the stream's data.
* @throws IOException
* Thrown if the size of the data exceeds the maximal
*/
public byte[] closeAndGetBytes() throws IOException {
if (closed.compareAndSet(false, true)) {
checkSize(os.size(), maxSize);
byte[] bytes = os.toByteArray();
m0();
return bytes;
} else {
throw new IOException("stream has already been closed");
}
} | 3.26 |
flink_SourceEventWrapper_getSourceEvent_rdh | /**
*
* @return The {@link SourceEvent} in this SourceEventWrapper.
*/
public SourceEvent getSourceEvent() {
return sourceEvent;
} | 3.26 |
flink_Configuration_addAll_rdh | /**
* Adds all entries from the given configuration into this configuration. The keys are prepended
* with the given prefix.
*
* @param other
* The configuration whose entries are added to this configuration.
* @param prefix
* The prefix to prepend.
*/
public void addAll(Configuration other, String prefix) {
final StringBuilder bld = new StringBuilder();
bld.append(prefix);
final int pl = bld.length();
synchronized(this.confData) {
synchronized(other.confData) {
for (Map.Entry<String, Object> entry : other.confData.entrySet()) {
bld.setLength(pl);
bld.append(entry.getKey());
this.confData.put(bld.toString(), entry.getValue());
}
}
}
} | 3.26 |
flink_Configuration_setDouble_rdh | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key
* the option specifying the key to be added
* @param value
* the value of the key/value pair to be added
*/
@PublicEvolving
public void setDouble(ConfigOption<Double> key, double value) {
setValueInternal(key.key(), value);
} | 3.26 |
flink_Configuration_getInteger_rdh | /**
* Returns the value associated with the given config option as an integer. If no value is
* mapped under any key of the option, it returns the specified default instead of the option's
* default value.
*
* @param configOption
* The configuration option
* @param overrideDefault
* The value to return if no value was mapper for any key of the option
* @return the configured value associated with the given config option, or the overrideDefault
*/
@PublicEvolving
public int getInteger(ConfigOption<Integer> configOption, int overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.26 |
flink_Configuration_m0_rdh | /**
* Returns the value associated with the given config option as a boolean. If no value is mapped
* under any key of the option, it returns the specified default instead of the option's default
* value.
*
* @param configOption
* The configuration option
* @param overrideDefault
* The value to return if no value was mapper for any key of the option
* @return the configured value associated with the given config option, or the overrideDefault
*/
@PublicEvolving
public boolean m0(ConfigOption<Boolean> configOption, boolean overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.26 |
flink_Configuration_removeKey_rdh | /**
* Removes given key from the configuration.
*
* @param key
* key of a config option to remove
* @return true is config has been removed, false otherwise
*/
public boolean removeKey(String key) {
synchronized(this.confData) {
boolean removed = this.confData.remove(key) != null;
removed |= removePrefixMap(confData, key);
return removed;
}
} | 3.26 |
flink_Configuration_getFloat_rdh | /**
* Returns the value associated with the given config option as a float. If no value is mapped
* under any key of the option, it returns the specified default instead of the option's default
* value.
*
* @param configOption
* The configuration option
* @param overrideDefault
* The value to return if no value was mapper for any key of the option
* @return the configured value associated with the given config option, or the overrideDefault
*/
@PublicEvolving
public float getFloat(ConfigOption<Float> configOption, float overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.26 |
flink_Configuration_getBoolean_rdh | /**
* Returns the value associated with the given config option as a boolean.
*
* @param configOption
* The configuration option
* @return the (default) value associated with the given config option
*/
@PublicEvolving
public boolean getBoolean(ConfigOption<Boolean> configOption) {
return getOptional(configOption).orElseGet(configOption::defaultValue);
} | 3.26 |
flink_Configuration_keySet_rdh | // --------------------------------------------------------------------------------------------
/**
* Returns the keys of all key/value pairs stored inside this configuration object.
*
* @return the keys of all key/value pairs stored inside this configuration object
*/
public Set<String> keySet() {
synchronized(this.confData) {
return new HashSet<>(this.confData.keySet());
}
} | 3.26 |
flink_Configuration_setValueInternal_rdh | // --------------------------------------------------------------------------------------------
<T> void setValueInternal(String key, T value, boolean canBePrefixMap) {
if (key == null) {
throw new NullPointerException("Key must not be null.");
}
if (value == null) {
throw new NullPointerException("Value must not be null.");
}
synchronized(this.confData) {
if (canBePrefixMap) {
removePrefixMap(this.confData, key);
}
this.confData.put(key, value);
}
} | 3.26 |
flink_Configuration_setInteger_rdh | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key
* the option specifying the key to be added
* @param value
* the value of the key/value pair to be added
*/
@PublicEvolving
public void setInteger(ConfigOption<Integer> key, int value) {
setValueInternal(key.key(), value);
} | 3.26 |
flink_Configuration_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
int hash = 0;
for (String v35 : this.confData.keySet()) {
hash ^= v35.hashCode();
}
return hash;
} | 3.26 |
flink_Configuration_setBoolean_rdh | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key
* the option specifying the key to be added
* @param value
* the value of the key/value pair to be added
*/
@PublicEvolving
public void setBoolean(ConfigOption<Boolean> key, boolean value) {
setValueInternal(key.key(), value);
} | 3.26 |
flink_Configuration_setString_rdh | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key
* the option specifying the key to be added
* @param value
* the value of the key/value pair to be added
*/
@PublicEvolving
public void setString(ConfigOption<String> key, String value) {
setValueInternal(key.key(), value);
} | 3.26 |
flink_Configuration_fromMap_rdh | // --------------------------------------------------------------------------------------------
/**
* Creates a new configuration that is initialized with the options of the given map.
*/
public static Configuration fromMap(Map<String, String> map) {
final Configuration configuration = new Configuration();
map.forEach(configuration::setString);
return configuration;
} | 3.26 |
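The `fromMap` factory in the row above simply copies every map entry into a fresh Configuration as a string-typed value. A minimal usage sketch, assuming the standard `org.apache.flink.configuration.Configuration` class; the concrete configuration keys are only illustrative:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.configuration.Configuration;

public class ConfigurationFromMapExample {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("taskmanager.numberOfTaskSlots", "4");
        props.put("state.backend.type", "rocksdb");

        // Every map entry is stored as a string-typed configuration value.
        Configuration conf = Configuration.fromMap(props);

        // Prints both keys (iteration order is not guaranteed).
        System.out.println(conf.keySet());
    }
}
```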
flink_Configuration_get_rdh | /**
* Please check the java doc of {@link #getRawValueFromOption(ConfigOption)}. If no keys are
* found in {@link Configuration}, default value of the given option will return. Please make
* sure there will be at least one value available. Otherwise, a NPE will be thrown by Flink
* when the value is used.
*
* <p>NOTE: current logic is not able to get the default value of the fallback key's
* ConfigOption, in case the given ConfigOption has no default value. If you want to use
* fallback key, please make sure its value could be found in {@link Configuration} at runtime.
*
* @param option
* metadata of the option to read
* @return the value of the given option
*/
@Override
public <T> T get(ConfigOption<T> option) {
return getOptional(option).orElseGet(option::defaultValue);
} | 3.26 |
flink_Configuration_getString_rdh | /**
* Returns the value associated with the given config option as a string. If no value is mapped
* under any key of the option, it returns the specified default instead of the option's default
* value.
*
* @param configOption
* The configuration option
* @return the (default) value associated with the given config option
*/
@PublicEvolving
public String getString(ConfigOption<String> configOption, String overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.26 |
flink_Configuration_getEnum_rdh | /**
* Returns the value associated with the given config option as an enum.
*
* @param enumClass
* The return enum class
* @param configOption
* The configuration option
* @throws IllegalArgumentException
* If the string associated with the given config option cannot
* be parsed as a value of the provided enum class.
*/
@PublicEvolving
public <T extends Enum<T>> T getEnum(final Class<T> enumClass, final ConfigOption<String> configOption) {
checkNotNull(enumClass, "enumClass must not be null");
checkNotNull(configOption, "configOption must not be null");
Object rawValue = getRawValueFromOption(configOption).orElseGet(configOption::defaultValue);
try {
return ConfigurationUtils.convertToEnum(rawValue, enumClass);
} catch (IllegalArgumentException ex) {
final String errorMessage = String.format("Value for config option %s must be one of %s (was %s)", configOption.key(), Arrays.toString(enumClass.getEnumConstants()), rawValue);
throw new IllegalArgumentException(errorMessage);
}
} | 3.26 |
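The `getEnum` row above reads a string-typed option and parses it against an enum's constants, throwing `IllegalArgumentException` on a mismatch. A usage sketch follows, assuming the standard `ConfigOptions` builder API; the option key and the `Mode` enum are hypothetical names introduced only for this example.

```java
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class GetEnumExample {
    enum Mode { BATCH, STREAMING }

    // Hypothetical string-typed option used only for this illustration.
    static final ConfigOption<String> MODE =
            ConfigOptions.key("example.execution.mode").stringType().defaultValue("BATCH");

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setString(MODE, "STREAMING");

        // getEnum resolves the raw string value against the enum's constants.
        Mode mode = conf.getEnum(Mode.class, MODE);
        System.out.println(mode); // STREAMING
    }
}
```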