name | code_snippet | score |
---|---|---|
flink_Tuple24_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple24)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple24 tuple = ((Tuple24) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) {
return false;
}
if (f20 != null ? !f20.equals(tuple.f20) : tuple.f20 != null) {
return false;
}
if (f21 != null ? !f21.equals(tuple.f21) : tuple.f21 != null) {
return false;
}
if (f22 != null ? !f22.equals(tuple.f22) : tuple.f22 != null) {
return false;
}
if (f23 != null ? !f23.equals(tuple.f23) : tuple.f23 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple24_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22, f23), where the
* individual fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ",") + StringUtils.arrayAwareToString(this.f16)) + ",") + StringUtils.arrayAwareToString(this.f17)) + ",") + StringUtils.arrayAwareToString(this.f18)) + ",") + StringUtils.arrayAwareToString(this.f19)) + ",") + StringUtils.arrayAwareToString(this.f20)) + ",")
+ StringUtils.arrayAwareToString(this.f21)) + ",") + StringUtils.arrayAwareToString(this.f22)) + ",") + StringUtils.arrayAwareToString(this.f23)) + ")";
} | 3.26 |
flink_Tuple24_copy_rdh | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> copy() {
return new Tuple24<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16, this.f17, this.f18, this.f19, this.f20, this.f21, this.f22, this.f23);
} | 3.26 |
flink_Tuple24_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20, T21 f21, T22 f22, T23 f23) {
return new Tuple24<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22, f23);
} | 3.26 |
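The inference benefit the Javadoc above describes is easiest to see with the smaller Tuple3 it mentions; a minimal sketch (values are illustrative):

import org.apache.flink.api.java.tuple.Tuple3;

public class TupleOfExample {
    public static void main(String[] args) {
        // Explicit constructor: type arguments must be spelled out (or supplied via the diamond).
        Tuple3<Integer, Double, String> explicit = new Tuple3<>(1, 2.0, "flink");
        // Factory method: the compiler infers <Integer, Double, String> from the arguments.
        Tuple3<Integer, Double, String> inferred = Tuple3.of(1, 2.0, "flink");
        System.out.println(explicit.equals(inferred)); // prints true
    }
}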
flink_BulkIterationPlanNode_getSerializerForIterationChannel_rdh | // --------------------------------------------------------------------------------------------
public TypeSerializerFactory<?> getSerializerForIterationChannel() {
return serializerForIterationChannel;
} | 3.26 |
flink_Deadline_fromNowWithClock_rdh | /**
* Constructs a Deadline that is a given duration after now, where now and all other times from
* this deadline are defined by the given {@link Clock}.
*
* @param duration
* Duration for this deadline.
* @param clock
* Time provider for this deadline.
*/
public static Deadline fromNowWithClock(Duration duration, Clock clock) {
return new Deadline(addHandlingOverflow(clock.relativeTimeNanos(), duration.toNanos()), clock);
} | 3.26 |
flink_Deadline_timeLeftIfAny_rdh | /**
* Returns the time left between the deadline and now. If no time is left, a {@link TimeoutException} will be thrown.
*
* @throws TimeoutException
* if no time is left
*/
public Duration timeLeftIfAny() throws TimeoutException {
long nanos = Math.subtractExact(timeNanos, clock.relativeTimeNanos());
if (nanos <= 0) {
throw new TimeoutException();
}
return Duration.ofNanos(nanos);
} | 3.26 |
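A hedged usage sketch for timeLeftIfAny(): use the remaining time to bound a blocking wait so that an expired deadline surfaces as a TimeoutException (the future and the Deadline import path are assumptions of this sketch):

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.time.Deadline; // package assumed

static String waitWithDeadline(Deadline deadline, CompletableFuture<String> future) throws Exception {
    Duration remaining = deadline.timeLeftIfAny(); // throws TimeoutException if the deadline has passed
    return future.get(remaining.toMillis(), TimeUnit.MILLISECONDS);
}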
flink_Deadline_fromNow_rdh | /**
* Constructs a Deadline that is a given duration after now.
*/
public static Deadline fromNow(Duration duration) {
return new Deadline(addHandlingOverflow(System.nanoTime(), duration.toNanos()), SystemClock.getInstance());
} | 3.26 |
flink_Deadline_now_rdh | // ------------------------------------------------------------------------
// Creating Deadlines
// ------------------------------------------------------------------------
/**
* Constructs a {@link Deadline} that has now as the deadline. Use this and then extend via
* {@link #plus(Duration)} to specify a deadline in the future.
*/
public static Deadline now() {
return new Deadline(System.nanoTime(), SystemClock.getInstance());
} | 3.26 |
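A minimal sketch of the pattern the Javadoc suggests, assuming the plus(Duration) method it references and the hasTimeLeft() shown later in this class:

import java.time.Duration;
import java.util.function.BooleanSupplier;
import org.apache.flink.api.common.time.Deadline; // package assumed

static void waitUntil(BooleanSupplier condition) throws InterruptedException {
    Deadline deadline = Deadline.now().plus(Duration.ofSeconds(30));
    while (deadline.hasTimeLeft() && !condition.getAsBoolean()) {
        Thread.sleep(100); // poll until the condition holds or the deadline passes
    }
}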
flink_Deadline_addHandlingOverflow_rdh | // -------------------- private helper methods ----------------
private static long addHandlingOverflow(long x, long y) {
// The logic is copied over from Math.addExact() in order to handle overflows.
long r = x + y;
if (((x ^ r) & (y ^ r)) < 0) {
return Long.MAX_VALUE;
} else {
return x + y;
}
} | 3.26 |
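A short illustration of why the helper saturates instead of using plain addition (conceptual, since the method is private): adding to Long.MAX_VALUE would otherwise wrap around and produce a deadline in the past.

long wrapped   = Long.MAX_VALUE + 1L;                     // plain addition overflows to Long.MIN_VALUE
long saturated = addHandlingOverflow(Long.MAX_VALUE, 1L); // helper clamps to Long.MAX_VALUE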
flink_Deadline_hasTimeLeft_rdh | /**
* Returns whether there is any time left between the deadline and now.
*/
public boolean hasTimeLeft() {
return !isOverdue();
} | 3.26 |
flink_HiveParserRowResolver_get_rdh | /**
* Gets the ColumnInfo for a tab_alias.col_alias type of column reference. If the tab_alias is
* not provided, as can be the case with a non-aliased column, this function looks up the column
* in all the table aliases in this row resolver and returns the match. It also throws an
* exception if the column is found in multiple table aliases. If no match is found, a null
* value is returned. This allows us to interpret both "select t.c1" type of references and
* "select c1" kind of references. The latter kind are what we call non-aliased column references
* in the query.
*
* @param tabAlias
* The table alias to match (this is null if the column reference is non
* aliased)
* @param colAlias
* The column name that is being searched for
* @return ColumnInfo
* @throws SemanticException
*/
public ColumnInfo get(String tabAlias, String colAlias) throws SemanticException {
ColumnInfo ret = null;
if ((!isExprResolver) && isAmbiguousReference(tabAlias, colAlias)) {
String colNamePrefix = (tabAlias != null) ? tabAlias + "." : "";
String fullQualifiedName = colNamePrefix + colAlias;
throw new SemanticException("Ambiguous column reference: " + fullQualifiedName);
}
if (tabAlias != null) {
tabAlias = tabAlias.toLowerCase();
HashMap<String, ColumnInfo> fMap = rslvMap.get(tabAlias);
if (fMap == null) {
return null;
}
ret = fMap.get(colAlias);
} else {
boolean found = false;
String foundTbl = null;
for (Map.Entry<String, LinkedHashMap<String, ColumnInfo>> rslvEntry : rslvMap.entrySet()) {
String rslvKey = rslvEntry.getKey();
LinkedHashMap<String, ColumnInfo> cmap = rslvEntry.getValue();
for (Map.Entry<String, ColumnInfo> cmapEnt : cmap.entrySet()) {
if (colAlias.equalsIgnoreCase(cmapEnt.getKey())) {
// We can have an unaliased and one aliased mapping to a Column.
if ((found && (foundTbl != null)) && (rslvKey != null)) {
throw new SemanticException(("Column " + colAlias) + " Found in more than One Tables/Subqueries");
}
found = true;
foundTbl = (rslvKey == null) ? foundTbl : rslvKey;
ret = cmapEnt.getValue();
}
}
}
}
return ret;
} | 3.26 |
flink_HiveParserRowResolver_getCombinedRR_rdh | /**
* Return a new row resolver that is combination of left RR and right RR. The schema will be
* schema of left, schema of right.
*/
public static HiveParserRowResolver getCombinedRR(HiveParserRowResolver leftRR, HiveParserRowResolver rightRR) throws SemanticException {
HiveParserRowResolver combinedRR = new HiveParserRowResolver();
HiveParserRowResolver.IntRef outputColPos = new HiveParserRowResolver.IntRef();
if (!add(combinedRR, leftRR, outputColPos)) {
LOG.warn("Duplicates detected when adding columns to RR: see previous message");
}
if (!add(combinedRR, rightRR, outputColPos)) {
LOG.warn("Duplicates detected when adding columns to RR: see previous message");
}
return combinedRR;
} | 3.26 |
flink_HiveParserRowResolver_getReferenceableColumnAliases_rdh | // Get a list of aliases for non-hidden columns.
public List<String> getReferenceableColumnAliases(String tableAlias, int max) {
int count = 0;
Set<String> columnNames = new LinkedHashSet<String>();
int tables = rslvMap.size();
Map<String, ColumnInfo> mapping = rslvMap.get(tableAlias);
if (mapping != null) {
for (Map.Entry<String, ColumnInfo> entry : mapping.entrySet()) {
if ((max > 0) && (count >= max)) {
break;
}
ColumnInfo columnInfo = entry.getValue();
if (!columnInfo.isHiddenVirtualCol()) {
columnNames.add(entry.getKey());
count++;
}
}
} else {
for (ColumnInfo columnInfo : getColumnInfos()) {
if ((max > 0) && (count >= max)) {
break;
}
if (!columnInfo.isHiddenVirtualCol()) {
String[] inverse = (!isExprResolver) ? m0(columnInfo.getInternalName()) : null;
if (inverse != null) {
columnNames.add((inverse[0] == null) || (tables <= 1) ? inverse[1] : (inverse[0] + ".") + inverse[1]);
} else {
columnNames.add(columnInfo.getAlias());
}
count++;
}
}
}
return new ArrayList<>(columnNames);
} | 3.26 |
flink_HiveParserRowResolver_add_rdh | // TODO: 1) How to handle collisions? 2) Should we be cloning ColumnInfo or not?
private static boolean add(HiveParserRowResolver rrToAddTo, HiveParserRowResolver rrToAddFrom, HiveParserRowResolver.IntRef outputColPosRef, int numColumns) throws SemanticException {
boolean hasDuplicates = false;
String tabAlias;
String colAlias;
String[] qualifiedColName;
int i = 0;
int outputColPos = (outputColPosRef == null) ? 0 : outputColPosRef.val;
for (ColumnInfo cInfoFrmInput : rrToAddFrom.getRowSchema().getSignature()) {
if ((numColumns >= 0) && (i == numColumns)) {
break;
}
ColumnInfo newCI = null;
String internalName = cInfoFrmInput.getInternalName();
qualifiedColName = rrToAddFrom.m0(internalName);
tabAlias = qualifiedColName[0];
colAlias = qualifiedColName[1];
newCI = new ColumnInfo(cInfoFrmInput);
newCI.setInternalName(HiveParserBaseSemanticAnalyzer.getColumnInternalName(outputColPos));
outputColPos++;
boolean isUnique = rrToAddTo.putWithCheck(tabAlias, colAlias, internalName, newCI);
hasDuplicates |= !isUnique;
qualifiedColName = rrToAddFrom.getAlternateMappings(internalName);
if (qualifiedColName != null) {
tabAlias = qualifiedColName[0];
colAlias = qualifiedColName[1];
rrToAddTo.put(tabAlias, colAlias, newCI);
}
i++;
}
if (outputColPosRef != null) {
outputColPosRef.val = outputColPos;
}
return !hasDuplicates;
} | 3.26 |
flink_HiveParserRowResolver_putExpression_rdh | /**
* Puts a resolver entry corresponding to a source expression which is to be used for identical
* expression recognition (e.g. for matching expressions in the SELECT list with the GROUP BY
* clause). The convention for such entries is an empty-string ("") as the table alias together
* with the string rendering of the HiveParserASTNode as the column alias.
*/
public void putExpression(HiveParserASTNode node, ColumnInfo colInfo) {
String treeAsString = node.toStringTree();
expressionMap.put(treeAsString, node);
put("", treeAsString, colInfo);
} | 3.26 |
flink_HiveParserRowResolver_putWithCheck_rdh | /**
* Adds column to RR, checking for duplicate columns. Needed because CBO cannot handle the Hive
* behavior of blindly overwriting old mapping in RR and still somehow working after that.
*
* @return True if mapping was added without duplicates.
*/
public boolean putWithCheck(String tabAlias, String colAlias, String internalName, ColumnInfo newCI) throws SemanticException {
ColumnInfo existing = get(tabAlias, colAlias);
// Hive adds the same mapping twice... I wish we could fix stuff like that.
if (existing == null) {
put(tabAlias, colAlias, newCI);
return true;
} else if (existing.isSameColumnForRR(newCI)) {
return true;
}
LOG.warn((("Found duplicate column alias in RR: " + existing.toMappingString(tabAlias, colAlias)) + " adding ") + newCI.toMappingString(tabAlias, colAlias));
if (internalName != null) {
existing = get(tabAlias, internalName);
if (existing == null) {
keepAmbiguousInfo(colAlias, tabAlias);
put(tabAlias, internalName, newCI);
return true;
} else if (existing.isSameColumnForRR(newCI)) {
return true;
}
LOG.warn("Failed to use internal name after finding a duplicate: " + existing.toMappingString(tabAlias, internalName));
}
return false;
} | 3.26 |
flink_HiveParserRowResolver_getExpression_rdh | /**
* Retrieves the ColumnInfo corresponding to a source expression which exactly matches the
* string rendering of the given HiveParserASTNode.
*/
public ColumnInfo getExpression(HiveParserASTNode node) throws SemanticException {
return get("", node.toStringTree());} | 3.26 |
flink_ListKeyGroupedIterator_getValues_rdh | /**
* Returns an iterator over all values that belong to the current key. The iterator is initially
* <code>null</code> (before the first call to {@link #nextKey()} and after all keys are
* consumed). In general, this method always returns a non-null value if a previous call to
* {@link #nextKey()} returned <code>true</code>.
*
* @return Iterator over all values that belong to the current key.
*/
public ValuesIterator getValues() {
return this.valuesIterator;
} | 3.26 |
flink_ListKeyGroupedIterator_nextKey_rdh | /**
* Moves the iterator to the next key. This method may skip any values that have not yet been
* returned by the iterator created by the {@link #getValues()} method. Hence, if called
* multiple times it "removes" key groups.
*
* @return true, if the input iterator has another group of records with the same key.
*/
public boolean nextKey() throws IOException {
if (lookahead != null) {
// common case: whole value-iterator was consumed and a new key group is available.
this.comparator.setReference(this.lookahead);
this.valuesIterator.next = this.lookahead;
this.lookahead = null;
this.valuesIterator.iteratorAvailable = true;
return true;
}
// first element, empty/done, or the values iterator was not entirely consumed
if (this.done) {
return false;
}
if (this.valuesIterator != null) {
// values was not entirely consumed. move to the next key
// Required if user code / reduce() method did not read the whole value iterator.
E next;
while (true) {
if ((currentPosition < input.size()) && ((next = this.input.get(currentPosition++)) != null)) {
if (!this.comparator.equalToReference(next)) {
// the keys do not match, so we have a new group. store the current key
this.comparator.setReference(next);
this.valuesIterator.next = next;
this.valuesIterator.iteratorAvailable = true;
return true;
}
} else {
// input exhausted
this.valuesIterator.next = null;
this.valuesIterator = null;
this.done = true;
return false;
}
}
} else {
// first element
// get the next element
E v1 = input.get(currentPosition++);
if (v1 != null) {
this.comparator.setReference(v1);
this.valuesIterator = new ValuesIterator(v1, serializer);
return true;
} else {
// empty input, set everything null
this.done = true;
return false;
}
}
} | 3.26 |
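A hedged sketch of the consumption pattern implied by nextKey() and getValues(), assuming ValuesIterator follows the usual java.util.Iterator contract and MyRecord is a placeholder element type:

// `iter` is a ListKeyGroupedIterator<MyRecord> obtained elsewhere; MyRecord is a placeholder type.
while (iter.nextKey()) {
    ListKeyGroupedIterator<MyRecord>.ValuesIterator values = iter.getValues();
    while (values.hasNext()) {
        MyRecord record = values.next();
        // ... aggregate all records sharing the current key ...
    }
}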
flink_AvroSerializerSnapshot_resolveSchemaCompatibility_rdh | // ------------------------------------------------------------------------------------------------------------
// Helpers
// ------------------------------------------------------------------------------------------------------------
/**
* Resolves writer/reader schema compatibility.
*
* <p>Checks whether a new version of a schema (reader) can read values serialized with the old
* schema (writer), i.e. whether the schemas are compatible according to the {@code Avro} schema
* resolution rules (see <a href="https://avro.apache.org/docs/current/spec.html#Schema+Resolution">Schema
* Resolution</a>).
*/
@VisibleForTesting
static <T> TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(Schema writerSchema, Schema readerSchema) {
if (Objects.equals(writerSchema, readerSchema)) {
return TypeSerializerSchemaCompatibility.compatibleAsIs();
}
final SchemaPairCompatibility compatibility = SchemaCompatibility.checkReaderWriterCompatibility(readerSchema, writerSchema);
return avroCompatibilityToFlinkCompatibility(compatibility);
} | 3.26 |
flink_LocalFileSystem_pathToFile_rdh | // ------------------------------------------------------------------------
/**
* Converts the given Path to a File for this file system. If the path is empty, we will return
* <tt>new File(".")</tt> instead of <tt>new File("")</tt>, since the latter returns
* <tt>false</tt> for <tt>isDirectory</tt> judgement (See issue
* https://issues.apache.org/jira/browse/FLINK-18612).
*/
public File pathToFile(Path path) {
String localPath = path.getPath();
checkState(localPath != null, "Cannot convert a null path to File");
if (localPath.length() == 0) {
return new File(".");
}
return new File(localPath);
} | 3.26 |
flink_LocalFileSystem_getSharedInstance_rdh | /**
* Gets the shared instance of this file system.
*
* @return The shared instance of this file system.
*/
public static LocalFileSystem getSharedInstance() {
return f0;
} | 3.26 |
flink_LocalFileSystem_getLocalFsURI_rdh | // ------------------------------------------------------------------------
/**
* Gets the URI that represents the local file system. That URI is {@code "file:/"} on Windows
* platforms and {@code "file:///"} on other UNIX family platforms.
*
* @return The URI that represents the local file system.
*/
public static URI getLocalFsURI() {
return LOCAL_URI;
} | 3.26 |
flink_LocalFileSystem_getFileBlockLocations_rdh | // ------------------------------------------------------------------------
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
if (file instanceof LocalFileStatus) {
return ((LocalFileStatus) (file)).getBlockLocations();
}
throw new IOException("File status does not belong to the LocalFileSystem: " + file);
} | 3.26 |
flink_GenericDataSinkBase_setLocalOrder_rdh | /**
* Sets the order in which the sink must write its data within each fragment in the distributed
* file system. For any value other than <tt>NONE</tt>, this will cause the system to perform a
* local sort, or try to reuse an order from a previous operation.
*
* @param localOrder
* The local order to write the data in.
*/
public void setLocalOrder(Ordering localOrder) {
this.localOrdering = localOrder;
} | 3.26 |
flink_GenericDataSinkBase_addInput_rdh | /**
* Adds to the input the union of the given operators.
*
* @param inputs
* The operator(s) to be unioned with the input.
* @deprecated This method will be removed in future versions. Use the {@link org.apache.flink.api.common.operators.Union} operator instead.
*/
@Deprecated
public void addInput(Operator<IN>... inputs) {
checkNotNull(inputs, "The input may not be null.");
this.input = Operator.createUnionCascade(this.input, inputs);
} | 3.26 |
flink_GenericDataSinkBase_setInputs_rdh | /**
* Sets the input to the union of the given operators.
*
* @param inputs
* The operator(s) that form the input.
* @deprecated This method will be removed in future versions. Use the {@link org.apache.flink.api.common.operators.Union} operator instead.
*/
@Deprecated
public void setInputs(List<Operator<IN>> inputs) {
checkNotNull(inputs, "The inputs may not be null.");
this.input = Operator.createUnionCascade(inputs);
} | 3.26 |
flink_GenericDataSinkBase_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return this.name;
} | 3.26 |
flink_GenericDataSinkBase_setInput_rdh | /**
* Sets the given operator as the input to this operator.
*
* @param input
* The operator to use as the input.
*/
public void setInput(Operator<IN> input) {
this.input = checkNotNull(input, "The input may not be null.");
}
/**
* Sets the input to the union of the given operators.
*
* @param inputs
* The operator(s) that form the input.
* @deprecated This method will be removed in future versions. Use the {@link org.apache.flink.api.common.operators.Union} | 3.26 |
flink_GenericDataSinkBase_m0_rdh | /**
* Gets the class describing the output format.
*
* <p>This method is basically identical to {@link #getFormatWrapper()}.
*
* @return The class describing the output format.
* @see org.apache.flink.api.common.operators.Operator#getUserCodeWrapper()
*/
@Override
public UserCodeWrapper<? extends OutputFormat<IN>> m0() {
return this.formatWrapper;
} | 3.26 |
flink_GenericDataSinkBase_addInputs_rdh | /**
* Adds to the input the union of the given operators.
*
* @param inputs
* The operator(s) to be unioned with the input.
* @deprecated This method will be removed in future versions. Use the {@link org.apache.flink.api.common.operators.Union} operator instead.
*/
@SuppressWarnings("unchecked")
@Deprecated
public void addInputs(List<? extends Operator<IN>> inputs) {
checkNotNull(inputs, "The inputs may not be null.");
this.input = createUnionCascade(this.input, ((Operator<IN>[]) (inputs.toArray(new Operator[inputs.size()]))));
} | 3.26 |
flink_GenericDataSinkBase_accept_rdh | // --------------------------------------------------------------------------------------------
/**
* Accepts the visitor and applies it this instance. This method applies the visitor in a
* depth-first traversal. The visitors pre-visit method is called and, if returning
* <tt>true</tt>, the visitor is recursively applied on the single input. After the recursion
* returned, the post-visit method is called.
*
* @param visitor
* The visitor.
* @see org.apache.flink.util.Visitable#accept(org.apache.flink.util.Visitor)
*/
@Override
public void accept(Visitor<Operator<?>> visitor) {
boolean descend = visitor.preVisit(this);
if (descend) {
this.input.accept(visitor);
visitor.postVisit(this);
}
} | 3.26 |
flink_GenericDataSinkBase_getFormatWrapper_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the class describing this sinks output format.
*
* @return The output format class.
*/
public UserCodeWrapper<? extends OutputFormat<IN>> getFormatWrapper() {
return this.formatWrapper;
} | 3.26 |
flink_GenericDataSinkBase_executeOnCollections_rdh | // --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
protected void executeOnCollections(List<IN> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
OutputFormat<IN> format = this.formatWrapper.getUserCodeObject();
TypeInformation<IN> inputType = getInput().getOperatorInfo().getOutputType();
if (this.localOrdering != null) {
int[] sortColumns = this.localOrdering.getFieldPositions();
boolean[] sortOrderings = this.localOrdering.getFieldSortDirections();
final TypeComparator<IN> sortComparator;
if (inputType instanceof CompositeType) {
sortComparator = ((CompositeType<IN>) (inputType)).createComparator(sortColumns, sortOrderings, 0, executionConfig);
} else if (inputType instanceof AtomicType) {
sortComparator = ((AtomicType<IN>) (inputType)).createComparator(sortOrderings[0], executionConfig);
} else {
throw new UnsupportedOperationException(("Local output sorting does not support type " + inputType) + " yet.");
}
Collections.sort(inputData, new Comparator<IN>() {
@Override
public int compare(IN o1, IN o2) {
return sortComparator.compare(o1, o2);
}
});
}
if (format instanceof InitializeOnMaster) {
((InitializeOnMaster) (format)).initializeGlobal(1);
}
format.configure(this.parameters);
if (format instanceof RichOutputFormat) {
((RichOutputFormat<?>) (format)).setRuntimeContext(ctx);
}
format.open(0, 1);
for (IN element : inputData) {
format.writeRecord(element);
}
format.close();
if (format instanceof FinalizeOnMaster) {
((FinalizeOnMaster) (format)).finalizeGlobal(1);
}
} | 3.26 |
flink_RpcGatewayUtils_isRpcTimeout_rdh | /**
* Checks whether any of the annotations is of type {@link RpcTimeout}.
*
* @param annotations
* Array of annotations
* @return True if {@link RpcTimeout} was found; otherwise false
*/
private static boolean isRpcTimeout(Annotation[] annotations) {
for (Annotation annotation : annotations) {
if (annotation.annotationType().equals(RpcTimeout.class)) {
return true;
}
}
return false;
} | 3.26 |
flink_RpcGatewayUtils_extractRpcTimeout_rdh | /**
* Extracts the {@link RpcTimeout} annotated rpc timeout value from the list of given method
* arguments. If no {@link RpcTimeout} annotated parameter could be found, then the default
* timeout is returned.
*
* @param parameterAnnotations
* Parameter annotations
* @param args
* Array of arguments
* @param defaultTimeout
* Default timeout to return if no {@link RpcTimeout} annotated parameter
* has been found
* @return Timeout extracted from the array of arguments or the default timeout
*/
public static Duration extractRpcTimeout(Annotation[][] parameterAnnotations, Object[] args, Duration defaultTimeout) {
if (args != null) {
Preconditions.checkArgument(parameterAnnotations.length == args.length);
for (int i = 0; i < parameterAnnotations.length; i++) {
if (isRpcTimeout(parameterAnnotations[i])) {
if (args[i] instanceof Time) {
return TimeUtils.toDuration(((Time) (args[i])));
} else if (args[i] instanceof Duration) {
return ((Duration) (args[i]));
} else {
throw new RuntimeException(((((("The rpc timeout parameter must be of type " + Time.class.getName()) + " or ") + Duration.class.getName()) + ". The type ") + args[i].getClass().getName()) + " is not supported.");
}
}
}
}
return defaultTimeout;
} | 3.26 |
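A hedged sketch of where the annotation usually lives: a gateway method marks one parameter with @RpcTimeout, and extractRpcTimeout(...) returns the caller-supplied value instead of the default (interface and method names are invented):

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import org.apache.flink.runtime.rpc.RpcGateway;
import org.apache.flink.runtime.rpc.RpcTimeout;

public interface StatusGateway extends RpcGateway {
    // The Duration argument passed by the caller is what extractRpcTimeout(...) picks up.
    CompletableFuture<String> requestStatus(@RpcTimeout Duration timeout);
}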
flink_TypeOfFunction_open_rdh | // --------------------------------------------------------------------------------------------
// Runtime
// --------------------------------------------------------------------------------------------
@Override
public void open(FunctionContext context) throws Exception {
this.typeStringData = StringData.fromString(typeString);
} | 3.26 |
flink_TypeOfFunction_isForceSerializable_rdh | // --------------------------------------------------------------------------------------------
// Planning
// --------------------------------------------------------------------------------------------
private static boolean isForceSerializable(CallContext context) {
final List<DataType> argumentDataTypes = context.getArgumentDataTypes();
if (argumentDataTypes.size() != 2) {
return false;
}
return context.getArgumentValue(1, Boolean.class).orElse(false);
} | 3.26 |
flink_FieldSet_isValidSubset_rdh | // --------------------------------------------------------------------------------------------
/**
* Checks if the given set of fields is a valid subset of this set of fields. For unordered
* sets, this is the case if all of the given set's fields are also part of this field.
*
* <p>Subclasses that describe field sets where the field order matters must override this
* method to implement a field ordering sensitive check.
*
* @param set
* The set that is a candidate subset.
* @return True, if the given set is a subset of this set, false otherwise.
*/
public boolean isValidSubset(FieldSet set) {
if (set.size() > size()) {
return false;
}
for (Integer i : set) {
if (!contains(i)) {
return false;
}
}
return true;
} | 3.26 |
flink_FieldSet_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return this.collection.hashCode();
} | 3.26 |
flink_FieldSet_toArray_rdh | /**
* Transforms the field set into an array of field IDs. Whether the IDs are ordered or unordered
* depends on the specific subclass of the field set.
*
* @return An array of all contained field IDs.
*/
public int[] toArray() {
int[] a = new int[this.collection.size()];
int i = 0;
for (int col : this.collection) {
a[i++] = col;
}
return a;
} | 3.26 |
flink_FieldSet_clone_rdh | /**
* Since instances of FieldSet are strictly immutable, this method does not actually clone, but
* it only returns the original instance.
*
* @return This objects reference, unmodified.
*/
public FieldSet clone() {
return this;
} | 3.26 |
flink_FieldSet_addField_rdh | // --------------------------------------------------------------------------------------------
public FieldSet addField(Integer fieldID) {
if (fieldID == null) {
throw new IllegalArgumentException("Field ID must not be null.");
}
return new FieldSet(this, fieldID);
} | 3.26 |
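Because FieldSet instances are immutable (see clone() above), addField returns a new set rather than mutating this one; a small sketch (the single-int constructor is an assumption of this sketch):

FieldSet a = new FieldSet(1).addField(2); // describes {1, 2}
FieldSet b = a.addField(3);               // describes {1, 2, 3}; `a` is unchanged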
flink_FieldSet_toFieldList_rdh | /**
* Turns the FieldSet into an ordered FieldList.
*
* @return An ordered FieldList.
*/
public FieldList toFieldList() {
int[] pos = toArray();
Arrays.sort(pos);
return new FieldList(pos);
} | 3.26 |
flink_FieldSet_getDescriptionPrefix_rdh | // --------------------------------------------------------------------------------------------
protected String getDescriptionPrefix() {
return "(";
} | 3.26 |
flink_JobResourceRequirements_validate_rdh | /**
* This method validates that:
*
* <ul>
* <li>The requested boundaries are less or equal than the max parallelism.
* <li>The requested boundaries are greater than zero.
* <li>The requested upper bound is greater than the lower bound.
* <li>There are no unknown job vertex ids and that we're not missing any.
* </ul>
*
* In case any boundary is set to {@code -1}, it is expanded to the default value ({@code 1} for the lower bound and the max parallelism for the upper bound) before the validation.
*
* @param jobResourceRequirements
* contains the new resources requirements for the job vertices
* @param maxParallelismPerVertex
* allows us to look up maximum possible parallelism for a job
* vertex
* @return a list of validation errors
*/
public static List<String> validate(JobResourceRequirements jobResourceRequirements, Map<JobVertexID, Integer> maxParallelismPerVertex) {
final List<String> errors = new ArrayList<>();
final Set<JobVertexID> missingJobVertexIds = new HashSet<>(maxParallelismPerVertex.keySet());
for (JobVertexID jobVertexId : jobResourceRequirements.getJobVertices()) {
missingJobVertexIds.remove(jobVertexId);
final Optional<Integer> maybeMaxParallelism = Optional.ofNullable(maxParallelismPerVertex.get(jobVertexId));
if (maybeMaxParallelism.isPresent()) {
final JobVertexResourceRequirements.Parallelism requestedParallelism = jobResourceRequirements.getParallelism(jobVertexId);
int lowerBound = (requestedParallelism.getLowerBound() == (-1)) ? 1 : requestedParallelism.getLowerBound();
int upperBound = (requestedParallelism.getUpperBound() == (-1)) ? maybeMaxParallelism.get() : requestedParallelism.getUpperBound();
if ((lowerBound < 1) || (upperBound < 1)) {
errors.add(String.format("Both, the requested lower bound [%d] and upper bound [%d] for job vertex [%s] must be greater than zero.", lowerBound, upperBound, jobVertexId));
// Don't validate this vertex any further to avoid additional noise.
continue;
}
if (lowerBound > upperBound) {
errors.add(String.format("The requested lower bound [%d] for job vertex [%s] is higher than the upper bound [%d].", lowerBound, jobVertexId, upperBound));
}
if (maybeMaxParallelism.get() < upperBound) {
errors.add(String.format("The newly requested parallelism %d for the job vertex %s exceeds its maximum parallelism %d.", upperBound, jobVertexId, maybeMaxParallelism.get()));
}
} else {
errors.add(String.format("Job vertex [%s] was not found in the JobGraph.", jobVertexId));
}
}
for (JobVertexID jobVertexId : missingJobVertexIds) {
errors.add(String.format("The request is incomplete, missing job vertex [%s] resource requirements.", jobVertexId)); }
return errors;
} | 3.26 |
flink_JobResourceRequirements_readFromJobGraph_rdh | /**
* Read {@link JobResourceRequirements resource requirements} from the configuration of a given
* {@link JobGraph}.
*
* @param jobGraph
* job graph to read requirements from
* @throws IOException
* in case we're not able to deserialize requirements from the configuration
*/
public static Optional<JobResourceRequirements> readFromJobGraph(JobGraph jobGraph) throws IOException {
try {
return Optional.ofNullable(InstantiationUtil.readObjectFromConfig(jobGraph.getJobConfiguration(), JOB_RESOURCE_REQUIREMENTS_KEY, JobResourceRequirements.class.getClassLoader()));
} catch (ClassNotFoundException e) {
throw new IOException("Unable to deserialize JobResourceRequirements due to missing classes. This might happen when the JobGraph was written from a different Flink version.", e);
}
} | 3.26 |
flink_JobResourceRequirements_writeToJobGraph_rdh | /**
* Write {@link JobResourceRequirements resource requirements} into the configuration of a given
* {@link JobGraph}.
*
* @param jobGraph
* job graph to write requirements to
* @param jobResourceRequirements
* resource requirements to write
* @throws IOException
* in case we're not able to serialize requirements into the configuration
*/
public static void writeToJobGraph(JobGraph jobGraph, JobResourceRequirements jobResourceRequirements) throws IOException {
InstantiationUtil.writeObjectToConfig(jobResourceRequirements, jobGraph.getJobConfiguration(), JOB_RESOURCE_REQUIREMENTS_KEY);
} | 3.26 |
flink_SubsequenceInputTypeStrategy_argument_rdh | /**
* Defines that we expect a single argument at the next position.
*/
public SubsequenceStrategyBuilder argument(ArgumentTypeStrategy argumentTypeStrategy) {
SequenceInputTypeStrategy singleArgumentStrategy = new SequenceInputTypeStrategy(Collections.singletonList(argumentTypeStrategy), null);
argumentsSplits.add(new ArgumentsSplit(currentPos, currentPos + 1, singleArgumentStrategy));
currentPos += 1;
return this;
} | 3.26 |
flink_SubsequenceInputTypeStrategy_finish_rdh | /**
* Constructs the given strategy.
*/
public InputTypeStrategy finish() {
return new SubsequenceInputTypeStrategy(argumentsSplits, ConstantArgumentCount.of(currentPos));
} | 3.26 |
flink_SubsequenceInputTypeStrategy_subsequence_rdh | /**
* Defines a common {@link InputTypeStrategy} for the next arguments. Given input strategy
* must expect a constant number of arguments. That means that both the minimum and maximum
* number of arguments must be defined and equal to each other.
*
* <p>If you need a varying logic use {@link #finishWithVarying(InputTypeStrategy)}.
*/
public SubsequenceStrategyBuilder subsequence(InputTypeStrategy inputTypeStrategy) {
Preconditions.checkArgument(inputTypeStrategy.getArgumentCount() instanceof ConstantArgumentCount);
Optional<Integer> maxCount = inputTypeStrategy.getArgumentCount().getMaxCount();
Optional<Integer> minCount = inputTypeStrategy.getArgumentCount().getMinCount();
if (((!maxCount.isPresent()) || (!minCount.isPresent())) || (!maxCount.get().equals(minCount.get()))) {
throw new IllegalArgumentException("Both the minimum and maximum number of expected arguments must" + " be defined and equal to each other.");
}
argumentsSplits.add(new ArgumentsSplit(currentPos, currentPos + maxCount.get(), inputTypeStrategy));
currentPos += maxCount.get();
return this;
} | 3.26 |
flink_SubsequenceInputTypeStrategy_finishWithVarying_rdh | /**
* Defines a common {@link InputTypeStrategy} for the next arguments. Given input strategy
* must expect a varying number of arguments. That means that the maximum number of
* arguments must not be defined.
*/
public InputTypeStrategy finishWithVarying(InputTypeStrategy inputTypeStrategy) {
final ArgumentCount strategyArgumentCount = inputTypeStrategy.getArgumentCount();
strategyArgumentCount.getMaxCount().ifPresent(c -> {
throw new IllegalArgumentException("The maximum number of arguments must not be defined.");
});
argumentsSplits.add(new ArgumentsSplit(currentPos, null, inputTypeStrategy));
final int minCount = currentPos + strategyArgumentCount.getMinCount().orElse(0);
return new SubsequenceInputTypeStrategy(argumentsSplits, ConstantArgumentCount.from(minCount));
} | 3.26 |
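A hedged sketch of how the builder methods above compose; the InputTypeStrategies.compositeSequence() entry point and the placeholder strategies are assumptions of this sketch:

// Two leading fixed arguments, then a varying tail handled by a separate strategy.
InputTypeStrategy strategy =
        InputTypeStrategies.compositeSequence()            // assumed builder entry point
                .argument(InputTypeStrategies.LITERAL)     // placeholder ArgumentTypeStrategy
                .subsequence(fixedTwoArgStrategy)          // must declare a constant argument count
                .finishWithVarying(varArgTailStrategy);    // remaining arguments, no maximum count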
flink_KeyedCoProcessFunction_onTimer_rdh | /**
* Called when a timer set using {@link TimerService} fires.
*
* @param timestamp
* The timestamp of the firing timer.
* @param ctx
* An {@link OnTimerContext} that allows querying the timestamp of the firing timer,
* querying the {@link TimeDomain} of the firing timer and getting a {@link TimerService}
* for registering timers and querying the time. The context is only valid during the
* invocation of this method, do not store it.
* @param out
* The collector for returning result values.
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
public void onTimer(long timestamp, OnTimerContext ctx, Collector<OUT> out) throws Exception {
} | 3.26 |
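A hedged sketch of the timer lifecycle this callback participates in: processElement1 registers an event-time timer through the TimerService, and onTimer fires later for the same key (class name, timeout value, and the assumption that elements carry timestamps are illustrative):

import org.apache.flink.streaming.api.functions.co.KeyedCoProcessFunction;
import org.apache.flink.util.Collector;

public class TimeoutJoin extends KeyedCoProcessFunction<String, String, String, String> {
    @Override
    public void processElement1(String value, Context ctx, Collector<String> out) throws Exception {
        // fire one minute after this element's (assumed non-null) timestamp
        ctx.timerService().registerEventTimeTimer(ctx.timestamp() + 60_000L);
    }

    @Override
    public void processElement2(String value, Context ctx, Collector<String> out) { }

    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) {
        out.collect("timer fired for key " + ctx.getCurrentKey() + " at " + timestamp);
    }
}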
flink_SortBufferAccumulator_switchCurrentDataBufferIfNeeded_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void switchCurrentDataBufferIfNeeded(boolean isBroadcast) {
if ((((isBroadcast == f0) && (currentDataBuffer != null)) && (!currentDataBuffer.isReleased())) && (!currentDataBuffer.isFinished())) {
return;
}
f0 = isBroadcast;
flushCurrentDataBuffer();
currentDataBuffer = createNewDataBuffer();
} | 3.26 |
flink_Tuple15_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14) {
return new Tuple15<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14);
} | 3.26 |
flink_Tuple15_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple15)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple15 tuple = ((Tuple15) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple15_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14), where the individual fields are the value returned by calling
* {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ")";} | 3.26 |
flink_Tuple15_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
* @param f13
* The value for field 13
* @param f14
* The value for field 14
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
} | 3.26 |
flink_RpcEndpoint_validateScheduledExecutorClosed_rdh | /**
* Validate whether the scheduled executor is closed.
*
* @return true if the scheduled executor is shutdown, otherwise false
*/
final boolean validateScheduledExecutorClosed() {
return mainScheduledExecutor.isShutdown();
} | 3.26 |
flink_RpcEndpoint_scheduleRunAsync_rdh | /**
* Execute the runnable in the main thread of the underlying RPC endpoint, with a delay of the
* given number of milliseconds.
*
* @param runnable
* Runnable to be executed
* @param delay
* The delay after which the runnable will be executed
*/
protected void scheduleRunAsync(Runnable runnable, long delay, TimeUnit unit) {
rpcServer.scheduleRunAsync(runnable, unit.toMillis(delay));
} | 3.26 |
flink_RpcEndpoint_isRunning_rdh | /**
* Returns whether the RPC endpoint is started and not stopped or being stopped.
*
* @return whether the RPC endpoint is started and not stopped or being stopped.
*/
protected boolean isRunning() {
validateRunsInMainThread();
return isRunning;
} | 3.26 |
flink_RpcEndpoint_m1_rdh | /**
* Gets the endpoint's RPC service.
*
* @return The endpoint's RPC service
*/
public RpcService m1() {
return rpcService;
} | 3.26 |
flink_RpcEndpoint_validateRunsInMainThread_rdh | // Main Thread Validation
// ------------------------------------------------------------------------
/**
* Validates that the method call happens in the RPC endpoint's main thread.
*
* <p><b>IMPORTANT:</b> This check only happens when assertions are enabled, such as when
* running tests.
*
* <p>This can be used for additional checks, like
*
* <pre>{@code protected void concurrencyCriticalMethod() {
* validateRunsInMainThread();
*
* // some critical stuff
* }}</pre>
*/
public void validateRunsInMainThread() {
assert MainThreadValidatorUtil.isRunningInExpectedThread(currentMainThread.get());
} | 3.26 |
flink_RpcEndpoint_unregisterResource_rdh | /**
* Unregister the given closeable resource from {@link CloseableRegistry}.
*
* @param closeableResource
* the given closeable resource
* @return true if the given resource was unregistered successfully, otherwise false
*/
protected boolean unregisterResource(Closeable closeableResource) {
return resourceRegistry.unregisterCloseable(closeableResource);
}
/**
* User overridable callback which is called from {@link #internalCallOnStop()} | 3.26 |
flink_RpcEndpoint_internalCallOnStart_rdh | /**
* Internal method which is called by the RpcService implementation to start the RpcEndpoint.
*
* @throws Exception
* indicating that the rpc endpoint could not be started. If an exception
* occurs, then the rpc endpoint will automatically terminate.
*/
public final void internalCallOnStart() throws Exception {
validateRunsInMainThread();
isRunning = true;
onStart();
} | 3.26 |
flink_RpcEndpoint_validateResourceClosed_rdh | /**
* Validate whether all the resources are closed.
*
* @return true if all the resources are closed, otherwise false
*/
boolean validateResourceClosed() {
return mainThreadExecutor.validateScheduledExecutorClosed() && resourceRegistry.isClosed();
} | 3.26 |
flink_RpcEndpoint_getSelfGateway_rdh | // ------------------------------------------------------------------------
// Basic RPC endpoint properties
// ------------------------------------------------------------------------
/**
* Returns a self gateway of the specified type which can be used to issue asynchronous calls
* against the RpcEndpoint.
*
* <p>IMPORTANT: The self gateway type must be implemented by the RpcEndpoint. Otherwise the
* method will fail.
*
* @param selfGatewayType
* class of the self gateway type
* @param <C>
* type of the self gateway to create
* @return Self gateway of the specified type which can be used to issue asynchronous rpcs
*/
public <C extends RpcGateway> C getSelfGateway(Class<C> selfGatewayType) {
return rpcService.getSelfGateway(selfGatewayType, rpcServer);
} | 3.26 |
flink_RpcEndpoint_registerResource_rdh | /**
* Register the given closeable resource to {@link CloseableRegistry}.
*
* @param closeableResource
* the given closeable resource
*/
protected void registerResource(Closeable closeableResource) {
try {
resourceRegistry.registerCloseable(closeableResource);
} catch (IOException e) {
throw new RuntimeException(("Registry closeable resource " + closeableResource) + " fail", e);
}
} | 3.26 |
flink_RpcEndpoint_close_rdh | /**
* Shutdown the {@link ScheduledThreadPoolExecutor} and remove all the pending tasks.
*/
@Override
public void close() {
if (!mainScheduledExecutor.isShutdown()) {
mainScheduledExecutor.shutdownNow();
}
} | 3.26 |
flink_RpcEndpoint_runAsync_rdh | // ------------------------------------------------------------------------
// Asynchronous executions
// ------------------------------------------------------------------------
/**
* Execute the runnable in the main thread of the underlying RPC endpoint.
*
* @param runnable
* Runnable to be executed in the main thread of the underlying RPC endpoint
*/
protected void runAsync(Runnable runnable) {
rpcServer.runAsync(runnable);
} | 3.26 |
flink_RpcEndpoint_callAsync_rdh | /**
* Execute the callable in the main thread of the underlying RPC service, returning a future for
* the result of the callable. If the callable is not completed within the given timeout, then
* the future will be failed with a {@link TimeoutException}.
*
* @param callable
* Callable to be executed in the main thread of the underlying rpc server
* @param timeout
* Timeout for the callable to be completed
* @param <V>
* Return type of the callable
* @return Future for the result of the callable.
*/
protected <V> CompletableFuture<V> callAsync(Callable<V> callable, Duration timeout) {
return rpcServer.callAsync(callable, timeout);
} | 3.26 |
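A hedged sketch from inside an RpcEndpoint subclass: query main-thread-confined state without blocking the caller, bounding the wait with a timeout (the field name and log message are invented):

// `pendingRequests` is state that must only be touched from the endpoint's main thread.
CompletableFuture<Integer> queueSize =
        callAsync(() -> pendingRequests.size(), Duration.ofSeconds(10));
queueSize.thenAccept(size -> log.info("Currently {} pending requests", size));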
flink_RpcEndpoint_getEndpointId_rdh | /**
* Returns the rpc endpoint's identifier.
*
* @return Rpc endpoint's identifier.
*/
public String getEndpointId() {
return endpointId;
} | 3.26 |
flink_RpcEndpoint_getHostname_rdh | /**
* Gets the hostname of the underlying RPC endpoint.
*
* @return Hostname on which the RPC endpoint is running
*/
@Override
public String getHostname() {
return rpcServer.getHostname();
} | 3.26 |
flink_RpcEndpoint_onStart_rdh | /**
* User overridable callback which is called from {@link #internalCallOnStart()}.
*
* <p>This method is called when the RpcEndpoint is being started. The method is guaranteed to
* be executed in the main thread context and can be used to start the rpc endpoint in the
* context of the rpc endpoint's main thread.
*
* <p>IMPORTANT: This method should never be called directly by the user.
*
* @throws Exception
* indicating that the rpc endpoint could not be started. If an exception
* occurs, then the rpc endpoint will automatically terminate.
*/
protected void onStart() throws Exception {
} | 3.26 |
flink_RpcEndpoint_m0_rdh | // ------------------------------------------------------------------------
// Start & shutdown & lifecycle callbacks
// ------------------------------------------------------------------------
/**
* Triggers start of the rpc endpoint. This tells the underlying rpc server that the rpc
* endpoint is ready to process remote procedure calls.
*/
public final void m0() {
rpcServer.start();
} | 3.26 |
flink_RpcEndpoint_schedule_rdh | /**
* The mainScheduledExecutor manages the task and sends it to the gateway after the given
* delay.
*
* @param command
* the task to execute in the future
* @param delay
* the time from now to delay the execution
* @param unit
* the time unit of the delay parameter
* @return a ScheduledFuture representing the completion of the scheduled task
*/
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
final long delayMillis = TimeUnit.MILLISECONDS.convert(delay, unit);
FutureTask<Void> ft = new FutureTask<>(command, null);
if (mainScheduledExecutor.isShutdown()) {
log.warn("The scheduled executor service is shutdown and ignores the command {}", command);
} else {
mainScheduledExecutor.schedule(() -> gateway.runAsync(ft), delayMillis, TimeUnit.MILLISECONDS);
}
return new ScheduledFutureAdapter<>(ft, delayMillis, TimeUnit.MILLISECONDS);
} | 3.26 |
flink_ExecutionVertexInputInfo_getSubpartitionIndexRange_rdh | /**
* Get the subpartition range this subtask should consume.
*/
public IndexRange getSubpartitionIndexRange() {
return subpartitionIndexRange;
} | 3.26 |
flink_ExecutionVertexInputInfo_getPartitionIndexRange_rdh | /**
* Get the partition range this subtask should consume.
*/
public IndexRange getPartitionIndexRange() {
return partitionIndexRange;
} | 3.26 |
flink_ExecutionVertexInputInfo_getSubtaskIndex_rdh | /**
* Get the index of this subtask.
*/
public int getSubtaskIndex() {
return subtaskIndex;
} | 3.26 |
flink_HsMemoryDataSpiller_close_rdh | /**
* Close this {@link HsMemoryDataSpiller}. This means the spiller will no longer accept new
* spilling operations and will block until all previously submitted spilling operations are done.
*/
public void close() {
try {
ioExecutor.shutdown();
if (!ioExecutor.awaitTermination(5L, TimeUnit.MINUTES)) {
throw new TimeoutException("Shutdown spilling thread timeout.");
}
dataFileChannel.close();
} catch (Exception e) {
ExceptionUtils.rethrow(e);
}
} | 3.26 |
flink_HsMemoryDataSpiller_writeBuffers_rdh | /**
* Write all buffers to disk.
*/
private void writeBuffers(List<BufferWithIdentity> bufferWithIdentities, long expectedBytes) throws IOException {
if (bufferWithIdentities.isEmpty()) {
return;
}
ByteBuffer[] bufferWithHeaders = new ByteBuffer[2 * bufferWithIdentities.size()];
for (int i = 0; i < bufferWithIdentities.size(); i++) {
Buffer buffer = bufferWithIdentities.get(i).getBuffer();
setBufferWithHeader(buffer, bufferWithHeaders, 2 * i);
}
BufferReaderWriterUtil.writeBuffers(dataFileChannel, expectedBytes, bufferWithHeaders);
totalBytesWritten += expectedBytes;
} | 3.26 |
flink_HsMemoryDataSpiller_spillAsync_rdh | /**
* Spilling buffers to disk asynchronously.
*
* @param bufferToSpill
* buffers need to be spilled, must ensure that it is sorted by
* (subpartitionId, bufferIndex).
* @return the completable future contains spilled buffers information.
*/
public CompletableFuture<List<SpilledBuffer>> spillAsync(List<BufferWithIdentity> bufferToSpill) {
CompletableFuture<List<SpilledBuffer>> spilledFuture = new CompletableFuture<>();
ioExecutor.execute(() -> spill(bufferToSpill, spilledFuture));
return spilledFuture;
} | 3.26 |
flink_HsMemoryDataSpiller_spill_rdh | /**
* Called in single-threaded ioExecutor. Order is guaranteed.
*/
private void spill(List<BufferWithIdentity> toWrite, CompletableFuture<List<SpilledBuffer>> spilledFuture) {
try {
List<SpilledBuffer> spilledBuffers = new ArrayList<>();
long expectedBytes = createSpilledBuffersAndGetTotalBytes(toWrite, spilledBuffers);
// write all buffers to file
writeBuffers(toWrite, expectedBytes);
// complete spill future when buffers are written to disk successfully.
// note that the ownership of these buffers is transferred to the MemoryDataManager,
// which controls data's life cycle.
spilledFuture.complete(spilledBuffers);
} catch (IOException exception) {
// if spilling is failed, throw exception directly to uncaughtExceptionHandler.
ExceptionUtils.rethrow(exception);
}
} | 3.26 |
flink_HsMemoryDataSpiller_createSpilledBuffersAndGetTotalBytes_rdh | /**
* Compute buffer's file offset and create spilled buffers.
*
* @param toWrite
* for create {@link SpilledBuffer}.
* @param spilledBuffers
* receive the created {@link SpilledBuffer} by this method.
* @return total bytes(header size + buffer size) of all buffers to write.
*/
private long createSpilledBuffersAndGetTotalBytes(List<BufferWithIdentity> toWrite, List<SpilledBuffer> spilledBuffers) {
long expectedBytes = 0;
for (BufferWithIdentity v4 : toWrite) {
Buffer buffer = v4.getBuffer();
int numBytes = buffer.readableBytes() + BufferReaderWriterUtil.HEADER_LENGTH;
spilledBuffers.add(new SpilledBuffer(v4.getChannelIndex(), v4.getBufferIndex(), totalBytesWritten + expectedBytes));
expectedBytes += numBytes;
}
return expectedBytes;
} | 3.26 |
flink_UnresolvedIdentifier_asSummaryString_rdh | /**
* Returns a string that summarizes this instance for printing to a console or log.
*/
public String asSummaryString() {
return Stream.of(catalogName, databaseName, objectName).filter(Objects::nonNull).map(EncodingUtils::escapeIdentifier).collect(Collectors.joining("."));
} | 3.26 |
flink_DefaultContainerizedExternalSystem_builder_rdh | /**
* Get a builder for {@link DefaultContainerizedExternalSystem}.
*
* @param <C>
* Type of underlying container
* @return An instance of builder
*/
public static <C extends GenericContainer<C>> Builder<C> builder() {
        return new Builder<>();
} | 3.26 |
flink_ExternalTypeInfo_isBasicType_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean isBasicType() {
return false;
} | 3.26 |
flink_ExternalTypeInfo_getDataType_rdh | // --------------------------------------------------------------------------------------------
@Override
public DataType getDataType() {
return dataType;
} | 3.26 |
flink_ExternalTypeInfo_of_rdh | /**
* Creates type information for a {@link DataType} that is possibly represented by internal data
* structures but serialized and deserialized into external data structures.
*
* @param isInternalInput
* allows for a non-bidirectional serializer from internal to external
*/
public static <T> ExternalTypeInfo<T> of(DataType dataType, boolean isInternalInput) {
final TypeSerializer<T> serializer = createExternalTypeSerializer(dataType, isInternalInput);
return new ExternalTypeInfo<>(dataType, serializer);
} | 3.26 |
flink_MultipleInputNodeCreationProcessor_createMultipleInputNodes_rdh | // --------------------------------------------------------------------------------
// Multiple Input Nodes Creating
// --------------------------------------------------------------------------------
private List<ExecNode<?>> createMultipleInputNodes(ReadableConfig tableConfig, List<ExecNodeWrapper> rootWrappers) {
List<ExecNode<?>> result = new ArrayList<>();
Map<ExecNodeWrapper, ExecNode<?>> visitedMap = new HashMap<>();
        for (ExecNodeWrapper rootWrapper : rootWrappers) {
result.add(getMultipleInputNode(tableConfig, rootWrapper, visitedMap));
}
return result;
} | 3.26 |
flink_MultipleInputNodeCreationProcessor_wrapExecNodes_rdh | // --------------------------------------------------------------------------------
// Wrapping and Sorting
// --------------------------------------------------------------------------------
private List<ExecNodeWrapper> wrapExecNodes(List<ExecNode<?>> rootNodes) {
Map<ExecNode<?>, ExecNodeWrapper> wrapperMap = new HashMap<>();
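        // single pass over the DAG: wrap every node and link the wrappers in both directions (inputs and outputs)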
AbstractExecNodeExactlyOnceVisitor visitor = new AbstractExecNodeExactlyOnceVisitor() {
@Override
protected void visitNode(ExecNode<?> node) {
                ExecNodeWrapper wrapper = wrapperMap.computeIfAbsent(node, k -> new ExecNodeWrapper(node));
for (ExecEdge v7 : node.getInputEdges()) {
ExecNode<?> inputNode = v7.getSource();
ExecNodeWrapper inputWrapper = wrapperMap.computeIfAbsent(inputNode, k -> new ExecNodeWrapper(inputNode));
wrapper.inputs.add(inputWrapper);
inputWrapper.outputs.add(wrapper);
}
visitInputs(node);
}
};
rootNodes.forEach(s -> s.accept(visitor));
List<ExecNodeWrapper> rootWrappers = new ArrayList<>();
for (ExecNode<?> root : rootNodes) {
ExecNodeWrapper rootWrapper = wrapperMap.get(root);
Preconditions.checkNotNull(rootWrapper, "Root node is not wrapped. This is a bug.");
rootWrappers.add(rootWrapper);
}
return rootWrappers;
} | 3.26 |
flink_MultipleInputNodeCreationProcessor_createMultipleInputGroups_rdh | // --------------------------------------------------------------------------------
// Multiple Input Groups Creating
// --------------------------------------------------------------------------------
    private void createMultipleInputGroups(
            ReadableConfig tableConfig, List<ExecNodeWrapper> orderedWrappers) {
// wrappers are checked in topological order from sinks to sources
for (ExecNodeWrapper wrapper : orderedWrappers) {
// we skip nodes which cannot be a member of a multiple input node
if (!canBeMultipleInputNodeMember(wrapper)) {
continue;
}
// we first try to assign this wrapper into the same group with its outputs
MultipleInputGroup v20 = canBeInSameGroupWithOutputs(wrapper);
if (v20 != null) {
v20.addMember(wrapper);
continue;
}
// we then try to create a new multiple input group with this node as the root
            if (m0(tableConfig, wrapper)) {
                wrapper.group = new MultipleInputGroup(wrapper);
}
// all our attempts failed, this node will not be in a multiple input node
}
} | 3.26 |
flink_MultipleInputNodeCreationProcessor_optimizeMultipleInputGroups_rdh | // --------------------------------------------------------------------------------
// Multiple Input Groups Optimizing
// --------------------------------------------------------------------------------
private void optimizeMultipleInputGroups(List<ExecNodeWrapper> orderedWrappers, ProcessorContext context) {
// wrappers are checked in topological order from sources to sinks
for (int i = orderedWrappers.size() - 1; i >= 0; i--) {
ExecNodeWrapper wrapper = orderedWrappers.get(i);
MultipleInputGroup group = wrapper.group;
if (group == null) {
// we only consider nodes currently in a multiple input group
continue;
}
if (!isEntranceOfMultipleInputGroup(wrapper)) {
// we're not removing a node from the middle of a multiple input group
continue;
}
boolean shouldRemove = false;
if (wrapper.execNode instanceof CommonExecUnion) {
// optimization 1. we do not allow union to be the tail of a multiple input
// as we're paying extra function calls for this, unless one of the united
// input is a FLIP-27 source
shouldRemove = wrapper.inputs.stream().noneMatch(inputWrapper -> isChainableSource(inputWrapper.execNode, context));
} else if (wrapper.inputs.size() == 1) {
// optimization 2. for one-input operators we'll remove it unless its input
// is an exchange or a FLIP-27 source, this is mainly to avoid the following
// pattern:
// non-chainable source -> calc --\
// join ->
// non-chainable source -> calc --/
// if we move two calcs into the multiple input group rooted at the join, we're
// directly shuffling large amount of records from the source without filtering
// by the calc
                ExecNode<?> input = wrapper.inputs.get(0).execNode;
                shouldRemove = (!(input instanceof CommonExecExchange)) && (!isChainableSource(input, context));
}
// optimization 3. for singleton operations (for example singleton global agg)
// we're not including it into the multiple input node as we have to ensure that
// the whole multiple input can only have 1 parallelism.
// continuous singleton operations connected by forwarding shuffle will be dealt
// together with optimization 3
shouldRemove |= wrapper.inputs.stream().anyMatch(inputWrapper -> (inputWrapper.execNode instanceof CommonExecExchange) && (inputWrapper.execNode.getInputProperties().get(0).getRequiredDistribution().getType() == DistributionType.SINGLETON));
if (shouldRemove) {
wrapper.group.removeMember(wrapper);
}
}
// wrappers are checked in topological order from sinks to sources
for (ExecNodeWrapper wrapper : orderedWrappers) {
MultipleInputGroup group = wrapper.group;
if (group == null) {
// we only consider nodes currently in a multiple input group
continue;
}
if (wrapper != wrapper.group.root) {
// we only consider nodes at the root of the multiple input
continue;
}
boolean isUnion = wrapper.execNode instanceof CommonExecUnion;
if (group.members.size() == 1) {
// optimization 4. we clean up multiple input groups with only 1 member,
// unless one of its input is a FLIP-27 source (for maximizing source chaining),
// however unions do not apply to this optimization because they're not real
// operators
                if (isUnion || wrapper.inputs.stream().noneMatch(inputWrapper -> isChainableSource(inputWrapper.execNode, context))) {
                    wrapper.group.removeRoot();
}
continue;
            }
            if (isUnion) {
                // optimization 5. this optimization removes a redundant union at the output of a
// multiple input, consider the following graph:
//
// source -> exchange -> agg ---\
// source -> exchange -> agg --> union ->
// source -> exchange -> join --/
// source -> exchange --/
//
// we'll initially put aggs, the join and the union into a multiple input, while
// the union here is actually redundant.
int numberOfUsefulInputs = 0;
List<Integer> uselessBranches = new ArrayList<>();
List<List<ExecNodeWrapper>> sameGroupWrappersList = new ArrayList<>();
// an input branch is useful if it contains a node with two or more inputs other
// than union. we shall keep the union if it has two or more useful input branches,
// as this may benefit source chaining. consider the following example:
//
// chainable source -> join -\
// / \
// chainable source --< union
// \ /
// chainable source -> join -/
for (int i = 0; i < wrapper.inputs.size(); i++) {
ExecNodeWrapper inputWrapper = wrapper.inputs.get(i);
List<ExecNodeWrapper> sameGroupWrappers = getInputWrappersInSameGroup(inputWrapper, wrapper.group);
sameGroupWrappersList.add(sameGroupWrappers);
long numberOfValuableNodes = sameGroupWrappers.stream().filter(w -> (w.inputs.size() >= 2) && (!(w.execNode instanceof CommonExecUnion))).count();
                    if (numberOfValuableNodes > 0) {
numberOfUsefulInputs++;
} else {
uselessBranches.add(i);
}
}
if (numberOfUsefulInputs < 2) {
// remove this union and its useless branches from multiple input
                    for (int branch : uselessBranches) {
                        List<ExecNodeWrapper> sameGroupWrappers = sameGroupWrappersList.get(branch);
                        for (ExecNodeWrapper w : sameGroupWrappers) {
                            if (w.group != null) {
                                w.group.removeMember(w);
                            }
                        }
                    }
wrapper.group.removeRoot();
}
} else if (wrapper.inputs.size() == 1) {
// optimization 6. operators with only 1 input are not allowed to be the root,
// as their chaining will be handled by operator chains. But Calc and HashAgg can be
// the root node for OFCG
// TODO If all kinds of one input operator support OFCG, we can remove this
// limitation
boolean fusionCodegenEnabled = context.getPlanner().getTableConfig().get(TABLE_EXEC_OPERATOR_FUSION_CODEGEN_ENABLED);
if (fusionCodegenEnabled && ((wrapper.execNode instanceof BatchExecCalc) || (wrapper.execNode instanceof BatchExecHashAggregate))) {
continue;
}
wrapper.group.removeRoot();
            }
        }
} | 3.26 |
flink_StateBackendLoader_fromApplicationOrConfigOrDefault_rdh | /**
* This is the state backend loader that loads a {@link DelegatingStateBackend} wrapping the
* state backend loaded from {@link StateBackendLoader#loadFromApplicationOrConfigOrDefaultInternal} when delegation is enabled.
* If delegation is not enabled, the underlying wrapped state backend is returned instead.
*
* @param fromApplication
* StateBackend defined from application
* @param isChangelogStateBackendEnableFromApplication
* whether to enable the
* ChangelogStateBackend from application
* @param config
* The configuration to load the state backend from
* @param classLoader
* The class loader that should be used to load the state backend
* @param logger
* Optionally, a logger to log actions to (may be null)
* @return The instantiated state backend.
* @throws DynamicCodeLoadingException
* Thrown if a state backend (factory) is configured and the
* (factory) class was not found or could not be instantiated
* @throws IllegalConfigurationException
* May be thrown by the StateBackendFactory when creating
* / configuring the state backend in the factory
* @throws IOException
* May be thrown by the StateBackendFactory when instantiating the state
* backend
*/
    public static StateBackend fromApplicationOrConfigOrDefault(
            @Nullable StateBackend fromApplication,
            TernaryBoolean isChangelogStateBackendEnableFromApplication,
            Configuration config,
            ClassLoader classLoader,
            @Nullable Logger logger)
            throws IllegalConfigurationException, DynamicCodeLoadingException, IOException {
StateBackend rootBackend = loadFromApplicationOrConfigOrDefaultInternal(fromApplication, config, classLoader, logger);
// Configuration from application will override the one from env.
boolean enableChangeLog = TernaryBoolean.TRUE.equals(isChangelogStateBackendEnableFromApplication) || (TernaryBoolean.UNDEFINED.equals(isChangelogStateBackendEnableFromApplication) && config.get(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG));
StateBackend backend;
if (enableChangeLog) {
backend = wrapStateBackend(rootBackend, classLoader, CHANGELOG_STATE_BACKEND);
            LOG.info("State backend loader loads {} to delegate {}", backend.getClass().getSimpleName(), rootBackend.getClass().getSimpleName());
        } else {
            backend = rootBackend;
LOG.info("State backend loader loads the state backend as {}", backend.getClass().getSimpleName());
}
return backend;
} | 3.26 |
flink_StateBackendLoader_loadFromApplicationOrConfigOrDefaultInternal_rdh | /**
* Checks if an application-defined state backend is given, and if not, loads the state backend
* from the configuration, from the parameter 'state.backend', as defined in {@link CheckpointingOptions#STATE_BACKEND}. If no state backend is configured, this instantiates the
* default state backend (the {@link HashMapStateBackend}).
*
 * <p>If an application-defined state backend is found, and the state backend is a {@link ConfigurableStateBackend}, this method calls {@link ConfigurableStateBackend#configure(ReadableConfig, ClassLoader)} on the state backend.
*
* <p>Refer to {@link #loadStateBackendFromConfig(ReadableConfig, ClassLoader, Logger)} for
* details on how the state backend is loaded from the configuration.
*
* @param config
* The configuration to load the state backend from
* @param classLoader
* The class loader that should be used to load the state backend
* @param logger
* Optionally, a logger to log actions to (may be null)
* @return The instantiated state backend.
* @throws DynamicCodeLoadingException
* Thrown if a state backend factory is configured and the
* factory class was not found or the factory could not be instantiated
* @throws IllegalConfigurationException
* May be thrown by the StateBackendFactory when creating
* / configuring the state backend in the factory
* @throws IOException
* May be thrown by the StateBackendFactory when instantiating the state
* backend
*/
    private static StateBackend loadFromApplicationOrConfigOrDefaultInternal(
            @Nullable StateBackend fromApplication,
            Configuration config,
            ClassLoader classLoader,
            @Nullable Logger logger)
            throws IllegalConfigurationException, DynamicCodeLoadingException, IOException {
checkNotNull(config, "config");
checkNotNull(classLoader, "classLoader");
final StateBackend backend;
// (1) the application defined state backend has precedence
if (fromApplication != null) {
// see if this is supposed to pick up additional configuration parameters
if (fromApplication instanceof ConfigurableStateBackend) {
// needs to pick up configuration
if (logger != null) {
logger.info("Using job/cluster config to configure application-defined state backend: {}", fromApplication);
}
backend = ((ConfigurableStateBackend) (fromApplication)).configure(config, classLoader);
} else {
// keep as is!
backend = fromApplication;
}
if (logger != null) {
logger.info("Using application-defined state backend: {}", backend);
}
} else {
// (2) check if the config defines a state backend
final StateBackend fromConfig = loadStateBackendFromConfig(config, classLoader, logger);
if (fromConfig != null) {
backend = fromConfig;
} else {
// (3) use the default
backend = new HashMapStateBackendFactory().createFromConfig(config, classLoader);
if (logger != null) {
logger.info("No state backend has been configured, using default (HashMap) {}", backend);
}
}
}
return backend;
} | 3.26 |
flink_StateBackendLoader_stateBackendFromApplicationOrConfigOrDefaultUseManagedMemory_rdh | /**
* Checks whether state backend uses managed memory, without having to deserialize or load the
* state backend.
*
* @param config
* Cluster configuration.
* @param stateBackendFromApplicationUsesManagedMemory
* Whether the application-defined backend
* uses Flink's managed memory. Empty if application has not defined a backend.
* @param classLoader
* User code classloader.
* @return Whether the state backend uses managed memory.
*/
public static boolean stateBackendFromApplicationOrConfigOrDefaultUseManagedMemory(Configuration config, Optional<Boolean> stateBackendFromApplicationUsesManagedMemory, ClassLoader classLoader) {
checkNotNull(config, "config");
// (1) the application defined state backend has precedence
if (stateBackendFromApplicationUsesManagedMemory.isPresent()) {
return stateBackendFromApplicationUsesManagedMemory.get();
}
// (2) check if the config defines a state backend
try {
final StateBackend fromConfig = loadStateBackendFromConfig(config, classLoader, LOG);
if (fromConfig != null) {
return fromConfig.useManagedMemory();
}
} catch (IllegalConfigurationException | DynamicCodeLoadingException | IOException e) {
LOG.warn("Cannot decide whether state backend uses managed memory. Will reserve managed memory by default.", e);
return true;
}
        // (3) the default state backend (HashMapStateBackend) does not use managed memory
return false;
} | 3.26 |
flink_StateBackendLoader_loadStateBackendFromKeyedStateHandles_rdh | /**
* Load state backend which may wrap the original state backend for recovery.
*
* @param originalStateBackend
* StateBackend loaded from application or config.
* @param classLoader
* User code classloader.
* @param keyedStateHandles
* The state handles for restore.
* @return Wrapped state backend for recovery.
* @throws DynamicCodeLoadingException
* Thrown if keyed state handles of wrapped state backend
* are found and the class was not found or could not be instantiated.
*/
public static StateBackend loadStateBackendFromKeyedStateHandles(StateBackend originalStateBackend, ClassLoader classLoader, Collection<KeyedStateHandle> keyedStateHandles) throws DynamicCodeLoadingException {
// Wrapping ChangelogStateBackend or ChangelogStateBackendHandle is not supported currently.
if ((!isChangelogStateBackend(originalStateBackend)) && keyedStateHandles.stream().anyMatch(stateHandle -> stateHandle instanceof ChangelogStateBackendHandle)) {
return wrapStateBackend(originalStateBackend, classLoader, DEACTIVATED_CHANGELOG_STATE_BACKEND);
}
return originalStateBackend;
} | 3.26 |
flink_StateBackendLoader_loadStateBackendFromConfig_rdh | // ------------------------------------------------------------------------
// Loading the state backend from a configuration
// ------------------------------------------------------------------------
/**
* Loads the unwrapped state backend from the configuration, from the parameter 'state.backend',
* as defined in {@link StateBackendOptions#STATE_BACKEND}.
*
* <p>The state backends can be specified either via their shortcut name, or via the class name
* of a {@link StateBackendFactory}. If a StateBackendFactory class name is specified, the
* factory is instantiated (via its zero-argument constructor) and its {@link StateBackendFactory#createFromConfig(ReadableConfig, ClassLoader)} method is called.
*
* <p>Recognized shortcut names are '{@value StateBackendLoader#HASHMAP_STATE_BACKEND_NAME}',
 * '{@value StateBackendLoader#ROCKSDB_STATE_BACKEND_NAME}', '{@value StateBackendLoader#MEMORY_STATE_BACKEND_NAME}' (Deprecated), and '{@value StateBackendLoader#FS_STATE_BACKEND_NAME}' (Deprecated).
*
* @param config
* The configuration to load the state backend from
* @param classLoader
* The class loader that should be used to load the state backend
* @param logger
* Optionally, a logger to log actions to (may be null)
* @return The instantiated state backend.
* @throws DynamicCodeLoadingException
* Thrown if a state backend factory is configured and the
* factory class was not found or the factory could not be instantiated
* @throws IllegalConfigurationException
* May be thrown by the StateBackendFactory when creating
* / configuring the state backend in the factory
* @throws IOException
* May be thrown by the StateBackendFactory when instantiating the state
* backend
*/
    public static StateBackend loadStateBackendFromConfig(
            ReadableConfig config, ClassLoader classLoader, @Nullable Logger logger)
            throws IllegalConfigurationException, DynamicCodeLoadingException, IOException {
checkNotNull(config, "config");
        checkNotNull(classLoader, "classLoader");
        final String backendName = config.get(StateBackendOptions.STATE_BACKEND);
if (backendName == null) {
return null;
}
// by default the factory class is the backend name
String factoryClassName = backendName;
switch (backendName.toLowerCase()) {
case MEMORY_STATE_BACKEND_NAME :
MemoryStateBackend backend = new MemoryStateBackendFactory().createFromConfig(config, classLoader);
if (logger != null) {
logger.warn(("MemoryStateBackend has been deprecated. Please use 'hashmap' state " + "backend instead with JobManagerCheckpointStorage for equivalent ") + "functionality");
logger.info("State backend is set to job manager {}", backend);
                }
                return backend;
case FS_STATE_BACKEND_NAME :
if (logger != null) {
logger.warn("{} state backend has been deprecated. Please use 'hashmap' state " + "backend instead.", backendName.toLowerCase());
}
// fall through and use the HashMapStateBackend instead which
// utilizes the same HeapKeyedStateBackend runtime implementation.
case HASHMAP_STATE_BACKEND_NAME :
                HashMapStateBackend hashMapStateBackend =
                        new HashMapStateBackendFactory().createFromConfig(config, classLoader);
                if (logger != null) {
                    logger.info("State backend is set to heap memory {}", hashMapStateBackend);
                }
return hashMapStateBackend;
case ROCKSDB_STATE_BACKEND_NAME :
factoryClassName = ROCKSDB_STATE_BACKEND_FACTORY;
// fall through to the 'default' case that uses reflection to load the backend
// that way we can keep RocksDB in a separate module
default :
                if (logger != null) {
logger.info("Loading state backend via factory {}", factoryClassName);
}
StateBackendFactory<?> factory;
try {
@SuppressWarnings("rawtypes")
                    Class<? extends StateBackendFactory> clazz =
                            Class.forName(factoryClassName, false, classLoader)
                                    .asSubclass(StateBackendFactory.class);
factory = clazz.newInstance();
} catch (ClassNotFoundException e) {
                    throw new DynamicCodeLoadingException(
                            "Cannot find configured state backend factory class: " + backendName, e);
} catch (ClassCastException | InstantiationException | IllegalAccessException e) {
throw new DynamicCodeLoadingException(((("The class configured under '" + StateBackendOptions.STATE_BACKEND.key()) + "' is not a valid state backend factory (") + backendName) + ')', e);
}
return factory.createFromConfig(config, classLoader);
}
} | 3.26 |
flink_FileSourceSplitState_toFileSourceSplit_rdh | /**
* Use the current row count as the starting row count to create a new FileSourceSplit.
*/
@SuppressWarnings("unchecked")
public SplitT toFileSourceSplit() {
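        // a null position means that no reading progress has been checkpointed for this split yet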
        final CheckpointedPosition position =
                ((offset == CheckpointedPosition.NO_OFFSET) && (recordsToSkipAfterOffset == 0))
                        ? null
                        : new CheckpointedPosition(offset, recordsToSkipAfterOffset);
final FileSourceSplit updatedSplit = split.updateWithCheckpointedPosition(position);
// some sanity checks to avoid surprises and not accidentally lose split information
if (updatedSplit == null) {
            throw new FlinkRuntimeException(
                    "Split returned 'null' in updateWithCheckpointedPosition(): " + split);
}
if (updatedSplit.getClass() != split.getClass()) {
            throw new FlinkRuntimeException(
                    String.format(
                            "Split returned different type in updateWithCheckpointedPosition(). "
                                    + "Split type is %s, returned type is %s",
                            split.getClass().getName(), updatedSplit.getClass().getName()));
        }
return ((SplitT) (updatedSplit));
} | 3.26 |