name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
flink_JoinOperator_projectTuple15_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> ProjectJoin<I1, I2, Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> projectTuple15() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple11_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ProjectJoin<I1, I2, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> projectTuple11() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple20_rdh | /**
 * Projects a pair of joined elements to a {@link Tuple} with the previously selected
 * fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> ProjectJoin<I1, I2, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> projectTuple20() {
    // Local renamed from the machine-mangled "v85" to fTypes, matching the naming used
    // by every sibling projectTupleX method.
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> tType = new TupleTypeInfo<>(fTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTupleX_rdh | // --------------------------------------------------------------------------------------------
// BEGIN_OF_TUPLE_DEPENDENT_CODE
// GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator.
/**
 * Chooses a projectTupleX according to the length of
 * {@link org.apache.flink.api.java.operators.JoinOperator.JoinProjection#fieldIndexes}.
 *
 * @return The projected DataSet.
 * @see org.apache.flink.api.java.operators.JoinOperator.ProjectJoin
 */
@SuppressWarnings("unchecked")
public <OUT extends Tuple> ProjectJoin<I1, I2, OUT> projectTupleX() {
    // Dispatch on the projection arity; each case delegates to the generated fixed-arity
    // method and casts its result to the caller's expected tuple type.
    final ProjectJoin<I1, I2, OUT> projectJoin;
    switch (fieldIndexes.length) {
        case 1: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple1(); break;
        case 2: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple2(); break;
        case 3: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple3(); break;
        case 4: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple4(); break;
        case 5: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple5(); break;
        case 6: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple6(); break;
        case 7: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple7(); break;
        case 8: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple8(); break;
        case 9: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple9(); break;
        case 10: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple10(); break;
        case 11: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple11(); break;
        case 12: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple12(); break;
        case 13: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple13(); break;
        case 14: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple14(); break;
        case 15: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple15(); break;
        case 16: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple16(); break;
        case 17: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple17(); break;
        case 18: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple18(); break;
        case 19: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple19(); break;
        case 20: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple20(); break;
        case 21: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple21(); break;
        case 22: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple22(); break;
        case 23: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple23(); break;
        case 24: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple24(); break;
        case 25: projectJoin = (ProjectJoin<I1, I2, OUT>) projectTuple25(); break;
        default: throw new IllegalStateException("Excessive arity in tuple.");
    }
    return projectJoin;
} | 3.26 |
flink_JoinOperator_projectTuple1_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0> ProjectJoin<I1, I2, Tuple1<T0>> projectTuple1() {
    // Resolve the concrete type of the single projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple1<T0>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_equalTo_rdh | /**
 * Continues a Join transformation and defines the fields of the second join {@link DataSet}
 * that should be used as join keys.
 *
 * <p>The resulting {@link DefaultJoin} wraps each pair of joining elements into a
 * {@link Tuple2}, with the element of the first input being the first field of the
 * tuple and the element of the second input being the second field of the tuple.
 *
 * @param fields
 *     The fields of the second join DataSet that should be used as keys.
 * @return A DefaultJoin that represents the joined DataSet.
 */
@Override
public DefaultJoin<I1, I2> equalTo(String... fields) {
// Resolve the field expressions against the second input's type to build the key set.
return createDefaultJoin(new Keys.ExpressionKeys<>(fields, input2.getType()));
}
/**
* Continues a Join transformation and defines a {@link KeySelector} function for the
* second join {@link DataSet}.
*
* <p>The KeySelector function is called for each element of the second DataSet and
* extracts a single key value on which the DataSet is joined.
*
* <p>The resulting {@link DefaultJoin} wraps each pair of joining elements into a
* {@link Tuple2} | 3.26 |
flink_JoinOperator_extractFieldTypes_rdh | // END_OF_TUPLE_DEPENDENT_CODE
// -----------------------------------------------------------------------------------------
private TypeInformation<?>[] extractFieldTypes(int[] fields) {
TypeInformation<?>[] fieldTypes = new TypeInformation[fields.length];
for (int i = 0; i < fields.length; i++) {
TypeInformation<?> typeInfo;
if (isFieldInFirst[i]) {
if (fields[i] >= 0) {typeInfo = ((TupleTypeInfo<?>) (ds1.getType())).getTypeAt(fields[i]);
} else {typeInfo = ds1.getType();
}
} else if (fields[i] >= 0) {
typeInfo = ((TupleTypeInfo<?>) (ds2.getType())).getTypeAt(fields[i]);
} else {
typeInfo = ds2.getType();
}
fieldTypes[i] = typeInfo;
} return fieldTypes;
} | 3.26 |
flink_JoinOperator_projectTuple19_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> ProjectJoin<I1, I2, Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>> projectTuple19() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple19<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple5_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4> ProjectJoin<I1, I2, Tuple5<T0, T1, T2, T3, T4>> projectTuple5() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple17_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> ProjectJoin<I1, I2, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> projectTuple17() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple13_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> ProjectJoin<I1, I2, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> projectTuple13() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectFirst_rdh | /**
 * Continues a ProjectJoin transformation and adds fields of the first join input.
 *
 * <p>If the first join input is a {@link Tuple} {@link DataSet}, fields can be selected by
 * their index. If the first join input is not a Tuple DataSet, no parameters should be
 * passed.
 *
 * <p>Fields of the first and second input can be added by chaining the method calls of
 * {@link org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectFirst(int...)} and
 * {@link org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectSecond(int...)}.
 *
 * @param firstFieldIndexes
 *     If the first input is a Tuple DataSet, the indexes of the selected fields.
 *     For a non-Tuple DataSet, do not provide parameters. The order of fields in the
 *     output tuple is defined by the order of field indexes.
 * @return An extended JoinProjection.
 * @see Tuple
 * @see DataSet
 */
protected JoinProjection<I1, I2> projectFirst(int... firstFieldIndexes) {
    final boolean isFirstTuple =
            (ds1.getType() instanceof TupleTypeInfo) && (firstFieldIndexes.length > 0);
    if (!isFirstTuple && (firstFieldIndexes.length != 0)) {
        // A field index was supplied although the first input is not a tuple.
        throw new IllegalArgumentException("Input is not a Tuple. Call projectFirst() without arguments to include it.");
    } else if (firstFieldIndexes.length > (22 - this.fieldIndexes.length)) {
        // Too many field indexes provided in total.
        throw new IllegalArgumentException("You may select only up to twenty-two (22) fields in total.");
    }
    final int offset = this.fieldIndexes.length;
    if (isFirstTuple) {
        // Grow the index and origin-flag arrays to make room for the new fields.
        this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, offset + firstFieldIndexes.length);
        this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + firstFieldIndexes.length);
        // Record each selected field, validating that its index is within the first input.
        final int maxFieldIndex = numFieldsDs1;
        for (int i = 0; i < firstFieldIndexes.length; i++) {
            Preconditions.checkElementIndex(firstFieldIndexes[i], maxFieldIndex);
            this.isFieldInFirst[offset + i] = true;
            this.fieldIndexes[offset + i] = firstFieldIndexes[i];
        }
    } else {
        // Non-tuple input: include the whole element as one output field, marked by -1.
        this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, offset + 1);
        this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
        this.isFieldInFirst[offset] = true;
        this.fieldIndexes[offset] = -1;
    }
    return this;
} | 3.26 |
flink_JoinOperator_projectTuple22_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> ProjectJoin<I1, I2, Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>> projectTuple22() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple3_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2> ProjectJoin<I1, I2, Tuple3<T0, T1, T2>> projectTuple3() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple3<T0, T1, T2>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple7_rdh | /**
 * Projects a pair of joined elements to a {@link Tuple} with the previously selected
 * fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6> ProjectJoin<I1, I2, Tuple7<T0, T1, T2, T3, T4, T5, T6>> projectTuple7() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    // Local renamed from the machine-mangled "v60" to tType, matching the naming used
    // by every sibling projectTupleX method.
    TupleTypeInfo<Tuple7<T0, T1, T2, T3, T4, T5, T6>> tType = new TupleTypeInfo<>(fTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple2_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1> ProjectJoin<I1, I2, Tuple2<T0, T1>> projectTuple2() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple2<T0, T1>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple6_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5> ProjectJoin<I1, I2, Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_projectTuple16_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> ProjectJoin<I1, I2, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> projectTuple16() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_JoinOperator_withPartitioner_rdh | /**
 * Sets a custom partitioner for this join. The partitioner will be called on the join keys to
 * determine the partition a key should be assigned to. The partitioner is evaluated on both
 * join inputs in the same way.
 *
 * <p>NOTE: A custom partitioner can only be used with single-field join keys, not with
 * composite join keys.
 *
 * @param partitioner
 *     The custom partitioner to be used.
 * @return This join operator, to allow for function chaining.
 */
public JoinOperator<I1, I2, OUT> withPartitioner(Partitioner<?> partitioner) {
    if (partitioner != null) {
        // Custom partitioners only work on single-field keys; validate both key sets.
        keys1.validateCustomPartitioner(partitioner, null);
        keys2.validateCustomPartitioner(partitioner, null);
    }
    this.customPartitioner = getInput1().clean(partitioner);
    return this;
} | 3.26 |
flink_JoinOperator_projectTuple12_rdh | /**
 * Projects a pair of joined elements to a {@link Tuple} with the previously selected
 * fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> ProjectJoin<I1, I2, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> projectTuple12() {
    // Local renamed from the machine-mangled "v69" to fTypes, matching the naming used
    // by every sibling projectTupleX method.
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> tType = new TupleTypeInfo<>(fTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple23_rdh | /**
 * Projects a pair of joined elements onto a {@link Tuple} consisting of the previously
 * selected fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> ProjectJoin<I1, I2, Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> projectTuple23() {
    // Resolve the concrete type of every projected field from the two join inputs.
    TypeInformation<?>[] fieldTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> resultType = new TupleTypeInfo<>(fieldTypes);
    return new ProjectJoin<>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint,
            this.fieldIndexes, this.isFieldInFirst, resultType, this);
} | 3.26 |
flink_TableOperatorWrapper_isClosed_rdh | /**
 * Checks if the wrapped operator has been closed.
 *
 * <p>Note that this method must be called in the task thread.
 */
public boolean isClosed()
{
// Unsynchronized read of the flag; per the javadoc above, access is confined to the
// task thread, so no additional synchronization is applied here.
return closed;
} | 3.26 |
flink_NumericSummaryAggregator_aggregate_rdh | /**
 * Add a value to the current aggregation.
 *
 * <p>Null, NaN and infinite values are only counted; every remaining ("non-missing")
 * value feeds the min/max/sum aggregators and an online mean/variance update.
 *
 * @param value the value to fold into the aggregation; may be null
 */
@Override
public void aggregate(T value) {
if (value == null) {
nullCount++;
} else if (isNan(value)) {
nanCount++;}
else if (isInfinite(value)) {
infinityCount++;
} else {
nonMissingCount++;
min.aggregate(value);
// NOTE(review): f0 is a machine-renamed field; combine() merges it with other.max,
// so it is presumably the max aggregator — confirm against the original source.
f0.aggregate(value);
sum.aggregate(value);
double doubleValue = value.doubleValue();
// Welford-style online update: mean += delta/n, then M2 += delta * (x - newMean).
// The second factor deliberately reads the *updated* mean, so statement order matters.
double delta = doubleValue - mean.value();
mean = mean.add(delta / nonMissingCount);
m2 = m2.add(delta * (doubleValue - mean.value()));
}
} | 3.26 |
flink_NumericSummaryAggregator_combine_rdh | /**
 * Combine two aggregations.
 *
 * <p>Counts are summed; min/max/sum are merged pairwise; mean and M2 are merged with
 * the pairwise (parallel) variance-combination formula, which is order-sensitive.
 *
 * @param otherSameType the other aggregator; must be a {@code NumericSummaryAggregator<T>}
 */
@Override
public void combine(Aggregator<T, NumericColumnSummary<T>> otherSameType) {
NumericSummaryAggregator<T> other = ((NumericSummaryAggregator<T>) (otherSameType));nullCount += other.nullCount;
nanCount
+= other.nanCount;
infinityCount += other.infinityCount;
// If this side has no non-missing values yet, adopt the other side's state wholesale.
if (nonMissingCount == 0) {
nonMissingCount = other.nonMissingCount;min = other.min;
// NOTE(review): f0 is a machine-renamed field; it is assigned from / merged with
// other.max, so it is presumably the max aggregator — confirm against the original.
f0 =
other.max;
sum = other.sum;
mean = other.mean;
m2 = other.m2;} else if (other.nonMissingCount != 0) {
long combinedCount = nonMissingCount + other.nonMissingCount;min.combine(other.min);
f0.combine(other.max);
sum.combine(other.sum);
// Pairwise merge of mean/M2 (Chan et al.): the M2 term still needs the old
// nonMissingCount, so the count is only overwritten at the very end.
double
deltaMean = other.mean.value() - mean.value();
mean = mean.add((deltaMean * other.nonMissingCount) / combinedCount);
m2 = m2.add(other.m2).add((((deltaMean * deltaMean) * nonMissingCount) * other.nonMissingCount) / combinedCount);
nonMissingCount =
combinedCount;
}
} | 3.26 |
flink_Pattern_m0_rdh | /**
 * Specifies that this pattern is optional for a final match of the pattern sequence to happen.
 *
 * <p>NOTE(review): {@code m0} is a machine-renamed method; from the body it is presumably
 * the deobfuscated {@code optional()} — confirm against the original source.
 *
 * @return The same pattern as optional.
 * @throws MalformedPatternException
 * if the quantifier is not applicable to this pattern.
 */
public Pattern<T, F> m0() {
// Guard: rejects the combination with a greedy predecessor (see the helper's name).
checkIfPreviousPatternGreedy();
quantifier.optional();
return this;
}
/**
* Specifies that this pattern can occur {@code one or more} times. This means at least one and
* at most infinite number of events can be matched to this pattern.
*
* <p>If this quantifier is enabled for a pattern {@code A.oneOrMore().followedBy(B)} and a
* sequence of events {@code A1 A2 B} appears, this will generate patterns: {@code A1 B} and
* {@code A1 A2 B}. See also {@link #allowCombinations()}.
*
* @return The same pattern with a {@link Quantifier#looping(ConsumingStrategy)} | 3.26 |
flink_Pattern_next_rdh | /**
 * Appends a new group pattern to the existing one. The new pattern enforces strict temporal
 * contiguity. This means that the whole pattern sequence matches only if an event which matches
 * this pattern directly follows the preceding matching event. Thus, there cannot be any events
 * in between two matching events.
 *
 * @param group
 *     the pattern to append
 * @return A new pattern which is appended to this one
 */
public GroupPattern<T, F> next(Pattern<T, F> group) {
    // STRICT contiguity: no events may occur between the two matching events.
    return new GroupPattern<>(this, group, ConsumingStrategy.STRICT, afterMatchSkipStrategy);
} | 3.26 |
flink_Pattern_within_rdh | /**
 * Defines the maximum time interval in which a matching pattern has to be completed in order to
 * be considered valid. This interval corresponds to the maximum time gap between events.
 *
 * @param windowTime
 *     Time of the matching window
 * @param withinType
 *     Type of the within interval between events
 * @return The same pattern operator with the new window length
 */
public Pattern<T, F> within(Time windowTime, WithinType withinType) {
    // A null window leaves any previously configured interval untouched.
    if (windowTime != null) {
        windowTimes.put(withinType, windowTime);
    }
    return this;
} | 3.26 |
flink_Pattern_subtype_rdh | /**
 * Applies a subtype constraint on the current pattern. This means that an event has to be of
 * the given subtype in order to be matched.
 *
 * @param subtypeClass
 *     Class of the subtype
 * @param <S>
 *     Type of the subtype
 * @return The same pattern with the new subtype constraint
 */
public <S extends F> Pattern<T, S> subtype(final Class<S> subtypeClass) {
    Preconditions.checkNotNull(subtypeClass, "The class cannot be null.");
    final SubtypeCondition<F> subtypeCondition = new SubtypeCondition<F>(subtypeClass);
    // First condition is taken as-is; an existing condition is AND-combined with it.
    this.condition = (this.condition == null)
            ? subtypeCondition
            : new RichAndCondition<>(this.condition, subtypeCondition);
    @SuppressWarnings("unchecked")
    final Pattern<T, S> result = (Pattern<T, S>) this;
    return result;
} | 3.26 |
flink_Pattern_or_rdh | /**
 * Adds a condition that has to be satisfied by an event in order to be considered a match. If
 * another condition has already been set, the new one is going to be combined with the previous
 * with a logical {@code OR}. In other case, this is going to be the only condition.
 *
 * @param condition
 *     The condition as an {@link IterativeCondition}.
 * @return The pattern with the new condition is set.
 */
public Pattern<T, F> or(IterativeCondition<F> condition) {
    Preconditions.checkNotNull(condition, "The condition cannot be null.");
    ClosureCleaner.clean(condition, ClosureCleanerLevel.RECURSIVE, true);
    // First condition is taken as-is; later ones are OR-combined with what is already set.
    this.condition = (this.condition == null)
            ? condition
            : new RichOrCondition<>(this.condition, condition);
    return this;
} | 3.26 |
flink_Pattern_times_rdh | /**
 * Specifies that the pattern can occur between from and to times with time interval corresponds
 * to the maximum time gap between previous and current event for each times.
 *
 * @param from
 *     number of times matching event must appear at least
 * @param to
 *     number of times matching event must appear at most
 * @param windowTime
 *     time of the matching window between times
 * @return The same pattern with the number of times range applied
 * @throws MalformedPatternException
 *     if the quantifier is not applicable to this pattern.
 */
public Pattern<T, F> times(int from, int to, @Nullable Time windowTime) {
    checkIfNoNotPattern();
    checkIfQuantifierApplied();
    this.quantifier = Quantifier.times(quantifier.getConsumingStrategy());
    if (from == 0) {
        // A lower bound of zero means the pattern as a whole is optional;
        // the effective minimum then becomes a single occurrence.
        this.quantifier.optional();
        from = 1;
    }
    this.times = Times.of(from, to, windowTime);
    return this;
}
/**
* Specifies that this pattern can occur the specified times at least. This means at least the
* specified times and at most infinite number of events can be matched to this pattern.
*
* @return The same pattern with a {@link Quantifier#looping(ConsumingStrategy)} | 3.26 |
flink_Pattern_notFollowedBy_rdh | /**
 * Appends a new pattern to the existing one. The new pattern enforces that there is no event
 * matching this pattern between the preceding pattern and succeeding this one.
 *
 * <p><b>NOTE:</b> There has to be other pattern after this one.
 *
 * @param name
 *     Name of the new pattern
 * @return A new pattern which is appended to this one
 */
public Pattern<T, T> notFollowedBy(final String name) {
    // NOT-patterns cannot follow an optional pattern (an optional path into a NOT
    // condition is not supported by the matching engine yet).
    if (quantifier.hasProperty(QuantifierProperty.OPTIONAL)) {
        throw new UnsupportedOperationException(
                ("Specifying a pattern with an optional path to NOT condition is not supported yet. "
                        + "You can simulate such pattern with two independent patterns, one with and the other without ")
                        + "the optional part.");
    }
    return new Pattern<>(name, this, ConsumingStrategy.NOT_FOLLOW, afterMatchSkipStrategy);
} | 3.26 |
flink_Pattern_m1_rdh | /**
 * Specifies exact number of times that this pattern should be matched and time interval
 * corresponds to the maximum time gap between previous and current event for each times.
 *
 * @param times
 *     number of times matching event must appear
 * @param windowTime
 *     time of the matching window between times
 * @return The same pattern with number of times applied
 * @throws MalformedPatternException
 *     if the quantifier is not applicable to this pattern.
 */
public Pattern<T, F> m1(int times, @Nullable Time windowTime) {
    checkIfNoNotPattern();
    checkIfQuantifierApplied();
    Preconditions.checkArgument(times > 0, "You should give a positive number greater than 0.");
    this.quantifier = Quantifier.times(quantifier.getConsumingStrategy());
    this.times = Times.of(times, windowTime);
    return this;
} | 3.26 |
flink_Pattern_getAfterMatchSkipStrategy_rdh | /**
 * Returns the {@link AfterMatchSkipStrategy} configured for this pattern.
 *
 * @return the pattern's {@link AfterMatchSkipStrategy.SkipStrategy} after match.
 */
public AfterMatchSkipStrategy getAfterMatchSkipStrategy() {
return afterMatchSkipStrategy;
} | 3.26 |
flink_Pattern_greedy_rdh | /**
 * Specifies that this pattern is greedy. This means as many events as possible will be matched
 * to this pattern.
 *
 * @return The same pattern with {@link Quantifier#greedy} set to true.
 * @throws MalformedPatternException
 *     if the quantifier is not applicable to this pattern.
 */
public Pattern<T, F> greedy() {
    // Greediness is not applicable to NOT-patterns or group patterns.
    checkIfNoNotPattern();
    checkIfNoGroupPattern();
    this.quantifier.greedy();
    return this;
} | 3.26 |
flink_Pattern_where_rdh | /**
 * Adds a condition that has to be satisfied by an event in order to be considered a match. If
 * another condition has already been set, the new one is going to be combined with the previous
 * with a logical {@code AND}. In other case, this is going to be the only condition.
 *
 * @param condition
 *     The condition as an {@link IterativeCondition}.
 * @return The pattern with the new condition is set.
 */
public Pattern<T, F> where(IterativeCondition<F> condition) {
    Preconditions.checkNotNull(condition, "The condition cannot be null.");
    ClosureCleaner.clean(condition, ClosureCleanerLevel.RECURSIVE, true);
    // First condition is taken as-is; later ones are AND-combined with what is already set.
    this.condition = (this.condition == null)
            ? condition
            : new RichAndCondition<>(this.condition, condition);
    return this;
} | 3.26 |
flink_Pattern_until_rdh | /**
 * Applies a stop condition for a looping state. It allows cleaning the underlying state.
 *
 * @param untilCondition
 *     a condition an event has to satisfy to stop collecting events into looping state
 * @return The same pattern with applied untilCondition
 */
public Pattern<T, F> until(IterativeCondition<F> untilCondition) {
    Preconditions.checkNotNull(untilCondition, "The condition cannot be null");
    // Only one stop condition may be set, and only on a looping quantifier.
    if (this.untilCondition != null) {
        throw new MalformedPatternException("Only one until condition can be applied.");
    }
    if (!quantifier.hasProperty(QuantifierProperty.LOOPING)) {
        throw new MalformedPatternException("The until condition is only applicable to looping states.");
    }
    ClosureCleaner.clean(untilCondition, ClosureCleanerLevel.RECURSIVE, true);
    this.untilCondition = untilCondition;
    return this;
} | 3.26 |
flink_Pattern_m2_rdh | /**
 * Appends a new group pattern to the existing one using non-deterministic relaxed temporal
 * contiguity ({@code SKIP_TILL_ANY}): a match of the appended group may combine with ANY
 * preceding match, not only the next one, and ignored events may occur in between.
 *
 * <p>NOTE(review): this javadoc previously mirrored {@code followedBy}, but the code passes
 * {@code ConsumingStrategy.SKIP_TILL_ANY}, i.e. it corresponds to a "followed-by-any"
 * semantics — presumably this is the group variant of {@code followedByAny}; confirm.
 *
 * @param group
 * the pattern to append
 * @return A new pattern which is appended to this one
 */
public GroupPattern<T, F> m2(Pattern<T, F> group) {
    return new GroupPattern<>(this, group, ConsumingStrategy.SKIP_TILL_ANY, afterMatchSkipStrategy);
} | 3.26
flink_Pattern_followedBy_rdh | /**
 * Appends a new group pattern to the existing one using relaxed (non-strict) temporal
 * contiguity: events matching neither pattern may occur between the preceding match and a
 * match of the appended group, and are simply ignored.
 *
 * @param group
 * the pattern to append
 * @return A new pattern which is appended to this one
 */
public GroupPattern<T, F> followedBy(Pattern<T, F> group) {
    // SKIP_TILL_NEXT = relaxed contiguity: skip non-matching events, take the next match.
    return new GroupPattern<>(this, group, ConsumingStrategy.SKIP_TILL_NEXT, afterMatchSkipStrategy);
} | 3.26
flink_Pattern_followedByAny_rdh | /**
 * Appends a new pattern with the given name using non-deterministic relaxed contiguity
 * ({@code SKIP_TILL_ANY}): the new pattern may match against any following event, and
 * ignored events may be interleaved between matches.
 *
 * @param name
 * Name of the new pattern
 * @return A new pattern which is appended to this one
 */
public Pattern<T, T> followedByAny(final String name) {
    return new Pattern<>(name, this, ConsumingStrategy.SKIP_TILL_ANY, afterMatchSkipStrategy);
} | 3.26
flink_Pattern_begin_rdh | /**
 * Starts a new pattern sequence whose initial pattern is the provided group pattern.
 *
 * @param group
 * the pattern to begin with
 * @return the first pattern of a pattern sequence
 */
public static <T, F extends T> GroupPattern<T, F> begin(Pattern<T, F> group) {
    // The head of a sequence has no predecessor, hence the null previous pattern,
    // and starts with the default no-skip after-match strategy.
    return new GroupPattern<>(null, group, ConsumingStrategy.STRICT, AfterMatchSkipStrategy.noSkip());
} | 3.26
flink_DistributedCache_getFile_rdh | // ------------------------------------------------------------------------
/**
 * Returns the local {@link File} for a previously registered cache entry, blocking until the
 * background copy task for that entry has finished.
 *
 * @param name name under which the file was registered
 * @return the locally materialized file
 */
public File getFile(String name) {
    if (name == null) {
        throw new NullPointerException("name must not be null");
    }
    final Future<Path> copyTask = cacheCopyTasks.get(name);
    if (copyTask == null) {
        throw new IllegalArgumentException((("File with name '" + name) + "' is not available.") + " Did you forget to register the file?");
    }
    try {
        final Path localPath = copyTask.get();
        final URI qualified = localPath.makeQualified(localPath.getFileSystem()).toUri();
        return new File(qualified);
    } catch (ExecutionException e) {
        // surface the root cause of the failed copy task rather than the wrapper
        throw new RuntimeException("An error occurred while copying the file.", e.getCause());
    } catch (Exception e) {
        throw new RuntimeException(("Error while getting the file registered under '" + name) + "' from the distributed cache", e);
    }
} | 3.26
flink_DistributedCache_parseCachedFilesFromString_rdh | /**
 * Parses a list of distributed cache entries encoded in a string. Can be used to parse a config
 * option described by {@link org.apache.flink.configuration.PipelineOptions#CACHED_FILES}.
 *
 * <p>See {@link org.apache.flink.configuration.PipelineOptions#CACHED_FILES} for the format.
 *
 * @param files
 * List of string encoded distributed cache entries.
 */
public static List<Tuple2<String, DistributedCacheEntry>> parseCachedFilesFromString(List<String> files) {
    return files.stream()
            .map(ConfigurationUtils::parseMap)
            .map(props -> {
                // "executable" is optional and defaults to false when absent
                final boolean executable =
                        Optional.ofNullable(props.get("executable"))
                                .map(Boolean::parseBoolean)
                                .orElse(false);
                return Tuple2.of(
                        props.get("name"),
                        new DistributedCacheEntry(props.get("path"), executable));
            })
            .collect(Collectors.toList());
} | 3.26
flink_DistributedCache_writeFileInfoToConfig_rdh | // ------------------------------------------------------------------------
// Utilities to read/write cache files from/to the configuration
// ------------------------------------------------------------------------
/**
 * Serializes one distributed-cache entry into the given configuration under a fresh index.
 *
 * @param name logical name of the cached file
 * @param e entry describing path, executability, zipped-ness and optional blob key
 * @param conf configuration to write into
 */
public static void writeFileInfoToConfig(String name, DistributedCacheEntry e, Configuration conf) {
    // Entries are numbered 1..n; bump the counter first so this entry gets a unique index.
    final int entryIndex = conf.getInteger(CACHE_FILE_NUM, 0) + 1;
    conf.setInteger(CACHE_FILE_NUM, entryIndex);
    conf.setString(CACHE_FILE_NAME + entryIndex, name);
    conf.setString(CACHE_FILE_PATH + entryIndex, e.filePath);
    // f0 presumably is the "executable" key prefix — TODO confirm at its declaration.
    conf.setBoolean(f0 + entryIndex, e.isExecutable || new File(e.filePath).canExecute());
    conf.setBoolean(CACHE_FILE_DIR + entryIndex, e.isZipped || new File(e.filePath).isDirectory());
    if (e.blobKey != null) {
        conf.setBytes(CACHE_FILE_BLOB_KEY + entryIndex, e.blobKey);
    }
} | 3.26
flink_MutableConfig_of_rdh | /**
 * Creates a new mutable {@link Configuration} copy of the given readable configuration.
 *
 * @param config
 * A readable configuration.
 * @return A mutable Configuration.
 */
public static Configuration of(ReadableConfig config) {
    if (config instanceof Configuration) {
        // Copy-construct so callers can mutate the result without affecting the source.
        return new Configuration((Configuration) config);
    }
    throw new IllegalStateException("Unexpected implementation of ReadableConfig: " + config.getClass());
} | 3.26
flink_ManagedTableFactory_discoverManagedTableFactory_rdh | /**
 * Discovers the unique implementation of {@link ManagedTableFactory} without identifier.
 *
 * <p>Delegates to {@link FactoryUtil}; presumably fails if zero or multiple implementations
 * are present on the classpath — TODO confirm the exact failure mode in FactoryUtil.
 */static ManagedTableFactory discoverManagedTableFactory(ClassLoader classLoader) {
    return FactoryUtil.discoverManagedTableFactory(classLoader, ManagedTableFactory.class);
} | 3.26
flink_WindowAggregateQueryOperation_getSize_rdh | /**
 * Size of a {@link WindowType#TUMBLE} or {@link WindowType#SLIDE} window. Empty for {@link WindowType#SESSION} window.
 *
 * @return size of a window, or {@link Optional#empty()} when the window type has no size
 */
public Optional<ValueLiteralExpression> getSize() {
    // BUGFIX: Optional.of(size) throws NullPointerException when size is null (SESSION
    // windows, per the documented contract above); ofNullable yields the promised empty.
    return Optional.ofNullable(size);
} | 3.26
flink_WindowAggregateQueryOperation_getSlide_rdh | /**
 * Slide of {@link WindowType#SLIDE} window. Empty for other windows.
 *
 * @return slide of a slide window, or {@link Optional#empty()} for non-slide windows
 */
public Optional<ValueLiteralExpression> getSlide() {
    // BUGFIX: Optional.of(slide) throws NullPointerException when slide is null (any
    // non-SLIDE window, per the documented contract); ofNullable yields the promised empty.
    return Optional.ofNullable(slide);
} | 3.26
flink_ArrayResultIterator_next_rdh | // -------------------------------------------------------------------------
// Result Iterator Methods
// -------------------------------------------------------------------------
/**
 * Returns the next record with its position, or {@code null} once all {@code num} records
 * have been handed out.
 */
@Nullable
@Override
public RecordAndPosition<E> next() {
    if (pos >= num) {
        return null;
    }
    // Reuse the single mutable RecordAndPosition to avoid a per-record allocation.
    recordAndPosition.setNext(records[pos]);
    pos++;
    return recordAndPosition;
} | 3.26
flink_ArrayResultIterator_set_rdh | // -------------------------------------------------------------------------
// Setting
// -------------------------------------------------------------------------
/**
 * Sets the records to be returned by this iterator. Each record's {@link RecordAndPosition}
 * will have the same offset (for {@link RecordAndPosition#getOffset()}. The first returned
 * record will have a records-to-skip count of {@code skipCountOfFirst + 1}, following the
 * contract that each record needs to point to the position AFTER itself (because a checkpoint
 * taken after the record was emitted needs to resume from after that record).
 *
 * @param records array backing this iterator; only the first {@code num} entries are used
 * @param num number of valid entries in {@code records}
 * @param offset offset reported for every record in this batch
 * @param skipCountOfFirst records-to-skip count preceding the first record
 */
public void set(final E[] records, final int num, final long offset, final long skipCountOfFirst) {
    this.records = records;
    this.num = num;
    // restart iteration at the beginning of the new batch
    this.pos = 0;
    this.recordAndPosition.set(null, offset, skipCountOfFirst);
} | 3.26
flink_ZooKeeperLeaderElectionHaServices_cleanupZooKeeperPaths_rdh | /**
 * Cleans up leftover ZooKeeper paths: first deletes this service's own znode, then walks up
 * and removes any now-empty parent znodes.
 */
private void cleanupZooKeeperPaths() throws Exception {
    deleteOwnedZNode();
    tryDeleteEmptyParentZNodes();
} | 3.26
flink_ZooKeeperLeaderElectionHaServices_tryDeleteEmptyParentZNodes_rdh | /**
 * Tries to delete empty parent znodes, ascending from the service's namespace path until
 * either a non-empty znode or the ZooKeeper root is reached.
 *
 * <p>IMPORTANT: This method can be removed once all supported ZooKeeper versions support the
 * container {@link org.apache.zookeeper.CreateMode}.
 *
 * @throws Exception
 * if the deletion fails for other reason than {@link KeeperException.NotEmptyException}
 */
private void tryDeleteEmptyParentZNodes() throws Exception {
    // Operate outside the client's namespace so absolute parent paths can be addressed.
    final CuratorFramework nonNamespaceClient =
            curatorFrameworkWrapper.asCuratorFramework().usingNamespace(null);
    for (String currentPath =
                    getParentPath(
                            getNormalizedPath(
                                    curatorFrameworkWrapper.asCuratorFramework().getNamespace()));
            !isRootPath(currentPath);
            currentPath = getParentPath(currentPath)) {
        try {
            nonNamespaceClient.delete().forPath(currentPath);
        } catch (KeeperException.NotEmptyException ignored) {
            // We can only delete empty znodes; stop ascending at the first non-empty one.
            break;
        }
    }
} | 3.26
flink_ZooKeeperLeaderElectionHaServices_createLeaderRetrievalService_rdh | // ///////////////////////////////////////////////
// LeaderElection/-Retrieval-related methods
// ///////////////////////////////////////////////
/**
 * Creates a leader retrieval service watching the leader path of the given component.
 *
 * @param componentId id of the component whose leader should be retrieved
 */
@Override
protected LeaderRetrievalService createLeaderRetrievalService(String componentId) {
    // Maybe use a single service for leader retrieval
    final String leaderPath = ZooKeeperUtils.getLeaderPath(componentId);
    return ZooKeeperUtils.createLeaderRetrievalService(
            curatorFrameworkWrapper.asCuratorFramework(), leaderPath, configuration);
} | 3.26
flink_GSCommitRecoverableSerializer_deserializeCommitRecoverable_rdh | /**
 * Deserializes a commit recoverable from the input stream.
 *
 * <p>Wire format: final bucket name (UTF), final object name (UTF), component count (int),
 * then per component the most/least significant UUID bits (two longs).
 *
 * @param dataInputStream
 * The input stream
 * @return The commit recoverable
 * @throws IOException
 * On underlying failure
 */
static GSCommitRecoverable deserializeCommitRecoverable(DataInputStream dataInputStream) throws IOException {
    // final blob identifier
    final String bucketName = dataInputStream.readUTF();
    final String objectName = dataInputStream.readUTF();
    final GSBlobIdentifier finalBlobIdentifier = new GSBlobIdentifier(bucketName, objectName);
    // component object ids
    final int componentCount = dataInputStream.readInt();
    final ArrayList<UUID> componentObjectIds = new ArrayList<>(componentCount);
    for (int index = 0; index < componentCount; index++) {
        final long msb = dataInputStream.readLong();
        final long lsb = dataInputStream.readLong();
        componentObjectIds.add(new UUID(msb, lsb));
    }
    final GSCommitRecoverable recoverable = new GSCommitRecoverable(finalBlobIdentifier, componentObjectIds);
    LOGGER.trace("Deserialized commit recoverable {}", recoverable);
    return recoverable;
} | 3.26
flink_GSCommitRecoverableSerializer_getVersion_rdh | /**
 * The serializer version. Note that, if this changes, then the version of {@link GSResumeRecoverableSerializer} must also change, because it uses this class to serialize
 * itself, in part.
 *
 * @return The serializer version.
 */
@Override
public int getVersion() {
    // f0 presumably holds the serializer-version constant — TODO confirm at its declaration.
    return f0;
} | 3.26
flink_GSCommitRecoverableSerializer_serializeCommitRecoverable_rdh | /**
* Writes a commit recoverable to a data output stream.
*
* @param recoverable
* The commit recoverable
* @param dataOutputStream
* The data output stream
* @throws IOException
* On underlyilng failure
*/
static void serializeCommitRecoverable(GSCommitRecoverable recoverable, DataOutputStream dataOutputStream) throws IOException {
// finalBlobIdentifier
dataOutputStream.writeUTF(recoverable.finalBlobIdentifier.bucketName);
dataOutputStream.writeUTF(recoverable.finalBlobIdentifier.objectName);
// componentObjectIds
dataOutputStream.writeInt(recoverable.componentObjectIds.size());
for (UUID componentObjectId : recoverable.componentObjectIds) {
dataOutputStream.writeLong(componentObjectId.getMostSignificantBits());
dataOutputStream.writeLong(componentObjectId.getLeastSignificantBits());
}
} | 3.26 |
flink_OneShotLatch_trigger_rdh | /**
 * Fires the latch. Code that is blocked on {@link #await()} will now return.
 *
 * <p>Idempotent: triggering an already-triggered latch has no additional effect.
 */
public void trigger() {
    synchronized(lock)
    {
        triggered = true;lock.notifyAll();
    }
} | 3.26
flink_OneShotLatch_await_rdh | /**
 * Waits until {@link OneShotLatch#trigger()} is called. Once {@code #trigger()} has been called
 * this call will always return immediately.
 *
 * <p>If the latch is not triggered within the given timeout, a {@code TimeoutException} will be
 * thrown after the timeout.
 *
 * <p>A timeout value of zero means infinite timeout and make this equivalent to {@link #await()}.
 *
 * @param timeout
 * The value of the timeout, a value of zero indicating infinite timeout.
 * @param timeUnit
 * The unit of the timeout
 * @throws InterruptedException
 * Thrown if the thread is interrupted while waiting.
 * @throws TimeoutException
 * Thrown, if the latch is not triggered within the timeout time.
 */
public void await(long timeout, TimeUnit timeUnit) throws InterruptedException, TimeoutException {
    if (timeout < 0) {
        throw new IllegalArgumentException("time may not be negative");
    }
    if (timeUnit == null) {
        throw new NullPointerException("timeUnit");
    }
    if (timeout == 0) {
        // zero timeout means wait forever
        await();
    } else {
        // compute an absolute deadline so the remaining wait shrinks across wakeups
        final long deadline = System.nanoTime() + timeUnit.toNanos(timeout);
        long millisToWait;
        synchronized(lock) {
            // re-check 'triggered' after every wakeup: Object.wait may return spuriously
            while ((!triggered) && ((millisToWait = (deadline - System.nanoTime()) / 1000000) > 0)) {
                lock.wait(millisToWait);}
            if (!triggered) {
                throw new TimeoutException();
            }
        }
    }
} | 3.26
flink_OneShotLatch_awaitQuietly_rdh | /**
 * Calls {@link #await(long, TimeUnit)} and transforms any {@link InterruptedException} or
 * {@link TimeoutException} into a {@link RuntimeException}.
 */
public void awaitQuietly(long timeout, TimeUnit timeUnit) {
    try {
        await(timeout, timeUnit);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (TimeoutException e) {
        throw new RuntimeException(e);
    }
} | 3.26
flink_OneShotLatch_reset_rdh | /**
 * Resets the latch so that {@link #isTriggered()} returns false.
 *
 * <p>Only clears the flag; it does not notify any currently waiting threads.
 */
public void reset() {
    synchronized(lock) {
        triggered = false;
    }
} | 3.26
flink_YarnResourceManagerDriver_initializeInternal_rdh | // ------------------------------------------------------------------------
// ResourceManagerDriver
// ------------------------------------------------------------------------
/**
 * Starts the YARN AM/RM client, registers the application master, recovers containers from
 * previous attempts, and finally starts the node-manager client.
 */
@Override protected void initializeInternal() throws Exception {
    isRunning =
    true;
    final YarnContainerEventHandler yarnContainerEventHandler = new YarnContainerEventHandler();
    try {
        // f0 presumably is the YARN RM client factory — TODO confirm at its declaration.
        resourceManagerClient = f0.createResourceManagerClient(yarnHeartbeatIntervalMillis, yarnContainerEventHandler);
        resourceManagerClient.init(yarnConfig);
        resourceManagerClient.start();
        final RegisterApplicationMasterResponse registerApplicationMasterResponse = registerApplicationMaster();
        // recover containers surviving from a previous application attempt
        getContainersFromPreviousAttempts(registerApplicationMasterResponse);
        taskExecutorProcessSpecContainerResourcePriorityAdapter = new TaskExecutorProcessSpecContainerResourcePriorityAdapter(registerApplicationMasterResponse.getMaximumResourceCapability(), ExternalResourceUtils.getExternalResourceConfigurationKeys(flinkConfig, YarnConfigOptions.EXTERNAL_RESOURCE_YARN_CONFIG_KEY_SUFFIX));
    } catch (Exception e) {
        throw new ResourceManagerException("Could not start resource manager client.",
        e);
    }
    // the NM client is started outside the try block: its failures are not wrapped above
    nodeManagerClient = yarnNodeManagerClientFactory.createNodeManagerClient(yarnContainerEventHandler);
    nodeManagerClient.init(yarnConfig);
    nodeManagerClient.start();
} | 3.26
flink_YarnResourceManagerDriver_getYarnStatus_rdh | // ------------------------------------------------------------------------
// Utility methods
// ------------------------------------------------------------------------
/**
 * Converts a Flink application status enum to a YARN application status enum.
 *
 * @param status
 * The Flink application status, may be null.
 * @return The corresponding YARN application status; UNDEFINED for null or unknown values.
 */
private FinalApplicationStatus getYarnStatus(ApplicationStatus status) {
    if (status == null) {
        return FinalApplicationStatus.UNDEFINED;
    }
    if (status == ApplicationStatus.SUCCEEDED) {
        return FinalApplicationStatus.SUCCEEDED;
    }
    if (status == ApplicationStatus.FAILED) {
        return FinalApplicationStatus.FAILED;
    }
    if (status == ApplicationStatus.CANCELED) {
        // YARN has no CANCELED state; a canceled Flink app reports as KILLED
        return FinalApplicationStatus.KILLED;
    }
    return FinalApplicationStatus.UNDEFINED;
} | 3.26
flink_YarnResourceManagerDriver_m0_rdh | // ------------------------------------------------------------------------
// Internal
// ------------------------------------------------------------------------
/**
 * Matches newly allocated YARN containers of one priority against pending resource requests:
 * each accepted container completes one pending future and starts a task executor in the
 * container; surplus containers are returned to YARN.
 *
 * @param priority allocation priority the containers were requested with
 * @param containers containers YARN allocated for that priority
 */
private void m0(Priority priority, List<Container> containers) {
    final
    Optional<TaskExecutorProcessSpecContainerResourcePriorityAdapter.TaskExecutorProcessSpecAndResource> taskExecutorProcessSpecAndResourceOpt = taskExecutorProcessSpecContainerResourcePriorityAdapter.getTaskExecutorProcessSpecAndResource(priority);
    Preconditions.checkState(taskExecutorProcessSpecAndResourceOpt.isPresent(), "Receive %s containers with unrecognized priority %s. This should not happen.", containers.size(), priority.getPriority());
    final
    TaskExecutorProcessSpec taskExecutorProcessSpec = taskExecutorProcessSpecAndResourceOpt.get().getTaskExecutorProcessSpec();
    final Resource resource = taskExecutorProcessSpecAndResourceOpt.get().getResource();
    final Queue<CompletableFuture<YarnWorkerNode>> pendingRequestResourceFutures = requestResourceFutures.getOrDefault(taskExecutorProcessSpec, new LinkedList<>());
    log.info("Received {} containers with priority {}, {} pending container requests.", containers.size(), priority, pendingRequestResourceFutures.size());
    final Iterator<Container> containerIterator = containers.iterator();
    final Iterator<AMRMClient.ContainerRequest> pendingContainerRequestIterator = getPendingRequestsAndCheckConsistency(priority, resource, pendingRequestResourceFutures.size()).iterator();
    int numAccepted = 0;
    // pair each container with one pending request until either side runs out
    while (containerIterator.hasNext() && pendingContainerRequestIterator.hasNext()) {
        final Container container = containerIterator.next();
        final AMRMClient.ContainerRequest pendingRequest = pendingContainerRequestIterator.next();
        final ResourceID resourceId = getContainerResourceId(container);
        final CompletableFuture<YarnWorkerNode> requestResourceFuture = pendingRequestResourceFutures.poll();
        Preconditions.checkState(requestResourceFuture != null);
        // drop the spec's queue entirely once its last pending future is consumed
        if (pendingRequestResourceFutures.isEmpty()) {
            requestResourceFutures.remove(taskExecutorProcessSpec);
        }
        requestResourceFuture.complete(new YarnWorkerNode(container, resourceId));
        startTaskExecutorInContainerAsync(container, taskExecutorProcessSpec, resourceId);
        removeContainerRequest(pendingRequest);
        numAccepted++;
    }
    // containers beyond the number of pending requests are handed back to YARN
    int numExcess = 0;
    while (containerIterator.hasNext()) {
        returnExcessContainer(containerIterator.next());
        numExcess++;
    }
    log.info("Accepted {} requested containers, returned {} excess containers, {} pending container requests of resource {}.", numAccepted, numExcess, pendingRequestResourceFutures.size(), resource);
} | 3.26
flink_RecoverableMultiPartUploadImpl_snapshotAndGetRecoverable_rdh | /**
 * Creates a snapshot of this MultiPartUpload, from which the upload can be resumed.
 *
 * <p>Data buffered locally which is less than {@link org.apache.flink.fs.s3.common.FlinkS3FileSystem#S3_MULTIPART_MIN_PART_SIZE
 * S3_MULTIPART_MIN_PART_SIZE}, and cannot be uploaded as part of the MPU and set to S3 as
 * independent objects.
 *
 * <p>This implementation currently blocks until all part uploads are complete and returns a
 * completed future.
 *
 * @param incompletePartFile trailing in-progress part, or null if there is none
 */ @Override
public S3Recoverable snapshotAndGetRecoverable(@Nullable
final
RefCountedFSOutputStream incompletePartFile) throws IOException {final String incompletePartObjectName = safelyUploadSmallPart(incompletePartFile);
    // make sure all other uploads are complete
    // this currently makes the method blocking,
    // to be made non-blocking in the future
    awaitPendingPartsUpload();
    final String objectName = currentUploadInfo.getObjectName();
    final String uploadId = currentUploadInfo.getUploadId();
    final List<PartETag> completedParts = currentUploadInfo.getCopyOfEtagsOfCompleteParts();
    final long sizeInBytes = currentUploadInfo.getExpectedSizeInBytes();
    // the recoverable only carries the trailing-part info when such a part exists
    if (incompletePartObjectName == null) {
        return new S3Recoverable(objectName, uploadId, completedParts, sizeInBytes);
    } else {
        return new S3Recoverable(objectName, uploadId, completedParts, sizeInBytes, incompletePartObjectName, incompletePartFile.getPos());
    }
} | 3.26
flink_RecoverableMultiPartUploadImpl_newUpload_rdh | // ------------------------------------------------------------------------
// factory methods
// ------------------------------------------------------------------------
/**
 * Starts a fresh multi-part upload for the given object and wraps it in a new
 * {@link RecoverableMultiPartUploadImpl} with no completed parts yet.
 */
public static RecoverableMultiPartUploadImpl newUpload(final S3AccessHelper s3AccessHelper, final Executor uploadThreadPool, final String objectName) throws IOException {
    final String uploadId = s3AccessHelper.startMultiPartUpload(objectName);
    return new RecoverableMultiPartUploadImpl(
            s3AccessHelper, uploadThreadPool, uploadId, objectName, new ArrayList<>(), 0L, Optional.empty());
} | 3.26
flink_RecoverableMultiPartUploadImpl_uploadPart_rdh | /**
 * Adds a part to the uploads without any size limitations.
 *
 * <p>This method is non-blocking and does not wait for the part upload to complete.
 *
 * @param file
 * The file with the part data.
 * @throws IOException
 * If this method throws an exception, the RecoverableS3MultiPartUpload
 * should not be used any more, but recovered instead.
 */
@Override
public void uploadPart(RefCountedFSOutputStream file) throws IOException {
    // this is to guarantee that nobody is
    // writing to the file we are uploading.
    checkState(file.isClosed());
    final CompletableFuture<PartETag> future = new CompletableFuture<>();
    // f0 presumably collects the futures of in-flight part uploads — TODO confirm.
    f0.add(future);
    final long partLength = file.getPos();
    currentUploadInfo.registerNewPart(partLength);
    file.retain();// keep the file while the async upload still runs
    uploadThreadPool.execute(new UploadTask(s3AccessHelper, currentUploadInfo, file, future));
} | 3.26
flink_FlinkVersion_rangeOf_rdh | /**
 * Returns all versions within the defined range, inclusive both start and end,
 * in declaration (ordinal) order.
 */
public static Set<FlinkVersion> rangeOf(FlinkVersion start, FlinkVersion end) {
    final Set<FlinkVersion> versions = new LinkedHashSet<>();
    for (FlinkVersion version : values()) {
        // enum compareTo is ordinal-based, so this keeps the [start, end] ordinal range
        if (version.compareTo(start) >= 0 && version.compareTo(end) <= 0) {
            versions.add(version);
        }
    }
    return versions;
} | 3.26
flink_FlinkVersion_current_rdh | /**
 * Returns the version for the current branch, i.e. the last declared enum constant.
 */
public static FlinkVersion current() {
    final FlinkVersion[] allVersions = values();
    return allVersions[allVersions.length - 1];
} | 3.26
flink_VersionedIOReadableWritable_getCompatibleVersions_rdh | /**
 * Returns the compatible version values.
 *
 * <p>By default, the base implementation recognizes only the current version (identified by
 * {@link #getVersion()}) as compatible. This method can be used as a hook and may be overridden
 * to identify more compatible versions.
 *
 * @return an array of integers representing the compatible version values.
 */
public int[] getCompatibleVersions() {
    final int[] compatible = new int[1];
    compatible[0] = getVersion();
    return compatible;
} | 3.26
flink_BinaryArrayData_calculateFixLengthPartSize_rdh | /**
 * It store real value when type is primitive. It store the length and offset of variable-length
 * part when type is string, map, etc.
 *
 * <p>Returns the number of bytes the fixed-length region reserves per element of the given
 * logical type: 1, 2, 4 or 8 bytes depending on the type root.
 */
public static int calculateFixLengthPartSize(LogicalType type) { // ordered by type root definition
    switch (type.getTypeRoot()) {
        // 1-byte values
        case BOOLEAN :
        case TINYINT :
            return 1;
        case CHAR
        :
        case VARCHAR :
        case BINARY :
        case VARBINARY :
        case DECIMAL :
        case BIGINT :
        case DOUBLE :
        case TIMESTAMP_WITHOUT_TIME_ZONE :
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
        case INTERVAL_DAY_TIME :
        case ARRAY :
        case MULTISET :
        case MAP :
        case ROW :
        case STRUCTURED_TYPE :
        case RAW :
            // long and double are 8 bytes;
            // otherwise it stores the length and offset of the variable-length part for types
            // such as is string, map, etc.
            return 8;
        case TIMESTAMP_WITH_TIME_ZONE :
            throw
            new UnsupportedOperationException();
        case SMALLINT :
            return 2;
        // 4-byte values
        case INTEGER :
        case FLOAT :
        case DATE :
        case TIME_WITHOUT_TIME_ZONE :
        case INTERVAL_YEAR_MONTH :
            return 4;
        case DISTINCT_TYPE :
            // distinct types delegate to the size of their source type
            return calculateFixLengthPartSize(((DistinctType) (type)).getSourceType());
        case NULL :case SYMBOL :
        case UNRESOLVED :
        default :
            throw new IllegalArgumentException();
    }
} | 3.26
flink_BinaryArrayData_fromPrimitiveArray_rdh | // ------------------------------------------------------------------------------------------
// Construction Utilities
// ------------------------------------------------------------------------------------------
/**
 * Wraps a primitive boolean array into a {@link BinaryArrayData} (1 byte per element).
 */
public static BinaryArrayData fromPrimitiveArray(boolean[] arr) {
    return
    fromPrimitiveArray(arr, BOOLEAN_ARRAY_OFFSET, arr.length, 1);
} | 3.26
flink_AbstractOrcColumnVector_createFlinkVectorFromConstant_rdh | /**
 * Create flink vector by hive vector from constant.
 *
 * <p>Builds a constant-filled ORC vector of the given batch size and wraps it in a Flink
 * {@link ColumnVector} matching the logical type.
 */
public static ColumnVector createFlinkVectorFromConstant(LogicalType type, Object value, int batchSize) {
    return createFlinkVector(m0(type, value, batchSize), type);
} | 3.26
flink_AbstractOrcColumnVector_m0_rdh | /**
 * Create a orc vector from partition spec value. See hive {@code VectorizedRowBatchCtx#addPartitionColsToBatch}.
 *
 * <p>Each supported logical type maps to an ORC vector kind filled with the constant value;
 * unsupported types fail fast.
 */
private static ColumnVector m0(LogicalType type, Object value, int batchSize) {
    switch (type.getTypeRoot()) {
        case CHAR :
        case VARCHAR :
        case BINARY :
        case VARBINARY :
            return createBytesVector(batchSize, value);
        case BOOLEAN :
            // booleans are encoded as 0/1 in a long vector
            return createLongVector(batchSize, ((Boolean) (value)) ? 1 : 0);
        case TINYINT :
        case SMALLINT :
        case INTEGER :
        case BIGINT : return createLongVector(batchSize, value);
        case DECIMAL :
            DecimalType decimalType = ((DecimalType) (type)); return createDecimalVector(batchSize, decimalType.getPrecision(), decimalType.getScale(), value);
        case FLOAT :
        case DOUBLE :
            return createDoubleVector(batchSize, value);
        case DATE :
            // normalize LocalDate to java.sql.Date before converting to the internal epoch day
            if (value instanceof LocalDate) {
                value = Date.valueOf(((LocalDate)
                (value)));
            }
            return createLongVector(batchSize, toInternal(((Date)
            (value))));
        case TIMESTAMP_WITHOUT_TIME_ZONE :
            return TimestampUtil.createVectorFromConstant(batchSize, value);
        default :
            throw new UnsupportedOperationException("Unsupported type: " + type);
    }
} | 3.26
flink_ContinuousEventTimeTrigger_of_rdh | /**
 * Creates a trigger that continuously fires based on the given interval.
 *
 * @param interval
 * The time interval at which to fire.
 * @param <W>
 * The type of {@link Window Windows} on which this trigger can operate.
 */
public static <W extends Window> ContinuousEventTimeTrigger<W> of(Time interval) {
    // the trigger internally works on milliseconds
    final long intervalMillis = interval.toMilliseconds();
    return new ContinuousEventTimeTrigger<>(intervalMillis);
} | 3.26
flink_CompactingHashTable_getMinPartition_rdh | /**
 * @return number of memory segments in the smallest partition
 */
private int getMinPartition() {
    int minBlockCount = Integer.MAX_VALUE;
    for (InMemoryPartition<T> partition : this.partitions) {
        minBlockCount = Math.min(minBlockCount, partition.getBlockCount());
    }
    return minBlockCount;
} | 3.26
flink_CompactingHashTable_close_rdh | /**
 * Closes the hash table. This effectively releases all internal structures and closes all open
 * files and removes them. The call to this method is valid both as a cleanup after the complete
 * inputs were properly processed, and as an cancellation call, which cleans up all resources
 * that are currently held by the hash join. If another process still access the hash table
 * after close has been called no operations will be performed.
 */
@Override
public void close() {
    // idempotent: only the first caller gets past this guard
    synchronized (this.stateLock) {
        if (this.closed) {
            return;
        }
        this.closed = true;
    }
    LOG.debug("Closing hash table and releasing resources.");
    // release the table structure
    releaseTable();
    // clear the memory in the partitions
    clearPartitions();
} | 3.26
flink_CompactingHashTable_getOverflowSegmentCount_rdh | /**
 * @return number of memory segments used in overflow buckets across all partitions
 */
private int getOverflowSegmentCount() {
    int total = 0;
    for (InMemoryPartition<T> partition : this.partitions) {
        total += partition.numOverflowSegments;
    }
    return total;
} | 3.26
flink_CompactingHashTable_fillCache_rdh | /**
 * utility function that inserts all entries from a bucket and its overflow buckets into the
 * cache
 *
 * <p>Advances the iterator's bucket cursor by one bucket per call; deserialized records are
 * appended to {@code cache}.
 *
 * @return true if last bucket was not reached yet
 * @throws IOException
 */
private boolean fillCache() throws IOException {
    // all buckets consumed -> iteration finished
    if (currentBucketIndex >= table.numBuckets) {
        return false;
    }
    MemorySegment bucket = table.buckets[currentSegmentIndex];
    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(currentBucketOffset + HEADER_PARTITION_OFFSET);
    final InMemoryPartition<T> partition = table.partitions.get(partitionNumber);
    final MemorySegment[] overflowSegments = partition.overflowSegments;
    int countInSegment = bucket.getInt(currentBucketOffset + HEADER_COUNT_OFFSET);
    int numInSegment = 0;
    int posInSegment = currentBucketOffset + BUCKET_POINTER_START_OFFSET;
    int bucketOffset = currentBucketOffset;
    // loop over all segments that are involved in the bucket (original bucket plus overflow
    // buckets)
    while (true) {
        // read every record pointer stored in the current (overflow) bucket
        while (numInSegment < countInSegment) {
            long pointer = bucket.getLong(posInSegment);
            // f0 presumably is the per-entry pointer size in bytes — TODO confirm.
            posInSegment += f0;
            numInSegment++;
            T target = table.buildSideSerializer.createInstance();
            try {
                target = partition.readRecordAt(pointer, target);cache.add(target);
            } catch
            (IOException e) {
                throw new RuntimeException("Error deserializing record from the Hash Table: " + e.getMessage(), e);
            }
        }
        // this segment is done. check if there is another chained bucket
        final long v151 = bucket.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
        if (v151 == BUCKET_FORWARD_POINTER_NOT_SET) {
            break;
        }
        // forward pointer encodes (overflow segment number << 32 | offset within segment)
        final int overflowSegNum = ((int) (v151 >>>
        32));
        bucket = overflowSegments[overflowSegNum];
        bucketOffset = ((int) (v151));
        countInSegment = bucket.getInt(bucketOffset + HEADER_COUNT_OFFSET);
        posInSegment = bucketOffset + BUCKET_POINTER_START_OFFSET;
        numInSegment = 0;
    }
    // advance the cursor; roll over to the next memory segment at segment boundaries
    currentBucketIndex++;
    if ((currentBucketIndex % bucketsPerSegment) == 0) {
        currentSegmentIndex++;
        currentBucketOffset = 0;
    } else {
        currentBucketOffset += HASH_BUCKET_SIZE;
    }
    return true;
} | 3.26
flink_CompactingHashTable_getInitialTableSize_rdh | /**
 * tries to find a good value for the number of buckets will ensure that the number of buckets
 * is a multiple of numPartitions
 *
 * @return number of buckets
 */
private static int getInitialTableSize(int numBuffers, int bufferSize, int numPartitions, int recordLenBytes) {
    // estimate how many records the available memory can hold, then reserve half the
    // per-record overhead bytes worth of space for buckets
    final long totalBytes = ((long) bufferSize) * numBuffers;
    final long storableRecords = totalBytes / (recordLenBytes + RECORD_OVERHEAD_BYTES);
    final long bucketBytes = storableRecords * RECORD_OVERHEAD_BYTES;
    long bucketCount = bucketBytes / (2 * HASH_BUCKET_SIZE) + 1;
    // round up to the next multiple of numPartitions
    bucketCount += numPartitions - (bucketCount % numPartitions);
    // clamp to the int range
    return (int) Math.min(bucketCount, Integer.MAX_VALUE);
} | 3.26
flink_CompactingHashTable_m0_rdh | // --------------------------------------------------------------------------------------------
// Access to the entries
// --------------------------------------------------------------------------------------------
/**
 * Creates a prober for point lookups against this table.
 *
 * @param probeSideComparator comparator for probe-side records
 * @param pairComparator comparator matching probe-side against build-side records
 * @param <PT> probe-side record type
 */
@Override
public <PT> HashTableProber<PT> m0(TypeComparator<PT> probeSideComparator, TypePairComparator<PT, T> pairComparator) {
    return new HashTableProber<PT>(probeSideComparator, pairComparator);
} | 3.26
flink_CompactingHashTable_getEntryIterator_rdh | /**
 * Creates an iterator over all entries currently stored in this hash table.
 *
 * @return Iterator over hash table
 * @see EntryIterator
 */
public MutableObjectIterator<T> getEntryIterator() {
    return new EntryIterator(this);
} | 3.26
flink_CompactingHashTable_open_rdh | // ------------------------------------------------------------------------
// life cycle
// ------------------------------------------------------------------------
/**
 * Initialize the hash table: flips the closed flag, creates the partitions and sets up the
 * bucket table sized to the available memory.
 */
@Override
public void open() {
    // guard against double-open under the state lock
    synchronized (stateLock) {
        if (!closed) {
            throw new IllegalStateException("currently not closed.");
        }
        closed = false;
    }
    // create the partitions
    final int partitionFanOut = getPartitioningFanOutNoEstimates(this.availableMemory.size());
    createPartitions(partitionFanOut);
    // set up the table structure. the write behind buffers are taken away, as are one buffer
    // per partition
    final int numBuckets = getInitialTableSize(this.availableMemory.size(), this.segmentSize, partitionFanOut, this.avgRecordLen);
    initTable(numBuckets, (byte) partitionFanOut);
} | 3.26
flink_CompactingHashTable_getMemoryConsumptionString_rdh | /**
 * @return String containing a summary of the memory consumption for error messages
 */
private String getMemoryConsumptionString() {
    // assembled with a StringBuilder instead of one deeply parenthesized concatenation
    // chain; the produced string is identical to the previous implementation
    return new StringBuilder()
            .append("numPartitions: ").append(this.partitions.size())
            .append(" minPartition: ").append(getMinPartition())
            .append(" maxPartition: ").append(getMaxPartition())
            .append(" number of overflow segments: ").append(getOverflowSegmentCount())
            .append(" bucketSize: ").append(this.buckets.length)
            .append(" Overall memory: ").append(getSize())
            .append(" Partition memory: ").append(getPartitionSize())
            .toString();
} | 3.26
flink_CompactingHashTable_createPartitions_rdh | // --------------------------------------------------------------------------------------------
// Setup and Tear Down of Structures
// --------------------------------------------------------------------------------------------
private void createPartitions(int numPartitions) {
this.partitions.clear();
ListMemorySegmentSource memSource = new ListMemorySegmentSource(this.availableMemory);
for (int i = 0; i < numPartitions; i++) {
this.partitions.add(new InMemoryPartition<T>(this.buildSideSerializer, i, memSource, this.segmentSize, pageSizeInBits));
}
this.compactionMemory = new InMemoryPartition<T>(this.buildSideSerializer, -1, memSource, this.segmentSize,
pageSizeInBits);
} | 3.26 |
flink_CompactingHashTable_resizeHashTable_rdh | /**
* Attempts to double the number of buckets
*
* @return true on success
* @throws IOException
*/
@VisibleForTesting
boolean resizeHashTable() throws IOException {
final int newNumBuckets = 2 * this.numBuckets;
final int bucketsPerSegment =
this.bucketsPerSegmentMask + 1;
final int newNumSegments = (newNumBuckets + (bucketsPerSegment - 1)) / bucketsPerSegment;
final int additionalSegments = newNumSegments - this.buckets.length;
final int numPartitions = this.partitions.size();
if (this.availableMemory.size() < additionalSegments) {
for (int i =
0; i <
numPartitions; i++) {
compactPartition(i);
if (this.availableMemory.size() >= additionalSegments) {
break;
}
}
}
if ((this.availableMemory.size() < additionalSegments) || this.closed) {
return false;
} else {
this.isResizing = true;// allocate new buckets
final int startOffset = (this.numBuckets * HASH_BUCKET_SIZE) % this.segmentSize;
final int
oldNumBuckets = this.numBuckets;
final int oldNumSegments = this.buckets.length;
MemorySegment[] mergedBuckets = new
MemorySegment[newNumSegments];
System.arraycopy(this.buckets, 0, mergedBuckets, 0, this.buckets.length);
this.buckets = mergedBuckets;
this.numBuckets = newNumBuckets;
// initialize all new buckets
boolean oldSegment = startOffset != 0;
final int startSegment = (oldSegment) ? oldNumSegments - 1 : oldNumSegments;
for (int i = startSegment, bucket = oldNumBuckets; (i < newNumSegments) && (bucket < this.numBuckets); i++) {
MemorySegment seg;
int bucketOffset;
if (oldSegment) {
// the first couple of new buckets may be located on an old
// segment
seg = this.buckets[i];
for (int k = oldNumBuckets % bucketsPerSegment; (k < bucketsPerSegment) && (bucket < this.numBuckets); k++ , bucket++) {
bucketOffset = k * HASH_BUCKET_SIZE;
// initialize the header fields
seg.put(bucketOffset + HEADER_PARTITION_OFFSET, assignPartition(bucket, ((byte) (numPartitions))));
seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
}} else {
seg = getNextBuffer();
// go over all buckets in the segment
for (int k = 0; (k < bucketsPerSegment) && (bucket < this.numBuckets); k++ , bucket++) {
bucketOffset = k * HASH_BUCKET_SIZE;
// initialize the header fields
seg.put(bucketOffset + HEADER_PARTITION_OFFSET, assignPartition(bucket, ((byte) (numPartitions))));
seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
}
}
this.buckets[i] = seg;
oldSegment = false;// we write on at most one old segment
}
int hashOffset;
int hash;
int pointerOffset;
long pointer;
IntArrayList hashList = new IntArrayList(NUM_ENTRIES_PER_BUCKET);LongArrayList pointerList = new LongArrayList(NUM_ENTRIES_PER_BUCKET);
IntArrayList overflowHashes = new IntArrayList(64);
LongArrayList overflowPointers = new LongArrayList(64);
// go over all buckets and split them between old and new buckets
for (int i = 0; i < numPartitions; i++) {
InMemoryPartition<T> partition = this.partitions.get(i);
final MemorySegment[] overflowSegments = partition.overflowSegments;
int posHashCode;
for (int j = 0, bucket = i; (j < this.buckets.length) && (bucket < oldNumBuckets);
j++) {MemorySegment segment = this.buckets[j];
// go over all buckets in the segment belonging to the partition
for (int k = bucket % bucketsPerSegment; (k < bucketsPerSegment) && (bucket < oldNumBuckets); k += numPartitions , bucket += numPartitions) {int bucketOffset = k * HASH_BUCKET_SIZE;
if (((int) (segment.get(bucketOffset + HEADER_PARTITION_OFFSET))) != i) {
throw new IOException((("Accessed wrong bucket! wanted: " + i)
+ " got: ") + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
}
// loop over all segments that are involved in the bucket (original bucket
// plus overflow buckets)
int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
int numInSegment = 0;
pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
hashOffset = bucketOffset + BUCKET_HEADER_LENGTH;
while (true) {
while
(numInSegment < countInSegment) {
hash = segment.getInt(hashOffset);
if (((hash % this.numBuckets) != bucket) && ((hash %
this.numBuckets) != (bucket + oldNumBuckets))) {
throw new IOException((((("wanted: " + bucket) + " or ") + (bucket + oldNumBuckets)) + " got: ") + (hash % this.numBuckets));
}
pointer = segment.getLong(pointerOffset);
hashList.add(hash);
pointerList.add(pointer);
pointerOffset += f0;
hashOffset += HASH_CODE_LEN;numInSegment++;
} // this segment is done. check if there is another chained bucket
final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
break;
}
final int v115 = ((int) (forwardPointer >>> 32));
segment = overflowSegments[v115];
bucketOffset = ((int) (forwardPointer));
countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
hashOffset = bucketOffset + BUCKET_HEADER_LENGTH;
numInSegment = 0;
}
segment = this.buckets[j];
bucketOffset = k * HASH_BUCKET_SIZE;
// reset bucket for re-insertion
segment.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
segment.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
// refill table
if (hashList.size() != pointerList.size()) {throw new IOException((("Pointer and hash counts do not match. hashes: " + hashList.size()) + " pointer: ") + pointerList.size());
}
int newSegmentIndex = (bucket + oldNumBuckets) / bucketsPerSegment;
MemorySegment newSegment = this.buckets[newSegmentIndex];
// we need to avoid overflows in the first run
int oldBucketCount = 0;
int newBucketCount = 0;
while (!hashList.isEmpty()) {
hash = hashList.removeLast();
pointer = pointerList.removeLong(pointerList.size() - 1);
posHashCode = hash % this.numBuckets;
if ((posHashCode == bucket) && (oldBucketCount < NUM_ENTRIES_PER_BUCKET)) {
bucketOffset
= (bucket % bucketsPerSegment) * HASH_BUCKET_SIZE;
insertBucketEntryFromStart(segment, bucketOffset, hash, pointer, partition.getPartitionNumber());
oldBucketCount++;
} else if ((posHashCode == (bucket + oldNumBuckets)) && (newBucketCount < NUM_ENTRIES_PER_BUCKET)) {
bucketOffset = ((bucket + oldNumBuckets) % bucketsPerSegment) * HASH_BUCKET_SIZE;
insertBucketEntryFromStart(newSegment, bucketOffset, hash, pointer, partition.getPartitionNumber());
newBucketCount++;
} else if ((posHashCode == (bucket + oldNumBuckets)) || (posHashCode == bucket)) {
overflowHashes.add(hash);overflowPointers.add(pointer);
} else {
throw new IOException((((("Accessed wrong bucket. Target: " + bucket) + " or ") + (bucket + oldNumBuckets)) + " Hit: ") + posHashCode);
}
}
hashList.clear();
pointerList.clear();
}
}
// reset partition's overflow buckets and reclaim their memory
this.availableMemory.addAll(partition.resetOverflowBuckets());
// clear overflow lists
int bucketArrayPos; int bucketInSegmentPos;
MemorySegment bucket;
while (!overflowHashes.isEmpty()) {
hash = overflowHashes.removeLast();
pointer = overflowPointers.removeLong(overflowPointers.size() - 1);
posHashCode = hash % this.numBuckets;
bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
bucket = this.buckets[bucketArrayPos];
insertBucketEntryFromStart(bucket, bucketInSegmentPos, hash,
pointer, partition.getPartitionNumber());
}
overflowHashes.clear();
overflowPointers.clear();
}
this.isResizing = false;
return true;
}} | 3.26 |
flink_CompactingHashTable_getSize_rdh | /**
* Size of all memory segments owned by this hash table
*
* @return size in bytes
*/
private long getSize() {
long numSegments = 0;
numSegments += this.availableMemory.size();
numSegments += this.buckets.length;
for (InMemoryPartition<T> p : this.partitions) {
numSegments +=
p.getBlockCount();
numSegments += p.numOverflowSegments;
}
numSegments += this.compactionMemory.getBlockCount();
return numSegments * this.segmentSize;
} | 3.26 |
flink_CompactingHashTable_insertBucketEntryFromStart_rdh | /**
* IMPORTANT!!! We pass only the partition number, because we must make sure we get a fresh
* partition reference. The partition reference used during search for the key may have become
* invalid during the compaction.
*/
private void insertBucketEntryFromStart(MemorySegment bucket, int bucketInSegmentPos, int hashCode, long pointer, int partitionNumber) throws
IOException {
boolean checkForResize = false;
// find the position to put the hash code and pointer
final int count = bucket.getInt(bucketInSegmentPos + HEADER_COUNT_OFFSET);
if (count < NUM_ENTRIES_PER_BUCKET) {
// we are good in our current bucket, put the values
bucket.putInt((bucketInSegmentPos + BUCKET_HEADER_LENGTH) + (count * HASH_CODE_LEN),
hashCode);// hash code
bucket.putLong((bucketInSegmentPos + BUCKET_POINTER_START_OFFSET) + (count * f0), pointer);// pointer
bucket.putInt(bucketInSegmentPos + HEADER_COUNT_OFFSET, count + 1);// update count
} else {
// we need to go to the overflow buckets
final InMemoryPartition<T> p = this.partitions.get(partitionNumber);
final long originalForwardPointer
= bucket.getLong(bucketInSegmentPos + HEADER_FORWARD_OFFSET);
final long forwardForNewBucket;
if (originalForwardPointer != BUCKET_FORWARD_POINTER_NOT_SET) {
// forward pointer set
final int overflowSegNum = ((int) (originalForwardPointer >>> 32));
final int segOffset = ((int) (originalForwardPointer));
final MemorySegment seg = p.overflowSegments[overflowSegNum];
final int obCount = seg.getInt(segOffset + HEADER_COUNT_OFFSET);
// check if there is space in this overflow bucket
if (obCount < NUM_ENTRIES_PER_BUCKET) {
// space in this bucket and we are done
seg.putInt((segOffset + BUCKET_HEADER_LENGTH) + (obCount * HASH_CODE_LEN), hashCode);// hash code
seg.putLong((segOffset + BUCKET_POINTER_START_OFFSET) + (obCount * f0), pointer);// pointer
seg.putInt(segOffset + HEADER_COUNT_OFFSET, obCount + 1);// update count
return;
} else {
// no space here, we need a new bucket. this current overflow bucket will be the
// target of the new overflow bucket
forwardForNewBucket = originalForwardPointer;
}
} else {
// no overflow bucket yet, so we need a first one
forwardForNewBucket = BUCKET_FORWARD_POINTER_NOT_SET;
}
// we need a new overflow bucket
MemorySegment overflowSeg;
final int overflowBucketNum;
final int overflowBucketOffset;
// first, see if there is space for an overflow bucket remaining in the last overflow
// segment
if (p.nextOverflowBucket == 0) {
// no space left in last bucket, or no bucket yet, so create an overflow segment
overflowSeg = getNextBuffer();
overflowBucketOffset = 0;
overflowBucketNum = p.numOverflowSegments;
// add the new overflow segment
if (p.overflowSegments.length <= p.numOverflowSegments) {
MemorySegment[] newSegsArray = new MemorySegment[p.overflowSegments.length * 2];
System.arraycopy(p.overflowSegments, 0, newSegsArray, 0, p.overflowSegments.length);
p.overflowSegments = newSegsArray;
}
p.overflowSegments[p.numOverflowSegments] = overflowSeg;
p.numOverflowSegments++;
checkForResize = true;
} else {
// there is space in the last overflow bucket
overflowBucketNum = p.numOverflowSegments - 1;
overflowSeg = p.overflowSegments[overflowBucketNum];
overflowBucketOffset = p.nextOverflowBucket << NUM_INTRA_BUCKET_BITS;
}
// next overflow bucket is one ahead. if the segment is full, the next will be at the
// beginning
// of a new segment
p.nextOverflowBucket = (p.nextOverflowBucket == this.bucketsPerSegmentMask) ? 0 : p.nextOverflowBucket + 1;// insert the new overflow bucket in the chain of buckets
// 1) set the old forward pointer
// 2) let the bucket in the main table point to this one
overflowSeg.putLong(overflowBucketOffset + HEADER_FORWARD_OFFSET, forwardForNewBucket);
final long v48
= (((long) (overflowBucketNum)) << 32) | ((long) (overflowBucketOffset));
bucket.putLong(bucketInSegmentPos + HEADER_FORWARD_OFFSET, v48);
// finally, insert the values into the overflow buckets
overflowSeg.putInt(overflowBucketOffset + BUCKET_HEADER_LENGTH, hashCode);// hash code
overflowSeg.putLong(overflowBucketOffset + BUCKET_POINTER_START_OFFSET, pointer);// pointer
// set the count to one
overflowSeg.putInt(overflowBucketOffset
+ HEADER_COUNT_OFFSET, 1);
if (checkForResize && (!this.isResizing)) {
// check if we should resize buckets
if (this.buckets.length <= getOverflowSegmentCount()) {
resizeHashTable();
}
}
}} | 3.26 |
flink_CompactingHashTable_insertOrReplaceRecord_rdh | /**
* Replaces record in hash table if record already present or append record if not. May trigger
* expensive compaction.
*
* @param record
* record to insert or replace
* @throws IOException
*/
public void insertOrReplaceRecord(T record) throws IOException {
if (this.closed) {
return;
}
final int searchHashCode = MathUtils.jenkinsHash(this.buildSideComparator.hash(record));
final int posHashCode = searchHashCode % this.numBuckets;
// get the bucket for the given hash code
final MemorySegment originalBucket = this.buckets[posHashCode >> this.bucketsPerSegmentBits];
final int originalBucketOffset = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
MemorySegment bucket = originalBucket;
int bucketInSegmentOffset = originalBucketOffset;
// get the basic characteristics of the bucket
final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
final InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
final MemorySegment[] overflowSegments = partition.overflowSegments;
this.buildSideComparator.setReference(record);
int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
int numInSegment = 0;
int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
// loop over all segments that are involved in the bucket (original bucket plus overflow
// buckets)
while
(true) {
while (numInSegment < countInSegment) {
final int thisCode = bucket.getInt(posInSegment);
posInSegment += HASH_CODE_LEN;
// check if the hash code matches
if (thisCode == searchHashCode) {
// get the pointer to the pair
final int pointerOffset = (bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET) + (numInSegment * f0);
final long pointer = bucket.getLong(pointerOffset);
// deserialize the key to check whether it is really equal, or whether we had
// only a hash collision
T valueAtPosition = partition.readRecordAt(pointer);
if (this.buildSideComparator.equalToReference(valueAtPosition)) {
long newPointer = insertRecordIntoPartition(record, partition, true);
bucket.putLong(pointerOffset, newPointer);
return;
}
}
numInSegment++;
}
// this segment is done. check if there is another chained bucket
long newForwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
if (newForwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
// nothing found. append and insert
long pointer = insertRecordIntoPartition(record, partition, false);
if (countInSegment < NUM_ENTRIES_PER_BUCKET) {
// we are good in our current bucket, put the values
bucket.putInt((bucketInSegmentOffset + BUCKET_HEADER_LENGTH) + (countInSegment * HASH_CODE_LEN), searchHashCode);// hash code
bucket.putLong((bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET) + (countInSegment * f0), pointer);// pointer
bucket.putInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET, countInSegment + 1);// update count
} else {
insertBucketEntryFromStart(originalBucket, originalBucketOffset, searchHashCode, pointer, partitionNumber);
}
return;
}
final int overflowSegNum = ((int) (newForwardPointer >>> 32));
bucket = overflowSegments[overflowSegNum];
bucketInSegmentOffset = ((int) (newForwardPointer));
countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
numInSegment = 0;
}
} | 3.26 |
flink_CompactingHashTable_getPartitionSize_rdh | /**
* Size of all memory segments owned by the partitions of this hash table excluding the
* compaction partition
*
* @return size in bytes
*/
private long getPartitionSize() {
long
numSegments = 0;
for (InMemoryPartition<T> p : this.partitions) {
numSegments += p.getBlockCount();
}
return numSegments * this.segmentSize;
} | 3.26 |
flink_CompactingHashTable_buildTableWithUniqueKey_rdh | // ------------------------------------------------------------------------
// adding data to the hash table
// ------------------------------------------------------------------------
public void buildTableWithUniqueKey(final MutableObjectIterator<T> input) throws IOException {
// go over the complete input and insert every element into the hash table
T value;
while (this.running && ((value = input.next()) != null)) {
insertOrReplaceRecord(value);
}
} | 3.26 |
flink_CompactingHashTable_compactPartition_rdh | /**
* Compacts (garbage collects) partition with copy-compact strategy using compaction partition
*
* @param partitionNumber
* partition to compact
* @throws IOException
*/
private void compactPartition(final int partitionNumber) throws IOException {
// do nothing if table was closed, parameter is invalid or no garbage exists
if ((this.closed || (partitionNumber >= this.partitions.size())) || this.partitions.get(partitionNumber).isCompacted()) {
return;
}
// release all segments owned by compaction partition
this.compactionMemory.clearAllMemory(availableMemory);
this.compactionMemory.allocateSegments(1);
this.compactionMemory.pushDownPages();
T tempHolder = this.buildSideSerializer.createInstance();
final int numPartitions = this.partitions.size();
InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
MemorySegment[] overflowSegments = partition.overflowSegments;
long pointer;
int pointerOffset;
int bucketOffset;
final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
for (int i = 0, bucket = partitionNumber; (i < this.buckets.length) && (bucket < this.numBuckets); i++) {
MemorySegment segment = this.buckets[i];
// go over all buckets in the segment belonging to the partition
for (int k = bucket % bucketsPerSegment; (k < bucketsPerSegment) && (bucket <
this.numBuckets); k += numPartitions , bucket += numPartitions) {bucketOffset = k * HASH_BUCKET_SIZE;
if (((int) (segment.get(bucketOffset + HEADER_PARTITION_OFFSET))) != partitionNumber) {
throw new IOException((("Accessed wrong bucket! wanted: " + partitionNumber) + " got: ") + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
}
// loop over all segments that are involved in the bucket (original bucket plus
// overflow buckets)
int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
int numInSegment = 0;
pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
while (true) {
while (numInSegment < countInSegment) {
pointer = segment.getLong(pointerOffset);
tempHolder = partition.readRecordAt(pointer, tempHolder);
pointer = this.compactionMemory.appendRecord(tempHolder);
segment.putLong(pointerOffset, pointer);
pointerOffset += f0;
numInSegment++;
}
// this segment is done. check if there is another chained bucket
final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
break;
}
final int
overflowSegNum = ((int)
(forwardPointer >>> 32));
segment = overflowSegments[overflowSegNum];
bucketOffset = ((int) (forwardPointer));countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
numInSegment = 0;
}
segment = this.buckets[i];
}
}
// swap partition with compaction partition
this.compactionMemory.setPartitionNumber(partitionNumber);
this.partitions.add(partitionNumber, compactionMemory);
this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
this.partitions.get(partitionNumber).setIsCompacted(true);
// this.partitions.get(partitionNumber).pushDownPages();
this.compactionMemory = partition;
this.compactionMemory.resetRecordCounter();
this.compactionMemory.setPartitionNumber(-1);
this.compactionMemory.overflowSegments = null;
this.compactionMemory.numOverflowSegments = 0;
this.compactionMemory.nextOverflowBucket = 0;
// try to allocate maximum segment count
this.compactionMemory.clearAllMemory(this.availableMemory);
int maxSegmentNumber = this.getMaxPartition();
this.compactionMemory.allocateSegments(maxSegmentNumber);
this.compactionMemory.resetRWViews();
this.compactionMemory.pushDownPages();
} | 3.26 |
flink_CompactingHashTable_assignPartition_rdh | /**
* Assigns a partition to a bucket.
*
* @param bucket
* bucket index
* @param numPartitions
* number of partitions
* @return The hash code for the integer.
*/
private static byte assignPartition(int bucket, byte numPartitions) {
return ((byte) (bucket % numPartitions));
} | 3.26 |
flink_CompactingHashTable_getMaxPartition_rdh | /**
*
* @return number of memory segments in the largest partition
*/
private int getMaxPartition() {
int maxPartition = 0;
for (InMemoryPartition<T> p1 : this.partitions) {
if (p1.getBlockCount() > maxPartition) {
maxPartition = p1.getBlockCount();
}
}
return maxPartition;
} | 3.26 |
flink_TableMetaStoreFactory_finishWritingTable_rdh | /**
* After data has been inserted into table, some follow-up works related to metastore may
* need be done like report statistic to metastore.
*/
default void finishWritingTable(Path tablePath) throws
Exception {
} | 3.26 |
flink_MetricRegistryConfiguration_getScopeFormats_rdh | // ------------------------------------------------------------------------
// Getter
// ------------------------------------------------------------------------
public ScopeFormats getScopeFormats() {
return scopeFormats;
} | 3.26 |
flink_MetricRegistryConfiguration_fromConfiguration_rdh | // ------------------------------------------------------------------------
// Static factory methods
// ------------------------------------------------------------------------
/**
* Create a metric registry configuration object from the given {@link Configuration}.
*
* @param configuration
* to generate the metric registry configuration from
* @param maximumFrameSize
* the maximum message size that the RPC system supports
* @return Metric registry configuration generated from the configuration
*/
public static MetricRegistryConfiguration fromConfiguration(Configuration configuration, long maximumFrameSize) {
ScopeFormats v0;
try {
v0 = ScopeFormats.fromConfig(configuration);
} catch (Exception e) {
LOG.warn("Failed to parse scope format, using default scope formats", e);
v0 = ScopeFormats.fromConfig(new Configuration());
}
char delim;
try {delim = configuration.getString(MetricOptions.SCOPE_DELIMITER).charAt(0);
} catch (Exception e) {
LOG.warn("Failed to parse delimiter, using default delimiter.", e);
delim = '.';
}// padding to account for serialization overhead
final long messageSizeLimitPadding = 256;
return new MetricRegistryConfiguration(v0, delim, maximumFrameSize - messageSizeLimitPadding);
} | 3.26 |
flink_VertexInputInfoComputationUtils_m0_rdh | /**
* Compute the {@link JobVertexInputInfo} for a {@link DistributionPattern#POINTWISE} edge. This
* computation algorithm will evenly distribute subpartitions to downstream subtasks according
* to the number of subpartitions. Different downstream subtasks consume roughly the same number
* of subpartitions.
*
* @param sourceCount
* the parallelism of upstream
* @param targetCount
* the parallelism of downstream
* @param numOfSubpartitionsRetriever
* a retriever to get the number of subpartitions
* @param isDynamicGraph
* whether is dynamic graph
* @return the computed {@link JobVertexInputInfo}
*/
static JobVertexInputInfo m0(int sourceCount, int targetCount, Function<Integer, Integer> numOfSubpartitionsRetriever, boolean isDynamicGraph) {
final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>();
if (sourceCount >= targetCount) {
for (int index = 0; index < targetCount;
index++) {
int start = (index * sourceCount) / targetCount;
int end = ((index + 1) * sourceCount) / targetCount;
IndexRange partitionRange = new IndexRange(start, end - 1);
IndexRange subpartitionRange = computeConsumedSubpartitionRange(index,
1, () -> numOfSubpartitionsRetriever.apply(start), isDynamicGraph, false);
executionVertexInputInfos.add(new ExecutionVertexInputInfo(index, partitionRange,
subpartitionRange));
}
} else {
for (int partitionNum = 0; partitionNum < sourceCount; partitionNum++) {
int start = (((partitionNum * targetCount) + sourceCount) - 1) / sourceCount;
int end = ((((partitionNum + 1) * targetCount)
+ sourceCount) - 1) / sourceCount;
int numConsumers = end - start;
IndexRange partitionRange = new IndexRange(partitionNum,
partitionNum);
// Variable used in lambda expression should be final or effectively final
final int finalPartitionNum = partitionNum;
for (int i
= start; i < end; i++) {
IndexRange subpartitionRange = computeConsumedSubpartitionRange(i, numConsumers, () -> numOfSubpartitionsRetriever.apply(finalPartitionNum), isDynamicGraph, false);
executionVertexInputInfos.add(new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange));
}
}
}
return new JobVertexInputInfo(executionVertexInputInfos);
} | 3.26 |
flink_VertexInputInfoComputationUtils_computeVertexInputInfoForAllToAll_rdh | /**
* Compute the {@link JobVertexInputInfo} for a {@link DistributionPattern#ALL_TO_ALL} edge.
* This computation algorithm will evenly distribute subpartitions to downstream subtasks
* according to the number of subpartitions. Different downstream subtasks consume roughly the
* same number of subpartitions.
*
* @param sourceCount
* the parallelism of upstream
* @param targetCount
* the parallelism of downstream
* @param numOfSubpartitionsRetriever
* a retriever to get the number of subpartitions
* @param isDynamicGraph
* whether is dynamic graph
* @param isBroadcast
* whether the edge is broadcast
* @return the computed {@link JobVertexInputInfo}
*/
static JobVertexInputInfo computeVertexInputInfoForAllToAll(int
sourceCount, int targetCount, Function<Integer, Integer> numOfSubpartitionsRetriever, boolean isDynamicGraph, boolean isBroadcast) {
final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>();
IndexRange partitionRange = new IndexRange(0, sourceCount - 1);
for
(int i = 0; i < targetCount; ++i) {
IndexRange subpartitionRange = computeConsumedSubpartitionRange(i, targetCount, () -> numOfSubpartitionsRetriever.apply(0), isDynamicGraph, isBroadcast);
executionVertexInputInfos.add(new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange));
}
return new JobVertexInputInfo(executionVertexInputInfos);
} | 3.26 |
flink_VertexInputInfoComputationUtils_computeConsumedSubpartitionRange_rdh | /**
* Compute the consumed subpartition range for a subtask. This computation algorithm will evenly
* distribute subpartitions to downstream subtasks according to the number of subpartitions.
* Different downstream subtasks consume roughly the same number of subpartitions.
*
* @param consumerSubtaskIndex
* the subtask index
* @param numConsumers
* the total number of consumers
* @param numOfSubpartitionsSupplier
* a supplier to get the number of subpartitions
* @param isDynamicGraph
* whether is dynamic graph
* @param isBroadcast
* whether the edge is broadcast
* @return the computed subpartition range
*/
@VisibleForTesting
static IndexRange computeConsumedSubpartitionRange(int consumerSubtaskIndex, int numConsumers, Supplier<Integer> numOfSubpartitionsSupplier, boolean isDynamicGraph, boolean isBroadcast) {
int consumerIndex = consumerSubtaskIndex % numConsumers;
if (!isDynamicGraph) {
return new IndexRange(consumerIndex, consumerIndex);
} else {
int numSubpartitions = numOfSubpartitionsSupplier.get();
if (isBroadcast) {
// broadcast results have only one subpartition, and be consumed multiple times.
checkArgument(numSubpartitions == 1);
return new IndexRange(0,
0);
} else {checkArgument(consumerIndex < numConsumers);
checkArgument(numConsumers <= numSubpartitions);
int
start = (consumerIndex * numSubpartitions) / numConsumers;
int nextStart = ((consumerIndex + 1) * numSubpartitions) / numConsumers;
return new IndexRange(start, nextStart - 1);}
}
} | 3.26 |
flink_StringUtf8Utils_encodeUTF8_rdh | /**
* This method must have the same result with JDK's String.getBytes.
*/
public static byte[] encodeUTF8(String str) {
byte[] bytes =
allocateReuseBytes(str.length() * MAX_BYTES_PER_CHAR);
int len = encodeUTF8(str, bytes);
return Arrays.copyOf(bytes, len);
} | 3.26 |
flink_HiveFunctionDefinitionFactory_createFunctionDefinitionFromHiveFunction_rdh | /**
* Create a FunctionDefinition from a Hive function's class name. Called directly by {@link org.apache.flink.table.module.hive.HiveModule}.
*/
public FunctionDefinition createFunctionDefinitionFromHiveFunction(String name, String functionClassName, Context context) {Class<?> functionClz;
try {
functionClz = context.getClassLoader().loadClass(functionClassName);
LOG.info("Successfully loaded Hive udf '{}' with class '{}'", name, functionClassName);
} catch (ClassNotFoundException
e) {
throw new TableException(String.format("Failed to initiate an instance of class %s.", functionClassName), e);}
if
(UDF.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveSimpleUDF", name);
return new HiveSimpleUDF(new HiveFunctionWrapper<>(functionClz), hiveShim);
} else if (GenericUDF.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveGenericUDF", name);
return new HiveGenericUDF(new HiveFunctionWrapper<>(functionClz), hiveShim);
} else if (GenericUDTF.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveGenericUDTF", name);
return new HiveGenericUDTF(new HiveFunctionWrapper<>(functionClz), hiveShim);
} else if ((GenericUDAFResolver2.class.isAssignableFrom(functionClz) || GenericUDAFResolver.class.isAssignableFrom(functionClz)) || UDAF.class.isAssignableFrom(functionClz)) {
if (GenericUDAFResolver2.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveGenericUDAF without UDAF bridging", name);
return new HiveGenericUDAF(new HiveFunctionWrapper<>(functionClz), false, true, hiveShim);
} else if (GenericUDAFResolver.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveGenericUDAF without UDAF bridging", name);
return new HiveGenericUDAF(new HiveFunctionWrapper<>(functionClz), false, false, hiveShim);
} else {
LOG.info("Transforming Hive function '{}' into a HiveGenericUDAF with UDAF bridging", name);
return new HiveGenericUDAF(new HiveFunctionWrapper<>(functionClz), true, false, hiveShim);
}
} else {
throw new
IllegalArgumentException(String.format("HiveFunctionDefinitionFactory cannot initiate FunctionDefinition for class %s", functionClassName));
}} | 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.