name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
flink_CliResultView_increaseRefreshInterval_rdh | // --------------------------------------------------------------------------------------------
protected void increaseRefreshInterval()
{
refreshInterval = Math.min(REFRESH_INTERVALS.size() - 1, refreshInterval + 1);
// reset view
resetAllParts();
    synchronized (refreshThread) {
        refreshThread.notify();
    }
} | 3.26 |
flink_Tuple10_copy_rdh | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> copy() {
return new Tuple10<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9);
}
/**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)} | 3.26 |
flink_Tuple10_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9) {
this.f0 = f0;
    this.f1 = f1;
    this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
} | 3.26 |
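A brief usage sketch of the Tuple10 API shown in the snippets above (of, copy, setFields); the concrete field values are made up for illustration:

```java
import org.apache.flink.api.java.tuple.Tuple10;

public class Tuple10UsageSketch {
    public static void main(String[] args) {
        // of(...) lets the compiler infer the ten generic type arguments.
        Tuple10<Integer, Long, String, Double, Boolean, Short, Byte, Float, Character, String> t =
                Tuple10.of(1, 2L, "three", 4.0, true, (short) 6, (byte) 7, 8.0f, '9', "ten");

        // copy() is shallow: the new tuple references the same field objects.
        Tuple10<Integer, Long, String, Double, Boolean, Short, Byte, Float, Character, String> c = t.copy();

        // setFields(...) overwrites all ten fields of the copy in place; the original is untouched.
        c.setFields(10, 20L, "thirty", 40.0, false, (short) 60, (byte) 70, 80.0f, '0', "hundred");

        System.out.println(t); // (1,2,three,4.0,true,6,7,8.0,9,ten)
        System.out.println(c); // (10,20,thirty,40.0,false,60,70,80.0,0,hundred)
    }
}
```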
flink_Tuple10_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple10)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple10 tuple = ((Tuple10) (o));
    if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
    if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
        return false;
    }
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple10_toString_rdh | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9), where the individual fields are the value returned by calling {@link Object#toString} on
* that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
    return "(" + StringUtils.arrayAwareToString(this.f0)
            + "," + StringUtils.arrayAwareToString(this.f1)
            + "," + StringUtils.arrayAwareToString(this.f2)
            + "," + StringUtils.arrayAwareToString(this.f3)
            + "," + StringUtils.arrayAwareToString(this.f4)
            + "," + StringUtils.arrayAwareToString(this.f5)
            + "," + StringUtils.arrayAwareToString(this.f6)
            + "," + StringUtils.arrayAwareToString(this.f7)
            + "," + StringUtils.arrayAwareToString(this.f8)
            + "," + StringUtils.arrayAwareToString(this.f9)
            + ")";
} | 3.26 |
flink_ParquetSchemaConverter_is32BitDecimal_rdh | // From DecimalDataUtils
public static boolean is32BitDecimal(int precision) {
return precision <= 9;
} | 3.26 |
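The cutoff of 9 digits follows from the int range: a decimal with precision 9 has an unscaled value of at most 999,999,999, which is below Integer.MAX_VALUE (2,147,483,647), whereas a 10-digit unscaled value such as 9,999,999,999 no longer fits in 32 bits.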
flink_ClientUtils_reportHeartbeatPeriodically_rdh | /**
* The client reports the heartbeat to the dispatcher for aliveness.
*
* @param jobClient
* The job client.
* @param interval
* The heartbeat interval.
* @param timeout
* The heartbeat timeout.
* @return The ScheduledExecutorService which reports heartbeat periodically.
*/
public static ScheduledExecutorService reportHeartbeatPeriodically(JobClient jobClient, long interval, long timeout) {
checkArgument(interval < timeout, (((("The client's heartbeat interval " + "should be less than the heartbeat timeout. Please adjust the param '") + ClientOptions.CLIENT_HEARTBEAT_INTERVAL) + "' or '") + ClientOptions.CLIENT_HEARTBEAT_TIMEOUT) + "'");
JobID jobID = jobClient.getJobID();
LOG.info("Begin to report client's heartbeat for the job {}.", jobID);
ScheduledExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
scheduledExecutor.scheduleAtFixedRate(() -> {
LOG.debug("Report client's heartbeat for the job {}.", jobID);
jobClient.reportHeartbeat(System.currentTimeMillis() + timeout);
}, interval, interval, TimeUnit.MILLISECONDS);
return scheduledExecutor;
} | 3.26 |
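The same report-at-a-fixed-rate pattern can be reproduced with plain JDK scheduling. A standalone sketch, with the interval and timeout values invented for the example and a placeholder Runnable standing in for jobClient.reportHeartbeat(...):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class HeartbeatSketch {
    public static void main(String[] args) throws InterruptedException {
        long intervalMs = 1_000L; // assumed heartbeat interval (must be smaller than the timeout)
        long timeoutMs = 5_000L;  // assumed heartbeat timeout

        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(
                // the real code reports "now + timeout" as the expiration time of the heartbeat
                () -> System.out.println("heartbeat, expires at " + (System.currentTimeMillis() + timeoutMs)),
                intervalMs, intervalMs, TimeUnit.MILLISECONDS);

        Thread.sleep(3_500L);    // let a few heartbeats fire
        scheduler.shutdownNow(); // the caller owns the executor and must shut it down
    }
}
```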
flink_ClientUtils_waitUntilJobInitializationFinished_rdh | /**
* This method blocks until the job status is not INITIALIZING anymore.
*
* @param jobStatusSupplier
* supplier returning the job status.
* @param jobResultSupplier
* supplier returning the job result. This will only be called if the
* job reaches the FAILED state.
* @throws JobInitializationException
* If the initialization failed
*/
    public static void waitUntilJobInitializationFinished(SupplierWithException<JobStatus, Exception> jobStatusSupplier, SupplierWithException<JobResult, Exception> jobResultSupplier, ClassLoader userCodeClassloader) throws JobInitializationException {
LOG.debug("Wait until job initialization is finished");
WaitStrategy waitStrategy = new ExponentialWaitStrategy(50, 2000);
try {
JobStatus status = jobStatusSupplier.get();
long attempt = 0;
while (status == JobStatus.INITIALIZING) {
Thread.sleep(waitStrategy.sleepTime(attempt++));
status = jobStatusSupplier.get();
}
if (status == JobStatus.FAILED) {
JobResult result = jobResultSupplier.get();
            Optional<SerializedThrowable> throwable = result.getSerializedThrowable();
            if (throwable.isPresent()) {
Throwable t = throwable.get().deserializeError(userCodeClassloader);
if (t instanceof JobInitializationException) {
throw t;
}
}
}
} catch (JobInitializationException initializationException) {
throw initializationException;
} catch (Throwable throwable) {
ExceptionUtils.checkInterrupted(throwable);
throw new RuntimeException("Error while waiting for job to be initialized", throwable);
}
} | 3.26 |
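The loop above reduces to: poll the status, sleep with an exponentially growing but capped delay, and repeat until the job leaves INITIALIZING. A self-contained sketch of that backoff, reusing the 50 ms / 2000 ms bounds from the snippet and a dummy status supplier in place of the real one:

```java
import java.util.function.Supplier;

public class InitializationWaitSketch {

    enum Status { INITIALIZING, RUNNING, FAILED }

    public static void main(String[] args) throws InterruptedException {
        // Dummy supplier: reports INITIALIZING for ~3 seconds, then RUNNING.
        long start = System.currentTimeMillis();
        Supplier<Status> statusSupplier =
                () -> System.currentTimeMillis() - start < 3_000 ? Status.INITIALIZING : Status.RUNNING;

        long attempt = 0;
        Status status = statusSupplier.get();
        while (status == Status.INITIALIZING) {
            // exponential backoff: 50 ms, 100 ms, 200 ms, ... capped at 2000 ms
            long sleepMs = Math.min(50L << Math.min(attempt, 20), 2_000L);
            Thread.sleep(sleepMs);
            attempt++;
            status = statusSupplier.get();
        }
        System.out.println("left INITIALIZING after " + attempt + " attempts, status = " + status);
    }
}
```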
flink_FieldList_addField_rdh | // --------------------------------------------------------------------------------------------
@Override
public FieldList addField(Integer fieldID) {
if (fieldID == null) {
throw new IllegalArgumentException("Field ID must not be null.");
}
    if (size() == 0) {
        return new FieldList(fieldID);
    } else {
ArrayList<Integer> list = new ArrayList<Integer>(size() + 1);
list.addAll(this.collection);
list.add(fieldID);
return new FieldList(Collections.unmodifiableList(list));
}
} | 3.26 |
flink_FieldList_getDescriptionPrefix_rdh | // --------------------------------------------------------------------------------------------
@Override
protected String getDescriptionPrefix() {
return "[";
} | 3.26 |
flink_FieldList_isValidSubset_rdh | // --------------------------------------------------------------------------------------------
@Override
public boolean isValidSubset(FieldSet set) {
if (set instanceof FieldList) {
return isValidSubset(((FieldList) (set)));
} else {
return false;
}
} | 3.26 |
flink_EncodingFormat_listWritableMetadata_rdh | /**
* Returns the map of metadata keys and their corresponding data types that can be consumed by
* this format for writing. By default, this method returns an empty map.
*
* <p>Metadata columns add additional columns to the table's schema. An encoding format is
* responsible to accept requested metadata columns at the end of consumed rows and persist
* them.
*
* <p>See {@link SupportsWritingMetadata} for more information.
*
* <p>Note: This method is only used if the outer {@link DynamicTableSink} implements {@link SupportsWritingMetadata} and calls this method in {@link SupportsWritingMetadata#listWritableMetadata()}.
*/
default Map<String, DataType> listWritableMetadata() {
    return Collections.emptyMap();
}
/**
* Provides a list of metadata keys that the consumed row will contain as appended metadata
* columns. By default, this method throws an exception if metadata keys are defined.
*
* <p>See {@link SupportsWritingMetadata} for more information.
*
* <p>Note: This method is only used if the outer {@link DynamicTableSink} implements {@link SupportsWritingMetadata} and calls this method in {@link SupportsWritingMetadata#applyWritableMetadata(List, DataType)} | 3.26 |
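For illustration only, this is roughly what an implementing format might return from listWritableMetadata(); the keys "timestamp" and "headers" and their types are invented placeholders, not taken from the snippet:

```java
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

public class WritableMetadataSketch {

    // Hypothetical metadata map an encoding format could advertise for writing.
    public static Map<String, DataType> writableMetadata() {
        Map<String, DataType> metadata = new LinkedHashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).nullable());
        metadata.put("headers", DataTypes.MAP(DataTypes.STRING(), DataTypes.BYTES()).nullable());
        return metadata;
    }

    public static void main(String[] args) {
        writableMetadata().forEach((key, type) -> System.out.println(key + " -> " + type));
    }
}
```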
flink_HiveParserCalcitePlanner_genJoinLogicalPlan_rdh | // Generate Join Logical Plan Relnode by walking through the join AST.
private RelNode genJoinLogicalPlan(HiveParserASTNode joinParseTree, Map<String, RelNode> aliasToRel) throws SemanticException {
        RelNode leftRel = null;
RelNode rightRel = null;
JoinType hiveJoinType;
        if (joinParseTree.getToken().getType() == HiveASTParser.TOK_UNIQUEJOIN) {
String msg = "UNIQUE JOIN is currently not supported in CBO, turn off cbo to use UNIQUE JOIN.";
throw new SemanticException(msg);
}
// 1. Determine Join Type
        switch (joinParseTree.getToken().getType()) {
            case HiveASTParser.TOK_LEFTOUTERJOIN :
hiveJoinType = JoinType.LEFTOUTER;
break;
case HiveASTParser.TOK_RIGHTOUTERJOIN :
hiveJoinType = JoinType.RIGHTOUTER;
break;
case HiveASTParser.TOK_FULLOUTERJOIN :
                hiveJoinType = JoinType.FULLOUTER;
break;
case HiveASTParser.TOK_LEFTSEMIJOIN :
                hiveJoinType = JoinType.LEFTSEMI;
break;
default :
hiveJoinType = JoinType.INNER;
break;
}
// 2. Get Left Table Alias
HiveParserASTNode left = ((HiveParserASTNode) (joinParseTree.getChild(0)));
String leftTableAlias = null;
        if (((left.getToken().getType() == HiveASTParser.TOK_TABREF) || (left.getToken().getType() == HiveASTParser.TOK_SUBQUERY)) || (left.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION)) {
String tableName = HiveParserBaseSemanticAnalyzer.getUnescapedUnqualifiedTableName(((HiveParserASTNode) (left.getChild(0)))).toLowerCase();
leftTableAlias = (left.getChildCount() == 1) ? tableName : unescapeIdentifier(left.getChild(left.getChildCount() - 1).getText().toLowerCase());
leftTableAlias = (left.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION) ? unescapeIdentifier(left.getChild(1).getText().toLowerCase()) : leftTableAlias;
leftRel = aliasToRel.get(leftTableAlias);
} else if (HiveParserUtils.isJoinToken(left)) {
leftRel = genJoinLogicalPlan(left, aliasToRel);
} else {
assert false;
}
// 3. Get Right Table Alias
        HiveParserASTNode right = ((HiveParserASTNode) (joinParseTree.getChild(1)));
String rightTableAlias = null;
if (((right.getToken().getType() == HiveASTParser.TOK_TABREF) || (right.getToken().getType() == HiveASTParser.TOK_SUBQUERY)) || (right.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION)) {
String tableName = HiveParserBaseSemanticAnalyzer.getUnescapedUnqualifiedTableName(((HiveParserASTNode) (right.getChild(0)))).toLowerCase();
rightTableAlias = (right.getChildCount() == 1) ? tableName : unescapeIdentifier(right.getChild(right.getChildCount() - 1).getText().toLowerCase());
rightTableAlias = (right.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION) ? unescapeIdentifier(right.getChild(1).getText().toLowerCase()) : rightTableAlias;
rightRel = aliasToRel.get(rightTableAlias);
} else {
            assert false;
}
// 4. Get Join Condn
HiveParserASTNode joinCond = ((HiveParserASTNode) (joinParseTree.getChild(2)));
// 5. Create Join rel
return genJoinRelNode(leftRel, leftTableAlias, rightRel, rightTableAlias, hiveJoinType, joinCond);
} | 3.26 |
flink_HiveParserCalcitePlanner_convertNullLiteral_rdh | // flink doesn't support type NULL, so we need to convert such literals
private RexNode convertNullLiteral(RexNode rexNode) {
if (rexNode instanceof RexLiteral) {
RexLiteral literal = ((RexLiteral) (rexNode));
if (literal.isNull() && (literal.getTypeName() == SqlTypeName.NULL)) {
                return cluster.getRexBuilder().makeNullLiteral(cluster.getTypeFactory().createSqlType(SqlTypeName.VARCHAR));
            }
        }
        return rexNode;
} | 3.26 |
flink_HiveParserCalcitePlanner_genLogicalPlan_rdh | // Given an AST, generate and return the RelNode plan. Returns null if nothing needs to be done.
public RelNode genLogicalPlan(HiveParserASTNode ast) throws SemanticException {
LOG.info("Starting generating logical plan");
HiveParserPreCboCtx cboCtx = new HiveParserPreCboCtx();
// change the location of position alias process here
processPositionAlias(ast, semanticAnalyzer.getConf());
if (!semanticAnalyzer.genResolvedParseTree(ast, cboCtx)) {
return null;
}
// flink requires orderBy removed from sub-queries, otherwise it can fail to generate the
// plan
for (String alias : semanticAnalyzer.getQB().getSubqAliases()) {
removeOBInSubQuery(semanticAnalyzer.getQB().getSubqForAlias(alias));
}
HiveParserASTNode queryForCbo = ast;
if ((cboCtx.type == Type.CTAS) || (cboCtx.type == Type.VIEW)) {
queryForCbo = cboCtx.nodeOfInterest;// nodeOfInterest is the query
}
verifyCanHandleAst(queryForCbo, getQB(), semanticAnalyzer.getQueryProperties());
semanticAnalyzer.disableJoinMerge = true;
return logicalPlan();
} | 3.26 |
flink_HiveParserCalcitePlanner_genGBLogicalPlan_rdh | // Generate GB plan.
private RelNode genGBLogicalPlan(HiveParserQB qb, RelNode srcRel) throws SemanticException {
RelNode v152 = null;
HiveParserQBParseInfo qbp = qb.getParseInfo();
// 1. Gather GB Expressions (AST) (GB + Aggregations)
// NOTE: Multi Insert is not supported
String detsClauseName = qbp.getClauseNames().iterator().next();
HiveParserASTNode selExprList = qb.getParseInfo().getSelForClause(detsClauseName);
        HiveParserSubQueryUtils.checkForTopLevelSubqueries(selExprList);
        if (((selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI) && (selExprList.getChildCount() == 1)) && (selExprList.getChild(0).getChildCount() == 1)) {
HiveParserASTNode node = ((HiveParserASTNode) (selExprList.getChild(0).getChild(0)));
if (node.getToken().getType() == HiveASTParser.TOK_ALLCOLREF) {
srcRel = genSelectLogicalPlan(qb, srcRel, srcRel, null, null);
                HiveParserRowResolver rr = relToRowResolver.get(srcRel);
                qbp.setSelExprForClause(detsClauseName, HiveParserUtils.genSelectDIAST(rr));
}
}
// Select DISTINCT + windowing; GBy handled by genSelectForWindowing
if ((selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI) && (!qb.getAllWindowingSpecs().isEmpty())) {
            return null;
        }
List<HiveParserASTNode> gbAstExprs = getGroupByForClause(qbp, detsClauseName);
HashMap<String, HiveParserASTNode> aggregationTrees = qbp.getAggregationExprsForClause(detsClauseName);
boolean hasGrpByAstExprs = !gbAstExprs.isEmpty();
boolean hasAggregationTrees = (aggregationTrees != null) && (!aggregationTrees.isEmpty());
        final boolean cubeRollupGrpSetPresent = ((!qbp.getDestRollups().isEmpty()) || (!qbp.getDestGroupingSets().isEmpty())) || (!qbp.getDestCubes().isEmpty());
// 2. Sanity check
if (semanticAnalyzer.getConf().getBoolVar(ConfVars.HIVEGROUPBYSKEW) && (qbp.getDistinctFuncExprsForClause(detsClauseName).size() > 1)) {
throw new SemanticException(ErrorMsg.UNSUPPORTED_MULTIPLE_DISTINCTS.getMsg());
}
if (hasGrpByAstExprs || hasAggregationTrees) {
ArrayList<ExprNodeDesc> gbExprNodeDescs = new ArrayList<>();
ArrayList<String> outputColNames = new ArrayList<>();
// 3. Input, Output Row Resolvers
            HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserRowResolver outputRR = new HiveParserRowResolver();
outputRR.setIsExprResolver(true);
if (hasGrpByAstExprs) {
// 4. Construct GB Keys (ExprNode)
for (HiveParserASTNode gbAstExpr : gbAstExprs) {
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(gbAstExpr, inputRR);
ExprNodeDesc grpbyExprNDesc = astToExprNodeDesc.get(gbAstExpr);
if (grpbyExprNDesc == null) {
throw new SemanticException("Invalid Column Reference: " + gbAstExpr.dump());
}
addToGBExpr(outputRR, inputRR, gbAstExpr, grpbyExprNDesc, gbExprNodeDescs, outputColNames);
}
}
// 5. GroupingSets, Cube, Rollup
int numGroupCols = gbExprNodeDescs.size();
List<Integer> groupingSets = null;
if (cubeRollupGrpSetPresent) {
if (qbp.getDestRollups().contains(detsClauseName)) {
groupingSets = getGroupingSetsForRollup(gbAstExprs.size());
} else if (qbp.getDestCubes().contains(detsClauseName)) {
groupingSets = getGroupingSetsForCube(gbAstExprs.size());
} else if (qbp.getDestGroupingSets().contains(detsClauseName)) {
groupingSets = getGroupingSets(gbAstExprs, qbp, detsClauseName);
}
}
// 6. Construct aggregation function Info
ArrayList<AggInfo> aggInfos = new ArrayList<>();
if (hasAggregationTrees) {
for (HiveParserASTNode value : aggregationTrees.values()) {
// 6.1 Determine type of UDAF
// This is the GenericUDAF name
String aggName = unescapeIdentifier(value.getChild(0).getText());
boolean isDistinct = value.getType() == HiveASTParser.TOK_FUNCTIONDI;
boolean isAllColumns = value.getType() == HiveASTParser.TOK_FUNCTIONSTAR;
// 6.2 Convert UDAF Params to ExprNodeDesc
ArrayList<ExprNodeDesc> aggParameters = new ArrayList<>();
for (int i = 1; i < value.getChildCount(); i++) {
HiveParserASTNode paraExpr = ((HiveParserASTNode) (value.getChild(i)));
ExprNodeDesc paraExprNode = semanticAnalyzer.genExprNodeDesc(paraExpr, inputRR);
aggParameters.add(paraExprNode);
}
GenericUDAFEvaluator.Mode aggMode = HiveParserUtils.groupByDescModeToUDAFMode(Mode.COMPLETE, isDistinct);
GenericUDAFEvaluator genericUDAFEvaluator = HiveParserUtils.getGenericUDAFEvaluator(aggName, aggParameters,
value, isDistinct, isAllColumns, frameworkConfig.getOperatorTable());
assert genericUDAFEvaluator != null;
HiveParserBaseSemanticAnalyzer.GenericUDAFInfo udaf = HiveParserUtils.getGenericUDAFInfo(genericUDAFEvaluator, aggMode, aggParameters);
String aggAlias = null;
if ((value.getParent().getType() == HiveASTParser.TOK_SELEXPR) && (value.getParent().getChildCount() == 2)) {
aggAlias = unescapeIdentifier(value.getParent().getChild(1).getText().toLowerCase());
}
                    AggInfo v185 = new AggInfo(aggParameters, udaf.returnType, aggName, isDistinct, isAllColumns, aggAlias);
                    aggInfos.add(v185);
String field = (aggAlias == null) ? getColumnInternalName((numGroupCols + aggInfos.size()) - 1) : aggAlias;
outputColNames.add(field);
                    outputRR.putExpression(value, new ColumnInfo(field, v185.getReturnType(), "", false));
}
}
// 7. If GroupingSets, Cube, Rollup were used, we account grouping__id
// GROUPING__ID is also required by the GROUPING function, so let's always add it for
// grouping sets
if ((groupingSets != null) && (!groupingSets.isEmpty())) {
String field = getColumnInternalName(numGroupCols + aggInfos.size());
outputColNames.add(field);
                // flink grouping_id's return type is bigint
                outputRR.put(null, VirtualColumn.GROUPINGID.getName(),
                        new ColumnInfo(field, TypeInfoFactory.longTypeInfo, null, true));
}
// 8. We create the group_by operator
v152 = genGBRelNode(gbExprNodeDescs, aggInfos, groupingSets, srcRel);
relToHiveColNameCalcitePosMap.put(v152, buildHiveToCalciteColumnMap(outputRR));
relToRowResolver.put(v152, outputRR);
}
return v152;
} | 3.26 |
flink_HiveParserCalcitePlanner_genDistSortBy_rdh | // Generate plan for sort by, cluster by and distribute by. This is basically same as generating
// order by plan.
// Should refactor to combine them.
private Pair<RelNode, RelNode> genDistSortBy(HiveParserQB qb, RelNode srcRel, boolean outermostOB) throws SemanticException {
RelNode res = null;
RelNode originalInput = null;
HiveParserQBParseInfo qbp = qb.getParseInfo();
String destClause = qbp.getClauseNames().iterator().next();
HiveParserASTNode sortAST = qbp.getSortByForClause(destClause);
HiveParserASTNode distAST = qbp.getDistributeByForClause(destClause);
HiveParserASTNode clusterAST = qbp.getClusterByForClause(destClause);
if (((sortAST != null) || (distAST != null)) || (clusterAST != null)) {
            List<RexNode> virtualCols = new ArrayList<>();
List<Pair<HiveParserASTNode, TypeInfo>> vcASTAndType = new ArrayList<>();
List<RelFieldCollation> fieldCollations = new ArrayList<>();
List<Integer> distKeys = new ArrayList<>();
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserRexNodeConverter v200 = new HiveParserRexNodeConverter(cluster, srcRel.getRowType(), relToHiveColNameCalcitePosMap.get(srcRel), 0, false, funcConverter);
int numSrcFields = srcRel.getRowType().getFieldCount();
// handle cluster by
if (clusterAST != null) {
if (sortAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and SORT BY");
}
if (distAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and DISTRIBUTE BY");
                }
                for (Node node : clusterAST.getChildren()) {
HiveParserASTNode v203 = ((HiveParserASTNode) (node));
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(v203, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(v203);
if (childNodeDesc == null) {
throw new SemanticException("Invalid CLUSTER BY expression: " + v203.toString());}
RexNode childRexNode = v200.convert(childNodeDesc).accept(funcConverter);
int v207;
                    if (childRexNode instanceof RexInputRef) {
v207 = ((RexInputRef) (childRexNode)).getIndex();
} else {
v207 = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(v203, childNodeDesc.getTypeInfo()));
}
// cluster by doesn't support specifying ASC/DESC or NULLS FIRST/LAST, so use
// default values
fieldCollations.add(new RelFieldCollation(v207, Direction.ASCENDING, NullDirection.FIRST));
distKeys.add(v207);
}
} else {
// handle sort by
if (sortAST != null) {
for (Node node : sortAST.getChildren()) {
HiveParserASTNode childAST = ((HiveParserASTNode) (node));
HiveParserASTNode nullOrderAST = ((HiveParserASTNode) (childAST.getChild(0)));
HiveParserASTNode fieldAST = ((HiveParserASTNode) (nullOrderAST.getChild(0)));
                        Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(fieldAST, inputRR);
ExprNodeDesc fieldNodeDesc = astToExprNodeDesc.get(fieldAST);
if (fieldNodeDesc == null) {
throw new SemanticException("Invalid sort by expression: " + fieldAST.toString());
}
RexNode childRexNode = v200.convert(fieldNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) (childRexNode)).getIndex();
} else {
                            fieldIndex = numSrcFields + virtualCols.size();
                            virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, fieldNodeDesc.getTypeInfo()));
}
RelFieldCollation.Direction direction = Direction.DESCENDING;
if (childAST.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
direction = Direction.ASCENDING;
}
RelFieldCollation.NullDirection nullOrder;
if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_FIRST) {
nullOrder = NullDirection.FIRST;
} else if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_LAST) {
                            nullOrder = NullDirection.LAST;
} else {
throw new SemanticException("Unexpected null ordering option: " + nullOrderAST.getType());
}
fieldCollations.add(new RelFieldCollation(fieldIndex, direction, nullOrder));
}
}
// handle distribute by
if (distAST != null) {
for (Node node : distAST.getChildren()) {
                        HiveParserASTNode childAST = ((HiveParserASTNode) (node));
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc =
semanticAnalyzer.genAllExprNodeDesc(childAST, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(childAST);
if (childNodeDesc == null) {
throw new SemanticException("Invalid DISTRIBUTE BY expression: " + childAST.toString());
}
RexNode childRexNode = v200.convert(childNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) (childRexNode)).getIndex();
} else {
                            fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, childNodeDesc.getTypeInfo()));
}
distKeys.add(fieldIndex);
}
}
}
Preconditions.checkState((!fieldCollations.isEmpty()) || (!distKeys.isEmpty()), "Both field collations and dist keys are empty");
// add child SEL if needed
RelNode realInput = srcRel;
HiveParserRowResolver outputRR = new HiveParserRowResolver();
if (!virtualCols.isEmpty()) {
List<RexNode> originalInputRefs = srcRel.getRowType().getFieldList().stream().map(input -> new RexInputRef(input.getIndex(), input.getType())).collect(Collectors.toList());
HiveParserRowResolver addedProjectRR = new HiveParserRowResolver();
if (!HiveParserRowResolver.add(addedProjectRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
                }
                int vColPos = inputRR.getRowSchema().getSignature().size();
for (Pair<HiveParserASTNode, TypeInfo> astTypePair : vcASTAndType) {
addedProjectRR.putExpression(astTypePair.getKey(), new ColumnInfo(getColumnInternalName(vColPos), astTypePair.getValue(), null, false));
vColPos++;
}
realInput = genSelectRelNode(CompositeList.of(originalInputRefs, virtualCols), addedProjectRR, srcRel);
if (outermostOB) {
if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
                } else if (!HiveParserRowResolver.add(outputRR, addedProjectRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
originalInput = srcRel;
} else if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
// create rel node
RelTraitSet traitSet = cluster.traitSet();
RelCollation canonizedCollation = traitSet.canonize(RelCollations.of(fieldCollations));
res = LogicalDistribution.create(realInput,
canonizedCollation, distKeys);
Map<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR);
relToRowResolver.put(res, outputRR);
relToHiveColNameCalcitePosMap.put(res, hiveColNameCalcitePosMap);
}
return new Pair<>(res, originalInput);
} | 3.26 |
flink_HiveParserCalcitePlanner_genSelectLogicalPlan_rdh | // NOTE: there can only be one select clause since we don't handle multi destination insert.
private RelNode genSelectLogicalPlan(HiveParserQB qb, RelNode srcRel, RelNode starSrcRel, Map<String, Integer> outerNameToPos, HiveParserRowResolver outerRR) throws SemanticException {
// 0. Generate a Select Node for Windowing
// Exclude the newly-generated select columns from */etc. resolution.
HashSet<ColumnInfo> excludedColumns = new HashSet<>();
RelNode selForWindow = genSelectForWindowing(qb, srcRel, excludedColumns);
srcRel = (selForWindow == null) ? srcRel : selForWindow;
ArrayList<ExprNodeDesc> exprNodeDescs = new ArrayList<>();
HiveParserASTNode v323 = null;
// 1. Get Select Expression List
HiveParserQBParseInfo qbp = qb.getParseInfo();
String selClauseName = qbp.getClauseNames().iterator().next();
HiveParserASTNode selExprList = qbp.getSelForClause(selClauseName);
// make sure if there is subquery it is top level expression
HiveParserSubQueryUtils.checkForTopLevelSubqueries(selExprList);
final boolean cubeRollupGrpSetPresent = ((!qbp.getDestRollups().isEmpty()) || (!qbp.getDestGroupingSets().isEmpty()))
|| (!qbp.getDestCubes().isEmpty());
// 3. Query Hints
int posn = 0;
boolean hintPresent = selExprList.getChild(0).getType() == HiveASTParser.QUERY_HINT;
if (hintPresent) {
posn++;
}
// 4. Bailout if select involves Transform
boolean isInTransform = selExprList.getChild(posn).getChild(0).getType() == HiveASTParser.TOK_TRANSFORM;
if (isInTransform) {
            v323 = ((HiveParserASTNode) (selExprList.getChild(posn).getChild(0)));
        }
// 2.Row resolvers for input, output
HiveParserRowResolver outRR = new HiveParserRowResolver();
// SELECT * or SELECT TRANSFORM(*)
Integer pos = 0;
// TODO: will this also fix windowing? try
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserRowResolver starRR = inputRR;
inputRR.setCheckForAmbiguity(true);
if (starSrcRel != null) {
starRR = relToRowResolver.get(starSrcRel);
}
// 5. Check if select involves UDTF
String udtfTableAlias = null;
SqlOperator udtfOperator = null;
String genericUDTFName = null;
ArrayList<String> udtfColAliases = new ArrayList<>();
HiveParserASTNode expr = ((HiveParserASTNode) (selExprList.getChild(posn).getChild(0)));
int v340 = expr.getType();
if ((v340 == HiveASTParser.TOK_FUNCTION) || (v340 == HiveASTParser.TOK_FUNCTIONSTAR)) {
            String funcName = HiveParserTypeCheckProcFactory.DefaultExprProcessor.getFunctionText(expr, true);
            // we can't just try to get table function here because the operator table throws
// exception if it's not a table function
SqlOperator sqlOperator = HiveParserUtils.getAnySqlOperator(funcName, frameworkConfig.getOperatorTable());
if (HiveParserUtils.isUDTF(sqlOperator)) {
LOG.debug("Found UDTF " + funcName);
udtfOperator = sqlOperator;
genericUDTFName = funcName;
                if (!HiveParserUtils.isNative(sqlOperator)) {
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(((HiveParserASTNode)
(expr.getChild(0))));
}
if (v340 == HiveASTParser.TOK_FUNCTIONSTAR) {
semanticAnalyzer.genColListRegex(".*", null, ((HiveParserASTNode) (expr.getChild(0))),
exprNodeDescs, null, inputRR, starRR, pos, outRR, qb.getAliases(), false);
}
}
}
if (udtfOperator != null) {
// Only support a single expression when it's a UDTF
if (selExprList.getChildCount() > 1) {
throw new SemanticException(generateErrorMessage(((HiveParserASTNode) (selExprList.getChild(1))), ErrorMsg.UDTF_MULTIPLE_EXPR.getMsg()));
}
            HiveParserASTNode selExpr = ((HiveParserASTNode) (selExprList.getChild(posn)));
            // Get the column / table aliases from the expression. Start from 1 as
// 0 is the TOK_FUNCTION
// column names also can be inferred from result of UDTF
for (int i = 1; i < selExpr.getChildCount(); i++) {
HiveParserASTNode selExprChild = ((HiveParserASTNode) (selExpr.getChild(i)));
switch (selExprChild.getType()) {
case HiveASTParser.Identifier :
udtfColAliases.add(unescapeIdentifier(selExprChild.getText().toLowerCase()));
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(selExprChild);
break;
                    case HiveASTParser.TOK_TABALIAS :
                        assert selExprChild.getChildCount() == 1;
                        udtfTableAlias = unescapeIdentifier(selExprChild.getChild(0).getText());
qb.addAlias(udtfTableAlias);
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(((HiveParserASTNode) (selExprChild.getChild(0))));
break;
default :
throw new SemanticException(("Find invalid token type " + selExprChild.getType()) + " in UDTF.");
}
}
LOG.debug("UDTF table alias is " + udtfTableAlias);
LOG.debug("UDTF col aliases are " + udtfColAliases);
}
// 6. Iterate over all expression (after SELECT)
HiveParserASTNode exprList;
        if (isInTransform) {
            exprList = ((HiveParserASTNode) (v323.getChild(0)));
        } else if (udtfOperator != null) {
            exprList = expr;
        } else {
            exprList = selExprList;
}
// For UDTF's, skip the function name to get the expressions
        int startPos = (udtfOperator != null) ? posn + 1 : posn;
if (isInTransform) {
startPos = 0;
}
// track the col aliases provided by user
List<String> colAliases = new ArrayList<>();
for (int i = startPos; i < exprList.getChildCount(); ++i) {
colAliases.add(null);
// 6.1 child can be EXPR AS ALIAS, or EXPR.
HiveParserASTNode child = ((HiveParserASTNode) (exprList.getChild(i)));
boolean hasAsClause = (child.getChildCount() == 2) && (!isInTransform);
            boolean isWindowSpec = (child.getChildCount() == 3) && (child.getChild(2).getType() == HiveASTParser.TOK_WINDOWSPEC);
// 6.2 EXPR AS (ALIAS,...) parses, but is only allowed for UDTF's
// This check is not needed and invalid when there is a transform b/c the AST's are
// slightly different.
if ((((!isWindowSpec) && (!isInTransform)) && (udtfOperator == null)) && (child.getChildCount() > 2)) {
throw new SemanticException(generateErrorMessage(((HiveParserASTNode) (child.getChild(2))), ErrorMsg.INVALID_AS.getMsg()));
}
String tabAlias;
String colAlias;
            if (isInTransform || (udtfOperator != null)) {
tabAlias = null;
colAlias = semanticAnalyzer.getAutogenColAliasPrfxLbl() + i;
expr = child;
} else {
// 6.3 Get rid of TOK_SELEXPR
expr = ((HiveParserASTNode) (child.getChild(0)));
String[] colRef = HiveParserUtils.getColAlias(child, semanticAnalyzer.getAutogenColAliasPrfxLbl(), inputRR, semanticAnalyzer.autogenColAliasPrfxIncludeFuncName(), i);
tabAlias = colRef[0];
colAlias = colRef[1];
if (hasAsClause) {
colAliases.set(colAliases.size() - 1, colAlias);
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(((HiveParserASTNode) (child.getChild(1))));
}
}
Map<HiveParserASTNode, RelNode> subQueryToRelNode = new HashMap<>();
boolean isSubQuery = genSubQueryRelNode(qb, expr, srcRel, false, subQueryToRelNode);
            if (isSubQuery) {
ExprNodeDesc subQueryDesc = semanticAnalyzer.genExprNodeDesc(expr, relToRowResolver.get(srcRel), outerRR, subQueryToRelNode, false);
exprNodeDescs.add(subQueryDesc);
ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), subQueryDesc.getWritableObjectInspector(), tabAlias, false);
if (!outRR.putWithCheck(tabAlias, colAlias, null, colInfo)) {
throw new SemanticException(((((("Cannot add column to RR: " + tabAlias) + ".") + colAlias) + " => ") + colInfo) + " due to duplication, see previous warnings");
}
            } else if (expr.getType() == HiveASTParser.TOK_ALLCOLREF) {
                // 6.4 Build ExprNode corresponding to columns
                pos = /* don't require uniqueness */ semanticAnalyzer.genColListRegex(".*", expr.getChildCount() == 0 ? null : HiveParserBaseSemanticAnalyzer.getUnescapedName(((HiveParserASTNode) (expr.getChild(0)))).toLowerCase(), expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR, qb.getAliases(), false);
} else if ((((expr.getType() == HiveASTParser.TOK_TABLE_OR_COL) && (!hasAsClause)) && (!inputRR.getIsExprResolver())) && HiveParserUtils.isRegex(unescapeIdentifier(expr.getChild(0).getText()), semanticAnalyzer.getConf())) {
// In case the expression is a regex COL. This can only happen without AS clause
// We don't allow this for ExprResolver - the Group By case
pos = semanticAnalyzer.genColListRegex(unescapeIdentifier(expr.getChild(0).getText()), null, expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR, qb.getAliases(), true);
} else if ((((((expr.getType() == HiveASTParser.DOT) && (expr.getChild(0).getType() ==
HiveASTParser.TOK_TABLE_OR_COL)) && inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()))) && (!hasAsClause)) && (!inputRR.getIsExprResolver())) && HiveParserUtils.isRegex(unescapeIdentifier(expr.getChild(1).getText()), semanticAnalyzer.getConf())) {
// In case the expression is TABLE.COL (col can be regex). This can only happen
// without AS clause
// We don't allow this for ExprResolver - the Group By case
pos = /* don't require uniqueness */
semanticAnalyzer.genColListRegex(unescapeIdentifier(expr.getChild(1).getText()), unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()), expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR, qb.getAliases(), false);
}
else if (HiveASTParseUtils.containsTokenOfType(expr, HiveASTParser.TOK_FUNCTIONDI) && (!(srcRel instanceof Aggregate))) {
// Likely a malformed query eg, select hash(distinct c1) from t1;
throw new SemanticException("Distinct without an aggregation.");
} else {
// Case when this is an expression
HiveParserTypeCheckCtx typeCheckCtx = new HiveParserTypeCheckCtx(inputRR, true, true, frameworkConfig, cluster);
// We allow stateful functions in the SELECT list (but nowhere else)
typeCheckCtx.setAllowStatefulFunctions(true);
if (!qbp.getDestToGroupBy().isEmpty()) {
// Special handling of grouping function
expr = rewriteGroupingFunctionAST(getGroupByForClause(qbp, selClauseName), expr, !cubeRollupGrpSetPresent);
}
ExprNodeDesc exprDesc = semanticAnalyzer.genExprNodeDesc(expr, inputRR, typeCheckCtx);
String recommended = semanticAnalyzer.recommendName(exprDesc, colAlias);
if ((recommended != null) && (outRR.get(null, recommended) == null)) {
colAlias = recommended;
}
exprNodeDescs.add(exprDesc);
ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), exprDesc.getWritableObjectInspector(), tabAlias, false);
colInfo.setSkewedCol((exprDesc instanceof ExprNodeColumnDesc) && ((ExprNodeColumnDesc) (exprDesc)).isSkewedCol());
outRR.put(tabAlias, colAlias, colInfo);
if (exprDesc instanceof ExprNodeColumnDesc) {
ExprNodeColumnDesc colExp = ((ExprNodeColumnDesc) (exprDesc));
                    String[] altMapping = inputRR.getAlternateMappings(colExp.getColumn());
                    if (altMapping != null) {
// TODO: this can overwrite the mapping. Should this be allowed?
outRR.put(altMapping[0], altMapping[1], colInfo);
                    }
                }
pos++;
}
}
// 7. Convert Hive projections to Calcite
        List<RexNode> calciteColLst = new ArrayList<>();
HiveParserRexNodeConverter rexNodeConverter = new HiveParserRexNodeConverter(cluster, srcRel.getRowType(), outerNameToPos, buildHiveColNameToInputPosMap(exprNodeDescs, inputRR), relToRowResolver.get(srcRel), outerRR, 0, false, subqueryId, funcConverter);
        for (ExprNodeDesc colExpr : exprNodeDescs) {
RexNode calciteCol = rexNodeConverter.convert(colExpr);
calciteCol = convertNullLiteral(calciteCol).accept(funcConverter);
calciteColLst.add(calciteCol);
}
// 8. Build Calcite Rel
RelNode res;
if (isInTransform) {
HiveParserScriptTransformHelper transformHelper = new HiveParserScriptTransformHelper(cluster, relToRowResolver, relToHiveColNameCalcitePosMap, hiveConf);
res = transformHelper.genScriptPlan(v323, qb, calciteColLst, srcRel);
} else if (udtfOperator != null) {
// The basic idea for CBO support of UDTF is to treat UDTF as a special project.
res = genUDTFPlan(udtfOperator, genericUDTFName, udtfTableAlias, udtfColAliases, qb, calciteColLst, outRR.getColumnInfos(), srcRel, true, false);
} else // If it's a subquery and the project is identity, we skip creating this project.
// This is to handle an issue with calcite SubQueryRemoveRule. The rule checks col
// uniqueness by calling
// RelMetadataQuery::areColumnsUnique with an empty col set, which always returns null
// for a project
// and thus introduces unnecessary agg node.
if (HiveParserUtils.isIdentityProject(srcRel, calciteColLst, colAliases) && (outerRR != null)) {
res = srcRel;
} else {
res = genSelectRelNode(calciteColLst, outRR, srcRel);
}
// 9. Handle select distinct as GBY if there exist windowing functions
if ((selForWindow != null) && (selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI)) {
ImmutableBitSet groupSet =
ImmutableBitSet.range(res.getRowType().getFieldList().size());
res = LogicalAggregate.create(res, ImmutableList.of(),
groupSet, Collections.emptyList(), Collections.emptyList());
HiveParserRowResolver groupByOutputRowResolver = new HiveParserRowResolver();
for (int i = 0; i < outRR.getColumnInfos().size(); i++) {
ColumnInfo colInfo = outRR.getColumnInfos().get(i);
                ColumnInfo newColInfo = new ColumnInfo(colInfo.getInternalName(), colInfo.getType(), colInfo.getTabAlias(), colInfo.getIsVirtualCol());
                groupByOutputRowResolver.put(colInfo.getTabAlias(), colInfo.getAlias(), newColInfo);
}
relToHiveColNameCalcitePosMap.put(res, buildHiveToCalciteColumnMap(groupByOutputRowResolver));
            relToRowResolver.put(res, groupByOutputRowResolver);
        }
inputRR.setCheckForAmbiguity(false);
        if ((selForWindow != null) && (res instanceof Project)) {
// if exist windowing expression, trim the project node with window
res = HiveParserProjectWindowTrimmer.trimProjectWindow(((Project) (res)), ((Project) (selForWindow)), relToRowResolver, relToHiveColNameCalcitePosMap);
}
return res;
} | 3.26 |
flink_StateMachineExample_main_rdh | /**
* Main entry point for the program.
*
* @param args
* The command line arguments.
*/
    public static void main(String[] args) throws Exception {
// ---- print some usage help ----
System.out.println("Usage with built-in data generator: StateMachineExample [--error-rate <probability-of-invalid-transition>] [--sleep <sleep-per-record-in-ms> | --rps <records-per-second>]");
System.out.println("Usage with Kafka: StateMachineExample --kafka-topic <topic> [--brokers <brokers>]");
System.out.println("Options for both the above setups: ");
System.out.println("\t[--backend <hashmap|rocks>]");
System.out.println("\t[--checkpoint-dir <filepath>]");
System.out.println("\t[--incremental-checkpoints <true|false>]");
System.out.println("\t[--output <filepath> OR null for stdout]");
System.out.println();
// ---- determine whether to use the built-in source, or read from Kafka ----
final DataStream<Event> events;
final ParameterTool params = ParameterTool.fromArgs(args);
// create the environment to create streams and configure execution
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(2000L);
final String stateBackend = params.get("backend", "memory");
if ("hashmap".equals(stateBackend)) {
final String checkpointDir = params.get("checkpoint-dir"); env.setStateBackend(new HashMapStateBackend());
env.getCheckpointConfig().setCheckpointStorage(checkpointDir);
} else if ("rocks".equals(stateBackend)) {
final String checkpointDir = params.get("checkpoint-dir");
boolean incrementalCheckpoints = params.getBoolean("incremental-checkpoints", false);
            env.setStateBackend(new EmbeddedRocksDBStateBackend(incrementalCheckpoints));
            env.getCheckpointConfig().setCheckpointStorage(checkpointDir);
}
if (params.has("kafka-topic")) {
// set up the Kafka reader
String kafkaTopic = params.get("kafka-topic");
String brokers = params.get("brokers", "localhost:9092");System.out.printf("Reading from kafka topic %s @ %s\n", kafkaTopic, brokers);
System.out.println();
KafkaSource<Event> source = KafkaSource.<Event>builder().setBootstrapServers(brokers).setGroupId("stateMachineExample").setTopics(kafkaTopic).setDeserializer(KafkaRecordDeserializationSchema.valueOnly(new EventDeSerializationSchema())).setStartingOffsets(OffsetsInitializer.latest()).build();
            events = env.fromSource(source, WatermarkStrategy.noWatermarks(), "StateMachineExampleSource");
        } else {
final double errorRate = params.getDouble("error-rate", 0.0);
final int sleep = params.getInt("sleep", 1);
final double recordsPerSecond = params.getDouble("rps", rpsFromSleep(sleep, env.getParallelism()));
System.out.printf("Using standalone source with error rate %f and %.1f records per second\n", errorRate, recordsPerSecond);
System.out.println();
GeneratorFunction<Long, Event> generatorFunction = new EventsGeneratorFunction(errorRate);
DataGeneratorSource<Event> eventGeneratorSource = new DataGeneratorSource<>(generatorFunction, Long.MAX_VALUE, RateLimiterStrategy.perSecond(recordsPerSecond), TypeInformation.of(Event.class));
events = env.fromSource(eventGeneratorSource, WatermarkStrategy.noWatermarks(), "Events Generator Source");
}
// ---- main program ----
final String outputFile = params.get("output");
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
DataStream<Alert> alerts = // the function that evaluates the state machine over the sequence of events
// partition on the address to make sure equal addresses
// end up in the same state machine flatMap function
events.keyBy(Event::sourceAddress).flatMap(new StateMachineMapper());
// output the alerts to std-out
if (outputFile == null) {
alerts.print();
} else {
alerts.sinkTo(FileSink.<Alert>forRowFormat(new Path(outputFile), new SimpleStringEncoder<>()).withRollingPolicy(DefaultRollingPolicy.builder().withMaxPartSize(MemorySize.ofMebiBytes(1)).withRolloverInterval(Duration.ofSeconds(10)).build()).build()).setParallelism(1).name("output");
}
// trigger program execution
env.execute("State machine job");
} | 3.26 |
flink_StateMachineExample_rpsFromSleep_rdh | // Used for backwards compatibility to convert legacy 'sleep' parameter to records per second.
private static double rpsFromSleep(int sleep, int parallelism) {
return (1000.0 / sleep) * parallelism;
} | 3.26 |
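For example, the legacy settings sleep = 1 (ms per record) with parallelism = 4 translate to (1000.0 / 1) * 4 = 4000 records per second across all source subtasks.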
flink_SocketStreamIterator_getPort_rdh | // ------------------------------------------------------------------------
// properties
// ------------------------------------------------------------------------
/**
* Returns the port on which the iterator is getting the data. (Used internally.)
*
* @return The port
*/
public int getPort() {
return f0.getLocalPort();
} | 3.26 |
flink_SocketStreamIterator_hasNext_rdh | // ------------------------------------------------------------------------
// iterator semantics
// ------------------------------------------------------------------------
/**
* Returns true if the DataStream has more elements. (Note: blocks if there will be more
* elements, but they are not available yet.)
*
* @return true if the DataStream has more elements
*/
@Override
public boolean hasNext() {
if (next == null) {
try {
next = readNextFromStream();
} catch (Exception e) {
throw new RuntimeException("Failed to receive next element: " + e.getMessage(), e);
}
}
return next != null;
} | 3.26 |
flink_SocketStreamIterator_next_rdh | /**
* Returns the next element of the DataStream. (Blocks if it is not available yet.)
*
* @return The element
* @throws NoSuchElementException
* if the stream has already ended
*/
@Override
public T next() {
if (hasNext()) {
T current = next;
next = null;
return current;
} else {
throw new NoSuchElementException();
}
} | 3.26 |
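Together, hasNext() and next() implement a common prefetch pattern: hasNext() eagerly reads the next element and caches it, next() hands the cached element over and clears the cache. A minimal standalone illustration of that pattern (the data source here is a plain array, standing in for the socket stream):

```java
import java.util.Iterator;
import java.util.NoSuchElementException;

public class PrefetchingIterator implements Iterator<Integer> {
    private final int[] source = {1, 2, 3}; // stand-in for the socket stream
    private int position = 0;
    private Integer next;                   // cached element; null means "not fetched yet"

    @Override
    public boolean hasNext() {
        if (next == null) {
            // "read next from stream": returns null when the source is exhausted
            next = position < source.length ? source[position++] : null;
        }
        return next != null;
    }

    @Override
    public Integer next() {
        if (hasNext()) {
            Integer current = next;
            next = null;
            return current;
        }
        throw new NoSuchElementException();
    }

    public static void main(String[] args) {
        PrefetchingIterator it = new PrefetchingIterator();
        while (it.hasNext()) {
            System.out.println(it.next());
        }
    }
}
```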
flink_ClosureCleaner_clean_rdh | /**
* Tries to clean the closure of the given object, if the object is a non-static inner class.
*
* @param func
* The object whose closure should be cleaned.
* @param level
* the clean up level.
* @param checkSerializable
* Flag to indicate whether serializability should be checked after the
* closure cleaning attempt.
* @throws InvalidProgramException
* Thrown, if 'checkSerializable' is true, and the object was
* not serializable after the closure cleaning.
* @throws RuntimeException
* A RuntimeException may be thrown, if the code of the class could not
* be loaded, in order to process during the closure cleaning.
*/
public static void clean(Object func, ExecutionConfig.ClosureCleanerLevel level, boolean checkSerializable) {
        clean(func, level, checkSerializable, Collections.newSetFromMap(new IdentityHashMap<>()));
    } | 3.26 |
flink_HardwareDescription_extractFromSystem_rdh | // --------------------------------------------------------------------------------------------
// Factory
// --------------------------------------------------------------------------------------------
public static HardwareDescription extractFromSystem(long managedMemory) {
final int numberOfCPUCores = Hardware.getNumberCPUCores();
final long sizeOfJvmHeap = Runtime.getRuntime().maxMemory();
final long sizeOfPhysicalMemory = Hardware.getSizeOfPhysicalMemory();
return new HardwareDescription(numberOfCPUCores, sizeOfPhysicalMemory, sizeOfJvmHeap, managedMemory);
} | 3.26 |
flink_HardwareDescription_getSizeOfPhysicalMemory_rdh | /**
* Returns the size of physical memory in bytes available on the compute node.
*
* @return the size of physical memory in bytes available on the compute node
*/
public long getSizeOfPhysicalMemory() {
return this.sizeOfPhysicalMemory;
} | 3.26 |
flink_HardwareDescription_getSizeOfManagedMemory_rdh | /**
* Returns the size of the memory managed by the system for caching, hashing, sorting, ...
*
* @return The size of the memory managed by the system.
*/
public long getSizeOfManagedMemory() {
return this.sizeOfManagedMemory;
} | 3.26 |
flink_HardwareDescription_equals_rdh | // --------------------------------------------------------------------------------------------
// Utils
// --------------------------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
        if ((o == null) || (getClass() != o.getClass())) {
            return false;
}
HardwareDescription that = ((HardwareDescription) (o));
return (((numberOfCPUCores == that.numberOfCPUCores) && (sizeOfPhysicalMemory == that.sizeOfPhysicalMemory)) && (sizeOfJvmHeap == that.sizeOfJvmHeap)) && (sizeOfManagedMemory == that.sizeOfManagedMemory);
} | 3.26 |
flink_HardwareDescription_getNumberOfCPUCores_rdh | /**
* Returns the number of CPU cores available to the JVM on the compute node.
*
* @return the number of CPU cores available to the JVM on the compute node
*/
public int getNumberOfCPUCores() {
return this.numberOfCPUCores;
} | 3.26 |
flink_AbstractRocksDBState_clear_rdh | // ------------------------------------------------------------------------
@Override
public void clear() {
try {
backend.db.delete(columnFamily, writeOptions, serializeCurrentKeyWithGroupAndNamespace());
} catch (RocksDBException e) {
throw new FlinkRuntimeException("Error while removing entry from RocksDB", e);
}
} | 3.26 |
flink_LastValueWithRetractAggFunction_getArgumentDataTypes_rdh | // --------------------------------------------------------------------------------------------
// Planning
// --------------------------------------------------------------------------------------------
@Override
    public List<DataType> getArgumentDataTypes() {
return Collections.singletonList(valueDataType);
} | 3.26 |
flink_ReducingStateDescriptor_m0_rdh | /**
* Returns the reduce function to be used for the reducing state.
*/
    public ReduceFunction<T> m0() {
        return reduceFunction;
} | 3.26 |
flink_NoFetchingInput_require_rdh | /**
* Require makes sure that at least required number of bytes are kept in the buffer. If not,
* then it will load exactly the difference between required and currently available number of
* bytes. Thus, it will only load the data which is required and never prefetch data.
*
* @param required
* the number of bytes being available in the buffer
* @return the number of bytes remaining, which is equal to required
* @throws KryoException
*/
@Override
protected int require(int required) throws KryoException {
if (required > capacity) {
throw new KryoException(((("Buffer too small: capacity: " + capacity) + ", ") + "required: ") + required);
}
position = 0;
int bytesRead = 0;
int count;
while (true) {
count = fill(buffer, bytesRead, required - bytesRead);
if (count == (-1)) {
throw new KryoException(new EOFException("No more bytes left."));
}
bytesRead += count;
if (bytesRead == required) {
break;
}
}
limit = required;
return required;
} | 3.26 |
flink_PekkoRpcActor_start_rdh | // Internal state machine
// ---------------------------------------------------------------------------
default State start(PekkoRpcActor<?> pekkoRpcActor, ClassLoader flinkClassLoader) {
throw new RpcInvalidStateException(invalidStateTransitionMessage(StartedState.f0));
} | 3.26 |
flink_PekkoRpcActor_lookupRpcMethod_rdh | /**
* Look up the rpc method on the given {@link RpcEndpoint} instance.
*
* @param methodName
* Name of the method
* @param parameterTypes
* Parameter types of the method
* @return Method of the rpc endpoint
* @throws NoSuchMethodException
* Thrown if the method with the given name and parameter types
* cannot be found at the rpc endpoint
*/
private Method lookupRpcMethod(final String methodName, final Class<?>[] parameterTypes) throws NoSuchMethodException {
return rpcEndpoint.getClass().getMethod(methodName, parameterTypes);
} | 3.26 |
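The lookup is plain Java reflection. A standalone sketch of resolving a method by name and parameter types and invoking it (the EchoEndpoint class and its echo method are invented for the example):

```java
import java.lang.reflect.Method;

public class RpcMethodLookupSketch {

    // Invented stand-in for an RpcEndpoint implementation.
    static class EchoEndpoint {
        public String echo(String message, int times) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < times; i++) {
                sb.append(message);
            }
            return sb.toString();
        }
    }

    public static void main(String[] args) throws Exception {
        EchoEndpoint endpoint = new EchoEndpoint();

        // Resolve the method the same way lookupRpcMethod does: by name and parameter types.
        Method method = endpoint.getClass().getMethod("echo", String.class, int.class);

        // Invoke it with matching arguments.
        Object result = method.invoke(endpoint, "ab", 3);
        System.out.println(result); // ababab
    }
}
```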
flink_PekkoRpcActor_stop_rdh | /**
* Stop the actor immediately.
*/
private void stop(RpcEndpointTerminationResult rpcEndpointTerminationResult) {
        if (rpcEndpointStopped.compareAndSet(false, true)) {
this.rpcEndpointTerminationResult = rpcEndpointTerminationResult;
getContext().stop(getSelf());
}
} | 3.26 |
flink_PekkoRpcActor_envelopeSelfMessage_rdh | /**
* Hook to envelope self messages.
*
* @param message
* to envelope
* @return enveloped message
*/
protected Object envelopeSelfMessage(Object message) {
return message;
} | 3.26 |
flink_PekkoRpcActor_handleCallAsync_rdh | /**
* Handle asynchronous {@link Callable}. This method simply executes the given {@link Callable}
* in the context of the actor thread.
*
* @param callAsync
* Call async message
*/
private void handleCallAsync(CallAsync callAsync) {
try {
Object result = runWithContextClassLoader(() -> callAsync.getCallable().call(), flinkClassLoader);
getSender().tell(new Status.Success(result), getSelf());
} catch (Throwable e) {
getSender().tell(new Status.Failure(e), getSelf());
}
} | 3.26 |
flink_PekkoRpcActor_handleRunAsync_rdh | /**
* Handle asynchronous {@link Runnable}. This method simply executes the given {@link Runnable}
* in the context of the actor thread.
*
* @param runAsync
* Run async message
*/
    private void handleRunAsync(RunAsync runAsync) {
final long timeToRun = runAsync.getTimeNanos();
final long delayNanos;
if ((timeToRun == 0) || ((delayNanos = timeToRun - System.nanoTime()) <= 0)) {
// run immediately
try {
runWithContextClassLoader(() -> runAsync.getRunnable().run(), flinkClassLoader);
} catch (Throwable t) {
log.error("Caught exception while executing runnable in main thread.", t);
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
}
} else {
// schedule for later. send a new message after the delay, which will then be
// immediately executed
FiniteDuration delay = new FiniteDuration(delayNanos, TimeUnit.NANOSECONDS);
            RunAsync message = new RunAsync(runAsync.getRunnable(), timeToRun);
final Object v21 = envelopeSelfMessage(message);
getContext().system().scheduler().scheduleOnce(delay, getSelf(), v21, getContext().dispatcher(), ActorRef.noSender());
}
} | 3.26 |
flink_PekkoRpcActor_sendErrorIfSender_rdh | /**
* Send throwable to sender if the sender is specified.
*
* @param throwable
* to send to the sender
*/
protected void sendErrorIfSender(Throwable throwable) {
if (!getSender().equals(ActorRef.noSender())) {
getSender().tell(new Status.Failure(throwable), getSelf());
}
} | 3.26 |
flink_PekkoRpcActor_handleRpcInvocation_rdh | /**
* Handle rpc invocations by looking up the rpc method on the rpc endpoint and calling this
* method with the provided method arguments. If the method has a return value, it is returned
* to the sender of the call.
*
* @param rpcInvocation
* Rpc invocation message
*/
private void handleRpcInvocation(RpcInvocation rpcInvocation) {
        Method rpcMethod = null;
        try {
            String methodName = rpcInvocation.getMethodName();
Class<?>[] parameterTypes = rpcInvocation.getParameterTypes();
rpcMethod = lookupRpcMethod(methodName, parameterTypes);
} catch (final NoSuchMethodException e) {
log.error("Could not find rpc method for rpc invocation.", e);
RpcConnectionException v4 = new RpcConnectionException("Could not find rpc method for rpc invocation.", e);
getSender().tell(new Status.Failure(v4), getSelf());
}
if (rpcMethod != null) {
try {
// this supports declaration of anonymous classes
                rpcMethod.setAccessible(true);
                final Method capturedRpcMethod = rpcMethod;
if (rpcMethod.getReturnType().equals(Void.TYPE)) {
// No return value to send back
runWithContextClassLoader(() -> capturedRpcMethod.invoke(rpcEndpoint, rpcInvocation.getArgs()), flinkClassLoader);
} else {
final Object result;
try {
result = runWithContextClassLoader(() -> capturedRpcMethod.invoke(rpcEndpoint, rpcInvocation.getArgs()), flinkClassLoader);
} catch (InvocationTargetException e) {
log.debug("Reporting back error thrown in remote procedure {}", rpcMethod, e);
// tell the sender about the failure
getSender().tell(new Status.Failure(e.getTargetException()), getSelf());
return;
}
final String methodName = rpcMethod.getName();
final boolean isLocalRpcInvocation = rpcMethod.getAnnotation(Local.class) != null;
if (result instanceof CompletableFuture) {
final CompletableFuture<?> responseFuture = ((CompletableFuture<?>) (result));
sendAsyncResponse(responseFuture, methodName, isLocalRpcInvocation);
} else {
sendSyncResponse(result, methodName, isLocalRpcInvocation);
}
}
} catch (Throwable e) {
log.error("Error while executing remote procedure call {}.", rpcMethod, e);
// tell the sender about the failure
getSender().tell(new Status.Failure(e), getSelf());
}
}
} | 3.26 |
flink_MathUtils_roundUpToPowerOfTwo_rdh | /**
* Round the given number to the next power of two.
*
* @param x
* number to round
* @return x rounded up to the next power of two
*/
public static int roundUpToPowerOfTwo(int x) {
x = x - 1;
x |= x >> 1;
x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
x |= x >> 16;
return x + 1;
} | 3.26 |
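Tracing the bit-smearing for x = 10: subtracting 1 gives 9 (binary 1001), the shifts spread the highest set bit downwards to give 15 (1111), and adding 1 yields 16, the next power of two; inputs that are already powers of two come back unchanged. A quick self-contained check (the method body is copied locally so the snippet runs on its own):

```java
public class RoundUpSketch {

    // Local copy of the bit-smearing trick from the snippet above.
    static int roundUpToPowerOfTwo(int x) {
        x = x - 1;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return x + 1;
    }

    public static void main(String[] args) {
        System.out.println(roundUpToPowerOfTwo(10)); // 16
        System.out.println(roundUpToPowerOfTwo(16)); // 16
        System.out.println(roundUpToPowerOfTwo(17)); // 32
    }
}
```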
flink_MathUtils_isPowerOf2_rdh | /**
* Checks whether the given value is a power of two.
*
* @param value
* The value to check.
* @return True, if the value is a power of two, false otherwise.
*/
public static boolean isPowerOf2(long value) {
return (value & (value - 1)) == 0;
} | 3.26 |
flink_MathUtils_log2floor_rdh | /**
* Computes the logarithm of the given value to the base of 2, rounded down. It corresponds to
* the position of the highest non-zero bit. The position is counted, starting with 0 from the
* least significant bit to the most significant bit. For example, <code>log2floor(16) = 4
* </code>, and <code>log2floor(10) = 3</code>.
*
* @param value
* The value to compute the logarithm for.
* @return The logarithm (rounded down) to the base of 2.
* @throws ArithmeticException
* Thrown, if the given value is zero.
*/
    public static int log2floor(int value) throws ArithmeticException {
if (value == 0) {
throw new ArithmeticException("Logarithm of zero is undefined.");
}
return 31 - Integer.numberOfLeadingZeros(value);
} | 3.26 |
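Integer.numberOfLeadingZeros counts the zero bits above the highest set bit, so 31 minus that count is the position of the leading one, i.e. the floored base-2 logarithm. A hedged sketch reproducing the documented values (assuming org.apache.flink.util.MathUtils on the classpath; names illustrative):

import org.apache.flink.util.MathUtils;

public class Log2FloorDemo {
    public static void main(String[] args) {
        System.out.println(MathUtils.log2floor(16)); // 4
        System.out.println(MathUtils.log2floor(10)); // 3 (highest set bit of 0b1010)
        System.out.println(MathUtils.log2floor(1));  // 0
    }
}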
flink_MathUtils_log2strict_rdh | /**
* Computes the logarithm of the given value to the base of 2. This method throws an error, if
* the given argument is not a power of 2.
*
* @param value
* The value to compute the logarithm for.
* @return The logarithm to the base of 2.
* @throws ArithmeticException
* Thrown, if the given value is zero.
* @throws IllegalArgumentException
* Thrown, if the given value is not a power of two.
*/
public static int log2strict(int value) throws ArithmeticException, IllegalArgumentException {
if (value == 0) {
throw new ArithmeticException("Logarithm of zero is undefined.");
}
        if ((value & (value - 1)) != 0) {
            throw new IllegalArgumentException("The given value " + value + " is not a power of two.");
}
return 31 - Integer.numberOfLeadingZeros(value);
} | 3.26 |
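The strict variant returns the same bit position as the floored version but refuses inputs that are not an exact power of two. A brief hedged sketch of the failure mode (same classpath assumption; names illustrative):

import org.apache.flink.util.MathUtils;

public class Log2StrictDemo {
    public static void main(String[] args) {
        System.out.println(MathUtils.log2strict(1024)); // 10
        try {
            MathUtils.log2strict(10); // not a power of two
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}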
flink_MathUtils_longToIntWithBitMixing_rdh | /**
* Pseudo-randomly maps a long (64-bit) to an integer (32-bit) using some bit-mixing for better
* distribution.
*
* @param in
 * the long (64-bit) input.
* @return the bit-mixed int (32-bit) output
*/
public static int longToIntWithBitMixing(long in) {
in = (in ^ (in >>> 30)) * 0xbf58476d1ce4e5b9L;
        in = (in ^ (in >>> 27)) * 0x94d049bb133111ebL;
in = in ^ (in >>> 31);
return ((int) (in));
} | 3.26 |
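The shift-xor-multiply rounds use the same constants as the SplitMix64 finalizer, so all 64 input bits influence the low 32 bits before the narrowing cast. A hedged sketch of folding a long key into a small bucket index (key and bucket count are made up for illustration; classpath assumption as above):

import org.apache.flink.util.MathUtils;

public class LongKeyBucketingDemo {
    public static void main(String[] args) {
        long key = 42_000_000_007L;            // illustrative 64-bit key
        int numBuckets = 128;                  // illustrative power-of-two bucket count
        int mixed = MathUtils.longToIntWithBitMixing(key);
        int bucket = mixed & (numBuckets - 1); // masking works because 128 is a power of two
        System.out.println("bucket = " + bucket);
    }
}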
flink_MathUtils_murmurHash_rdh | /**
* This function hashes an integer value.
*
* <p>It is crucial to use different hash functions to partition data across machines and the
* internal partitioning of data structures. This hash function is intended for partitioning
* across machines.
*
* @param code
* The integer to be hashed.
* @return The non-negative hash code for the integer.
*/
public static int murmurHash(int code) {
code *= 0xcc9e2d51;
code = Integer.rotateLeft(code, 15);
code *= 0x1b873593;
code = Integer.rotateLeft(code, 13);
code = (code * 5) + 0xe6546b64;
code ^= 4;
code = bitMix(code);
if (code >= 0) {
return code;
} else if (code != Integer.MIN_VALUE) {
return -code;
} else {
return 0;
        }
    } | 3.26 |
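For a single int with seed 0 this is effectively the MurmurHash3 32-bit block round plus its finalizer (the code ^= 4 step mixes in the 4-byte length), with the result folded into the non-negative range at the end. A hedged sketch of the intended use, spreading keys over downstream partitions (partition count is illustrative; classpath assumption as above):

import org.apache.flink.util.MathUtils;

public class MurmurPartitionDemo {
    public static void main(String[] args) {
        int parallelism = 12; // illustrative number of partitions
        for (int key : new int[] {7, 8, 9, -42}) {
            int hash = MathUtils.murmurHash(key); // non-negative by construction
            int partition = hash % parallelism;   // safe because hash >= 0
            System.out.println(key + " -> partition " + partition);
        }
    }
}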
flink_MathUtils_flipSignBit_rdh | /**
* Flips the sign bit (most-significant-bit) of the input.
*
* @param in
* the input value.
* @return the input with a flipped sign bit (most-significant-bit).
*/
public static long flipSignBit(long in) {
return in ^ Long.MIN_VALUE;
} | 3.26 |
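Flipping the most significant bit maps signed ordering onto unsigned ordering, the kind of transform byte-wise binary sort keys typically rely on, and applying it twice restores the original value. A small hedged, self-contained sketch that inlines the same XOR rather than calling the method above:

public class FlipSignBitDemo {
    public static void main(String[] args) {
        long a = -5L;
        long b = 3L;
        // a < b as signed longs; after the flip the same order holds under
        // *unsigned* comparison, which is what byte-wise key comparison sees.
        long fa = a ^ Long.MIN_VALUE; // equivalent to flipSignBit(a)
        long fb = b ^ Long.MIN_VALUE; // equivalent to flipSignBit(b)
        System.out.println(Long.compareUnsigned(fa, fb) < 0); // true
        System.out.println((fa ^ Long.MIN_VALUE) == a);       // true: the flip is its own inverse
    }
}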
flink_MathUtils_checkedDownCast_rdh | /**
* Casts the given value to a 32 bit integer, if it can be safely done. If the cast would change
* the numeric value, this method raises an exception.
*
 * <p>This method is a protection in places where one expects to be able to safely cast, but
* where unexpected situations could make the cast unsafe and would cause hidden problems that
* are hard to track down.
*
* @param value
* The value to be cast to an integer.
* @return The given value as an integer.
* @see Math#toIntExact(long)
*/
public static int checkedDownCast(long value) {
int downCast = ((int) (value));
if (downCast != value) {
throw new IllegalArgumentException(("Cannot downcast long value " + value) + " to integer.");
}
return downCast;
} | 3.26 |
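Unlike a plain (int) cast, which silently wraps, the checked variant surfaces the overflow immediately. A hedged sketch (classpath assumption as above; names illustrative):

import org.apache.flink.util.MathUtils;

public class CheckedDownCastDemo {
    public static void main(String[] args) {
        System.out.println(MathUtils.checkedDownCast(123L)); // 123
        try {
            MathUtils.checkedDownCast(1L << 40); // does not fit into 32 bits
        } catch (IllegalArgumentException e) {
            System.out.println("overflow detected: " + e.getMessage());
        }
    }
}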
flink_MathUtils_jenkinsHash_rdh | /**
* This function hashes an integer value. It is adapted from Bob Jenkins' website <a
* href="http://www.burtleburtle.net/bob/hash/integer.html">http://www.burtleburtle.net/bob/hash/integer.html</a>.
* The hash function has the <i>full avalanche</i> property, meaning that every bit of the value
* to be hashed affects every bit of the hash value.
*
* <p>It is crucial to use different hash functions to partition data across machines and the
* internal partitioning of data structures. This hash function is intended for partitioning
* internally in data structures.
*
* @param code
* The integer to be hashed.
* @return The non-negative hash code for the integer.
*/
public static int jenkinsHash(int code) {
code = (code + 0x7ed55d16) + (code << 12);
code = (code ^ 0xc761c23c) ^ (code >>> 19);
        code = (code + 0x165667b1) + (code << 5);
code = (code + 0xd3a2646c) ^ (code << 9);
code = (code + 0xfd7046c5) + (code << 3);
code = (code ^ 0xb55a4f09) ^ (code >>> 16);
        return code >= 0 ? code : -(code + 1);
} | 3.26 |
flink_MathUtils_divideRoundUp_rdh | /**
 * Divides and rounds up to an integer. E.g., divideRoundUp(3, 2) returns 2, divideRoundUp(0, 3)
* returns 0. Note that this method does not support negative values.
*
* @param dividend
* value to be divided by the divisor
* @param divisor
* value by which the dividend is to be divided
* @return the quotient rounding up to integer
*/
    public static int divideRoundUp(int dividend, int divisor) {
        Preconditions.checkArgument(dividend >= 0, "Negative dividend is not supported.");
Preconditions.checkArgument(divisor > 0, "Negative or zero divisor is not supported.");
return dividend == 0 ? 0 : ((dividend - 1) / divisor) + 1;
} | 3.26 |
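The expression ((dividend - 1) / divisor) + 1 is the usual ceiling division for positive operands; the dividend == 0 branch exists only to avoid the -1 underflow. A hedged sketch reproducing the documented values (classpath assumption as above):

import org.apache.flink.util.MathUtils;

public class DivideRoundUpDemo {
    public static void main(String[] args) {
        System.out.println(MathUtils.divideRoundUp(3, 2)); // 2
        System.out.println(MathUtils.divideRoundUp(4, 2)); // 2, exact divisions are not bumped up
        System.out.println(MathUtils.divideRoundUp(0, 3)); // 0
    }
}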
flink_MathUtils_bitMix_rdh | /**
* Bit-mixing for pseudo-randomization of integers (e.g., to guard against bad hash functions).
* Implementation is from Murmur's 32 bit finalizer.
*
* @param in
* the input value
* @return the bit-mixed output value
*/
public static int bitMix(int in) {
in ^= in >>> 16;
in *= 0x85ebca6b;
in ^= in >>> 13;
in *= 0xc2b2ae35;
in ^= in >>> 16;
return in;
} | 3.26 |
flink_CollectSink_open_rdh | /**
* Initialize the connection with the Socket in the server.
*
* @param openContext
* the context.
 */
@Override
public void open(OpenContext openContext) throws Exception {
try {
client = new Socket(hostIp, port);
outputStream = client.getOutputStream();
streamWriter = new DataOutputViewStreamWrapper(outputStream);
} catch (IOException e) {
throw new IOException((("Cannot get back the stream while opening connection to client at " + hostIp.toString()) + ":") + port, e);
}
} | 3.26 |
flink_CollectSink_close_rdh | /**
* Closes the connection with the Socket server.
*/
@Override
public void close() throws Exception {
try {
if (outputStream != null) {
outputStream.flush();
outputStream.close();
}
// first regular attempt to cleanly close. Failing that will escalate
            if (client != null) {
client.close();
}
} catch (Exception e) {
throw new IOException((("Error while closing connection that streams data back to client at " + hostIp.toString()) + ":") + port, e);
} finally {
// if we failed prior to closing the client, close it
if (client != null) {
try {
client.close();
} catch (Throwable t) {
// best effort to close, we do not care about an exception here any more
}
}
}
} | 3.26 |
flink_HadoopFileSystem_getKindForScheme_rdh | /**
* Gets the kind of the file system from its scheme.
*
* <p>Implementation note: Initially, especially within the Flink 1.3.x line (in order to not
* break backwards compatibility), we must only label file systems as 'inconsistent' or as 'not
* proper filesystems' if we are sure about it. Otherwise, we cause regression for example in
* the performance and cleanup handling of checkpoints. For that reason, we initially mark some
* filesystems as 'eventually consistent' or as 'object stores', and leave the others as
* 'consistent file systems'.
*/
static FileSystemKind getKindForScheme(String scheme) {
scheme = scheme.toLowerCase(Locale.US);
        if (scheme.startsWith("s3")
                || scheme.startsWith("emr")
                || scheme.startsWith("oss")
                || scheme.startsWith("wasb")
                || scheme.startsWith("gs")) {
// the Amazon S3 storage or Aliyun OSS storage or Azure Blob Storage
// or Google Cloud Storage
return FileSystemKind.OBJECT_STORE;
} else if (scheme.startsWith("http") || scheme.startsWith("ftp")) {
// file servers instead of file systems
// they might actually be consistent, but we have no hard guarantees
// currently to rely on that
return FileSystemKind.OBJECT_STORE;
} else {
// the remainder should include hdfs, kosmos, ceph, ...
// this also includes federated HDFS (viewfs).
return FileSystemKind.FILE_SYSTEM;
}
} | 3.26 |
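Since the method above is package-private, the sketch below simply reruns the same prefix checks locally to show how a few common URI schemes end up classified; the enum and class here are stand-ins for illustration only, and the scheme list mirrors the code above rather than any exhaustive rule.

import java.util.Locale;

public class SchemeKindDemo {
    enum Kind { OBJECT_STORE, FILE_SYSTEM }

    // Local re-implementation of the classification above, for illustration only.
    static Kind kindForScheme(String scheme) {
        scheme = scheme.toLowerCase(Locale.US);
        if (scheme.startsWith("s3") || scheme.startsWith("emr") || scheme.startsWith("oss")
                || scheme.startsWith("wasb") || scheme.startsWith("gs")
                || scheme.startsWith("http") || scheme.startsWith("ftp")) {
            return Kind.OBJECT_STORE; // object stores and file servers
        }
        return Kind.FILE_SYSTEM; // hdfs, viewfs, ceph, ...
    }

    public static void main(String[] args) {
        for (String s : new String[] {"s3a", "gs", "hdfs", "viewfs", "http"}) {
            System.out.println(s + " -> " + kindForScheme(s));
        }
    }
}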
flink_HadoopFileSystem_getWorkingDirectory_rdh | // ------------------------------------------------------------------------
// file system methods
// ------------------------------------------------------------------------
@Override
public Path getWorkingDirectory() {
return new Path(this.fs.getWorkingDirectory().toUri());
} | 3.26 |
flink_HadoopFileSystem_getHadoopFileSystem_rdh | /**
* Gets the underlying Hadoop FileSystem.
*
* @return The underlying Hadoop FileSystem.
*/
public FileSystem getHadoopFileSystem() {
return this.fs;
} | 3.26 |
flink_HadoopFileSystem_toHadoopPath_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
public static Path toHadoopPath(Path path) {
return new Path(path.toUri());
} | 3.26 |
flink_TwoInputOperator_getInput2Type_rdh | /**
* Gets the type information of the data type of the second input data set. This method returns
* equivalent information as {@code getInput2().getType()}.
*
* @return The second input data type.
*/
public TypeInformation<IN2> getInput2Type() {
        return this.input2.getType();
    } | 3.26 |
flink_TwoInputOperator_getInput1_rdh | /**
* Gets the data set that this operation uses as its first input.
*
* @return The data set that this operation uses as its first input.
*/
public DataSet<IN1> getInput1() {
return this.input1;
} | 3.26 |
flink_DataStreamStateTTLTestProgram_setBackendWithCustomTTLTimeProvider_rdh | /**
* Sets the state backend to a new {@link StubStateBackend} which has a {@link MonotonicTTLTimeProvider}.
*
* @param env
* The {@link StreamExecutionEnvironment} of the job.
*/
private static void setBackendWithCustomTTLTimeProvider(StreamExecutionEnvironment env) {
final MonotonicTTLTimeProvider ttlTimeProvider = new MonotonicTTLTimeProvider();
final StateBackend configuredBackend = env.getStateBackend();
final StateBackend stubBackend = new StubStateBackend(configuredBackend, ttlTimeProvider);
env.setStateBackend(stubBackend);
} | 3.26 |
flink_OperatorCoordinatorCheckpoints_acknowledgeAllCoordinators_rdh | // ------------------------------------------------------------------------
private static void acknowledgeAllCoordinators(PendingCheckpoint checkpoint, Collection<CoordinatorSnapshot> snapshots)
throws CheckpointException {
        for (final CoordinatorSnapshot snapshot : snapshots) {
            final PendingCheckpoint.TaskAcknowledgeResult v6 =
                    checkpoint.acknowledgeCoordinatorState(snapshot.coordinator, snapshot.state);
if (v6 != TaskAcknowledgeResult.SUCCESS) {
final String errorMessage = "Coordinator state not acknowledged successfully: " + v6;
final Throwable error = (checkpoint.isDisposed()) ? checkpoint.getFailureCause() : null;
CheckpointFailureReason reason = CheckpointFailureReason.TRIGGER_CHECKPOINT_FAILURE;
if (error != null) {
final Optional<IOException> ioExceptionOptional = ExceptionUtils.findThrowable(error, IOException.class);
if (ioExceptionOptional.isPresent()) {
reason = CheckpointFailureReason.IO_EXCEPTION;
}
throw new CheckpointException(errorMessage, reason, error);
} else {
throw new CheckpointException(errorMessage, reason);
}
}
}
} | 3.26 |
flink_StatsSummary_getAverage_rdh | /**
* Calculates the average over all seen values.
*
* @return Average over all seen values.
*/
    public long getAverage() {
if (count == 0) {
return 0;
} else {
return sum / count;
}
} | 3.26 |
flink_StatsSummary_m0_rdh | /**
* Returns a snapshot of the current state.
*
* @return A snapshot of the current state.
*/
public StatsSummarySnapshot m0() {
return new StatsSummarySnapshot(min, max, sum, count, histogram == null ? null : histogram.getStatistics());
} | 3.26 |
flink_StatsSummary_add_rdh | /**
* Adds the value to the stats if it is >= 0.
*
* @param value
 * Value to add for min/max/avg stats.
*/
void add(long value) {
if (value >= 0) {
if (count > 0) {
                min = Math.min(min, value);
max = Math.max(max, value);
} else {
min = value;
max = value;
}
count++;
            sum += value;
if (histogram != null) {
histogram.update(value);
}
}
} | 3.26 |
flink_StatsSummary_getMinimum_rdh | /**
* Returns the minimum seen value.
*
* @return The current minimum value.
*/
public long getMinimum() {
return min;
} | 3.26 |
flink_StatsSummary_getCount_rdh | /**
* Returns the count of all seen values.
*
* @return Count of all values.
*/
public long getCount() {
return count;
} | 3.26 |
flink_OperationUtils_indent_rdh | /**
* Increases indentation for description of string of child {@link Operation}. The input can
* already contain indentation. This will increase all the indentations by one level.
*
* @param item
* result of {@link Operation#asSummaryString()}
* @return string with increased indentation
*/
static String indent(String item) {
return ("\n" + OPERATION_INDENT) + item.replace("\n" + OPERATION_INDENT, ("\n" + OPERATION_INDENT) + OPERATION_INDENT);
} | 3.26 |
flink_OperationUtils_formatWithChildren_rdh | /**
* Formats a Tree of {@link Operation} in a unified way. It prints all the parameters and adds
* all children formatted and properly indented in the following lines.
*
* <p>The format is
*
* <pre>{@code <operationName>: [(key1: [value1], key2: [v1, v2])]
* <child1>
* <child2>
* <child3>}</pre>
*
* @param operationName
* The operation name.
* @param parameters
* The operation's parameters.
* @param children
* The operation's children.
* @param childToString
* The function to convert child to String.
* @param <T>
* The type of the child.
* @return String representation of the given operation.
*/
public static <T extends Operation> String formatWithChildren(String operationName, Map<String, Object> parameters, List<T> children, Function<T, String> childToString) {
String description = parameters.entrySet().stream().map(entry -> formatParameter(entry.getKey(), entry.getValue())).collect(Collectors.joining(", "));
final StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append(operationName).append(":");
if (!StringUtils.isNullOrWhitespaceOnly(description)) {
stringBuilder.append(" (").append(description).append(")");
}
        String childrenDescription = children.stream()
                .map(child -> OperationUtils.indent(childToString.apply(child)))
                .collect(Collectors.joining());
return stringBuilder.append(childrenDescription).toString();
} | 3.26 |
flink_CheckpointedPosition_getOffset_rdh | /**
* Gets the offset that the reader will seek to when restored from this checkpoint.
*/
public long getOffset() {
        return offset;
    } | 3.26 |
flink_CheckpointedPosition_getRecordsAfterOffset_rdh | /**
* Gets the records to skip after the offset.
*/
public long getRecordsAfterOffset() {
return recordsAfterOffset;
} | 3.26 |
flink_CheckpointedPosition_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
final CheckpointedPosition that = ((CheckpointedPosition) (o));
        return (offset == that.offset) && (recordsAfterOffset == that.recordsAfterOffset);
} | 3.26 |
flink_LongValue_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
return String.valueOf(this.value);
} | 3.26 |
flink_LongValue_setValue_rdh | /**
* Sets the value of the encapsulated long to the specified value.
*
* @param value
* The new value of the encapsulated long.
*/
public void setValue(final long value) {
this.value = value;
} | 3.26 |
flink_LongValue_read_rdh | // --------------------------------------------------------------------------------------------
@Override
public void read(final DataInputView in) throws IOException {
this.value = in.readLong();
} | 3.26 |
flink_LongValue_compareTo_rdh | // --------------------------------------------------------------------------------------------
@Override
    public int compareTo(LongValue o) {
        final long other = o.value;
        return this.value < other ? -1 : (this.value > other ? 1 : 0);
} | 3.26 |
flink_LongValue_getBinaryLength_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getBinaryLength() {
return 8;
} | 3.26 |
flink_LongValue_getMaxNormalizedKeyLen_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getMaxNormalizedKeyLen() {
return 8;
} | 3.26 |
flink_LongValue_equals_rdh | /* (non-Javadoc)
@see java.lang.Object#equals(java.lang.Object)
*/
    @Override
    public boolean equals(final Object obj) {
if (obj instanceof LongValue) {
return ((LongValue) (obj)).value == this.value;
}
return false;
} | 3.26 |
flink_SkipListUtils_helpSetPrevNode_rdh | /**
* Set the previous node of the given node at the given level. The level must be positive.
*
* @param node
* the node.
* @param prevNode
* the previous node to set.
* @param level
* the level to find the next node.
* @param spaceAllocator
* the space allocator.
*/
static void helpSetPrevNode(long node, long prevNode, int level, Allocator spaceAllocator) {
Preconditions.checkArgument(level > 0, "only index level have previous node");
if ((node == HEAD_NODE) || (node == NIL_NODE)) {
return;
}
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
int topLevel = getLevel(segment, offsetInByteBuffer);
putPrevIndexNode(segment, offsetInByteBuffer, topLevel, level, prevNode);
} | 3.26 |
flink_SkipListUtils_putValueData_rdh | /**
* Puts the value data into value space.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
* @param value
* value data.
*/
public static void putValueData(MemorySegment memorySegment, int offset, byte[] value) {
MemorySegment valueSegment = MemorySegmentFactory.wrap(value);
        valueSegment.copyTo(0, memorySegment, offset + getValueMetaLen(), value.length);
} | 3.26 |
flink_SkipListUtils_buildLevelIndex_rdh | /**
* Build the level index for the given node.
*
* @param node
* the node.
* @param level
* level of the node.
* @param keySegment
* memory segment of the key in the node.
* @param keyOffset
* offset of the key in memory segment.
* @param levelIndexHeader
* the head level index.
* @param spaceAllocator
* the space allocator.
*/
static void buildLevelIndex(long node, int level, MemorySegment keySegment, int keyOffset, LevelIndexHeader levelIndexHeader, Allocator spaceAllocator) {
        int currLevel = level;
long v54 = findPredecessor(keySegment, keyOffset, currLevel, levelIndexHeader, spaceAllocator);
long currentNode = helpGetNextNode(v54, currLevel, levelIndexHeader, spaceAllocator);
        for (;;) {
            if (currentNode != NIL_NODE) {
int c = compareSegmentAndNode(keySegment, keyOffset, currentNode, spaceAllocator);
if (c > 0) {
v54 = currentNode;
currentNode = helpGetNextNode(currentNode, currLevel, levelIndexHeader, spaceAllocator);
continue;
}
}
helpSetPrevAndNextNode(node, v54, currentNode, currLevel, spaceAllocator);
helpSetNextNode(v54, node, currLevel, levelIndexHeader, spaceAllocator);
helpSetPrevNode(currentNode, node, currLevel, spaceAllocator);
currLevel--;
if (currLevel == 0) {
break;
}
currentNode = helpGetNextNode(v54, currLevel, levelIndexHeader, spaceAllocator);
}
} | 3.26 |
flink_SkipListUtils_helpGetNodeLatestVersion_rdh | /**
 * Returns the newest version of the value for the node.
*
* @param node
* the node.
* @param spaceAllocator
* the space allocator.
*/
static int helpGetNodeLatestVersion(long node, Allocator spaceAllocator) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
        int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
long valuePointer = getValuePointer(segment, offsetInByteBuffer);
        return helpGetValueVersion(valuePointer, spaceAllocator);
    } | 3.26 |
flink_SkipListUtils_getValuePointer_rdh | /**
* Returns the value pointer.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
*/
    public static long getValuePointer(MemorySegment memorySegment, int offset) {
        return memorySegment.getLong(offset + VALUE_POINTER_OFFSET);
} | 3.26 |
flink_SkipListUtils_helpGetNextNode_rdh | /**
* Return the next of the given node at the given level.
*
* @param node
* the node to find the next node for.
* @param level
* the level to find the next node.
* @param levelIndexHeader
* the header of the level index.
* @param spaceAllocator
* the space allocator.
* @return the pointer to the next node of the given node at the given level.
*/
static long helpGetNextNode(long node, int level, LevelIndexHeader levelIndexHeader, Allocator spaceAllocator) {
if (node == HEAD_NODE) {
return levelIndexHeader.getNextNode(level);
}
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
        int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
return level == 0 ? getNextKeyPointer(segment, offsetInByteBuffer) : getNextIndexNode(segment, offsetInByteBuffer, level);
} | 3.26 |
flink_SkipListUtils_getKeyLen_rdh | /**
* Returns the length of the key.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
*/
    public static int getKeyLen(MemorySegment memorySegment, int offset) {
return memorySegment.getInt(offset + KEY_LEN_OFFSET);
} | 3.26 |
flink_SkipListUtils_helpGetValuePointer_rdh | /**
* Returns the value pointer of the node.
*
* @param node
* the node.
* @param spaceAllocator
* the space allocator.
*/
static long helpGetValuePointer(long node, Allocator spaceAllocator) {
        Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
        int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
return getValuePointer(segment, offsetInByteBuffer);
} | 3.26 |
flink_SkipListUtils_helpSetPrevAndNextNode_rdh | /**
* Set the previous node and the next node of the given node at the given level. The level must
* be positive.
*
* @param node
* the node.
* @param prevNode
* the previous node to set.
* @param nextNode
* the next node to set.
* @param level
* the level to find the next node.
* @param spaceAllocator
* the space allocator.
*/
    static void helpSetPrevAndNextNode(long node, long prevNode, long nextNode, int level, Allocator spaceAllocator) {
        Preconditions.checkArgument(node != HEAD_NODE, "head node does not have previous node");
Preconditions.checkArgument(level > 0, "only index level have previous node");
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
int topLevel = getLevel(segment, offsetInByteBuffer);
putNextIndexNode(segment, offsetInByteBuffer, level, nextNode);
putPrevIndexNode(segment, offsetInByteBuffer, topLevel, level, prevNode);
} | 3.26 |
flink_SkipListUtils_putNextIndexNode_rdh | /**
* Puts next key pointer on the given index level to key space.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
* @param level
* level of index.
* @param nextKeyPointer
* next key pointer on the given level.
*/
public static void putNextIndexNode(MemorySegment memorySegment, int offset, int level, long nextKeyPointer) {
memorySegment.putLong(offset + INDEX_NEXT_OFFSET_BY_LEVEL_ARRAY[level], nextKeyPointer);
} | 3.26 |
flink_SkipListUtils_removeLevelIndex_rdh | /**
* Remove the level index for the node from the skip list.
*
* @param node
* the node.
* @param spaceAllocator
* the space allocator.
* @param levelIndexHeader
* the head level index.
*/
static void removeLevelIndex(long node, Allocator spaceAllocator, LevelIndexHeader levelIndexHeader) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
        int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
        MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
        int level = getLevel(segment, offsetInByteBuffer);
for (int i = 1; i <= level; i++) {
long prevNode = getPrevIndexNode(segment, offsetInByteBuffer, level, i);
long nextNode = getNextIndexNode(segment, offsetInByteBuffer, i);
helpSetNextNode(prevNode, nextNode, i, levelIndexHeader, spaceAllocator);
helpSetPrevNode(nextNode, prevNode, i, spaceAllocator);
}
} | 3.26 |
flink_SkipListUtils_getPrevIndexNode_rdh | /**
* Returns previous key pointer on the given index level.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
* @param totalLevel
* the level of the node.
* @param level
* on which level to get the previous key pointer of the node.
*/
public static long getPrevIndexNode(MemorySegment memorySegment, int offset, int totalLevel, int level) {
int of = getIndexOffset(offset, totalLevel, level);
return memorySegment.getLong(of);
} | 3.26 |
flink_SkipListUtils_getKeyPointer_rdh | /**
* Return the pointer to key space.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
*/
public static long getKeyPointer(MemorySegment memorySegment, int offset) {
return memorySegment.getLong(offset + KEY_POINTER_OFFSET);
} | 3.26 |
flink_SkipListUtils_getLevel_rdh | /**
* Returns the level of the node.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
*/
public static int getLevel(MemorySegment memorySegment, int offset) {
return memorySegment.getInt(offset + KEY_META_OFFSET) & BYTE_MASK;
} | 3.26 |
flink_SkipListUtils_getValueLen_rdh | /**
* Return the length of value data.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
*/
public static int getValueLen(MemorySegment memorySegment, int offset) {
return memorySegment.getInt(offset + VALUE_LEN_OFFSET);
} | 3.26 |
flink_SkipListUtils_putPrevIndexNode_rdh | /**
* Puts previous key pointer on the given index level to key space.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
* @param totalLevel
* top level of the key.
* @param level
* level of index.
* @param prevKeyPointer
* previous key pointer on the given level.
*/
public static void putPrevIndexNode(MemorySegment memorySegment, int offset, int totalLevel, int level, long prevKeyPointer) {
int of = getIndexOffset(offset, totalLevel, level);
memorySegment.putLong(of, prevKeyPointer);
} | 3.26 |
flink_SkipListUtils_getNextIndexNode_rdh | /**
* Returns next key pointer on the given index level.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
* @param level
* level of index.
*/
public static long getNextIndexNode(MemorySegment memorySegment, int offset, int level) {
return memorySegment.getLong(offset + INDEX_NEXT_OFFSET_BY_LEVEL_ARRAY[level]);
} | 3.26 |
flink_SkipListUtils_getValueVersion_rdh | /**
* Returns the version of value.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
*/
public static int getValueVersion(MemorySegment memorySegment, int offset) {
return memorySegment.getInt(offset + VALUE_VERSION_OFFSET);
} | 3.26 |
flink_SkipListUtils_putNextKeyPointer_rdh | /**
* Puts the next key pointer on level 0 to key space.
*
* @param memorySegment
* memory segment for key space.
* @param offset
* offset of key space in the memory segment.
* @param nextKeyPointer
* next key pointer on level 0.
*/
public static void putNextKeyPointer(MemorySegment memorySegment, int offset, long nextKeyPointer) {
memorySegment.putLong(offset + f0, nextKeyPointer);
} | 3.26 |
flink_SkipListUtils_getNextValuePointer_rdh | /**
* Return the pointer to next value space.
*
* @param memorySegment
* memory segment for value space.
* @param offset
* offset of value space in memory segment.
*/
    public static long getNextValuePointer(MemorySegment memorySegment, int offset) {
return memorySegment.getLong(offset + NEXT_VALUE_POINTER_OFFSET);
} | 3.26 |
flink_SkipListUtils_helpGetValueVersion_rdh | /**
* Returns the version of the value.
*
* @param valuePointer
* the pointer to the value.
* @param spaceAllocator
* the space allocator.
*/
static int helpGetValueVersion(long valuePointer, Allocator spaceAllocator) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(valuePointer));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(valuePointer);
MemorySegment v72 = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
return getValueVersion(v72, offsetInByteBuffer);
} | 3.26 |