name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, 3.26 to 3.68
flink_HiveDDLUtils_disableConstraint_rdh
// returns a constraint trait that doesn't require ENABLE public static byte disableConstraint(byte trait) { return ((byte) (trait & (~HIVE_CONSTRAINT_ENABLE))); }
3.26
flink_HiveDDLUtils_enableConstraint_rdh
// returns a constraint trait that requires ENABLE public static byte enableConstraint(byte trait) { return ((byte) (trait | HIVE_CONSTRAINT_ENABLE)); }
3.26
flink_HiveDDLUtils_defaultTrait_rdh
// a constraint is by default ENABLE NOVALIDATE RELY public static byte defaultTrait() { byte res = enableConstraint(((byte) (0))); res = relyConstraint(res); return res; }
3.26
flink_CheckpointStatsHistory_createSnapshot_rdh
/** * Creates a snapshot of the current state. * * @return Snapshot of the current state. */ CheckpointStatsHistory createSnapshot() { if (readOnly) { throw new UnsupportedOperationException("Can't create a snapshot of a read-only history."); } List<AbstractCheckpointStats> checkpointsHistory; Map<Long, AbstractCheckpointStats> v1;v1 = CollectionUtil.newHashMapWithExpectedSize(checkpointsArray.length); if (maxSize == 0) { checkpointsHistory = Collections.emptyList(); } else { AbstractCheckpointStats[] newCheckpointsArray = new AbstractCheckpointStats[checkpointsArray.length]; System.arraycopy(checkpointsArray, nextPos, newCheckpointsArray, 0, checkpointsArray.length - nextPos); System.arraycopy(checkpointsArray, 0, newCheckpointsArray, checkpointsArray.length - nextPos, nextPos); checkpointsHistory = Arrays.asList(newCheckpointsArray); // reverse the order such that we start with the youngest checkpoint Collections.reverse(checkpointsHistory); for (AbstractCheckpointStats checkpoint : checkpointsHistory) { v1.put(checkpoint.getCheckpointId(), checkpoint); } } if (latestCompletedCheckpoint != null) { v1.put(latestCompletedCheckpoint.getCheckpointId(), latestCompletedCheckpoint); } if (latestFailedCheckpoint != null) {v1.put(latestFailedCheckpoint.getCheckpointId(), latestFailedCheckpoint); } if (latestSavepoint != null) { v1.put(latestSavepoint.getCheckpointId(), latestSavepoint); } return new CheckpointStatsHistory(true, maxSize, null, checkpointsHistory, v1, latestCompletedCheckpoint, latestFailedCheckpoint, latestSavepoint); }
3.26
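As a side note on the createSnapshot snippet above: the two System.arraycopy calls rotate the circular checkpoint buffer into insertion order before it is reversed. Below is a minimal standalone sketch of that rotation; the class name, the string entries and the nextPos value are hypothetical and stand in for the checkpoint stats array.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class RingBufferRotationSketch {
    public static void main(String[] args) {
        // Wrapped ring buffer; nextPos = 2 means "a" (at index 2) is the oldest entry.
        String[] ring = {"d", "e", "a", "b", "c"};
        int nextPos = 2;
        String[] ordered = new String[ring.length];
        // Copy the segment from nextPos to the end first, then the segment before nextPos.
        System.arraycopy(ring, nextPos, ordered, 0, ring.length - nextPos); // a, b, c
        System.arraycopy(ring, 0, ordered, ring.length - nextPos, nextPos); // d, e
        List<String> history = Arrays.asList(ordered);
        Collections.reverse(history); // youngest entry first, as in the snippet
        System.out.println(history);  // [e, d, c, b, a]
    }
}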
flink_CheckpointStatsHistory_addInProgressCheckpoint_rdh
/** * Adds an in progress checkpoint to the checkpoint history. * * @param pending * In progress checkpoint to add. */ void addInProgressCheckpoint(PendingCheckpointStats pending) { if (readOnly) { throw new UnsupportedOperationException("Can't create a snapshot of a read-only history."); } if (maxSize == 0) { return; } checkNotNull(pending, "Pending checkpoint"); // Grow the array if required. This happens only for the first entries // and makes the iterator logic easier, because we don't have any // null elements with the growing array. if (checkpointsArray.length < maxSize) { checkpointsArray = Arrays.copyOf(checkpointsArray, checkpointsArray.length + 1); } // Wrap around if we are at the end. The next pos is the least recently // added checkpoint. if (nextPos == checkpointsArray.length) { nextPos = 0; } checkpointsArray[nextPos++] = pending; recentCheckpoints.put(pending.checkpointId, pending); }
3.26
flink_CheckpointStatsHistory_replacePendingCheckpointById_rdh
/** * Searches for the in progress checkpoint with the given ID and replaces it with the given * completed or failed checkpoint. * * <p>This is bounded by the maximum number of concurrent in progress checkpointsArray, which * means that the runtime of this is constant. * * @param completedOrFailed * The completed or failed checkpoint to replace the in progress * checkpoint with. * @return <code>true</code> if the checkpoint was replaced or <code>false</code> otherwise. */ boolean replacePendingCheckpointById(AbstractCheckpointStats completedOrFailed) { checkArgument(!completedOrFailed.getStatus().isInProgress(), "Not allowed to replace with in progress checkpoints."); if (readOnly) { throw new UnsupportedOperationException("Can't create a snapshot of a read-only history."); } // Update the latest checkpoint stats if (completedOrFailed.getStatus().isCompleted()) { CompletedCheckpointStats completed = ((CompletedCheckpointStats) (completedOrFailed)); if (completed.getProperties().isSavepoint() && ((latestSavepoint == null) || (completed.getCheckpointId() > latestSavepoint.getCheckpointId()))) { latestSavepoint = completed; } else if ((latestCompletedCheckpoint == null) || (completed.getCheckpointId() > latestCompletedCheckpoint.getCheckpointId())) { latestCompletedCheckpoint = completed; } } else if (completedOrFailed.getStatus().isFailed()) { FailedCheckpointStats failed = ((FailedCheckpointStats) (completedOrFailed)); if ((latestFailedCheckpoint == null) || (failed.getCheckpointId() > latestFailedCheckpoint.getCheckpointId())) { latestFailedCheckpoint = failed; } } if (maxSize == 0) { return false; } long checkpointId = completedOrFailed.getCheckpointId(); recentCheckpoints.computeIfPresent(checkpointId, (unusedKey, unusedValue) -> completedOrFailed); // We start searching from the last inserted position. Since the entries // wrap around the array we search until we are at index 0 and then from // the end of the array until (start pos + 1). int startPos = (nextPos == checkpointsArray.length) ? checkpointsArray.length - 1 : nextPos - 1; for (int i = startPos; i >= 0; i--) { if (checkpointsArray[i].getCheckpointId() == checkpointId) { checkpointsArray[i] = completedOrFailed; return true; } } for (int i = checkpointsArray.length - 1; i > startPos; i--) { if (checkpointsArray[i].getCheckpointId() == checkpointId) { checkpointsArray[i] = completedOrFailed; return true; } } return false; }
3.26
flink_FileSystemJobResultStore_constructDirtyPath_rdh
/** * Given a job ID, construct the path for a dirty entry corresponding to it in the job result * store. * * @param jobId * The job ID to construct a dirty entry path from. * @return A path for a dirty entry for the given Job ID. */ private Path constructDirtyPath(JobID jobId) { return constructEntryPath(jobId.toString() + DIRTY_FILE_EXTENSION); }
3.26
flink_FileSystemJobResultStore_constructCleanPath_rdh
/** * Given a job ID, construct the path for a clean entry corresponding to it in the job result * store. * * @param jobId * The job ID to construct a clean entry path from. * @return A path for a clean entry for the given Job ID. */ private Path constructCleanPath(JobID jobId) { return constructEntryPath(jobId.toString() + FILE_EXTENSION); }
3.26
flink_RichOrCondition_getLeft_rdh
/** * * @return One of the {@link IterativeCondition conditions} combined in this condition. */ public IterativeCondition<T> getLeft() { return getNestedConditions()[0]; }
3.26
flink_RichOrCondition_getRight_rdh
/** * * @return One of the {@link IterativeCondition conditions} combined in this condition. */ public IterativeCondition<T> getRight() { return getNestedConditions()[1]; }
3.26
flink_SinkV2Provider_of_rdh
/** * Helper method for creating a Sink provider with a provided sink parallelism. */ static SinkV2Provider of(Sink<RowData> sink, @Nullable Integer sinkParallelism) { return new SinkV2Provider() { @Override public Sink<RowData> createSink() { return sink; } @Override public Optional<Integer> getParallelism() { return Optional.ofNullable(sinkParallelism); } }; }
3.26
flink_LogicalTypeMerging_findModuloDecimalType_rdh
/** * Finds the result type of a decimal modulo operation. */ public static DecimalType findModuloDecimalType(int precision1, int scale1, int precision2, int scale2) { final int scale = Math.max(scale1, scale2); int precision = Math.min(precision1 - scale1, precision2 - scale2) + scale; return m1(precision, scale); }
3.26
flink_LogicalTypeMerging_m1_rdh
// -------------------------------------------------------------------------------------------- /** * Scale adjustment implementation is inspired to SQLServer's one. In particular, when a result * precision is greater than MAX_PRECISION, the corresponding scale is reduced to prevent the * integral part of a result from being truncated. * * <p>https://docs.microsoft.com/en-us/sql/t-sql/data-types/precision-scale-and-length-transact-sql * * <p>The rules (although inspired by SQL Server) are not followed 100%, instead the approach of * Spark/Hive is followed for adjusting the precision. * * <p>http://www.openkb.info/2021/05/understand-decimal-precision-and-scale.html * * <p>For (38, 8) + (32, 8) -> (39, 8) (The rules for addition, initially calculate a decimal * type, assuming its precision is infinite) results in a decimal with integral part of 31 * digits. * * <p>This method is called subsequently to adjust the resulting decimal since the maximum * allowed precision is 38 (so far a precision of 39 is calculated in the first step). So, the * rounding for SQL Server would be: (39, 8) -> (38, 8) // integral part: 30, but instead we * follow the Hive/Spark approach which gives: (39, 8) -> (38, 7) // integral part: 31 */ private static DecimalType m1(int precision, int scale) { if (precision <= DecimalType.MAX_PRECISION) { // Adjustment only needed when we exceed max precision return new DecimalType(false, precision, scale); } else { int v20 = precision - scale; // If original scale is less than MINIMUM_ADJUSTED_SCALE, use original scale value; // otherwise preserve at least MINIMUM_ADJUSTED_SCALE fractional digits int minScalePart = Math.min(scale, MINIMUM_ADJUSTED_SCALE); int adjustScale = Math.max(DecimalType.MAX_PRECISION - v20, minScalePart); return new DecimalType(false, DecimalType.MAX_PRECISION, adjustScale); } }
3.26
flink_LogicalTypeMerging_findMultiplicationDecimalType_rdh
/** * Finds the result type of a decimal multiplication operation. */ public static DecimalType findMultiplicationDecimalType(int precision1, int scale1, int precision2, int scale2) { int scale = scale1 + scale2; int precision = (precision1 + precision2) + 1; return m1(precision, scale); }
3.26
flink_LogicalTypeMerging_m0_rdh
/** * Finds the result type of a decimal sum aggregation. */ public static LogicalType m0(LogicalType argType) { // adopted from // https://docs.microsoft.com/en-us/sql/t-sql/functions/sum-transact-sql final LogicalType resultType; if (argType.is(DECIMAL)) { // a hack to make legacy types possible until we drop them if (argType instanceof LegacyTypeInformationType) { return argType; } resultType = new DecimalType(false, 38, getScale(argType)); } else { resultType = argType; } return resultType.copy(argType.isNullable()); }
3.26
flink_LogicalTypeMerging_findCommonType_rdh
/** * Returns the most common, more general {@link LogicalType} for a given set of types. If such a * type exists, all given types can be casted to this more general type. * * <p>For example: {@code [INT, BIGINT, DECIMAL(2, 2)]} would lead to {@code DECIMAL(21, 2)}. * * <p>This class aims to be compatible with the SQL standard. It is inspired by Apache Calcite's * {@code SqlTypeFactoryImpl#leastRestrictive} method. */ public static Optional<LogicalType> findCommonType(List<LogicalType> types) { Preconditions.checkArgument(types.size() > 0, "List of types must not be empty.");// collect statistics first boolean hasRawType = false; boolean hasNullType = false; boolean hasNullableTypes = false; for (LogicalType v5 : types) { final LogicalTypeRoot v6 = v5.getTypeRoot(); if (v6 == RAW) { hasRawType = true; } else if (v6 == NULL) { hasNullType = true; } if (v5.isNullable()) { hasNullableTypes = true; } } final List<LogicalType> normalizedTypes = types.stream().map(t -> t.copy(true)).collect(Collectors.toList()); LogicalType foundType = findCommonNullableType(normalizedTypes, hasRawType, hasNullType); if (foundType == null) { foundType = findCommonCastableType(normalizedTypes); } if (foundType != null) { final LogicalType typeWithNullability = foundType.copy(hasNullableTypes); // NULL is reserved for untyped literals only if (typeWithNullability.is(NULL)) { return Optional.empty(); } return Optional.of(typeWithNullability); } return Optional.empty(); }
3.26
flink_LogicalTypeMerging_findAvgAggType_rdh
/** * Finds the result type of a decimal average aggregation. */public static LogicalType findAvgAggType(LogicalType argType) { final LogicalType resultType; if (argType.is(DECIMAL)) { // a hack to make legacy types possible until we drop them if (argType instanceof LegacyTypeInformationType) { return argType; } // adopted from // https://docs.microsoft.com/en-us/sql/t-sql/functions/avg-transact-sql // however, we count by BIGINT, therefore divide by DECIMAL(20,0), // but the end result is actually the same, which is DECIMAL(38, MAX(6, s)). resultType = LogicalTypeMerging.findDivisionDecimalType(38, getScale(argType), 20, 0); } else { resultType = argType; } return resultType.copy(argType.isNullable()); }
3.26
flink_LogicalTypeMerging_findAdditionDecimalType_rdh
/** * Finds the result type of a decimal addition operation. */ public static DecimalType findAdditionDecimalType(int precision1, int scale1, int precision2, int scale2) { final int scale = Math.max(scale1, scale2); int precision = (Math.max(precision1 - scale1, precision2 - scale2) + scale) + 1; return m1(precision, scale); }
3.26
flink_LogicalTypeMerging_findDivisionDecimalType_rdh
// e1 - e2 max(s1, s2) + max(p1-s1, p2-s2) + 1 max(s1, s2) // e1 * e2 p1 + p2 + 1 s1 + s2 // e1 / e2 p1 - s1 + s2 + max(6, s1 + p2 + 1) max(6, s1 + p2 + 1) // e1 % e2 min(p1-s1, p2-s2) + max(s1, s2) max(s1, s2) // // Also, if the precision / scale are out of the range, the scale may be sacrificed // in order to prevent the truncation of the integer part of the decimals. /** * Finds the result type of a decimal division operation. */ public static DecimalType findDivisionDecimalType(int precision1, int scale1, int precision2, int scale2) { int scale = Math.max(6, (scale1 + precision2) + 1); int precision = ((precision1 - scale1) + scale2) + scale;return m1(precision, scale); }
3.26
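To make the decimal division rule above concrete, here is a minimal standalone sketch that applies the same formulas. It assumes MAX_PRECISION = 38 and a minimum adjusted scale of 6, mirroring the constants referenced in the snippets; the class and method names are hypothetical and this is an illustration, not the Flink implementation.

public class DecimalDivisionRuleSketch {
    static final int MAX_PRECISION = 38;          // assumed, per the snippets above
    static final int MINIMUM_ADJUSTED_SCALE = 6;  // assumed, per the snippets above

    // Returns {precision, scale} for DECIMAL(p1, s1) / DECIMAL(p2, s2).
    static int[] divisionType(int p1, int s1, int p2, int s2) {
        int scale = Math.max(6, s1 + p2 + 1);
        int precision = (p1 - s1) + s2 + scale;
        if (precision <= MAX_PRECISION) {
            return new int[] {precision, scale};
        }
        // Same adjustment idea as m1 above: keep the integral part, shrink the scale.
        int integralPart = precision - scale;
        int minScale = Math.min(scale, MINIMUM_ADJUSTED_SCALE);
        int adjustedScale = Math.max(MAX_PRECISION - integralPart, minScale);
        return new int[] {MAX_PRECISION, adjustedScale};
    }

    public static void main(String[] args) {
        // DECIMAL(10, 2) / DECIMAL(5, 3): scale = max(6, 2 + 5 + 1) = 8,
        // precision = (10 - 2) + 3 + 8 = 19, which is within MAX_PRECISION.
        int[] r = divisionType(10, 2, 5, 3);
        System.out.println("DECIMAL(" + r[0] + ", " + r[1] + ")"); // DECIMAL(19, 8)
    }
}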
flink_LogicalTypeMerging_findRoundDecimalType_rdh
/** * Finds the result type of a decimal rounding operation. */ public static DecimalType findRoundDecimalType(int precision, int scale, int round) { if (round >= scale) { return new DecimalType(false, precision, scale); } if (round < 0) { return new DecimalType(false, Math.min(DecimalType.MAX_PRECISION, (1 + precision) - scale), 0); } // 0 <= r < s // NOTE: rounding may increase the digits by 1, therefore we need +1 on precisions. return new DecimalType(false, ((1 + precision) - scale) + round, round); }
3.26
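Similarly, a small sketch of the rounding rule above, assuming a maximum precision of 38; the extra integral digit is reserved because rounding can carry over into the integer part. Class and method names are hypothetical.

public class RoundDecimalTypeSketch {
    // Returns {precision, scale} for ROUND(DECIMAL(precision, scale), round).
    static int[] roundType(int precision, int scale, int round) {
        if (round >= scale) {
            return new int[] {precision, scale};
        }
        if (round < 0) {
            return new int[] {Math.min(38, 1 + precision - scale), 0};
        }
        // 0 <= round < scale: keep `round` fractional digits, plus one carry digit.
        return new int[] {(1 + precision - scale) + round, round};
    }

    public static void main(String[] args) {
        int[] r = roundType(10, 4, 2); // ROUND(DECIMAL(10, 4), 2)
        System.out.println("DECIMAL(" + r[0] + ", " + r[1] + ")"); // DECIMAL(9, 2)
    }
}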
flink_TransformationMetadata_fill_rdh
/** * Fill a transformation with this meta. */ public <T extends Transformation<?>> T fill(T transformation) { transformation.setName(getName()); transformation.setDescription(getDescription()); if (getUid() != null) { transformation.setUid(getUid()); } return transformation; }
3.26
flink_DualInputPlanNode_getTwoInputNode_rdh
// -------------------------------------------------------------------------------------------- public TwoInputNode getTwoInputNode() { if (this.template instanceof TwoInputNode) { return ((TwoInputNode) (this.template)); } else { throw new RuntimeException(); } }
3.26
flink_DualInputPlanNode_getInput1_rdh
/** * Gets the first input channel to this node. * * @return The first input channel to this node. */ public Channel getInput1() { return this.input1; }
3.26
flink_DualInputPlanNode_accept_rdh
// -------------------------------------------------------------------------------------------- @Override public void accept(Visitor<PlanNode> visitor) {if (visitor.preVisit(this)) { this.input1.getSource().accept(visitor); this.input2.getSource().accept(visitor); for (Channel broadcastInput : getBroadcastInputs()) { broadcastInput.getSource().accept(visitor); } visitor.postVisit(this); } }
3.26
flink_DeltaIteration_getName_rdh
/** * Gets the name from this iteration. * * @return The name of the iteration. */ public String getName() { return name; }
3.26
flink_DeltaIteration_name_rdh
/** * Sets the name for the iteration. The name is displayed in logs and messages. * * @param name * The name for the iteration. * @return The iteration object, for function call chaining. */ public DeltaIteration<ST, WT> name(String name) { this.name = name; return this; }
3.26
flink_DeltaIteration_setSolutionSetUnManaged_rdh
/** * Sets whether to keep the solution set in managed memory (safe against heap exhaustion) or * unmanaged memory (objects on heap). * * @param solutionSetUnManaged * True to keep the solution set in unmanaged memory, false to keep * it in managed memory. * @see #isSolutionSetUnManaged() */ public void setSolutionSetUnManaged(boolean solutionSetUnManaged) { this.solutionSetUnManaged = solutionSetUnManaged; }
3.26
flink_DeltaIteration_getInitialWorkset_rdh
/** * Gets the initial workset. This is the data set passed to the method that starts the delta * iteration. * * <p>Consider the following example: * * <pre>{@code DataSet<MyType> solutionSetData = ...; * DataSet<AnotherType> worksetData = ...; * * DeltaIteration<MyType, AnotherType> iteration = solutionSetData.iteratorDelta(worksetData, 10, ...);}</pre> * * <p>The <tt>worksetData</tt> would be the data set returned by {@code iteration.getInitialWorkset();}. * * @return The data set that forms the initial workset. */ public DataSet<WT> getInitialWorkset() { return initialWorkset; }
3.26
flink_DeltaIteration_registerAggregator_rdh
/** * Registers an {@link Aggregator} for the iteration. Aggregators can be used to maintain simple * statistics during the iteration, such as number of elements processed. The aggregators * compute global aggregates: After each iteration step, the values are globally aggregated to * produce one aggregate that represents statistics across all parallel instances. The value of * an aggregator can be accessed in the next iteration. * * <p>Aggregators can be accessed inside a function via the {@link org.apache.flink.api.common.functions.AbstractRichFunction#getIterationRuntimeContext()} * method. * * @param name * The name under which the aggregator is registered. * @param aggregator * The aggregator class. * @return The DeltaIteration itself, to allow chaining function calls. */ @PublicEvolving public DeltaIteration<ST, WT> registerAggregator(String name, Aggregator<?> aggregator) { this.aggregators.registerAggregator(name, aggregator); return this; }
3.26
flink_DeltaIteration_setResources_rdh
/** * Sets the resources for the iteration, and the minimum and preferred resources are the same by * default. The lower and upper resource limits will be considered in dynamic resource resize * feature for future plan. * * @param resources * The resources for the iteration. * @return The iteration with set minimum and preferred resources. */ private DeltaIteration<ST, WT> setResources(ResourceSpec resources) { OperatorValidationUtils.validateResources(resources); this.minResources = resources; this.preferredResources = resources; return this; }
3.26
flink_DeltaIteration_getInitialSolutionSet_rdh
/** * Gets the initial solution set. This is the data set on which the delta iteration was started. * * <p>Consider the following example: * * <pre>{@code DataSet<MyType> solutionSetData = ...; * DataSet<AnotherType> worksetData = ...; * * DeltaIteration<MyType, AnotherType> iteration = solutionSetData.iteratorDelta(worksetData, 10, ...);}</pre> * * <p>The <tt>solutionSetData</tt> would be the data set returned by {@code iteration.getInitialSolutionSet();}. * * @return The data set that forms the initial solution set. */ public DataSet getInitialSolutionSet() { return initialSolutionSet; }
3.26
flink_DeltaIteration_getSolutionSet_rdh
/** * Gets the solution set of the delta iteration. The solution set represents the state that is * kept across iterations. * * @return The solution set of the delta iteration. */public SolutionSetPlaceHolder getSolutionSet() { return solutionSetPlaceholder; }
3.26
flink_DeltaIteration_registerAggregationConvergenceCriterion_rdh
/** * Registers an {@link Aggregator} for the iteration together with a {@link ConvergenceCriterion}. For a general description of aggregators, see {@link #registerAggregator(String, Aggregator)} and {@link Aggregator}. At the end of each * iteration, the convergence criterion takes the aggregator's global aggregate value and * decides whether the iteration should terminate. A typical use case is to have an aggregator * that sums up the total error of change in an iteration step and have to have a convergence * criterion that signals termination as soon as the aggregate value is below a certain * threshold. * * @param name * The name under which the aggregator is registered. * @param aggregator * The aggregator class. * @param convergenceCheck * The convergence criterion. * @return The DeltaIteration itself, to allow chaining function calls. */ @PublicEvolving public <X extends Value> DeltaIteration<ST, WT> registerAggregationConvergenceCriterion(String name, Aggregator<X> aggregator, ConvergenceCriterion<X> convergenceCheck) { this.aggregators.registerAggregationConvergenceCriterion(name, aggregator, convergenceCheck); return this; }
3.26
flink_DeltaIteration_parallelism_rdh
/** * Sets the parallelism for the iteration. * * @param parallelism * The parallelism. * @return The iteration object, for function call chaining. */ public DeltaIteration<ST, WT> parallelism(int parallelism) { OperatorValidationUtils.validateParallelism(parallelism); this.parallelism = parallelism; return this; } /** * Gets the iteration's parallelism. * * @return The iteration's parallelism, or {@link ExecutionConfig#PARALLELISM_DEFAULT}
3.26
flink_PageRank_getPagesDataSet_rdh
// ************************************************************************* // UTIL METHODS // ************************************************************************* private static DataSet<Long> getPagesDataSet(ExecutionEnvironment env, ParameterTool params) { if (params.has("pages")) { return env.readCsvFile(params.get("pages")).fieldDelimiter(" ").lineDelimiter("\n").types(Long.class).map(new MapFunction<Tuple1<Long>, Long>() { @Override public Long map(Tuple1<Long> v) { return v.f0; } }); } else { System.out.println("Executing PageRank example with default pages data set."); System.out.println("Use --pages to specify file input."); return PageRankData.getDefaultPagesDataSet(env); } }
3.26
flink_PageRank_main_rdh
// ************************************************************************* public static void main(String[] args) throws Exception { LOGGER.warn(DATASET_DEPRECATION_INFO); ParameterTool params = ParameterTool.fromArgs(args); final int v1 = params.getInt("numPages", PageRankData.getNumberOfPages()); final int maxIterations = params.getInt("iterations", 10); // set up execution environment final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); // make the parameters available to the web ui env.getConfig().setGlobalJobParameters(params); // get input data DataSet<Long> pagesInput = getPagesDataSet(env, params); DataSet<Tuple2<Long, Long>> linksInput = getLinksDataSet(env, params); // assign initial rank to pages DataSet<Tuple2<Long, Double>> pagesWithRanks = pagesInput.map(new RankAssigner(1.0 / v1)); // build adjacency list from link input DataSet<Tuple2<Long, Long[]>> adjacencyListInput = linksInput.groupBy(0).reduceGroup(new BuildOutgoingEdgeList()); // set iterative data set IterativeDataSet<Tuple2<Long, Double>> iteration = pagesWithRanks.iterate(maxIterations); DataSet<Tuple2<Long, Double>> newRanks = // apply dampening factor // collect and sum ranks // join pages with outgoing edges and distribute rank iteration.join(adjacencyListInput).where(0).equalTo(0).flatMap(new JoinVertexWithEdgesMatch()).groupBy(0).aggregate(SUM, 1).map(new Dampener(DAMPENING_FACTOR, v1)); DataSet<Tuple2<Long, Double>> finalPageRanks = iteration.closeWith(newRanks, // termination condition newRanks.join(iteration).where(0).equalTo(0).filter(new EpsilonFilter())); // emit result if (params.has("output")) { finalPageRanks.writeAsCsv(params.get("output"), "\n", " "); // execute program env.execute("Basic Page Rank Example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); finalPageRanks.print(); } }
3.26
flink_Keys_areCompatible_rdh
/** * Check if two sets of keys are compatible to each other (matching types, key counts) */ public boolean areCompatible(Keys<?> other) throws IncompatibleKeysException { TypeInformation<?>[] thisKeyFieldTypes = this.getKeyFieldTypes(); TypeInformation<?>[] otherKeyFieldTypes = other.getKeyFieldTypes(); if (thisKeyFieldTypes.length != otherKeyFieldTypes.length) { throw new IncompatibleKeysException(IncompatibleKeysException.SIZE_MISMATCH_MESSAGE); } else { for (int i = 0; i < thisKeyFieldTypes.length; i++) { if (!thisKeyFieldTypes[i].equals(otherKeyFieldTypes[i])) { throw new IncompatibleKeysException(thisKeyFieldTypes[i], otherKeyFieldTypes[i]); } } } return true; }
3.26
flink_Keys_createIncrIntArray_rdh
// -------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------- // Utilities // -------------------------------------------------------------------------------------------- private static int[] createIncrIntArray(int numKeys) { int[] keyFields = new int[numKeys]; for (int i = 0; i < numKeys; i++) { keyFields[i] = i; } return keyFields;}
3.26
flink_Tuple17_copy_rdh
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> copy() { return new Tuple17<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16); }
3.26
flink_Tuple17_toString_rdh
// ------------------------------------------------------------------------------------------------- // standard utilities // ------------------------------------------------------------------------------------------------- /** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11, f12, f13, f14, f15, f16), where the individual fields are the value returned by * calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return ((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ",") + StringUtils.arrayAwareToString(this.f16)) + ")"; }
3.26
flink_Tuple17_of_rdh
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16) { return new Tuple17<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16); }
3.26
flink_Tuple17_equals_rdh
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o * the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Tuple17)) { return false; } @SuppressWarnings("rawtypes") Tuple17 tuple = ((Tuple17) (o)); if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false; } if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) { return false; }if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {return false; } if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) { return false; } if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) { return false; } if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) { return false; } if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) { return false; } if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) { return false; } if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) { return false; } if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) { return false; } if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) { return false; } if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) { return false; } if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) { return false; } if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) { return false; } if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) { return false; } if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) { return false; } if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) { return false; } return true; }
3.26
flink_Tuple17_setFields_rdh
/** * Sets new values to all fields of the tuple. * * @param f0 * The value for field 0 * @param f1 * The value for field 1 * @param f2 * The value for field 2 * @param f3 * The value for field 3 * @param f4 * The value for field 4 * @param f5 * The value for field 5 * @param f6 * The value for field 6 * @param f7 * The value for field 7 * @param f8 * The value for field 8 * @param f9 * The value for field 9 * @param f10 * The value for field 10 * @param f11 * The value for field 11 * @param f12 * The value for field 12 * @param f13 * The value for field 13 * @param f14 * The value for field 14 * @param f15 * The value for field 15 * @param f16 * The value for field 16 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; this.f8 = f8; this.f9 = f9; this.f10 = f10; this.f11 = f11; this.f12 = f12; this.f13 = f13; this.f14 = f14;this.f15 = f15; this.f16 = f16; }
3.26
flink_Tuple8_copy_rdh
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple8<T0, T1, T2, T3, T4, T5, T6, T7> copy() { return new Tuple8<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7); }
3.26
flink_Tuple8_of_rdh
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static <T0, T1, T2, T3, T4, T5, T6, T7> Tuple8<T0, T1, T2, T3, T4, T5, T6, T7> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7) { return new Tuple8<>(f0, f1, f2, f3, f4, f5, f6, f7); }
3.26
flink_Tuple8_equals_rdh
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o * the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Tuple8)) { return false; } @SuppressWarnings("rawtypes") Tuple8 tuple = ((Tuple8) (o)); if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false;} if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) { return false; } if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) { return false; } if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) { return false; } if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) { return false; } if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) { return false; } if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) { return false; } if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) { return false;} return true; }
3.26
flink_Tuple8_toString_rdh
// ------------------------------------------------------------------------------------------------- // standard utilities // ------------------------------------------------------------------------------------------------- /** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7), * where the individual fields are the value returned by calling {@link Object#toString} on that * field. * * @return The string representation of the tuple. */ @Override public String toString() { return ((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ")";}
3.26
flink_Tuple8_setFields_rdh
/** * Sets new values to all fields of the tuple. * * @param f0 * The value for field 0 * @param f1 * The value for field 1 * @param f2 * The value for field 2 * @param f3 * The value for field 3 * @param f4 * The value for field 4 * @param f5 * The value for field 5 * @param f6 * The value for field 6 * @param f7 * The value for field 7 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; }
3.26
flink_ElementTriggers_every_rdh
/** * Creates a new trigger that triggers on receiving of every element. */ public static <W extends Window> EveryElement<W> every() { return new EveryElement<>(); }
3.26
flink_ElementTriggers_count_rdh
/** * Creates a trigger that fires when the pane contains at least {@code countElems} elements. */ public static <W extends Window> CountElement<W> count(long countElems) { return new CountElement<>(countElems); }
3.26
flink_DistinctOperator_translateSelectorFunctionDistinct_rdh
// -------------------------------------------------------------------------------------------- private static <IN, K> SingleInputOperator<?, IN, ?> translateSelectorFunctionDistinct(SelectorFunctionKeys<IN, ?> rawKeys, ReduceFunction<IN> function, TypeInformation<IN> outputType, String name, Operator<IN> input, int parallelism, CombineHint hint) { @SuppressWarnings("unchecked") final SelectorFunctionKeys<IN, K> keys = ((SelectorFunctionKeys<IN, K>) (rawKeys)); TypeInformation<Tuple2<K, IN>> typeInfoWithKey = KeyFunctions.createTypeWithKey(keys); Operator<Tuple2<K, IN>> keyedInput = KeyFunctions.appendKeyExtractor(input, keys); PlanUnwrappingReduceOperator<IN, K> reducer = new PlanUnwrappingReduceOperator<>(function, keys, name, outputType, typeInfoWithKey); reducer.setInput(keyedInput); reducer.setCombineHint(hint); reducer.setParallelism(parallelism); return KeyFunctions.appendKeyRemover(reducer, keys); }
3.26
flink_DistinctOperator_setCombineHint_rdh
/** * Sets the strategy to use for the combine phase of the reduce. * * <p>If this method is not called, then the default hint will be used. ({@link org.apache.flink.api.common.operators.base.ReduceOperatorBase.CombineHint#OPTIMIZER_CHOOSES}) * * @param strategy * The hint to use. * @return The DistinctOperator object, for function call chaining. */ @PublicEvolving public DistinctOperator<T> setCombineHint(CombineHint strategy) { this.hint = strategy; return this; }
3.26
flink_GuavaFlinkConnectorRateLimiter_setRate_rdh
/** * Set the global per consumer and per sub-task rates. * * @param globalRate * Value of rate in bytes per second. */ @Override public void setRate(long globalRate) { this.globalRateBytesPerSecond = globalRate; }
3.26
flink_GuavaFlinkConnectorRateLimiter_open_rdh
/** * Creates a rate limiter with the runtime context provided. * * @param runtimeContext */ @Override public void open(RuntimeContext runtimeContext) { this.runtimeContext = runtimeContext; localRateBytesPerSecond = globalRateBytesPerSecond / runtimeContext.getNumberOfParallelSubtasks(); this.rateLimiter = RateLimiter.create(localRateBytesPerSecond); }
3.26
flink_ExternalResourceOptions_getExternalResourceDriverFactoryConfigOptionForResource_rdh
/** * Generate the config option key for the factory class name of {@link org.apache.flink.api.common.externalresource.ExternalResourceDriver}. */ public static String getExternalResourceDriverFactoryConfigOptionForResource(String resourceName) { return keyWithResourceNameAndSuffix(resourceName, EXTERNAL_RESOURCE_DRIVER_FACTORY_SUFFIX); }
3.26
flink_ExternalResourceOptions_getAmountConfigOptionForResource_rdh
/** * Generate the config option key for the amount of external resource with resource_name. */ public static String getAmountConfigOptionForResource(String resourceName) { return keyWithResourceNameAndSuffix(resourceName, EXTERNAL_RESOURCE_AMOUNT_SUFFIX); }
3.26
flink_ExternalResourceOptions_keyWithResourceNameAndSuffix_rdh
/** * Generate the config option key with resource_name and suffix. */ private static String keyWithResourceNameAndSuffix(String resourceName, String suffix) { return String.format("%s.%s.%s", EXTERNAL_RESOURCE_PREFIX, Preconditions.checkNotNull(resourceName), Preconditions.checkNotNull(suffix)); }
3.26
flink_ExternalResourceOptions_getExternalResourceParamConfigPrefixForResource_rdh
/** * Generate the suffix option key prefix for the user-defined params for external resources. */ public static String getExternalResourceParamConfigPrefixForResource(String resourceName) { return keyWithResourceNameAndSuffix(resourceName, EXTERNAL_RESOURCE_DRIVER_PARAM_SUFFIX); }
3.26
flink_ExternalResourceOptions_getSystemConfigKeyConfigOptionForResource_rdh
/** * Generate the config option key for the configuration key of external resource in the * deploying system. */ public static String getSystemConfigKeyConfigOptionForResource(String resourceName, String suffix) { return keyWithResourceNameAndSuffix(resourceName, suffix); }
3.26
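For illustration, the key construction in the ExternalResourceOptions snippets above produces keys of the form <prefix>.<resource_name>.<suffix>. The concrete literals used below ("external-resource", "gpu", "amount") are assumptions standing in for the constants referenced in the snippets, not verified values.

public class ExternalResourceKeySketch {
    public static void main(String[] args) {
        // Assumed prefix and suffix strings, combined the same way as keyWithResourceNameAndSuffix.
        String key = String.format("%s.%s.%s", "external-resource", "gpu", "amount");
        System.out.println(key); // external-resource.gpu.amount
    }
}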
flink_ApiExpressionDefaultVisitor_visitNonApiExpression_rdh
// -------------------------------------------------------------------------------------------- // other expressions // -------------------------------------------------------------------------------------------- @Override public T visitNonApiExpression(Expression other) { return defaultMethod(other); }
3.26
flink_ApiExpressionDefaultVisitor_visit_rdh
// -------------------------------------------------------------------------------------------- // unresolved API expressions // -------------------------------------------------------------------------------------------- @Override public T visit(UnresolvedReferenceExpression unresolvedReference) { return defaultMethod(unresolvedReference); }
3.26
flink_JsonRowDeserializationSchema_ignoreParseErrors_rdh
/** * Configures schema to fail when parsing json failed. * * <p>By default, an exception will be thrown when parsing json fails. */ public Builder ignoreParseErrors() { this.ignoreParseErrors = true; return this; }
3.26
flink_JsonRowDeserializationSchema_setFailOnMissingField_rdh
/** * * @deprecated Use the provided {@link Builder} instead. */ @Deprecated public void setFailOnMissingField(boolean failOnMissingField) { // TODO make this class immutable once we drop this method this.failOnMissingField = failOnMissingField; this.runtimeConverter = createConverter(this.typeInfo); }
3.26
flink_Reference_owned_rdh
/** * Returns the value if it is owned. */public Optional<T> owned() { return isOwned ? Optional.of(value) : Optional.empty(); }
3.26
flink_CheckpointRequestDecider_chooseQueuedRequestToExecute_rdh
/** * Choose one of the queued requests to execute, if any. * * @return request that should be executed */ Optional<CheckpointTriggerRequest> chooseQueuedRequestToExecute(boolean isTriggering, long lastCompletionMs) { Optional<CheckpointTriggerRequest> request = chooseRequestToExecute(isTriggering, lastCompletionMs); request.ifPresent(CheckpointRequestDecider::logInQueueTime); return request; }
3.26
flink_CheckpointRequestDecider_chooseRequestToExecute_rdh
/** * Choose the next {@link CheckpointTriggerRequest request} to execute based on the provided * candidate and the current state. Acquires a lock and may update the state. * * @return request that should be executed */ private Optional<CheckpointTriggerRequest> chooseRequestToExecute(boolean isTriggering, long lastCompletionMs) { if ((isTriggering || f1.isEmpty()) || (numberOfCleaningCheckpointsSupplier.getAsInt() > maxConcurrentCheckpointAttempts)) { return Optional.empty(); } if (pendingCheckpointsSizeSupplier.getAsInt() >= maxConcurrentCheckpointAttempts) { return Optional.of(f1.first()).filter(CheckpointTriggerRequest::isForce).map(unused -> f1.pollFirst()); } CheckpointTriggerRequest first = f1.first(); if ((!first.isForce()) && first.isPeriodic) { long currentRelativeTime = clock.relativeTimeMillis(); long nextTriggerDelayMillis = (lastCompletionMs - currentRelativeTime) + minPauseBetweenCheckpoints; if (nextTriggerDelayMillis > 0) { f1.pollFirst().completeExceptionally(new CheckpointException(MINIMUM_TIME_BETWEEN_CHECKPOINTS)); f0.accept(currentRelativeTime, nextTriggerDelayMillis); return Optional.empty(); }} return Optional.of(f1.pollFirst()); }
3.26
flink_RuntimeConverter_create_rdh
/** * Creates a new instance of {@link Context}. * * @param classLoader * runtime classloader for loading user-defined classes. */ static Context create(ClassLoader classLoader) { return new Context() { @Override public ClassLoader getClassLoader() { return classLoader; } }; }
3.26
flink_PhysicalSlotRequestBulkCheckerImpl_checkPhysicalSlotRequestBulkTimeout_rdh
/** * Check the slot request bulk and timeout its requests if it has been unfulfillable for too * long. * * @param slotRequestBulk * bulk of slot requests * @param slotRequestTimeout * indicates how long a pending request can be unfulfillable * @return result of the check, indicating the bulk is fulfilled, still pending, or timed out */ @VisibleForTesting TimeoutCheckResult checkPhysicalSlotRequestBulkTimeout(final PhysicalSlotRequestBulkWithTimestamp slotRequestBulk, final Time slotRequestTimeout) { if (slotRequestBulk.getPendingRequests().isEmpty()) { return TimeoutCheckResult.FULFILLED; } final boolean fulfillable = isSlotRequestBulkFulfillable(slotRequestBulk, slotsRetriever); if (fulfillable) { slotRequestBulk.markFulfillable();} else { final long currentTimestamp = clock.relativeTimeMillis(); slotRequestBulk.markUnfulfillable(currentTimestamp); final long v5 = slotRequestBulk.getUnfulfillableSince(); if ((v5 + slotRequestTimeout.toMilliseconds()) <= currentTimestamp) {return TimeoutCheckResult.TIMEOUT; } } return TimeoutCheckResult.PENDING; }
3.26
flink_PhysicalSlotRequestBulkCheckerImpl_areRequestsFulfillableWithSlots_rdh
/** * Tries to match pending requests to all registered slots (available or allocated). * * <p>NOTE: The complexity of the method is currently quadratic (number of pending requests x * number of all slots). */ private static boolean areRequestsFulfillableWithSlots(final Collection<ResourceProfile> requestResourceProfiles, final Set<SlotInfo> slots) { final Set<SlotInfo> remainingSlots = new HashSet<>(slots); for (ResourceProfile requestResourceProfile : requestResourceProfiles) { final Optional<SlotInfo> matchedSlot = findMatchingSlotForRequest(requestResourceProfile, remainingSlots); if (matchedSlot.isPresent()) { remainingSlots.remove(matchedSlot.get()); } else { return false; } } return true; }
3.26
flink_AvroOutputFormat_setCodec_rdh
/** * Set avro codec for compression. * * @param codec * avro codec. */ public void setCodec(final Codec codec) { this.codec = checkNotNull(codec, "codec can not be null"); }
3.26
flink_HiveParserBaseSemanticAnalyzer_unescapeIdentifier_rdh
/** * Remove the encapsulating "`" pair from the identifier. We allow users to use "`" to escape * identifier for table names, column names and aliases, in case that coincide with Hive * language keywords. */ public static String unescapeIdentifier(String val) { if (val == null) { return null; } if ((val.charAt(0) == '`') && (val.charAt(val.length() - 1) == '`')) { val = val.substring(1, val.length() - 1); } return val; }
3.26
flink_HiveParserBaseSemanticAnalyzer_unparseExprForValuesClause_rdh
// Take an expression in the values clause and turn it back into a string. This is far from // comprehensive. At the moment it only supports: // * literals (all types) // * unary negatives // * true/false static String unparseExprForValuesClause(HiveParserASTNode expr) throws SemanticException { switch (expr.getToken().getType()) { case HiveASTParser.Number : return expr.getText(); case HiveASTParser.StringLiteral : return unescapeSQLString(expr.getText()); case HiveASTParser.KW_FALSE : // UDFToBoolean casts any non-empty string to true, so set this to false return ""; case HiveASTParser.KW_TRUE : return "TRUE"; case HiveASTParser.MINUS : return "-" + unparseExprForValuesClause(((HiveParserASTNode) (expr.getChildren().get(0)))); case HiveASTParser.TOK_NULL : return null; default : throw new SemanticException(("Expression of type " + expr.getText()) + " not supported in insert/values"); } }
3.26
flink_HiveParserBaseSemanticAnalyzer_getGroupByForClause_rdh
// This function is a wrapper of parseInfo.getGroupByForClause which automatically translates // SELECT DISTINCT a,b,c to SELECT a,b,c GROUP BY a,b,c. public static List<HiveParserASTNode> getGroupByForClause(HiveParserQBParseInfo parseInfo, String dest) { if (parseInfo.getSelForClause(dest).getToken().getType() == HiveASTParser.TOK_SELECTDI) { HiveParserASTNode selectExprs = parseInfo.getSelForClause(dest); List<HiveParserASTNode> result = new ArrayList<>(selectExprs == null ? 0 : selectExprs.getChildCount()); if (selectExprs != null) { for (int i = 0; i < selectExprs.getChildCount(); ++i) {if (((HiveParserASTNode) (selectExprs.getChild(i))).getToken().getType() == HiveASTParser.QUERY_HINT) { continue; } // table.column AS alias HiveParserASTNode grpbyExpr = ((HiveParserASTNode) (selectExprs.getChild(i).getChild(0))); result.add(grpbyExpr); } } return result; } else { HiveParserASTNode grpByExprs = parseInfo.getGroupByForClause(dest); List<HiveParserASTNode> result = new ArrayList<>(grpByExprs == null ? 0 : grpByExprs.getChildCount()); if (grpByExprs != null) { for (int i = 0; i < grpByExprs.getChildCount(); ++i) { HiveParserASTNode grpbyExpr = ((HiveParserASTNode) (grpByExprs.getChild(i))); if (grpbyExpr.getType() != HiveASTParser.TOK_GROUPING_SETS_EXPRESSION) { result.add(grpbyExpr); } } }return result; } }
3.26
flink_HiveParserBaseSemanticAnalyzer_validateNoHavingReferenceToAlias_rdh
// We support having referring alias just as in hive's semantic analyzer. This check only prints // a warning now. public static void validateNoHavingReferenceToAlias(HiveParserQB qb, HiveParserASTNode havingExpr, HiveParserRowResolver inputRR, HiveParserSemanticAnalyzer semanticAnalyzer) throws SemanticException { HiveParserQBParseInfo qbPI = qb.getParseInfo();Map<HiveParserASTNode, String> exprToAlias = qbPI.getAllExprToColumnAlias(); for (Map.Entry<HiveParserASTNode, String> exprAndAlias : exprToAlias.entrySet()) { final HiveParserASTNode expr = exprAndAlias.getKey();final String alias = exprAndAlias.getValue(); // put the alias in input RR so that we can generate ExprNodeDesc with it if (inputRR.getExpression(expr) != null) { inputRR.put("", alias, inputRR.getExpression(expr)); } final Set<Object> aliasReferences = new HashSet<>(); TreeVisitorAction action = new TreeVisitorAction() { @Override public Object m1(Object t) { if (HiveASTParseDriver.ADAPTOR.getType(t) == HiveASTParser.TOK_TABLE_OR_COL) { Object c = HiveASTParseDriver.ADAPTOR.getChild(t, 0); if (((c != null) && (HiveASTParseDriver.ADAPTOR.getType(c) == HiveASTParser.Identifier)) && HiveASTParseDriver.ADAPTOR.getText(c).equals(alias)) { aliasReferences.add(t); } } return t; } @Override public Object post(Object t) { return t; } }; new TreeVisitor(HiveASTParseDriver.ADAPTOR).visit(havingExpr, action); if (aliasReferences.size() > 0) { String havingClause = semanticAnalyzer.ctx.getTokenRewriteStream().toString(havingExpr.getTokenStartIndex(), havingExpr.getTokenStopIndex()); String msg = String.format("Encountered Select alias '%s' in having clause '%s'" + " This is non standard behavior.", alias, havingClause); LOG.warn(msg); } } }
3.26
flink_HiveParserBaseSemanticAnalyzer_getVariablesSetForFilter_rdh
/** * traverse the given node to find all correlated variables, the main logic is from {@link HiveFilter#getVariablesSet()}. */ public static Set<CorrelationId> getVariablesSetForFilter(RexNode rexNode) { Set<CorrelationId> correlationVariables = new HashSet<>(); if (rexNode instanceof RexSubQuery) { RexSubQuery rexSubQuery = ((RexSubQuery) (rexNode)); // we expect correlated variables in Filter only for now. // also check case where operator has 0 inputs .e.g TableScan if (rexSubQuery.rel.getInputs().isEmpty()) { return correlationVariables; } RelNode input = rexSubQuery.rel.getInput(0); while (((input != null) && (!(input instanceof LogicalFilter))) && (input.getInputs().size() >= 1)) { // we don't expect corr vars within UNION for now if (input.getInputs().size() > 1) { if (input instanceof LogicalJoin) { correlationVariables.addAll(findCorrelatedVar(((LogicalJoin) (input)).getCondition())); } // todo: throw Unsupported exception when the input isn't LogicalJoin and // contains correlate variables in FLINK-28317 return correlationVariables; } input = input.getInput(0); } if (input instanceof LogicalFilter) { correlationVariables.addAll(findCorrelatedVar(((LogicalFilter) (input)).getCondition())); } return correlationVariables; } // AND, NOT etc if (rexNode instanceof RexCall) { int numOperands = ((RexCall) (rexNode)).getOperands().size(); for (int i = 0; i < numOperands; i++) {RexNode op = ((RexCall) (rexNode)).getOperands().get(i); correlationVariables.addAll(getVariablesSetForFilter(op)); } } return correlationVariables; }
3.26
flink_HiveParserBaseSemanticAnalyzer_convert_rdh
/* This method returns the flip big-endian representation of value */ public static ImmutableBitSet convert(int value, int length) { BitSet v211 = new BitSet(); for (int index = length - 1; index >= 0; index--) { if ((value % 2) != 0) { v211.set(index); } value = value >>> 1; } // We flip the bits because Calcite considers that '1' // means that the column participates in the GroupBy // and '0' does not, as opposed to grouping_id. v211.flip(0, length); return ImmutableBitSet.fromBitSet(v211); }
3.26
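A standalone sketch of the bit flip in the convert snippet above, using java.util.BitSet in place of Calcite's ImmutableBitSet purely for illustration: convert(5, 3) first writes 5 = 0b101 big-endian into positions 0..2, giving {0, 2}, and flipping over [0, 3) then yields {1}. The class name is hypothetical.

import java.util.BitSet;

public class GroupingBitFlipSketch {
    public static void main(String[] args) {
        int value = 5, length = 3;
        BitSet bits = new BitSet();
        // Write the bits of `value` big-endian: least significant bit at index length - 1.
        for (int index = length - 1; index >= 0; index--) {
            if (value % 2 != 0) {
                bits.set(index);
            }
            value >>>= 1;
        }
        System.out.println(bits); // {0, 2}
        bits.flip(0, length);     // invert, as in the snippet's Calcite conversion
        System.out.println(bits); // {1}
    }
}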
flink_HiveParserBaseSemanticAnalyzer_processPositionAlias_rdh
// Process the position alias in GROUPBY and ORDERBY public static void processPositionAlias(HiveParserASTNode ast, HiveConf conf) throws SemanticException { boolean isBothByPos = HiveConf.getBoolVar(conf, ConfVars.HIVE_GROUPBY_ORDERBY_POSITION_ALIAS); boolean isGbyByPos = isBothByPos || Boolean.parseBoolean(conf.get("hive.groupby.position.alias", "false"));boolean isObyByPos = isBothByPos || Boolean.parseBoolean(conf.get("hive.orderby.position.alias", "true"));Deque<HiveParserASTNode> stack = new ArrayDeque<>(); stack.push(ast); while (!stack.isEmpty()) { HiveParserASTNode next = stack.pop(); if (next.getChildCount() == 0) { continue;} boolean isAllCol; HiveParserASTNode selectNode = null; HiveParserASTNode groupbyNode = null; HiveParserASTNode orderbyNode = null; // get node type int childCount = next.getChildCount(); for (int childPos = 0; childPos < childCount; ++childPos) { HiveParserASTNode node = ((HiveParserASTNode) (next.getChild(childPos))); int type = node.getToken().getType(); if (type == HiveASTParser.TOK_SELECT) { selectNode = node; } else if (type == HiveASTParser.TOK_GROUPBY) { groupbyNode = node; } else if (type == HiveASTParser.TOK_ORDERBY) { orderbyNode = node; } } if (selectNode != null) { int selectExpCnt = selectNode.getChildCount(); // replace each of the position alias in GROUPBY with the actual column name if (groupbyNode != null) { for (int childPos = 0; childPos < groupbyNode.getChildCount(); ++childPos) { HiveParserASTNode node = ((HiveParserASTNode) (groupbyNode.getChild(childPos)));if (node.getToken().getType() == HiveASTParser.Number) {if (isGbyByPos) { int pos = Integer.parseInt(node.getText()); if ((pos > 0) && (pos <= selectExpCnt)) { groupbyNode.setChild(childPos, selectNode.getChild(pos - 1).getChild(0));} else { throw new SemanticException(ErrorMsg.INVALID_POSITION_ALIAS_IN_GROUPBY.getMsg(((("Position alias: " + pos) + " does not exist\n") + "The Select List is indexed from 1 to ") + selectExpCnt)); } } else { warn(("Using constant number " + node.getText()) + " in group by. If you try to use position alias when hive.groupby.position.alias is false, the position alias will be ignored."); } } } } // replace each of the position alias in ORDERBY with the actual column name if (orderbyNode != null) { isAllCol = false; for (int v145 = 0; v145 < selectNode.getChildCount(); ++v145) { HiveParserASTNode node = ((HiveParserASTNode) (selectNode.getChild(v145).getChild(0))); if ((node != null) && (node.getToken().getType() == HiveASTParser.TOK_ALLCOLREF)) { isAllCol = true; } } for (int childPos = 0; childPos < orderbyNode.getChildCount(); ++childPos) { HiveParserASTNode colNode = ((HiveParserASTNode) (orderbyNode.getChild(childPos).getChild(0))); HiveParserASTNode node = ((HiveParserASTNode) (colNode.getChild(0))); if ((node != null) && (node.getToken().getType() == HiveASTParser.Number)) {if (isObyByPos) { if (!isAllCol) { int pos = Integer.parseInt(node.getText()); if ((pos > 0) && (pos <= selectExpCnt)) {colNode.setChild(0, selectNode.getChild(pos - 1).getChild(0)); } else { throw new SemanticException(ErrorMsg.INVALID_POSITION_ALIAS_IN_ORDERBY.getMsg(((("Position alias: " + pos) + " does not exist\n") + "The Select List is indexed from 1 to ") + selectExpCnt)); } } else { throw new SemanticException(ErrorMsg.NO_SUPPORTED_ORDERBY_ALLCOLREF_POS.getMsg()); } } else { // if not using position alias and it is a number. warn(("Using constant number " + node.getText()) + " in order by. If you try to use position alias when hive.orderby.position.alias is false, the position alias will be ignored."); } } } } } for (int i = next.getChildren().size() - 1; i >= 0; i--) { stack.push(((HiveParserASTNode) (next.getChildren().get(i)))); } } }
3.26
flink_HiveParserBaseSemanticAnalyzer_readProps_rdh
/** * Converts parsed key/value properties pairs into a map. * * @param prop * HiveParserASTNode parent of the key/value pairs * @param mapProp * property map which receives the mappings */public static void readProps(HiveParserASTNode prop, Map<String, String> mapProp) { for (int propChild = 0; propChild < prop.getChildCount(); propChild++) { String key = unescapeSQLString(prop.getChild(propChild).getChild(0).getText()); String value = null; if (prop.getChild(propChild).getChild(1) != null) { value = unescapeSQLString(prop.getChild(propChild).getChild(1).getText()); } mapProp.put(key, value); } }
3.26
flink_HiveParserBaseSemanticAnalyzer_getUnescapedOriginTableName_rdh
/** * Get the unescaped origin table name for the table node. This method returns * "catalog.db.table","db.table" or "table" according to what the table node actually specifies * * @param node * the table node * @return "catalog.db.table", "db.table" or "table" */ public static String getUnescapedOriginTableName(HiveParserASTNode node) throws SemanticException { UnresolvedIdentifier tableIdentifier = getQualifiedTableName(node); return getDotName(tableIdentifier.getCatalogName().orElse(null), tableIdentifier.getDatabaseName().orElse(null), tableIdentifier.getObjectName()); }
3.26
flink_CollectionExecutor_execute_rdh
// -------------------------------------------------------------------------------------------- // General execution methods // -------------------------------------------------------------------------------------------- public JobExecutionResult execute(Plan program) throws Exception { long startTime = System.currentTimeMillis(); JobID jobID = (program.getJobId() == null) ? new JobID() : program.getJobId(); initCache(program.getCachedFiles()); Collection<? extends GenericDataSinkBase<?>> sinks = program.getDataSinks(); for (Operator<?> sink : sinks) { execute(sink, jobID); } long endTime = System.currentTimeMillis(); Map<String, OptionalFailure<Object>> accumulatorResults = AccumulatorHelper.toResultMap(accumulators); return new JobExecutionResult(null, endTime - startTime, accumulatorResults); }
3.26
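A hedged driver sketch for the method above: a minimal in-process program that builds a tiny DataSet plan and hands it to CollectionExecutor. The class name, job name and discarding sink are illustrative assumptions, not part of the dataset.

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.Plan;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.operators.CollectionExecutor;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;

public class CollectionExecutorSketch {
    public static void main(String[] args) throws Exception {
        // Build a tiny DataSet program; the collections environment keeps everything in-process.
        ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
        env.fromElements(1, 2, 3)
                .map(new MapFunction<Integer, Integer>() {
                    @Override
                    public Integer map(Integer value) {
                        return value * 2;
                    }
                })
                .output(new DiscardingOutputFormat<Integer>());
        Plan plan = env.createProgramPlan("collection-sketch");
        // Execute the plan on Java collections, which is what the method above iterates over.
        JobExecutionResult result = new CollectionExecutor(new ExecutionConfig()).execute(plan);
        System.out.println("net runtime (ms): " + result.getNetRuntime());
    }
}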
flink_CollectionExecutor_executeDataSink_rdh
// -------------------------------------------------------------------------------------------- // Operator class specific execution methods // -------------------------------------------------------------------------------------------- private <IN> void executeDataSink(GenericDataSinkBase<?> sink, int superStep, JobID jobID) throws Exception { Operator<?> inputOp = sink.getInput(); if (inputOp == null) { throw new InvalidProgramException(("The data sink " + sink.getName()) + " has no input."); } @SuppressWarnings("unchecked") List<IN> input = ((List<IN>) (execute(inputOp, jobID))); @SuppressWarnings("unchecked") GenericDataSinkBase<IN> typedSink = ((GenericDataSinkBase<IN>) (sink)); // build the runtime context and compute broadcast variables, if necessary TaskInfo taskInfo = new TaskInfo(typedSink.getName(), 1, 0, 1, 0); RuntimeUDFContext v13; if (RichOutputFormat.class.isAssignableFrom(typedSink.getUserCodeWrapper().getUserCodeClass())) { v13 = createContext(superStep, taskInfo, jobID); } else { v13 = null; } typedSink.executeOnCollections(input, v13, executionConfig); }
3.26
flink_UnorderedStreamElementQueue_emitCompleted_rdh
/** * Pops one completed element into the given output. Because an input element may produce * an arbitrary number of output elements, there is no correlation between the size of the * collection and the popped elements. * * @return the number of popped input elements. */ int emitCompleted(TimestampedCollector<OUT> output) { final StreamElementQueueEntry<OUT> v11 = completedElements.poll(); if (v11 == null) { return 0; } v11.emitResult(output); return 1; }
3.26
flink_UnorderedStreamElementQueue_hasCompleted_rdh
/** * True if there is at least one completed element, such that {@link #emitCompleted(TimestampedCollector)} will actually output an element. */ boolean hasCompleted() { return !completedElements.isEmpty(); }
3.26
flink_UnorderedStreamElementQueue_completed_rdh
/** * Signals that an entry finished computation. */ void completed(StreamElementQueueEntry<OUT> elementQueueEntry) { // adding only to completed queue if not completed before // there may be a real result coming after a timeout result, which is updated in the // queue entry but // the entry is not re-added to the complete queue if (incompleteElements.remove(elementQueueEntry)) { completedElements.add(elementQueueEntry); } }
3.26
flink_UnorderedStreamElementQueue_isEmpty_rdh
/** * True if there are no incomplete elements and all complete elements have been consumed. */ boolean isEmpty() { return incompleteElements.isEmpty() && completedElements.isEmpty(); }
3.26
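The three segment-level methods above compose roughly as in the sketch below; segment and output are assumed to be in scope (as they are inside the enclosing queue), so this illustrates the contract rather than the queue's actual drain code.

// Drain whatever has finished, one input element per call, then check whether the
// segment can be dropped. emitCompleted() returns the number of popped input elements.
int emittedInputElements = 0;
while (segment.hasCompleted()) {
    emittedInputElements += segment.emitCompleted(output);
}
if (segment.isEmpty()) {
    // no incomplete entries and nothing left to emit: the segment can be released
}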
flink_SimpleTypeSerializerSnapshot_getCurrentVersion_rdh
// ------------------------------------------------------------------------ // Serializer Snapshot Methods // ------------------------------------------------------------------------ @Override public int getCurrentVersion() { return CURRENT_VERSION; }
3.26
flink_SimpleTypeSerializerSnapshot_equals_rdh
// ------------------------------------------------------------------------ // standard utilities // ------------------------------------------------------------------------ @Override public final boolean equals(Object obj) { return (obj != null) && (obj.getClass() == getClass()); }
3.26
flink_DefaultConfigurableOptionsFactory_getUseDynamicLevelSize_rdh
// -------------------------------------------------------------------------- // Whether to configure RocksDB to pick target size of each level dynamically. // -------------------------------------------------------------------------- private boolean getUseDynamicLevelSize() { return getInternal(USE_DYNAMIC_LEVEL_SIZE.key()).compareToIgnoreCase("false") != 0; }
3.26
flink_DefaultConfigurableOptionsFactory_setLogDir_rdh
/** * The directory for RocksDB's logging files. * * @param logDir * If empty, log files will be in the same directory as data files<br> * If non-empty, this directory will be used and the data directory's absolute path will be * used as the prefix of the log file name. * @return this options factory */ public DefaultConfigurableOptionsFactory setLogDir(String logDir) { Preconditions.checkArgument(new File(logDir).isAbsolute(), ("Invalid configuration: " + logDir) + " does not point to an absolute path."); setInternal(LOG_DIR.key(), logDir); return this; }
3.26
flink_DefaultConfigurableOptionsFactory_getUseBloomFilter_rdh
// -------------------------------------------------------------------------- // Filter policy in RocksDB // -------------------------------------------------------------------------- private boolean getUseBloomFilter() { return Boolean.parseBoolean(getInternal(USE_BLOOM_FILTER.key())); }
3.26
flink_DefaultConfigurableOptionsFactory_getMaxOpenFiles_rdh
// -------------------------------------------------------------------------- private int getMaxOpenFiles() { return Integer.parseInt(getInternal(MAX_OPEN_FILES.key())); }
3.26
flink_DefaultConfigurableOptionsFactory_getMetadataBlockSize_rdh
// -------------------------------------------------------------------------- // Approximate size of partitioned metadata packed per block. // Currently applied to indexes block when partitioned index/filters option is enabled. // -------------------------------------------------------------------------- private long getMetadataBlockSize() { return MemorySize.parseBytes(getInternal(METADATA_BLOCK_SIZE.key()));}
3.26
flink_DefaultConfigurableOptionsFactory_m0_rdh
// -------------------------------------------------------------------------- // The target file size for compaction, i.e., the per-file size for level-1 // -------------------------------------------------------------------------- private long m0() { return MemorySize.parseBytes(getInternal(TARGET_FILE_SIZE_BASE.key())); }
3.26
flink_DefaultConfigurableOptionsFactory_getBlockSize_rdh
// -------------------------------------------------------------------------- // Approximate size of user data packed per block. Note that the block size // specified here corresponds to uncompressed data. The actual size of the // unit read from disk may be smaller if compression is enabled // -------------------------------------------------------------------------- private long getBlockSize() { return MemorySize.parseBytes(getInternal(BLOCK_SIZE.key()));}
3.26
flink_DefaultConfigurableOptionsFactory_getCompactionStyle_rdh
// -------------------------------------------------------------------------- // The style of compaction for DB. // -------------------------------------------------------------------------- private CompactionStyle getCompactionStyle() { return CompactionStyle.valueOf(getInternal(COMPACTION_STYLE.key()).toUpperCase()); }
3.26
flink_DefaultConfigurableOptionsFactory_setInternal_rdh
/** * Sets the configuration with (key, value) if the key is predefined, otherwise throws * IllegalArgumentException. * * @param key * The configuration key; if the key is not predefined, an IllegalArgumentException is thrown. * @param value * The configuration value; must not be null or empty. */ private void setInternal(String key, String value) { Preconditions.checkArgument((value != null) && (!value.isEmpty()), "The configuration value must not be empty."); configuredOptions.put(key, value); }
3.26
flink_DefaultConfigurableOptionsFactory_getMinWriteBufferNumberToMerge_rdh
// -------------------------------------------------------------------------- // The minimum number that will be merged together before writing to storage // -------------------------------------------------------------------------- private int getMinWriteBufferNumberToMerge() { return Integer.parseInt(getInternal(MIN_WRITE_BUFFER_NUMBER_TO_MERGE.key())); }
3.26
flink_DefaultConfigurableOptionsFactory_getWriteBufferSize_rdh
// -------------------------------------------------------------------------- // Amount of data to build up in memory (backed by an unsorted log on disk) // before converting to a sorted on-disk file. Larger values increase // performance, especially during bulk loads. // -------------------------------------------------------------------------- private long getWriteBufferSize() { return MemorySize.parseBytes(getInternal(WRITE_BUFFER_SIZE.key())); }
3.26
flink_DefaultConfigurableOptionsFactory_setMaxLogFileSize_rdh
/** * The maximum size of RocksDB's file used for logging. * * <p>If the log file becomes larger than this, a new file will be created. If 0, all logs will * be written to one log file. * * @param maxLogFileSize * max file size limit * @return this options factory */ public DefaultConfigurableOptionsFactory setMaxLogFileSize(String maxLogFileSize) { Preconditions.checkArgument(MemorySize.parseBytes(maxLogFileSize) >= 0, ("Invalid configuration " + maxLogFileSize) + " for max log file size."); setInternal(LOG_MAX_FILE_SIZE.key(), maxLogFileSize); return this; }
3.26
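A hedged wiring sketch combining the two setters above; the log directory and size are placeholder values, and the EmbeddedRocksDBStateBackend hookup is the usual way to apply the factory (the exact backend class may differ by Flink version).

DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory()
        .setLogDir("/var/log/flink-rocksdb")   // setLogDir requires an absolute path
        .setMaxLogFileSize("25mb");            // "0" would keep all logging in a single file

// Assumed wiring: pass the factory to the RocksDB state backend so the options take effect.
EmbeddedRocksDBStateBackend backend = new EmbeddedRocksDBStateBackend();
backend.setRocksDBOptions(optionsFactory);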
flink_DefaultConfigurableOptionsFactory_checkArgumentValid_rdh
/** * Helper method to check whether the given (key, value) pair is valid for the corresponding * option; throws IllegalArgumentException if it is not. * * @param option * The configuration key which is configurable in {@link RocksDBConfigurableOptions}. * @param value * The value within given configuration. */ private static void checkArgumentValid(ConfigOption<?> option, Object value) { final String key = option.key(); if (POSITIVE_INT_CONFIG_SET.contains(option)) { Preconditions.checkArgument(((Integer) (value)) > 0, ("Configured value for key: " + key) + " must be larger than 0."); } else if (SIZE_CONFIG_SET.contains(option)) { Preconditions.checkArgument(((MemorySize) (value)).getBytes() > 0, ("Configured size for key " + key) + " must be larger than 0."); } else if (LOG_MAX_FILE_SIZE.equals(option)) { Preconditions.checkArgument(((MemorySize) (value)).getBytes() >= 0, ("Configured size for key " + key) + " must be larger than or equal to 0."); } else if (LOG_DIR.equals(option)) { Preconditions.checkArgument(new File(((String) (value))).isAbsolute(), ("Configured path for key " + key) + " is not absolute."); } }
3.26
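Two hedged calls illustrating the branches above; since checkArgumentValid is private, these would have to sit inside the same class, and the relative path is a deliberately failing placeholder.

checkArgumentValid(LOG_MAX_FILE_SIZE, MemorySize.parse("0"));   // passes: zero is explicitly allowed here
checkArgumentValid(LOG_DIR, "relative/rocksdb-logs");           // throws IllegalArgumentException: path is not absolute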