name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, values 3.26 to 3.68
flink_InputTypeStrategies_comparable_rdh
/** * Strategy that checks all types are comparable with each other. Requires at least one * argument. */ public static InputTypeStrategy comparable(ConstantArgumentCount argumentCount, StructuredComparison requiredComparison) { return new ComparableTypeStrategy(argumentCount, requiredComparison); }
3.26
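Usage sketch for the strategy above (not part of the dataset row; the two-argument count and the equality requirement are illustrative assumptions): a minimal wiring of the comparable() factory into an InputTypeStrategy constant.

    import org.apache.flink.table.types.inference.ConstantArgumentCount;
    import org.apache.flink.table.types.inference.InputTypeStrategies;
    import org.apache.flink.table.types.inference.InputTypeStrategy;
    import org.apache.flink.table.types.logical.StructuredType.StructuredComparison;

    class ComparableStrategySketch {
        // Accepts exactly two arguments whose types must support at least equality comparison.
        static final InputTypeStrategy TWO_COMPARABLE_ARGS =
                InputTypeStrategies.comparable(ConstantArgumentCount.of(2), StructuredComparison.EQUALS);
    }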
flink_FloatWriter_forRow_rdh
/** * {@link ArrowFieldWriter} for Float. */ @Internal
3.26
flink_DefaultVertexParallelismAndInputInfosDecider_decideParallelismAndEvenlyDistributeData_rdh
/** * Decide parallelism and input infos, which will make the data be evenly distributed to * downstream subtasks, such that different downstream subtasks consume roughly the same amount * of data. * * @param jobVertexId * The job vertex id * @param consumedResults * The information of consumed blocking results * @param initialParallelism * The initial parallelism of the job vertex * @param minParallelism * the min parallelism * @param maxParallelism * the max parallelism * @return the parallelism and vertex input infos */ private ParallelismAndInputInfos decideParallelismAndEvenlyDistributeData(JobVertexID jobVertexId, List<BlockingResultInfo> consumedResults, int initialParallelism, int minParallelism, int maxParallelism) { checkArgument(initialParallelism == ExecutionConfig.PARALLELISM_DEFAULT); checkArgument(!consumedResults.isEmpty()); consumedResults.forEach(resultInfo -> checkState(!resultInfo.isPointwise())); // Considering that the sizes of broadcast results are usually very small, we compute the // parallelism and input infos only based on sizes of non-broadcast results final List<BlockingResultInfo> nonBroadcastResults = getNonBroadcastResultInfos(consumedResults); int subpartitionNum = checkAndGetSubpartitionNum(nonBroadcastResults); long[] bytesBySubpartition = new long[subpartitionNum]; Arrays.fill(bytesBySubpartition, 0L); for (BlockingResultInfo resultInfo : nonBroadcastResults) { List<Long> subpartitionBytes = ((AllToAllBlockingResultInfo) (resultInfo)).getAggregatedSubpartitionBytes(); for (int i = 0; i < subpartitionNum; ++i) { bytesBySubpartition[i] += subpartitionBytes.get(i); } } int maxNumPartitions = getMaxNumPartitions(nonBroadcastResults); int maxRangeSize = MAX_NUM_SUBPARTITIONS_PER_TASK_CONSUME / maxNumPartitions; // compute subpartition ranges List<IndexRange> subpartitionRanges = computeSubpartitionRanges(bytesBySubpartition, f0, maxRangeSize); // if the parallelism is not legal, adjust to a legal parallelism if (!isLegalParallelism(subpartitionRanges.size(), minParallelism, maxParallelism)) { Optional<List<IndexRange>> v17 = adjustToClosestLegalParallelism(f0, subpartitionRanges.size(), minParallelism, maxParallelism, Arrays.stream(bytesBySubpartition).min().getAsLong(), Arrays.stream(bytesBySubpartition).sum(), limit -> computeParallelism(bytesBySubpartition, limit, maxRangeSize), limit -> computeSubpartitionRanges(bytesBySubpartition, limit, maxRangeSize));if (!v17.isPresent()) { // can't find any legal parallelism, fall back to evenly distribute subpartitions LOG.info("Cannot find a legal parallelism to evenly distribute data for job vertex {}. " + "Fall back to compute a parallelism that can evenly distribute subpartitions.", jobVertexId); return decideParallelismAndEvenlyDistributeSubpartitions(jobVertexId, consumedResults, initialParallelism, minParallelism, maxParallelism); } subpartitionRanges = v17.get(); } checkState(isLegalParallelism(subpartitionRanges.size(), minParallelism, maxParallelism)); return createParallelismAndInputInfos(consumedResults, subpartitionRanges); }
3.26
flink_DefaultVertexParallelismAndInputInfosDecider_decideParallelismAndEvenlyDistributeSubpartitions_rdh
/** * Decide parallelism and input infos, which will make the subpartitions be evenly distributed * to downstream subtasks, such that different downstream subtasks consume roughly the same * number of subpartitions. * * @param jobVertexId * The job vertex id * @param consumedResults * The information of consumed blocking results * @param initialParallelism * The initial parallelism of the job vertex * @param minParallelism * the min parallelism * @param maxParallelism * the max parallelism * @return the parallelism and vertex input infos */ private ParallelismAndInputInfos decideParallelismAndEvenlyDistributeSubpartitions(JobVertexID jobVertexId, List<BlockingResultInfo> consumedResults, int initialParallelism, int minParallelism, int maxParallelism) { checkArgument(!consumedResults.isEmpty()); int parallelism = (initialParallelism > 0) ? initialParallelism : decideParallelism(jobVertexId, consumedResults, minParallelism, maxParallelism); return new ParallelismAndInputInfos(parallelism, VertexInputInfoComputationUtils.computeVertexInputInfos(parallelism, consumedResults, true)); }
3.26
flink_NettyConfig_getServerConnectBacklog_rdh
// ------------------------------------------------------------------------ // Getters // ------------------------------------------------------------------------ public int getServerConnectBacklog() { return config.getInteger(NettyShuffleEnvironmentOptions.CONNECT_BACKLOG); }
3.26
flink_DecimalDataUtils_sign_rdh
/** * SQL <code>SIGN</code> operator applied to BigDecimal values. Preserves precision and scale. */ public static DecimalData sign(DecimalData b0) { if (b0.isCompact()) { return new DecimalData(b0.precision, b0.scale, signum(b0) * POW10[b0.scale], null); } else { return fromBigDecimal(BigDecimal.valueOf(signum(b0)), b0.precision, b0.scale); } }
3.26
flink_DecimalDataUtils_signum_rdh
/** * Returns the signum function of this decimal. (The return value is -1 if this decimal is * negative; 0 if this decimal is zero; and 1 if this decimal is positive.) * * @return the signum function of this decimal. */ public static int signum(DecimalData decimal) { if (decimal.isCompact()) { return Long.signum(decimal.toUnscaledLong());} else { return decimal.toBigDecimal().signum(); } }
3.26
flink_DecimalDataUtils_floor_rdh
// floor()/ceil() preserve precision, but set scale to 0. // note that result may exceed the original precision. public static DecimalData floor(DecimalData decimal) { BigDecimal bd = decimal.toBigDecimal().setScale(0, RoundingMode.FLOOR); return fromBigDecimal(bd, bd.precision(), 0); }
3.26
flink_DecimalDataUtils_sround_rdh
/** * SQL <code>ROUND</code> operator applied to BigDecimal values. */ public static DecimalData sround(DecimalData b0, int r) { if (r >= b0.scale) { return b0; }BigDecimal b2 = b0.toBigDecimal().movePointRight(r).setScale(0, RoundingMode.HALF_UP).movePointLeft(r); int v14 = b0.precision; int s = b0.scale; if (r < 0) { return fromBigDecimal(b2, Math.min(38, (1 + v14) - s), 0); } else { // 0 <= r < s return fromBigDecimal(b2, ((1 + v14) - s) + r, r); } }
3.26
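A plain-BigDecimal illustration of the move-round-move sequence used by sround above (the input value 123.456 and target scale 1 are made up for the example):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    class SroundSketch {
        public static void main(String[] args) {
            BigDecimal rounded = new BigDecimal("123.456")
                    .movePointRight(1)                  // 1234.56
                    .setScale(0, RoundingMode.HALF_UP)  // 1235
                    .movePointLeft(1);                  // 123.5, i.e. ROUND(123.456, 1)
            System.out.println(rounded);                // prints 123.5
        }
    }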
flink_DecimalDataUtils_divideToIntegralValue_rdh
/** * Returns a {@code DecimalData} whose value is the integer part of the quotient {@code (this / * divisor)} rounded down. * * @param value * the dividend, i.e. the value to be divided. * @param divisor * value by which the dividend is to be divided. * @return The integer part of {@code this / divisor}. * @throws ArithmeticException * if {@code divisor==0} */ public static DecimalData divideToIntegralValue(DecimalData value, DecimalData divisor, int precision, int scale) { BigDecimal bd = value.toBigDecimal().divideToIntegralValue(divisor.toBigDecimal()); return fromBigDecimal(bd, precision, scale); }
3.26
flink_DecimalDataUtils_castToIntegral_rdh
// cast decimal to integral or floating data types, by SQL standard. // to cast to integer, rounding-DOWN is performed, and overflow will just return null. // to cast to floats, overflow will not happen, because precision<=38. public static long castToIntegral(DecimalData dec) { BigDecimal bd = dec.toBigDecimal(); // rounding down. This is consistent with float=>int, // and consistent with SQLServer, Spark. bd = bd.setScale(0, RoundingMode.DOWN); return bd.longValue(); }
3.26
flink_RuntimeRestAPIDocGenerator_main_rdh
/** * Generates the Runtime REST API documentation. * * @param args * args[0] contains the directory into which the generated files are placed * @throws IOException * if any file operation failed */ public static void main(String[] args) throws IOException, ConfigurationException { String outputDirectory = args[0]; for (final RuntimeRestAPIVersion apiVersion : RuntimeRestAPIVersion.values()) { if (apiVersion == RuntimeRestAPIVersion.V0) { // this version exists only for testing purposes continue; } createHtmlFile(new DocumentingDispatcherRestEndpoint(), apiVersion, Paths.get(outputDirectory, ("rest_" + apiVersion.getURLVersionPrefix()) + "_dispatcher.html"));} }
3.26
flink_SqlAlterViewPropertiesConverter_convertSqlNode_rdh
/** * A converter for {@link SqlAlterViewProperties}. */public class SqlAlterViewPropertiesConverter implements SqlNodeConverter<SqlAlterViewProperties> { @Override public Operation convertSqlNode(SqlAlterViewProperties alterView, ConvertContext context) { CatalogView oldView = validateAlterView(alterView, context); ObjectIdentifier viewIdentifier = context.getCatalogManager().qualifyIdentifier(UnresolvedIdentifier.of(alterView.fullViewName())); Map<String, String> newOptions = new HashMap<>(oldView.getOptions()); newOptions.putAll(OperationConverterUtils.extractProperties(alterView.getPropertyList())); CatalogView newView = CatalogView.of(oldView.getUnresolvedSchema(), oldView.getComment(), oldView.getOriginalQuery(), oldView.getExpandedQuery(), newOptions); return new AlterViewPropertiesOperation(viewIdentifier, newView); }
3.26
flink_FixedLengthByteKeyComparator_supportsSerializationWithKeyNormalization_rdh
// -------------------------------------------------------------------------------------------- // unsupported normalization // -------------------------------------------------------------------------------------------- @Override public boolean supportsSerializationWithKeyNormalization() { return false; }
3.26
flink_HiveParserStorageFormat_fillStorageFormat_rdh
/** * Returns true if the passed token was a storage format token and thus was processed * accordingly. */ public boolean fillStorageFormat(HiveParserASTNode child) throws SemanticException { switch (child.getToken().getType()) { case HiveASTParser.TOK_TABLEFILEFORMAT : if (child.getChildCount() < 2) { throw new SemanticException("Incomplete specification of File Format. " + "You must provide InputFormat, OutputFormat."); } inputFormat = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText()); outputFormat = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getChild(1).getText()); if (child.getChildCount() == 3) { serde = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getChild(2).getText()); } break;case HiveASTParser.TOK_STORAGEHANDLER : storageHandler = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText()); if (child.getChildCount() == 2) { HiveParserBaseSemanticAnalyzer.readProps(((HiveParserASTNode) (child.getChild(1).getChild(0))), serdeProps); } break; case HiveASTParser.TOK_FILEFORMAT_GENERIC : HiveParserASTNode grandChild = ((HiveParserASTNode) (child.getChild(0))); genericName = (grandChild == null ? "" : grandChild.getText()).trim().toUpperCase(); processStorageFormat(genericName); break; default : // token was not a storage format token return false; } return true;}
3.26
flink_GroupCombineOperator_translateSelectorFunctionReducer_rdh
// -------------------------------------------------------------------------------------------- @SuppressWarnings("unchecked") private static <IN, OUT, K> PlanUnwrappingGroupCombineOperator<IN, OUT, K> translateSelectorFunctionReducer(SelectorFunctionKeys<IN, ?> rawKeys, GroupCombineFunction<IN, OUT> function, TypeInformation<OUT> outputType, String name, Operator<IN> input) { final SelectorFunctionKeys<IN, K> keys = ((SelectorFunctionKeys<IN, K>) (rawKeys)); TypeInformation<Tuple2<K, IN>> typeInfoWithKey = KeyFunctions.createTypeWithKey(keys);Operator<Tuple2<K, IN>> keyedInput = KeyFunctions.appendKeyExtractor(input, keys); PlanUnwrappingGroupCombineOperator<IN, OUT, K> reducer = new PlanUnwrappingGroupCombineOperator<>(function, keys, name, outputType, typeInfoWithKey);reducer.setInput(keyedInput); return reducer; }
3.26
flink_GroupCombineOperator_translateToDataFlow_rdh
// -------------------------------------------------------------------------------------------- // Translation // -------------------------------------------------------------------------------------------- @Override protected GroupCombineOperatorBase<?, OUT, ?> translateToDataFlow(Operator<IN> input) { String v2 = (getName() != null) ? getName() : "GroupCombine at " + defaultName; // distinguish between grouped reduce and non-grouped reduce if (grouper == null) { // non grouped reduce UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType()); GroupCombineOperatorBase<IN, OUT, GroupCombineFunction<IN, OUT>> po = new GroupCombineOperatorBase<>(function, operatorInfo, new int[0], v2); po.setInput(input); // the parallelism for a non grouped reduce can only be 1 po.setParallelism(1); return po; } if (grouper.getKeys() instanceof SelectorFunctionKeys) { @SuppressWarnings("unchecked") SelectorFunctionKeys<IN, ?> selectorKeys = ((SelectorFunctionKeys<IN, ?>) (grouper.getKeys())); if (grouper instanceof SortedGrouping) { SortedGrouping<IN> sortedGrouping = ((SortedGrouping<IN>) (grouper)); SelectorFunctionKeys<IN, ?> sortKeys = sortedGrouping.getSortSelectionFunctionKey(); Ordering v8 = sortedGrouping.getGroupOrdering(); PlanUnwrappingSortedGroupCombineOperator<IN, OUT, ?, ?> po = translateSelectorFunctionSortedReducer(selectorKeys, sortKeys, v8, function, getResultType(), v2, input); po.setParallelism(this.getParallelism()); return po; } else { PlanUnwrappingGroupCombineOperator<IN, OUT, ?> po = translateSelectorFunctionReducer(selectorKeys, function, getResultType(), v2, input); po.setParallelism(this.getParallelism()); return po; } } else if (grouper.getKeys() instanceof Keys.ExpressionKeys) { int[] logicalKeyPositions = grouper.getKeys().computeLogicalKeyPositions(); UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType()); GroupCombineOperatorBase<IN, OUT, GroupCombineFunction<IN, OUT>> po = new GroupCombineOperatorBase<>(function, operatorInfo, logicalKeyPositions, v2); po.setInput(input); po.setParallelism(getParallelism()); // set group order if (grouper instanceof SortedGrouping) { SortedGrouping<IN> sortedGrouper = ((SortedGrouping<IN>) (grouper)); int[] sortKeyPositions = sortedGrouper.getGroupSortKeyPositions(); Order[] sortOrders = sortedGrouper.getGroupSortOrders(); Ordering o = new Ordering(); for (int i = 0; i < sortKeyPositions.length; i++) { o.appendOrdering(sortKeyPositions[i], null, sortOrders[i]); } po.setGroupOrder(o); } return po; } else { throw new UnsupportedOperationException("Unrecognized key type."); } }
3.26
flink_RocksDBIncrementalCheckpointUtils_beforeThePrefixBytes_rdh
/** * Checks whether the given bytes come before the prefix bytes in character (lexicographic) order. */ public static boolean beforeThePrefixBytes(@Nonnull byte[] bytes, @Nonnull byte[] prefixBytes) { final int prefixLength = prefixBytes.length; for (int i = 0; i < prefixLength; ++i) { int r = ((char) (prefixBytes[i])) - ((char) (bytes[i])); if (r != 0) { return r > 0; } } return false; }
3.26
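A small illustration of the byte-order check above, assuming flink-statebackend-rocksdb is on the classpath; the byte values are made up:

    import org.apache.flink.contrib.streaming.state.RocksDBIncrementalCheckpointUtils;

    class PrefixOrderSketch {
        public static void main(String[] args) {
            byte[] prefix = {0x00, 0x02};
            // First differing byte (0x01 < 0x02): the key sorts before the prefix -> true.
            System.out.println(RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(
                    new byte[]{0x00, 0x01, 0x7F}, prefix));
            // The key starts with the full prefix -> not before it -> false.
            System.out.println(RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(
                    new byte[]{0x00, 0x02, 0x7F}, prefix));
        }
    }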
flink_RocksDBIncrementalCheckpointUtils_stateHandleEvaluator_rdh
/** * Evaluates a state handle's "score" with regard to the target range when choosing the best state * handle to initialize the initial db for recovery. If the overlap fraction is less than * overlapFractionThreshold, {@code Score.MIN} is returned, meaning the handle has no chance * of becoming the initial handle. */ private static Score stateHandleEvaluator(KeyedStateHandle stateHandle, KeyGroupRange targetKeyGroupRange, double overlapFractionThreshold) { final KeyGroupRange handleKeyGroupRange = stateHandle.getKeyGroupRange(); final KeyGroupRange intersectGroup = handleKeyGroupRange.getIntersection(targetKeyGroupRange); final double overlapFraction = ((double) (intersectGroup.getNumberOfKeyGroups())) / handleKeyGroupRange.getNumberOfKeyGroups(); if (overlapFraction < overlapFractionThreshold) { return Score.MIN; } return new Score(intersectGroup.getNumberOfKeyGroups(), overlapFraction); }
3.26
flink_RocksDBIncrementalCheckpointUtils_clipDBWithKeyGroupRange_rdh
/** * The method to clip the db instance according to the target key group range using the {@link RocksDB#delete(ColumnFamilyHandle, byte[])}. * * @param db * the RocksDB instance to be clipped. * @param columnFamilyHandles * the column families in the db instance. * @param targetKeyGroupRange * the target key group range. * @param currentKeyGroupRange * the key group range of the db instance. * @param keyGroupPrefixBytes * Number of bytes required to prefix the key groups. */ public static void clipDBWithKeyGroupRange(@Nonnull RocksDB db, @Nonnull List<ColumnFamilyHandle> columnFamilyHandles, @Nonnull KeyGroupRange targetKeyGroupRange, @Nonnull KeyGroupRange currentKeyGroupRange, @Nonnegative int keyGroupPrefixBytes) throws RocksDBException { final byte[] beginKeyGroupBytes = new byte[keyGroupPrefixBytes]; final byte[] endKeyGroupBytes = new byte[keyGroupPrefixBytes]; if (currentKeyGroupRange.getStartKeyGroup() < targetKeyGroupRange.getStartKeyGroup()) { CompositeKeySerializationUtils.serializeKeyGroup(currentKeyGroupRange.getStartKeyGroup(), beginKeyGroupBytes); CompositeKeySerializationUtils.serializeKeyGroup(targetKeyGroupRange.getStartKeyGroup(), endKeyGroupBytes); deleteRange(db, columnFamilyHandles, beginKeyGroupBytes, endKeyGroupBytes); } if (currentKeyGroupRange.getEndKeyGroup() > targetKeyGroupRange.getEndKeyGroup()) { CompositeKeySerializationUtils.serializeKeyGroup(targetKeyGroupRange.getEndKeyGroup() + 1, beginKeyGroupBytes); CompositeKeySerializationUtils.serializeKeyGroup(currentKeyGroupRange.getEndKeyGroup() + 1, endKeyGroupBytes); deleteRange(db, columnFamilyHandles, beginKeyGroupBytes, endKeyGroupBytes); } }
3.26
flink_RocksDBIncrementalCheckpointUtils_deleteRange_rdh
/** * Deletes the records that fall into [beginKeyBytes, endKeyBytes) of the db. * * @param db * the target db to be clipped. * @param columnFamilyHandles * the column families to be clipped. * @param beginKeyBytes * the begin key bytes * @param endKeyBytes * the end key bytes */ private static void deleteRange(RocksDB db, List<ColumnFamilyHandle> columnFamilyHandles, byte[] beginKeyBytes, byte[] endKeyBytes) throws RocksDBException { for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { // Using RocksDB's deleteRange will take advantage of delete // tombstones, which mark the range as deleted. // // https://github.com/ververica/frocksdb/blob/FRocksDB-6.20.3/include/rocksdb/db.h#L363-L377 db.deleteRange(columnFamilyHandle, beginKeyBytes, endKeyBytes); } }
3.26
flink_RocksDBIncrementalCheckpointUtils_chooseTheBestStateHandleForInitial_rdh
/** * Choose the best state handle according to the {@link #stateHandleEvaluator(KeyedStateHandle, * KeyGroupRange, double)} to init the initial db. * * @param restoreStateHandles * The candidate state handles. * @param targetKeyGroupRange * The target key group range. * @return The best candidate or null if no candidate was a good fit. */ @Nullablepublic static <T extends KeyedStateHandle> T chooseTheBestStateHandleForInitial(@Nonnull Collection<T> restoreStateHandles, @Nonnull KeyGroupRange targetKeyGroupRange, double overlapFractionThreshold) { T bestStateHandle = null; Score bestScore = Score.MIN; for (T rawStateHandle : restoreStateHandles) { Score handleScore = stateHandleEvaluator(rawStateHandle, targetKeyGroupRange, overlapFractionThreshold); if ((bestStateHandle == null) || (handleScore.compareTo(bestScore) > 0)) { bestStateHandle = rawStateHandle;bestScore = handleScore; } } return bestStateHandle; }
3.26
flink_CoLocationGroupImpl_getId_rdh
// -------------------------------------------------------------------------------------------- @Override public AbstractID getId() { return id; }
3.26
flink_CoLocationGroupImpl_addVertex_rdh
// -------------------------------------------------------------------------------------------- public void addVertex(JobVertex vertex) { Preconditions.checkNotNull(vertex); this.vertices.add(vertex); }
3.26
flink_StaticResultProvider_rowToInternalRow_rdh
/** * This function supports only String, long, int and boolean fields. */ @VisibleForTesting static RowData rowToInternalRow(Row row) { Object[] values = new Object[row.getArity()]; for (int i = 0; i < row.getArity(); i++) { Object value = row.getField(i); if (value == null) { values[i] = null; } else if (value instanceof String) { values[i] = StringData.fromString(((String) (value))); } else if (((value instanceof Boolean) || (value instanceof Long)) || (value instanceof Integer)) { values[i] = value; } else { throw new TableException("Cannot convert row type"); } } return GenericRowData.of(values); }
3.26
flink_NonReusingBuildFirstHashJoinIterator_open_rdh
// -------------------------------------------------------------------------------------------- @Override public void open() throws IOException, MemoryAllocationException, InterruptedException { this.hashJoin.open(this.firstInput, this.secondInput, this.buildSideOuterJoin); }
3.26
flink_BlobKey_addToMessageDigest_rdh
/** * Adds the BLOB key to the given {@link MessageDigest}. * * @param md * the message digest to add the BLOB key to */ public void addToMessageDigest(MessageDigest md) { md.update(this.key); }
3.26
flink_BlobKey_getHash_rdh
/** * Returns the hash component of this key. * * @return a 20-byte hash of the contents the key refers to */ @VisibleForTesting public byte[] getHash() { return key; }
3.26
flink_BlobKey_readFromInputStream_rdh
// -------------------------------------------------------------------------------------------- /** * Auxiliary method to read a BLOB key from an input stream. * * @param inputStream * the input stream to read the BLOB key from * @return the read BLOB key * @throws IOException * throw if an I/O error occurs while reading from the input stream */ static BlobKey readFromInputStream(InputStream inputStream) throws IOException { final byte[] key = new byte[BlobKey.SIZE]; final byte[] random = new byte[AbstractID.SIZE]; int bytesRead = 0; // read key while (bytesRead < key.length) { final int read = inputStream.read(key, bytesRead, key.length - bytesRead); if (read < 0) { throw new EOFException("Read an incomplete BLOB key"); } bytesRead += read; } // read BLOB type final BlobType blobType; { final int v14 = inputStream.read(); if (v14 < 0) { throw new EOFException("Read an incomplete BLOB type"); } else if (v14 == TRANSIENT_BLOB.ordinal()) { blobType = TRANSIENT_BLOB; } else if (v14 == PERMANENT_BLOB.ordinal()) { blobType = PERMANENT_BLOB; } else { throw new IOException("Invalid data received for the BLOB type: " + v14); } } // read random component bytesRead = 0;while (bytesRead < AbstractID.SIZE) { final int read = inputStream.read(random, bytesRead, AbstractID.SIZE - bytesRead); if (read < 0) { throw new EOFException("Read an incomplete BLOB key"); } bytesRead += read; } return createKey(blobType, key, random); }
3.26
flink_BlobKey_writeToOutputStream_rdh
/** * Auxiliary method to write this BLOB key to an output stream. * * @param outputStream * the output stream to write the BLOB key to * @throws IOException * thrown if an I/O error occurs while writing the BLOB key */ void writeToOutputStream(final OutputStream outputStream) throws IOException { outputStream.write(this.key); outputStream.write(this.type.ordinal()); outputStream.write(this.random.getBytes()); }
3.26
flink_BlobKey_createKey_rdh
/** * Returns the right {@link BlobKey} subclass for the given parameters. * * @param type * whether the referenced BLOB is permanent or transient * @param key * the actual key data * @param random * the random component of the key * @return BlobKey subclass */ static BlobKey createKey(BlobType type, byte[] key, byte[] random) { if (type == PERMANENT_BLOB) { return new PermanentBlobKey(key, random); } else { return new TransientBlobKey(key, random); } }
3.26
flink_JoinRecordStateViews_create_rdh
/** * Creates a {@link JoinRecordStateView} depending on the {@link JoinInputSideSpec}. */ public static JoinRecordStateView create(RuntimeContext ctx, String stateName, JoinInputSideSpec inputSideSpec, InternalTypeInfo<RowData> recordType, long retentionTime) { StateTtlConfig ttlConfig = createTtlConfig(retentionTime); if (inputSideSpec.hasUniqueKey()) { if (inputSideSpec.joinKeyContainsUniqueKey()) { return new JoinKeyContainsUniqueKey(ctx, stateName, recordType, ttlConfig); } else { return new InputSideHasUniqueKey(ctx, stateName, recordType, inputSideSpec.getUniqueKeyType(), inputSideSpec.getUniqueKeySelector(), ttlConfig); } } else { return new InputSideHasNoUniqueKey(ctx, stateName, recordType, ttlConfig); } }
3.26
flink_Over_orderBy_rdh
/** * Specifies the time attribute on which rows are ordered. * * <p>For streaming tables, reference a rowtime or proctime time attribute here to specify the * time mode. * * <p>For batch tables, refer to a timestamp or long attribute. * * @param orderBy * field reference * @return an over window with defined order */ public static OverWindowPartitionedOrdered orderBy(Expression orderBy) { return partitionBy().orderBy(orderBy); }
3.26
flink_Over_partitionBy_rdh
/** * Partitions the elements on some partition keys. * * <p>Each partition is individually sorted and aggregate functions are applied to each * partition separately. * * @param partitionBy * list of field references * @return an over window with defined partitioning */ public static OverWindowPartitioned partitionBy(Expression... partitionBy) { return new OverWindowPartitioned(Arrays.asList(partitionBy)); }
3.26
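The two builders above are normally chained together when defining an over window in the Table API. A minimal sketch, assuming an input table with columns user and amount and a rowtime attribute ts (all names are illustrative):

    import static org.apache.flink.table.api.Expressions.$;
    import static org.apache.flink.table.api.Expressions.UNBOUNDED_RANGE;

    import org.apache.flink.table.api.Over;
    import org.apache.flink.table.api.Table;

    class OverWindowSketch {
        // Running sum of amount per user, ordered by the rowtime attribute ts.
        static Table runningSum(Table orders) {
            return orders
                    .window(Over.partitionBy($("user")).orderBy($("ts"))
                            .preceding(UNBOUNDED_RANGE).as("w"))
                    .select($("user"), $("amount").sum().over($("w")));
        }
    }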
flink_Client_shutdown_rdh
/** * Shuts down the client and closes all connections. * * <p>After a call to this method, all returned futures will be failed. * * @return A {@link CompletableFuture} that will be completed when the shutdown process is done. */ public CompletableFuture<Void> shutdown() { final CompletableFuture<Void> newShutdownFuture = new CompletableFuture<>(); if (clientShutdownFuture.compareAndSet(null, newShutdownFuture)) { final List<CompletableFuture<Void>> connectionFutures = new ArrayList<>(); for (Map.Entry<InetSocketAddress, ServerConnection<REQ, RESP>> conn : connections.entrySet()) { if (connections.remove(conn.getKey(), conn.getValue())) { connectionFutures.add(conn.getValue().close()); } } CompletableFuture.allOf(connectionFutures.toArray(new CompletableFuture<?>[connectionFutures.size()])).whenComplete((result, throwable) -> { if (throwable != null) { LOG.warn("Problem while shutting down the connections at the {}: {}", clientName, throwable); } if (bootstrap != null) { EventLoopGroup group = bootstrap.config().group(); if ((group != null) && (!group.isShutdown())) { group.shutdownGracefully(0L, 0L, TimeUnit.MILLISECONDS).addListener(finished -> { if (finished.isSuccess()) { newShutdownFuture.complete(null); } else { newShutdownFuture.completeExceptionally(finished.cause()); } }); } else { newShutdownFuture.complete(null); } } else {newShutdownFuture.complete(null); }}); return newShutdownFuture; } return clientShutdownFuture.get(); }
3.26
flink_TaskInfo_m0_rdh
/** * Gets the parallelism with which the parallel task runs. * * @return The parallelism with which the parallel task runs. */ public int m0() { return this.numberOfParallelSubtasks; }
3.26
flink_TaskInfo_m1_rdh
/** * Returns the name of the task, appended with the subtask indicator, such as "MyTask (3/6)#1", * where 3 would be ({@link #getIndexOfThisSubtask()} + 1), and 6 would be {@link #getNumberOfParallelSubtasks()}, and 1 would be {@link #getAttemptNumber()}. * * @return The name of the task, with subtask indicator. */ public String m1() { return this.taskNameWithSubtasks; }
3.26
flink_TaskInfo_getIndexOfThisSubtask_rdh
/** * Gets the number of this parallel subtask. The numbering starts from 0 and goes up to * parallelism-1 (parallelism as returned by {@link #getNumberOfParallelSubtasks()}). * * @return The index of the parallel subtask. */ public int getIndexOfThisSubtask() { return this.indexOfSubtask; }
3.26
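In user code these TaskInfo getters usually surface through the RuntimeContext of a rich function. A minimal sketch (the mapper and its string output are illustrative):

    import org.apache.flink.api.common.functions.RichMapFunction;

    class SubtaskTaggingMapper extends RichMapFunction<String, String> {
        @Override
        public String map(String value) {
            int subtask = getRuntimeContext().getIndexOfThisSubtask();          // 0-based index
            int parallelism = getRuntimeContext().getNumberOfParallelSubtasks();
            return "subtask " + (subtask + 1) + "/" + parallelism + ": " + value;
        }
    }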
flink_AbstractMergeIterator_crossFirst1withNValues_rdh
/** * Crosses a single value from the first input with N values, all sharing a common key. * Effectively realizes a <i>1:N</i> join. * * @param val1 * The value from the <i>1</i> side. * @param firstValN * The first of the values from the <i>N</i> side. * @param valsN * Iterator over remaining <i>N</i> side values. * @throws Exception * Forwards all exceptions thrown by the stub. */ private void crossFirst1withNValues(final T1 val1, final T2 firstValN, final Iterator<T2> valsN, final FlatJoinFunction<T1, T2, O> joinFunction, final Collector<O> collector) throws Exception { T1 copy1 = createCopy(serializer1, val1, this.copy1); joinFunction.join(copy1, firstValN, collector); // set copy and join first element boolean more = true; do { final T2 nRec = valsN.next(); if (valsN.hasNext()) { copy1 = createCopy(serializer1, val1, this.copy1); joinFunction.join(copy1, nRec, collector); } else { joinFunction.join(val1, nRec, collector); more = false; } } while (more ); }
3.26
flink_AbstractMergeIterator_crossSecond1withNValues_rdh
/** * Crosses a single value from the second side with N values, all sharing a common key. * Effectively realizes a <i>N:1</i> join. * * @param val1 * The value from the <i>1</i> side. * @param firstValN * The first of the values from the <i>N</i> side. * @param valsN * Iterator over remaining <i>N</i> side values. * @throws Exception * Forwards all exceptions thrown by the stub. */ private void crossSecond1withNValues(T2 val1, T1 firstValN, Iterator<T1> valsN, FlatJoinFunction<T1, T2, O> joinFunction, Collector<O> collector) throws Exception { T2 copy2 = createCopy(serializer2, val1, this.copy2); joinFunction.join(firstValN, copy2, collector); // set copy and join first element boolean more = true; do { final T1 nRec = valsN.next(); if (valsN.hasNext()) { copy2 = createCopy(serializer2, val1, this.copy2); joinFunction.join(nRec, copy2, collector); } else { joinFunction.join(nRec, val1, collector); more = false; } } while (more ); }
3.26
flink_StreamTaskActionExecutor_synchronizedExecutor_rdh
/** * Returns a {@link StreamTaskActionExecutor} that synchronizes each invocation on a given object. */ static SynchronizedStreamTaskActionExecutor synchronizedExecutor(Object mutex) { return new SynchronizedStreamTaskActionExecutor(mutex); } /** * A {@link StreamTaskActionExecutor} that synchronizes every operation on the provided mutex. * * @deprecated this class should only be used in {@link SourceStreamTask}
3.26
flink_OptimizableHashSet_arraySize_rdh
/** * Returns the least power of two smaller than or equal to 2<sup>30</sup> and larger than or * equal to <code>Math.ceil( expected / f )</code>. * * @param expected * the expected number of elements in a hash table. * @param f * the load factor. * @return the minimum possible size for a backing array. * @throws IllegalArgumentException * if the necessary size is larger than 2<sup>30</sup>. */ public static int arraySize(int expected, float f) { long s = Math.max(2L, nextPowerOfTwo(((long) (Math.ceil(((double) (((float) (expected)) / f))))))); if (s > ((Integer.MAX_VALUE / 2) + 1)) { throw new IllegalArgumentException(((("Too large (" + expected) + " expected elements with load factor ") + f) + ")"); } else { return ((int) (s)); } }
3.26
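A worked instance of the sizing formula above (numbers chosen for illustration): with expected = 1000 and load factor f = 0.75, ceil(1000 / 0.75) = 1334, and the least power of two >= 1334 is 2048, so arraySize(1000, 0.75f) yields a backing array of 2048 slots.

    class ArraySizeSketch {
        public static void main(String[] args) {
            long needed = (long) Math.ceil(1000 / 0.75);      // 1334 slots needed for 1000 elements at f = 0.75
            long size = Long.highestOneBit(needed - 1) << 1;  // least power of two >= 1334 (rounding trick, valid for needed >= 2)
            System.out.println(size);                         // prints 2048
        }
    }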
flink_OptimizableHashSet_m0_rdh
/** * Returns whether the set contains a null key. */ public boolean m0() { return containsNull; }
3.26
flink_NullValueComparator_supportsSerializationWithKeyNormalization_rdh
// -------------------------------------------------------------------------------------------- // unsupported normalization // -------------------------------------------------------------------------------------------- @Override public boolean supportsSerializationWithKeyNormalization() { return false; }
3.26
flink_RequestedGlobalProperties_filterBySemanticProperties_rdh
/** * Filters these properties by what can be preserved by the given SemanticProperties when * propagated down to the given input. * * @param props * The SemanticProperties which define which fields are preserved. * @param input * The index of the operator's input. * @return The filtered RequestedGlobalProperties */ public RequestedGlobalProperties filterBySemanticProperties(SemanticProperties props, int input) { // no semantic properties available. All global properties are filtered. if (props == null) {throw new NullPointerException("SemanticProperties may not be null."); } RequestedGlobalProperties rgProp = new RequestedGlobalProperties(); switch (this.partitioning) { case FULL_REPLICATION : case FORCED_REBALANCED : case CUSTOM_PARTITIONING : case RANDOM_PARTITIONED : case ANY_DISTRIBUTION : // make sure that certain properties are not pushed down return null;case HASH_PARTITIONED : case ANY_PARTITIONING : FieldSet newFields; if (this.partitioningFields instanceof FieldList) { newFields = new FieldList(); } else {newFields = new FieldSet(); } for (Integer v2 : this.partitioningFields) { int sourceField = props.getForwardingSourceField(input, v2); if (sourceField >= 0) { newFields = newFields.addField(sourceField); } else { // partial partitionings are not preserved to avoid skewed partitioning return null; } } rgProp.partitioning = this.partitioning; rgProp.partitioningFields = newFields; return rgProp; case RANGE_PARTITIONED : // range partitioning Ordering newOrdering = new Ordering(); for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {int value = this.ordering.getInvolvedIndexes().get(i); int sourceField = props.getForwardingSourceField(input, value); if (sourceField >= 0) { newOrdering.appendOrdering(sourceField, this.ordering.getType(i), this.ordering.getOrder(i)); } else { return null; } } rgProp.partitioning = this.partitioning; rgProp.ordering = newOrdering; rgProp.dataDistribution = this.dataDistribution; return rgProp; default :throw new RuntimeException("Unknown partitioning type encountered."); }}
3.26
flink_RequestedGlobalProperties_reset_rdh
/** * This method resets the properties to a state where no properties are given. */ public void reset() { this.partitioning = PartitioningProperty.RANDOM_PARTITIONED; this.ordering = null; this.partitioningFields = null; this.dataDistribution = null; this.customPartitioner = null; }
3.26
flink_RequestedGlobalProperties_hashCode_rdh
// ------------------------------------------------------------------------ @Override public int hashCode() { final int prime = 31; int result = 1; result = (prime * result) + (partitioning == null ? 0 : partitioning.ordinal()); result = (prime * result) + (partitioningFields == null ? 0 : partitioningFields.hashCode()); result = (prime * result) + (ordering == null ? 0 : ordering.hashCode()); return result; }
3.26
flink_RequestedGlobalProperties_isMetBy_rdh
/** * Checks, if this set of interesting properties, is met by the given produced properties. * * @param props * The properties for which to check whether they meet these properties. * @return True, if the properties are met, false otherwise. */ public boolean isMetBy(GlobalProperties props) { if (this.partitioning == PartitioningProperty.ANY_DISTRIBUTION) { return true; } else if (this.partitioning == PartitioningProperty.FULL_REPLICATION) { return props.isFullyReplicated(); } else if (props.isFullyReplicated()) { return false; } else if (this.partitioning == PartitioningProperty.RANDOM_PARTITIONED) { return true; } else if (this.partitioning == PartitioningProperty.ANY_PARTITIONING) {return checkCompatiblePartitioningFields(props); } else if (this.partitioning == PartitioningProperty.HASH_PARTITIONED) { return (props.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) && checkCompatiblePartitioningFields(props); } else if (this.partitioning == PartitioningProperty.RANGE_PARTITIONED) { return (props.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED) && props.matchesOrderedPartitioning(this.ordering); } else if (this.partitioning == PartitioningProperty.FORCED_REBALANCED) { return props.getPartitioning() == PartitioningProperty.FORCED_REBALANCED; } else if (this.partitioning == PartitioningProperty.CUSTOM_PARTITIONING) { return ((props.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING) && checkCompatiblePartitioningFields(props)) && props.getCustomPartitioner().equals(this.customPartitioner); } else { throw new CompilerException("Properties matching logic leaves open cases."); } }
3.26
flink_RequestedGlobalProperties_getCustomPartitioner_rdh
/** * Gets the custom partitioner associated with these properties. * * @return The custom partitioner associated with these properties. */ public Partitioner<?> getCustomPartitioner() { return customPartitioner; }
3.26
flink_RequestedGlobalProperties_isTrivial_rdh
/** * Checks, if the properties in this object are trivial, i.e. only standard values. */ public boolean isTrivial() { return (this.partitioning == null) || (this.partitioning == PartitioningProperty.RANDOM_PARTITIONED);}
3.26
flink_RequestedGlobalProperties_setHashPartitioned_rdh
// -------------------------------------------------------------------------------------------- /** * Sets these properties to request a hash partitioning on the given fields. * * <p>If the fields are provided as {@link FieldSet}, then any permutation of the fields is a * valid partitioning, including subsets. If the fields are given as a {@link FieldList}, then * only an exact partitioning on the fields matches this requested partitioning. * * @param partitionedFields * The key fields for the partitioning. */ public void setHashPartitioned(FieldSet partitionedFields) { if (partitionedFields == null) { throw new NullPointerException(); } this.partitioning = PartitioningProperty.HASH_PARTITIONED; this.partitioningFields = partitionedFields; this.ordering = null; }
3.26
flink_RequestedGlobalProperties_setCustomPartitioned_rdh
/** * Sets these properties to request a custom partitioning with the given {@link Partitioner} * instance. * * <p>If the fields are provided as {@link FieldSet}, then any permutation of the fields is a * valid partitioning, including subsets. If the fields are given as a {@link FieldList}, then * only an exact partitioning on the fields matches this requested partitioning. * * @param partitionedFields * The key fields for the partitioning. */ public void setCustomPartitioned(FieldSet partitionedFields, Partitioner<?> partitioner) {if ((partitionedFields == null) || (partitioner == null)) { throw new NullPointerException(); } this.partitioning = PartitioningProperty.CUSTOM_PARTITIONING; this.partitioningFields = partitionedFields; this.ordering = null; this.customPartitioner = partitioner; }
3.26
flink_RequestedGlobalProperties_setAnyPartitioning_rdh
/** * Sets these properties to request some partitioning on the given fields. This will allow both * hash partitioning and range partitioning to match. * * <p>If the fields are provided as {@link FieldSet}, then any permutation of the fields is a * valid partitioning, including subsets. If the fields are given as a {@link FieldList}, then * only an exact partitioning on the fields matches this requested partitioning. * * @param partitionedFields * The key fields for the partitioning. */ public void setAnyPartitioning(FieldSet partitionedFields) { if (partitionedFields == null) { throw new NullPointerException(); } this.partitioning = PartitioningProperty.ANY_PARTITIONING; this.partitioningFields = partitionedFields; this.ordering = null; }
3.26
flink_RequestedGlobalProperties_parameterizeChannel_rdh
/** * Parametrizes the ship strategy fields of a channel such that the channel produces the desired * global properties. * * @param channel * The channel to parametrize. * @param globalDopChange * Flag indicating whether the parallelism changes between sender and * receiver. * @param exchangeMode * The mode of data exchange (pipelined, always batch, batch only on * shuffle, ...) * @param breakPipeline * Indicates whether this data exchange should break pipelines (unless * pipelines are forced). */ public void parameterizeChannel(Channel channel, boolean globalDopChange, ExecutionMode exchangeMode, boolean breakPipeline) { // safety check. Fully replicated input must be preserved. if (channel.getSource().getGlobalProperties().isFullyReplicated() && (!((this.partitioning == PartitioningProperty.FULL_REPLICATION) || (this.partitioning == PartitioningProperty.ANY_DISTRIBUTION)))) { throw new CompilerException("Fully replicated input must be preserved " + "and may not be converted into another global property."); } // if we request nothing, then we need no special strategy. forward, if the number of // instances remains // the same, randomly repartition otherwise if (isTrivial() || (this.partitioning == PartitioningProperty.ANY_DISTRIBUTION)) { ShipStrategyType shipStrategy = (globalDopChange) ? ShipStrategyType.PARTITION_RANDOM : ShipStrategyType.FORWARD; DataExchangeMode em = DataExchangeMode.select(exchangeMode, shipStrategy, breakPipeline); channel.setShipStrategy(shipStrategy, em); return; } final GlobalProperties inGlobals = channel.getSource().getGlobalProperties(); // if we have no global parallelism change, check if we have already compatible global // properties if ((!globalDopChange) && isMetBy(inGlobals)) { DataExchangeMode em = DataExchangeMode.select(exchangeMode, ShipStrategyType.FORWARD, breakPipeline); channel.setShipStrategy(ShipStrategyType.FORWARD, em); return; } // if we fall through the conditions until here, we need to re-establish ShipStrategyType shipType; FieldList partitionKeys; boolean[] sortDirection; Partitioner<?> partitioner; switch (this.partitioning) { case FULL_REPLICATION : shipType = ShipStrategyType.BROADCAST; partitionKeys = null; sortDirection = null; partitioner = null; break; case ANY_PARTITIONING : case HASH_PARTITIONED : shipType = ShipStrategyType.PARTITION_HASH; partitionKeys = Utils.createOrderedFromSet(this.partitioningFields); sortDirection = null; partitioner = null; break; case RANGE_PARTITIONED : shipType = ShipStrategyType.PARTITION_RANGE; partitionKeys = this.ordering.getInvolvedIndexes(); sortDirection = this.ordering.getFieldSortDirections(); partitioner = null; if (this.dataDistribution != null) { channel.setDataDistribution(this.dataDistribution); } break; case FORCED_REBALANCED : shipType = ShipStrategyType.PARTITION_FORCED_REBALANCE; partitionKeys = null; sortDirection = null; partitioner = null; break; case CUSTOM_PARTITIONING : shipType = ShipStrategyType.PARTITION_CUSTOM; partitionKeys = Utils.createOrderedFromSet(this.partitioningFields); sortDirection = null; partitioner = this.customPartitioner; break; default : throw new CompilerException("Invalid partitioning to create through a data exchange: " + this.partitioning.name()); } DataExchangeMode exMode = DataExchangeMode.select(exchangeMode, shipType, breakPipeline); channel.setShipStrategy(shipType, partitionKeys, sortDirection, partitioner, exMode); }
3.26
flink_Tuple25_equals_rdh
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o * the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Tuple25)) { return false; } @SuppressWarnings("rawtypes") Tuple25 tuple = ((Tuple25) (o)); if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) { return false;} if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) { return false; } if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) { return false; } if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) { return false; } if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {return false; } if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) { return false; } if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) { return false; } if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) { return false; } if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) { return false; } if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) { return false; } if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) { return false; } if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) { return false; } if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) { return false; } if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) { return false; } if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) { return false; } if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) { return false; } if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) { return false; } if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) { return false; } if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) { return false; } if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) { return false; } if (f20 != null ? !f20.equals(tuple.f20) : tuple.f20 != null) { return false; } if (f21 != null ? !f21.equals(tuple.f21) : tuple.f21 != null) { return false; } if (f22 != null ? !f22.equals(tuple.f22) : tuple.f22 != null) { return false; } if (f23 != null ? !f23.equals(tuple.f23) : tuple.f23 != null) { return false; } if (f24 != null ? !f24.equals(tuple.f24) : tuple.f24 != null) { return false; } return true; }
3.26
flink_Tuple25_copy_rdh
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> copy() { return new Tuple25<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16, this.f17, this.f18, this.f19, this.f20, this.f21, this.f22, this.f23, this.f24); } /** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)}
3.26
flink_Tuple25_toString_rdh
// ------------------------------------------------------------------------------------------------- // standard utilities // ------------------------------------------------------------------------------------------------- /** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22, f23, f24), where the * individual fields are the value returned by calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return ((((((((((((((((((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ",") + StringUtils.arrayAwareToString(this.f16)) + ",") + StringUtils.arrayAwareToString(this.f17)) + ",") + StringUtils.arrayAwareToString(this.f18)) + ",") + StringUtils.arrayAwareToString(this.f19)) + ",") + StringUtils.arrayAwareToString(this.f20)) + ",") + StringUtils.arrayAwareToString(this.f21)) + ",") + StringUtils.arrayAwareToString(this.f22)) + ",") + StringUtils.arrayAwareToString(this.f23)) + ",") + StringUtils.arrayAwareToString(this.f24)) + ")"; }
3.26
flink_Tuple25_setFields_rdh
/** * Sets new values to all fields of the tuple. * * @param f0 * The value for field 0 * @param f1 * The value for field 1 * @param f2 * The value for field 2 * @param f3 * The value for field 3 * @param f4 * The value for field 4 * @param f5 * The value for field 5 * @param f6 * The value for field 6 * @param f7 * The value for field 7 * @param f8 * The value for field 8 * @param f9 * The value for field 9 * @param f10 * The value for field 10 * @param f11 * The value for field 11 * @param f12 * The value for field 12 * @param f13 * The value for field 13 * @param f14 * The value for field 14 * @param f15 * The value for field 15 * @param f16 * The value for field 16 * @param f17 * The value for field 17 * @param f18 * The value for field 18 * @param f19 * The value for field 19 * @param f20 * The value for field 20 * @param f21 * The value for field 21 * @param f22 * The value for field 22 * @param f23 * The value for field 23 * @param f24 * The value for field 24 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20, T21 f21, T22 f22, T23 f23, T24 f24) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; this.f8 = f8;this.f9 = f9; this.f10 = f10; this.f11 = f11; this.f12 = f12; this.f13 = f13; this.f14 = f14; this.f15 = f15; this.f16 = f16; this.f17 = f17; this.f18 = f18; this.f19 = f19; this.f20 = f20; this.f21 = f21; this.f22 = f22; this.f23 = f23; this.f24 = f24; }
3.26
flink_CliChangelogResultView_getHelpOptions_rdh
// -------------------------------------------------------------------------------------------- private List<Tuple2<String, String>> getHelpOptions() { final List<Tuple2<String, String>> options = new ArrayList<>(); options.add(Tuple2.of("Q", CliStrings.RESULT_QUIT)); options.add(Tuple2.of("R", CliStrings.RESULT_REFRESH)); options.add(Tuple2.of("+", CliStrings.RESULT_INC_REFRESH)); options.add(Tuple2.of("-", CliStrings.RESULT_DEC_REFRESH)); options.add(Tuple2.of("O", CliStrings.RESULT_OPEN)); return options; }
3.26
flink_SharedBufferAccessor_releaseEvent_rdh
/** * Decreases the reference counter for the given event so that it can be removed once the * reference counter reaches 0. * * @param eventId * id of the event * @throws Exception * Thrown if the system cannot access the state. */ public void releaseEvent(EventId eventId) throws Exception { Lockable<V> eventWrapper = sharedBuffer.getEvent(eventId); if (eventWrapper != null) { if (eventWrapper.release()) { sharedBuffer.removeEvent(eventId); } else { sharedBuffer.upsertEvent(eventId, eventWrapper); } } }
3.26
flink_SharedBufferAccessor_lockNode_rdh
/** * Increases the reference counter for the given entry so that it is not accidentally removed. * * @param node * id of the entry * @param version * dewey number of the (potential) edge that locks the given node */ public void lockNode(final NodeId node, final DeweyNumber version) { Lockable<SharedBufferNode> v24 = sharedBuffer.getEntry(node); if (v24 != null) { v24.lock(); for (Lockable<SharedBufferEdge> edge : v24.getElement().getEdges()) { if (version.isCompatibleWith(edge.getElement().getDeweyNumber())) { edge.lock(); } } sharedBuffer.upsertEntry(node, v24); } }
3.26
flink_SharedBufferAccessor_releaseNode_rdh
/** * Decreases the reference counter for the given entry so that it can be removed once the * reference counter reaches 0. * * @param node * id of the entry * @param version * dewey number of the (potential) edge that locked the given node * @throws Exception * Thrown if the system cannot access the state. */ public void releaseNode(final NodeId node, final DeweyNumber version) throws Exception { // the stack used to detect all nodes that needs to be released. Stack<NodeId> nodesToExamine = new Stack<>(); Stack<DeweyNumber> versionsToExamine = new Stack<>(); nodesToExamine.push(node); versionsToExamine.push(version); while (!nodesToExamine.isEmpty()) { NodeId curNode = nodesToExamine.pop(); Lockable<SharedBufferNode> curBufferNode = sharedBuffer.getEntry(curNode); if (curBufferNode == null) { break; } DeweyNumber currentVersion = versionsToExamine.pop(); List<Lockable<SharedBufferEdge>> edges = curBufferNode.getElement().getEdges(); Iterator<Lockable<SharedBufferEdge>> edgesIterator = edges.iterator(); while (edgesIterator.hasNext()) { Lockable<SharedBufferEdge> sharedBufferEdge = edgesIterator.next(); SharedBufferEdge edge = sharedBufferEdge.getElement(); if (currentVersion.isCompatibleWith(edge.getDeweyNumber())) { if (sharedBufferEdge.release()) { edgesIterator.remove(); NodeId targetId = edge.getTarget(); if (targetId != null) { nodesToExamine.push(targetId); versionsToExamine.push(edge.getDeweyNumber()); } } } } if (curBufferNode.release()) { // first release the current node sharedBuffer.removeEntry(curNode); releaseEvent(curNode.getEventId()); } else { sharedBuffer.upsertEntry(curNode, curBufferNode); } } }
3.26
flink_SharedBufferAccessor_extractPatterns_rdh
/** * Returns all elements from the previous relation starting at the given entry. * * @param nodeId * id of the starting entry * @param version * Version of the previous relation which shall be extracted * @return Collection of previous relations starting with the given value */ public List<Map<String, List<EventId>>> extractPatterns(final NodeId nodeId, final DeweyNumber version) { List<Map<String, List<EventId>>> result = new ArrayList<>(); // stack to remember the current extraction states Stack<SharedBufferAccessor.ExtractionState> extractionStates = new Stack<>(); // get the starting shared buffer entry for the previous relation Lockable<SharedBufferNode> entryLock = sharedBuffer.getEntry(nodeId); if (entryLock != null) { SharedBufferNode entry = entryLock.getElement(); extractionStates.add(new SharedBufferAccessor.ExtractionState(Tuple2.of(nodeId, entry), version, new Stack<>())); // use a depth first search to reconstruct the previous relations while (!extractionStates.isEmpty()) { final SharedBufferAccessor.ExtractionState extractionState = extractionStates.pop(); // current path of the depth first search final Stack<Tuple2<NodeId, SharedBufferNode>> v7 = extractionState.getPath(); final Tuple2<NodeId, SharedBufferNode> currentEntry = extractionState.m0(); // termination criterion if (currentEntry == null) { final Map<String, List<EventId>> v9 = new LinkedHashMap<>(); while (!v7.isEmpty()) { final NodeId currentPathEntry = v7.pop().f0; String page = currentPathEntry.getPageName(); List<EventId> values = v9.computeIfAbsent(page, k -> new ArrayList<>()); values.add(currentPathEntry.getEventId()); } result.add(v9); } else { // append state to the path v7.push(currentEntry); boolean firstMatch = true; for (Lockable<SharedBufferEdge> lockableEdge : currentEntry.f1.getEdges()) { // we can only proceed if the current version is compatible to the version // of this previous relation final SharedBufferEdge v15 = lockableEdge.getElement(); final DeweyNumber currentVersion = extractionState.getVersion(); if (currentVersion.isCompatibleWith(v15.getDeweyNumber())) { final NodeId target = v15.getTarget(); Stack<Tuple2<NodeId, SharedBufferNode>> newPath; if (firstMatch) {// for the first match we don't have to copy the current path newPath = v7; firstMatch = false; } else { newPath = new Stack<>(); newPath.addAll(v7); } extractionStates.push(new SharedBufferAccessor.ExtractionState(target != null ? Tuple2.of(target, sharedBuffer.getEntry(target).getElement()) : null, v15.getDeweyNumber(), newPath)); } } } } } return result; }
3.26
flink_SharedBufferAccessor_materializeMatch_rdh
/** * Extracts the real events from the sharedBuffer using the pre-extracted event ids. * * @param match * the match given as pre-extracted event ids per pattern name. * @return the events associated with those event ids, keyed by pattern name. */ public Map<String, List<V>> materializeMatch(Map<String, List<EventId>> match) { Map<String, List<V>> materializedMatch = CollectionUtil.newLinkedHashMapWithExpectedSize(match.size()); for (Map.Entry<String, List<EventId>> pattern : match.entrySet()) { List<V> events = new ArrayList<>(pattern.getValue().size()); for (EventId eventId : pattern.getValue()) { try { V event = sharedBuffer.getEvent(eventId).getElement(); events.add(event); } catch (Exception ex) { throw new WrappingRuntimeException(ex); } } materializedMatch.put(pattern.getKey(), events); } return materializedMatch; }
3.26
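materializeMatch is the second half of the read path: extractPatterns produces matches as event ids, and materializeMatch resolves those ids to the buffered events in one pass. The fragment below is a hedged usage sketch built only from the signatures shown in these snippets; the import paths and the way nodeId/version would be obtained from a completed ComputationState are assumptions, not verified against the actual CEP package layout.

import java.util.List;
import java.util.Map;
import org.apache.flink.cep.nfa.DeweyNumber;                     // package paths assumed
import org.apache.flink.cep.nfa.sharedbuffer.EventId;
import org.apache.flink.cep.nfa.sharedbuffer.NodeId;
import org.apache.flink.cep.nfa.sharedbuffer.SharedBufferAccessor;

class MatchReadPathSketch {
    /** Reconstructs all matches ending at 'nodeId' and resolves them to the original events. */
    static <V> void emitMatches(SharedBufferAccessor<V> accessor, NodeId nodeId, DeweyNumber version) {
        for (Map<String, List<EventId>> match : accessor.extractPatterns(nodeId, version)) {
            Map<String, List<V>> events = accessor.materializeMatch(match);
            // hand 'events' to the user-facing select/flatSelect callback here
        }
    }
}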
flink_SharedBufferAccessor_close_rdh
/** * Persists the cached entries to the underlying state. * * @throws Exception * Thrown if the system cannot access the state. */ public void close() throws Exception { sharedBuffer.flushCache(); }
3.26
flink_SharedBufferAccessor_put_rdh
/** * Stores the event with the given id under the given state and assigns a preceding-element * relation to the previous entry. * * @param stateName * name of the state that the event should be assigned to * @param eventId * unique id of event assigned by this SharedBuffer * @param previousNodeId * id of previous entry (might be null if start of new run) * @param version * Version of the previous relation * @return assigned id of this element */ public NodeId put(final String stateName, final EventId eventId, @Nullable final NodeId previousNodeId, final DeweyNumber version) { if (previousNodeId != null) { lockNode(previousNodeId, version); } NodeId currentNodeId = new NodeId(eventId, getOriginalNameFromInternal(stateName)); Lockable<SharedBufferNode> currentNode = sharedBuffer.getEntry(currentNodeId); if (currentNode == null) { currentNode = new Lockable<>(new SharedBufferNode(), 0); lockEvent(eventId); } currentNode.getElement().addEdge(new SharedBufferEdge(previousNodeId, version)); sharedBuffer.upsertEntry(currentNodeId, currentNode); return currentNodeId; }
3.26
flink_SharedBufferAccessor_lockEvent_rdh
/** * Increases the reference counter for the given event so that it is not accidentally removed. * * @param eventId * id of the event */ private void lockEvent(EventId eventId) { Lockable<V> eventWrapper = sharedBuffer.getEvent(eventId); checkState(eventWrapper != null, "Referring to non-existent event with id %s", eventId); eventWrapper.lock(); sharedBuffer.upsertEvent(eventId, eventWrapper); }
3.26
flink_SharedBufferAccessor_registerEvent_rdh
/** * Adds another unique event to the shared buffer and assigns a unique id to it. It * automatically creates a lock on this event, so it won't be removed during processing of that * event. Therefore the lock should be removed after processing all {@link org.apache.flink.cep.nfa.ComputationState}s. * * <p><b>NOTE:</b> Should be called only once for each unique event! * * @param value * event to be registered * @return unique id of that event that should be used when putting entries to the buffer. * @throws Exception * Thrown if the system cannot access the state. */ public EventId registerEvent(V value, long timestamp) throws Exception { return sharedBuffer.registerEvent(value, timestamp); }
3.26
flink_SharedBufferAccessor_advanceTime_rdh
/** * Notifies the shared buffer that there will be no events with timestamp &lt;= the given value. * This allows clearing the internal counters for the number of events seen so far per timestamp. * * @param timestamp * watermark, no earlier events will arrive * @throws Exception * Thrown if the system cannot access the state. */ public void advanceTime(long timestamp) throws Exception { sharedBuffer.advanceTime(timestamp); }
3.26
flink_Broker_handIn_rdh
/** * Hand in the object to share. */ public void handIn(String key, V obj) { if (!retrieveSharedQueue(key).offer(obj)) { throw new RuntimeException("Could not register the given element, broker slot is already occupied."); }}
3.26
flink_Broker_remove_rdh
/** * Removes the broker slot for the given key, discarding any object it still holds. */ public void remove(String key) { mediations.remove(key); }
3.26
flink_Broker_get_rdh
/** * Blocking retrieval of the object to share; the object is put back into the slot so that it * stays available for other callers. */ public V get(String key) { try { BlockingQueue<V> queue = retrieveSharedQueue(key); V objToShare = queue.take(); if (!queue.offer(objToShare)) { throw new RuntimeException(("Error: Concurrent modification of the broker slot for key '" + key) + "'."); } return objToShare; } catch (InterruptedException e) { throw new RuntimeException(e); } }
3.26
flink_Broker_retrieveSharedQueue_rdh
/** * Thread-safe call to get a shared {@link BlockingQueue}. */ private BlockingQueue<V> retrieveSharedQueue(String key) { BlockingQueue<V> queue = mediations.get(key);if (queue == null) { queue = new ArrayBlockingQueue<V>(1); BlockingQueue<V> commonQueue = mediations.putIfAbsent(key, queue); return commonQueue != null ? commonQueue : queue; } else { return queue; }}
3.26
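retrieveSharedQueue relies on ConcurrentMap.putIfAbsent so that two threads racing to create the slot still end up sharing the same single-element queue. A small self-contained version of the idiom (class and method names are made up for the example):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class LazySlotSketch<V> {
    private final ConcurrentMap<String, BlockingQueue<V>> slots = new ConcurrentHashMap<>();

    /** Returns the single-element queue for the key, creating it at most once across threads. */
    BlockingQueue<V> slotFor(String key) {
        BlockingQueue<V> queue = slots.get(key);
        if (queue == null) {
            BlockingQueue<V> created = new ArrayBlockingQueue<>(1);
            // if another thread won the race, keep its queue and drop ours
            BlockingQueue<V> existing = slots.putIfAbsent(key, created);
            return existing != null ? existing : created;
        }
        return queue;
    }
}

On Java 8+ the same effect can be had with computeIfAbsent, at the cost of holding the map bin lock while the queue is constructed.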
flink_Broker_getAndRemove_rdh
/** * Blocking retrieval and removal of the object to share. */ public V getAndRemove(String key) { try {V objToShare = retrieveSharedQueue(key).take(); mediations.remove(key); return objToShare; } catch (InterruptedException e) { throw new RuntimeException(e); } }
3.26
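Taken together, handIn, get and getAndRemove turn the one-slot queue into a rendezvous between two threads: the producer fails fast if the slot is already occupied, while the consumer blocks until the object arrives. The usage sketch below assumes Broker lives in org.apache.flink.runtime.iterative.concurrent and has a public no-arg constructor; the key string is purely illustrative.

import org.apache.flink.runtime.iterative.concurrent.Broker; // package path assumed

public class BrokerHandOffSketch {
    public static void main(String[] args) throws InterruptedException {
        Broker<String> broker = new Broker<>();
        String key = "superstep-0"; // illustrative key; real code derives it from task/iteration ids

        // producer side: hand the shared object in (throws if the slot is already occupied)
        Thread producer = new Thread(() -> broker.handIn(key, "shared back-channel"));
        producer.start();

        // consumer side: blocks until the producer has handed the object in, then frees the slot
        String shared = broker.getAndRemove(key);
        System.out.println("received: " + shared);
        producer.join();
    }
}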
flink_FileCache_shutdown_rdh
/** * Shuts down the file cache by cancelling all pending copy tasks, clearing the cache state and * deleting the storage directories. */ public void shutdown() { synchronized(lock) { // first shutdown the thread pool ScheduledExecutorService es = this.executorService; if (es != null) { es.shutdown(); try { es.awaitTermination(cleanupInterval, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { // may happen } } f0.clear(); jobRefHolders.clear(); // clean up all storage directories for (File dir : storageDirectories) { try { FileUtils.deleteDirectory(dir); LOG.info("removed file cache directory {}", dir.getAbsolutePath()); } catch (IOException e) { LOG.error("File cache could not properly clean up storage directory: {}", dir.getAbsolutePath(), e); } } // Remove shutdown hook to prevent resource leaks ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG); } }
3.26
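The shutdown method above follows a common teardown order: stop background work first, wait a bounded time for it to finish, then delete on-disk state while logging (but not propagating) cleanup failures. A generic, self-contained sketch of that sequence with JDK types only (names and the timeout handling are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public class TearDownSketch {
    private final Object lock = new Object();
    private final ExecutorService executor;
    private final Path storageDir;

    TearDownSketch(ExecutorService executor, Path storageDir) {
        this.executor = executor;
        this.storageDir = storageDir;
    }

    /** Stops background work first, then removes on-disk state; never throws on cleanup failure. */
    public void shutdown(long timeoutMillis) {
        synchronized (lock) {
            executor.shutdown();
            try {
                executor.awaitTermination(timeoutMillis, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // preserve the interrupt, keep cleaning up
            }
            try (var files = Files.walk(storageDir)) {
                // delete children before parents
                files.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
            } catch (IOException e) {
                System.err.println("could not clean up " + storageDir + ": " + e);
            }
        }
    }
}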
flink_FileCache_createTmpFile_rdh
// ------------------------------------------------------------------------ /** * If the file doesn't exist locally, retrieve the file from the blob service. * * @param name * The name under which the file is registered in the distributed cache * @param entry * The cache entry descriptor (path, executable flag) * @param jobID * The ID of the job for which the file is copied. * @param executionId * The ID of the execution attempt that holds a reference to the file * @return The handle to the task that copies the file. */ public Future<Path> createTmpFile(String name, DistributedCacheEntry entry, JobID jobID, ExecutionAttemptID executionId) throws Exception { synchronized(lock) { Map<String, Future<Path>> jobEntries = f0.computeIfAbsent(jobID, k -> new HashMap<>()); // register reference holder final Set<ExecutionAttemptID> refHolders = jobRefHolders.computeIfAbsent(jobID, id -> new HashSet<>()); refHolders.add(executionId); Future<Path> fileEntry = jobEntries.get(name); if (fileEntry != null) { // file is already in the cache. return a future that // immediately returns the file return fileEntry; } else { // need to copy the file // create the target path File tempDirToUse = new File(storageDirectories[nextDirectory++], jobID.toString()); if (nextDirectory >= storageDirectories.length) { nextDirectory = 0; } // kick off the copying Callable<Path> cp; if (entry.blobKey != null) { cp = new CopyFromBlobProcess(entry, jobID, blobService, new Path(tempDirToUse.getAbsolutePath())); } else { cp = new CopyFromDFSProcess(entry, new Path(tempDirToUse.getAbsolutePath())); } FutureTask<Path> copyTask = new FutureTask<>(cp); executorService.submit(copyTask); // store our entry jobEntries.put(name, copyTask); return copyTask; } } }
3.26
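createTmpFile is the classic "cache the Future, not the value" pattern: the first caller for a name registers a FutureTask and starts the copy, later callers simply wait on the same future, and the lock is held only for the map bookkeeping, never for the copy itself. A standalone sketch of that pattern with JDK types only (names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;

public class ComputeOnceCacheSketch<K, V> {
    private final Object lock = new Object();
    private final Map<K, Future<V>> entries = new HashMap<>();
    private final ExecutorService executor = Executors.newFixedThreadPool(2);

    /** Returns the future for the key, starting the (expensive) computation only once. */
    public Future<V> getOrCompute(K key, Callable<V> expensiveCopy) {
        synchronized (lock) {
            Future<V> existing = entries.get(key);
            if (existing != null) {
                return existing; // already computed or currently computing
            }
            FutureTask<V> task = new FutureTask<>(expensiveCopy);
            executor.submit(task);   // the copy runs outside the lock
            entries.put(key, task);
            return task;
        }
    }
}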
flink_StreamTask_advanceToEndOfEventTime_rdh
/** * Emits the {@link org.apache.flink.streaming.api.watermark.Watermark#MAX_WATERMARK * MAX_WATERMARK} so that all registered timers are fired. * * <p>This is used by the source task when the job is {@code TERMINATED}. In that case, we want * all the timers registered throughout the pipeline to fire and the related state (e.g. * windows) to be flushed. * * <p>For tasks other than the source task, this method does nothing. */ protected void advanceToEndOfEventTime() throws Exception { }
3.26
flink_StreamTask_createStateBackend_rdh
// ------------------------------------------------------------------------ // State backend // ------------------------------------------------------------------------ private StateBackend createStateBackend() throws Exception { final StateBackend fromApplication = configuration.getStateBackend(getUserCodeClassLoader());final Optional<Boolean> isChangelogEnabledOptional = environment.getJobConfiguration().getOptional(StateChangelogOptionsInternal.ENABLE_CHANGE_LOG_FOR_APPLICATION); final TernaryBoolean isChangelogStateBackendEnableFromApplication = (isChangelogEnabledOptional.isPresent()) ? TernaryBoolean.fromBoolean(isChangelogEnabledOptional.get()) : TernaryBoolean.UNDEFINED; return StateBackendLoader.fromApplicationOrConfigOrDefault(fromApplication, isChangelogStateBackendEnableFromApplication, getEnvironment().getTaskManagerInfo().getConfiguration(), getUserCodeClassLoader(), LOG); }
3.26
flink_StreamTask_m0_rdh
/** * The finalize method shuts down the timer. This is a fail-safe shutdown, in case the original * shutdown method was never called. * * <p>This should not be relied upon! It will cause shutdown to happen much later than if manual * shutdown is attempted, and cause threads to linger for longer than needed. */ @Override protected void m0() throws Throwable { super.finalize(); if (!timerService.isTerminated()) { LOG.info("Timer service is shutting down.");timerService.shutdownService(); } if (!systemTimerService.isTerminated()) { LOG.info("System timer service is shutting down."); systemTimerService.shutdownService(); } cancelables.close(); }
3.26
flink_StreamTask_getTaskNameWithSubtaskAndId_rdh
/** * Gets the name of the task, appended with the subtask indicator and execution id. * * @return The name of the task, with subtask indicator and execution id. */ String getTaskNameWithSubtaskAndId() { return ((getEnvironment().getTaskInfo().getTaskNameWithSubtasks() + " (") + getEnvironment().getExecutionId()) + ')'; }
3.26
flink_StreamTask_toString_rdh
// ------------------------------------------------------------------------ // Utilities // ------------------------------------------------------------------------ @Override public String toString() { return getName(); }
3.26
flink_StreamTask_getCheckpointBarrierHandler_rdh
/** * Acquires the optional {@link CheckpointBarrierHandler} associated with this stream task. The * {@code CheckpointBarrierHandler} should exist if the task has data inputs and requires to * align the barriers. */ protected Optional<CheckpointBarrierHandler> getCheckpointBarrierHandler() { return Optional.empty(); }
3.26
flink_StreamTask_disableInterruptOnCancel_rdh
/** * While we are outside the user code, we do not want to be interrupted further upon * cancellation. The shutdown logic below needs to make sure it does not issue calls that block * and stall shutdown. Additionally, the cancellation watch dog will issue a hard-cancel (kill * the TaskManager process) as a backup in case some shutdown procedure blocks outside our * control. */ private void disableInterruptOnCancel() { synchronized(shouldInterruptOnCancelLock) { shouldInterruptOnCancel = false; } }
3.26
flink_StreamTask_processInput_rdh
/** * This method implements the default action of the task (e.g. processing one event from the * input). Implementations should (in general) be non-blocking. * * @param controller * controller object for collaborative interaction between the action and the * stream task. * @throws Exception * on any problems in the action. */ protected void processInput(MailboxDefaultAction.Controller controller) throws Exception { DataInputStatus status = inputProcessor.processInput(); switch (status) { case MORE_AVAILABLE : if (taskIsAvailable()) { return; } break; case NOTHING_AVAILABLE : break; case END_OF_RECOVERY : throw new IllegalStateException("We should not receive this event here."); case STOPPED : endData(StopMode.NO_DRAIN); return; case END_OF_DATA : endData(StopMode.DRAIN);notifyEndOfData(); return; case END_OF_INPUT : // Suspend the mailbox processor, it would be resumed in afterInvoke and finished // after all records processed by the downstream tasks. We also suspend the default // actions to avoid repeat executing the empty default operation (namely process // records). controller.suspendDefaultAction(); mailboxProcessor.suspend(); return; } TaskIOMetricGroup ioMetrics = getEnvironment().getMetricGroup().getIOMetricGroup(); PeriodTimer timer; CompletableFuture<?> resumeFuture; if (!recordWriter.isAvailable()) { timer = new GaugePeriodTimer(ioMetrics.getSoftBackPressuredTimePerSecond()); resumeFuture = recordWriter.getAvailableFuture(); } else if (!inputProcessor.isAvailable()) { timer = new GaugePeriodTimer(ioMetrics.getIdleTimeMsPerSecond()); resumeFuture = inputProcessor.getAvailableFuture(); } else if ((changelogWriterAvailabilityProvider != null) && (!changelogWriterAvailabilityProvider.isAvailable())) {// waiting for changelog availability is reported as busy timer = new GaugePeriodTimer(ioMetrics.getChangelogBusyTimeMsPerSecond()); resumeFuture = changelogWriterAvailabilityProvider.getAvailableFuture(); } else { // data availability has changed in the meantime; retry immediately return; } assertNoException(resumeFuture.thenRun(new ResumeWrapper(controller.suspendDefaultAction(timer), timer))); }
3.26
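When processInput finds neither input nor output ready, it suspends the mailbox default action, picks the availability future of whichever resource is the bottleneck, and attributes the wait time to the matching metric (soft back pressure, idle time, or changelog busy time) before resuming. The sketch below is a much-simplified model of that control flow; the Suspension and WaitTimer types are stand-ins invented for the example, not Flink's MailboxDefaultAction or PeriodTimer interfaces.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;

public class AvailabilityLoopSketch {

    interface Suspension { void resume(); }

    /** Crude stand-in for Flink's period timers: accumulates wall-clock wait time. */
    static final class WaitTimer {
        final AtomicLong totalNanos = new AtomicLong();
        private long startedAt;
        void start() { startedAt = System.nanoTime(); }
        void stop() { totalNanos.addAndGet(System.nanoTime() - startedAt); }
    }

    static void onNothingToDo(
            boolean outputAvailable,
            boolean inputAvailable,
            CompletableFuture<?> outputFuture,
            CompletableFuture<?> inputFuture,
            WaitTimer backPressureTimer,
            WaitTimer idleTimer,
            Suspension suspension) {

        final CompletableFuture<?> resumeFuture;
        final WaitTimer timer;
        if (!outputAvailable) {           // output is the bottleneck: account as back pressure
            resumeFuture = outputFuture;
            timer = backPressureTimer;
        } else if (!inputAvailable) {     // nothing to read: account as idle time
            resumeFuture = inputFuture;
            timer = idleTimer;
        } else {
            return;                       // availability changed in the meantime, just retry
        }

        timer.start();
        resumeFuture.thenRun(() -> {
            timer.stop();
            suspension.resume();          // re-enable the default processing action
        });
    }
}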
flink_StreamTask_closeAllOperators_rdh
/** * Closes all the operators if not closed before. */ private void closeAllOperators() throws Exception { if ((operatorChain != null) && (!closedOperators)) { closedOperators = true; operatorChain.closeAllOperators();} }
3.26
flink_StreamTask_handleAsyncException_rdh
/** * Handles an exception thrown by another thread (e.g. a TriggerTask), other than the one * executing the main task by failing the task entirely. * * <p>In more detail, it marks task execution failed for an external reason (a reason other than * the task code itself throwing an exception). If the task is already in a terminal state (such * as FINISHED, CANCELED, FAILED), or if the task is already canceling this does nothing. * Otherwise it sets the state to FAILED, and, if the invokable code is running, starts an * asynchronous thread that aborts that code. * * <p>This method never blocks. */ @Override public void handleAsyncException(String message, Throwable exception) { if (isRestoring || isRunning) { // only fail if the task is still in restoring or running asyncExceptionHandler.handleAsyncException(message, exception); } }
3.26
flink_StreamTask_m3_rdh
/** * Returns the {@link TimerService} responsible for telling the current processing time and * registering actual timers. */ @VisibleForTesting TimerService m3() { return timerService; }
3.26
flink_StreamTask_getName_rdh
// ------------------------------------------------------------------------ // Access to properties and utilities // ------------------------------------------------------------------------ /** * Gets the name of the task, in the form "taskname (2/5)". * * @return The name of the task. */ public final String getName() { return getEnvironment().getTaskInfo().getTaskNameWithSubtasks(); }
3.26
flink_StreamTask_dispatchOperatorEvent_rdh
// ------------------------------------------------------------------------ // Operator Events // ------------------------------------------------------------------------ @Override public void dispatchOperatorEvent(OperatorID operator, SerializedValue<OperatorEvent> event) throws FlinkException { try { mainMailboxExecutor.execute(() -> operatorChain.dispatchOperatorEvent(operator, event), "dispatch operator event"); } catch (RejectedExecutionException e) { // this happens during shutdown, we can swallow this } }
3.26
flink_StreamTask_createStreamTaskStateInitializer_rdh
// ------------------------------------------------------------------------ // Core work methods of the Stream Task // ------------------------------------------------------------------------ public StreamTaskStateInitializer createStreamTaskStateInitializer() { InternalTimeServiceManager.Provider timerServiceProvider = configuration.getTimerServiceProvider(getUserCodeClassLoader()); return new StreamTaskStateInitializerImpl(getEnvironment(), stateBackend, TtlTimeProvider.DEFAULT, timerServiceProvider != null ? timerServiceProvider : InternalTimeServiceManagerImpl::create, () -> canceled); }
3.26
flink_StreamTask_triggerCheckpointAsync_rdh
// ------------------------------------------------------------------------ // Checkpoint and Restore // ------------------------------------------------------------------------ @Override public CompletableFuture<Boolean> triggerCheckpointAsync(CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions) { checkForcedFullSnapshotSupport(checkpointOptions); CompletableFuture<Boolean> result = new CompletableFuture<>();mainMailboxExecutor.execute(() -> { try { boolean noUnfinishedInputGates = Arrays.stream(getEnvironment().getAllInputGates()).allMatch(InputGate::isFinished); if (noUnfinishedInputGates) { result.complete(triggerCheckpointAsyncInMailbox(checkpointMetaData, checkpointOptions)); } else { result.complete(triggerUnfinishedChannelsCheckpoint(checkpointMetaData, checkpointOptions)); } } catch (Exception ex) { // Report the failure both via the Future result but also to the mailbox result.completeExceptionally(ex); throw ex; } }, "checkpoint %s with %s", checkpointMetaData, checkpointOptions); return result; }
3.26
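triggerCheckpointAsync shows the hand-off pattern used throughout StreamTask: the caller gets a CompletableFuture immediately, the actual work runs on the single mailbox thread, and a failure is reported both through the future and by rethrowing inside the mailbox. A minimal stand-alone version of that hand-off, with a plain single-thread executor standing in for the mailbox:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

public class MailboxHandOffSketch {
    // Stand-in for the mailbox: all task-internal state is only touched from this thread.
    private final ExecutorService mailbox = Executors.newSingleThreadExecutor();

    public CompletableFuture<Boolean> triggerAsync(Supplier<Boolean> actionOnMailboxThread) {
        CompletableFuture<Boolean> result = new CompletableFuture<>();
        mailbox.execute(() -> {
            try {
                result.complete(actionOnMailboxThread.get());
            } catch (RuntimeException ex) {
                // report the failure both through the future and to the executing thread
                result.completeExceptionally(ex);
                throw ex;
            }
        });
        return result;
    }
}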
flink_StreamTask_createRecordWriterDelegate_rdh
// ------------------------------------------------------------------------ @VisibleForTesting public static <OUT> RecordWriterDelegate<SerializationDelegate<StreamRecord<OUT>>> createRecordWriterDelegate(StreamConfig configuration, Environment environment) { List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> recordWriters = createRecordWriters(configuration, environment); if (recordWriters.size() == 1) { return new SingleRecordWriter<>(recordWriters.get(0)); } else if (recordWriters.size() == 0) { return new NonRecordWriter<>(); } else { return new MultipleRecordWriters<>(recordWriters); } }
3.26
flink_ThrowableClassifier_findThrowableOfThrowableType_rdh
/** * Checks whether a throwable chain contains a specific throwable type and returns the * corresponding throwable. * * @param throwable * the throwable chain to check. * @param throwableType * the throwable type to search for in the chain. * @return Optional throwable of the throwable type if available, otherwise empty */ public static Optional<Throwable> findThrowableOfThrowableType(Throwable throwable, ThrowableType throwableType) { if ((throwable == null) || (throwableType == null)) { return Optional.empty();} Throwable t = throwable; while (t != null) { final ThrowableAnnotation annotation = t.getClass().getAnnotation(ThrowableAnnotation.class); if ((annotation != null) && (annotation.value() == throwableType)) {return Optional.of(t); } else { t = t.getCause(); } } return Optional.empty(); }
3.26
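findThrowableOfThrowableType is a straightforward walk along getCause(), classifying each throwable by an annotation on its class. The same traversal with a home-made runtime annotation (the @Recoverable annotation and the exception type are invented for the example, not Flink's ThrowableAnnotation):

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.util.Optional;

public class CauseChainSketch {

    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.TYPE)
    @interface Recoverable {}

    @Recoverable
    static class TransientStoreException extends RuntimeException {
        TransientStoreException(String msg) { super(msg); }
    }

    /** Returns the first throwable in the cause chain whose class carries @Recoverable. */
    static Optional<Throwable> findRecoverable(Throwable throwable) {
        for (Throwable t = throwable; t != null; t = t.getCause()) {
            if (t.getClass().isAnnotationPresent(Recoverable.class)) {
                return Optional.of(t);
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Throwable chain = new IllegalStateException("outer", new TransientStoreException("lost connection"));
        System.out.println(findRecoverable(chain).isPresent()); // prints: true
    }
}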
flink_EmbeddedLeaderService_addContender_rdh
// ------------------------------------------------------------------------ // adding and removing contenders & listeners // ------------------------------------------------------------------------ /** * Callback from leader contenders when they start their service. */ private void addContender(EmbeddedLeaderElection embeddedLeaderElection, LeaderContender contender) { synchronized(f0) { checkState(!shutdown, "leader election is shut down"); checkState(!embeddedLeaderElection.running, "leader election is already started"); try { if (!allLeaderContenders.add(embeddedLeaderElection)) { throw new IllegalStateException("leader election was added to this service multiple times"); } embeddedLeaderElection.contender = contender; embeddedLeaderElection.running = true; updateLeader().whenComplete((aVoid, throwable) -> { if (throwable != null) { fatalError(throwable);} }); } catch (Throwable t) { fatalError(t); } } }
3.26
flink_EmbeddedLeaderService_m1_rdh
// ------------------------------------------------------------------------ // creating contenders and listeners // ------------------------------------------------------------------------ public LeaderElection m1(String componentId) { checkState(!shutdown, "leader election service is shut down"); return new EmbeddedLeaderElection(componentId); }
3.26
flink_EmbeddedLeaderService_confirmLeader_rdh
/** * Callback from leader contenders when they confirm a leader grant. */ private void confirmLeader(final EmbeddedLeaderElection embeddedLeaderElection, final UUID leaderSessionId, final String leaderAddress) { synchronized(f0) { // if the leader election was shut down in the meantime, ignore this confirmation if ((!embeddedLeaderElection.running) || shutdown) { return; } try { // check if the confirmation is for the same grant, or whether it is a stale grant if ((embeddedLeaderElection == currentLeaderProposed) && currentLeaderSessionId.equals(leaderSessionId)) { LOG.info("Received confirmation of leadership for leader {} , session={}", leaderAddress, leaderSessionId); // mark leadership currentLeaderConfirmed = embeddedLeaderElection; currentLeaderAddress = leaderAddress; currentLeaderProposed = null; // notify all listeners notifyAllListeners(leaderAddress, leaderSessionId); } else {LOG.debug("Received confirmation of leadership for a stale leadership grant. Ignoring."); }} catch (Throwable t) {fatalError(t); } } }
3.26
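confirmLeader only accepts a confirmation that matches the outstanding grant: the same contender object and the same session UUID that was handed out with the grant; everything else is treated as a stale grant and ignored. A compact sketch of that guard, with invented field and method names and without the listener notification:

import java.util.UUID;

public class LeaderConfirmationSketch {
    private final Object lock = new Object();
    private Object proposedContender;        // contender the last grant was issued to
    private UUID currentSessionId;           // session id handed out with that grant
    private String confirmedLeaderAddress;   // set only after a matching confirmation

    public void grant(Object contender) {
        synchronized (lock) {
            proposedContender = contender;
            currentSessionId = UUID.randomUUID();
            // ... notify 'contender' of currentSessionId here ...
        }
    }

    /** Accepts the confirmation only if it matches the outstanding grant. */
    public boolean confirm(Object contender, UUID sessionId, String address) {
        synchronized (lock) {
            boolean matchesGrant = contender == proposedContender
                    && sessionId != null
                    && sessionId.equals(currentSessionId);
            if (!matchesGrant) {
                return false; // stale or unknown grant: ignore
            }
            confirmedLeaderAddress = address;
            proposedContender = null;
            return true;
        }
    }

    public String getConfirmedLeaderAddress() {
        synchronized (lock) {
            return confirmedLeaderAddress;
        }
    }
}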
flink_EmbeddedLeaderService_removeContender_rdh
/** * Callback from leader contenders when they stop their service. */ private void removeContender(EmbeddedLeaderElection embeddedLeaderElection) { synchronized(f0) { // if the leader election was not even started, simply do nothing if ((!embeddedLeaderElection.running) || shutdown) { return; } try {if (!allLeaderContenders.remove(embeddedLeaderElection)) { throw new IllegalStateException("leader election does not belong to this service"); } // stop the service if (embeddedLeaderElection.isLeader) { embeddedLeaderElection.contender.revokeLeadership(); } embeddedLeaderElection.contender = null; embeddedLeaderElection.running = false; embeddedLeaderElection.isLeader = false; // if that was the current leader, unset its status if (currentLeaderConfirmed == embeddedLeaderElection) { currentLeaderConfirmed = null; currentLeaderSessionId = null; currentLeaderAddress = null; } if (currentLeaderProposed == embeddedLeaderElection) { currentLeaderProposed = null; currentLeaderSessionId = null; } updateLeader().whenComplete((aVoid, throwable) -> { if (throwable != null) { fatalError(throwable); } }); } catch (Throwable t) { fatalError(t); } } }
3.26
flink_EmbeddedLeaderService_shutdown_rdh
// ------------------------------------------------------------------------ // shutdown and errors // ------------------------------------------------------------------------ /** * Shuts down this leader election service. * * <p>This method does not perform a clean revocation of the leader status and no notification * to any leader listeners. It simply notifies all contenders and listeners that the service is * no longer available. */ public void shutdown() { synchronized(f0) { m0(new Exception("Leader election service is shutting down")); } }
3.26
flink_SlotSharingExecutionSlotAllocator_allocateSlotsForVertices_rdh
/** * Creates logical {@link SlotExecutionVertexAssignment}s from physical shared slots. * * <p>The allocation has the following steps: * * <ol> * <li>Map the executions to {@link ExecutionSlotSharingGroup}s using {@link SlotSharingStrategy} * <li>Check which {@link ExecutionSlotSharingGroup}s already have shared slot * <li>For all involved {@link ExecutionSlotSharingGroup}s which do not have a shared slot * yet: * <li>Create a {@link SlotProfile} future using {@link SharedSlotProfileRetriever} and then * <li>Allocate a physical slot from the {@link PhysicalSlotProvider} * <li>Create a shared slot based on the returned physical slot futures * <li>Allocate logical slot futures for the executions from all corresponding shared slots. * <li>If a physical slot request fails, associated logical slot requests are canceled within * the shared slot * <li>Generate {@link SlotExecutionVertexAssignment}s based on the logical slot futures and * returns the results. * </ol> * * @param executionVertexIds * Execution vertices to allocate slots for */ private List<SlotExecutionVertexAssignment> allocateSlotsForVertices(List<ExecutionVertexID> executionVertexIds) { SharedSlotProfileRetriever sharedSlotProfileRetriever = sharedSlotProfileRetrieverFactory.createFromBulk(new HashSet<>(executionVertexIds)); Map<ExecutionSlotSharingGroup, List<ExecutionVertexID>> executionsByGroup = executionVertexIds.stream().collect(Collectors.groupingBy(slotSharingStrategy::getExecutionSlotSharingGroup)); Map<ExecutionSlotSharingGroup, SharedSlot> slots = new HashMap<>(executionsByGroup.size()); Set<ExecutionSlotSharingGroup> groupsToAssign = new HashSet<>(executionsByGroup.keySet()); Map<ExecutionSlotSharingGroup, SharedSlot> assignedSlots = tryAssignExistingSharedSlots(groupsToAssign); slots.putAll(assignedSlots); groupsToAssign.removeAll(assignedSlots.keySet()); if (!groupsToAssign.isEmpty()) { Map<ExecutionSlotSharingGroup, SharedSlot> allocatedSlots = allocateSharedSlots(groupsToAssign, sharedSlotProfileRetriever); slots.putAll(allocatedSlots); groupsToAssign.removeAll(allocatedSlots.keySet()); Preconditions.checkState(groupsToAssign.isEmpty()); } Map<ExecutionVertexID, SlotExecutionVertexAssignment> assignments = allocateLogicalSlotsFromSharedSlots(slots, executionsByGroup); // we need to pass the slots map to the createBulk method instead of using the allocator's // 'sharedSlots' // because if any physical slots have already failed, their shared slots have been removed // from the allocator's 'sharedSlots' by failed logical slots. SharingPhysicalSlotRequestBulk bulk = createBulk(slots, executionsByGroup); bulkChecker.schedulePendingRequestBulkTimeoutCheck(bulk, allocationTimeout); return executionVertexIds.stream().map(assignments::get).collect(Collectors.toList()); }
3.26
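allocateSlotsForVertices follows a three-step shape that is worth seeing in isolation: bucket the work items by their sharing group, reuse resources that already exist for a group, and allocate new ones only for the remainder before mapping every item back to its group's resource. A generic, self-contained skeleton of that flow (the generic ids and the 'resource' type are placeholders, not Flink's slot types):

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

public class GroupedAllocationSketch {

    /** Assigns every item to one resource per group, reusing existing group resources first. */
    static <I, G, R> Map<I, R> allocate(
            List<I> items,
            Function<I, G> groupOf,
            Map<G, R> existingResources,
            Function<G, R> allocateNewResource) {

        Map<G, List<I>> itemsByGroup = items.stream().collect(Collectors.groupingBy(groupOf));

        Map<G, R> resources = new HashMap<>();
        Set<G> groupsToAssign = new HashSet<>(itemsByGroup.keySet());

        // 1. reuse what is already there
        for (G group : itemsByGroup.keySet()) {
            R existing = existingResources.get(group);
            if (existing != null) {
                resources.put(group, existing);
                groupsToAssign.remove(group);
            }
        }
        // 2. allocate for the remaining groups
        for (G group : groupsToAssign) {
            resources.put(group, allocateNewResource.apply(group));
        }
        // 3. map every item to its group's resource
        Map<I, R> assignments = new HashMap<>();
        for (Map.Entry<G, List<I>> e : itemsByGroup.entrySet()) {
            for (I item : e.getValue()) {
                assignments.put(item, resources.get(e.getKey()));
            }
        }
        return assignments;
    }
}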
flink_TimestampWriter_forRow_rdh
/** * {@link ArrowFieldWriter} for Timestamp. */
3.26