name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
flink_ExecNodePlanDumper_dagToString_rdh
|
/**
* Converts an {@link ExecNode} DAG to a string as a tree style.
*
* <p>The following DAG of {@link ExecNode}
*
* <pre>{@code Sink1 Sink2
* | |
* Filter3 Filter4
* \ /
* Join
* / \
* Filter1 Filter2
* \ /
* Project
* |
* Scan}</pre>
*
* <p>would be converted to the tree style as following:
*
* <pre>{@code Join(reuse_id=[2])
* :- Filter1
* : +- Project(reuse_id=[1])
* : +- Scan
* +- Filter2
* +- Reused(reference_id=[1])
*
* Sink1
* +- Filter3
* +- Reused(reference_id=[2])
*
* Sink2
* +- Filter4
* +- Reused(reference_id=[2])}</pre>
*
* @param nodes
* the ExecNodes to convert
* @return the plan of ExecNode
*/
public static String dagToString(List<ExecNode<?>> nodes) {
Preconditions.checkArgument((nodes != null) && (!nodes.isEmpty()), "nodes should not be null or empty.");
if (nodes.size() == 1) {
return treeToString(nodes.get(0));
}
// nodes at which the visitor stops descending when it meets them
final List<ExecNode<?>> stopVisitNodes = new ArrayList<>();
final StringBuilder sb = new StringBuilder();
final DagReuseInfo reuseInfo = new DagReuseInfo(nodes, new ArrayList<>());
final ExecNodeVisitor visitor = new ExecNodeVisitorImpl() {
@Override
public void visit(ExecNode<?> node) {
int visitedTimes = reuseInfo.addVisitedTimes(node);
boolean isFirstVisit = visitedTimes == 1;
if (isFirstVisit) {
super.visit(node);
}
int reuseId = reuseInfo.getReuseId(node);
boolean isReuseNode = reuseId >= 0;
if ((node instanceof CommonExecLegacySink)
        || (node instanceof CommonExecSink)
        || (isReuseNode && isFirstVisit)) {
if (isReuseNode) {
reuseInfo.setFirstVisited(node, true);
}
String reusePlan = doConvertTreeToString(node, reuseInfo, false, stopVisitNodes, false);
sb.append(reusePlan).append(System.lineSeparator());
if (isReuseNode) {
// update visit info after the reuse node visited
stopVisitNodes.add(node);
reuseInfo.setFirstVisited(node, false);
}
}
}
};
nodes.forEach(visitor::visit);
if (sb.length() > 0) {
// delete last line separator
sb.deleteCharAt(sb.length() - 1);
}
return sb.toString();
}
| 3.26 |
flink_CheckpointCommitter_setOperatorId_rdh
|
/**
* Internally used to set the operator ID after instantiation.
*
* @param id
* @throws Exception
*/
public void setOperatorId(String id) throws Exception {
this.operatorId = id;
}
| 3.26 |
flink_CheckpointCommitter_setJobId_rdh
|
/**
* Internally used to set the job ID after instantiation.
*
* @param id
* @throws Exception
*/
public void setJobId(String id) throws Exception {
this.jobId = id;
}
| 3.26 |
flink_TPCHQuery3_m0_rdh
|
// *************************************************************************
// PROGRAM
// *************************************************************************
public static void m0(String[] args) throws Exception {
LOGGER.warn(DATASET_DEPRECATION_INFO);
final ParameterTool params = ParameterTool.fromArgs(args);
if (((!params.has("lineitem")) && (!params.has("customer"))) && (!params.has("orders"))) {
System.err.println(" This program expects data from the TPC-H benchmark as input data.");
System.err.println(" Due to legal restrictions, we can not ship generated data.");
System.out.println(" You can find the TPC-H data generator at http://www.tpc.org/tpch/.");
System.out.println(" Usage: TPCHQuery3 --lineitem <path> --customer <path> --orders <path> [--output <path>]");
return;
}
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.getConfig().setGlobalJobParameters(params);
// get input data
DataSet<Lineitem> lineitems = getLineitemDataSet(env, params.get("lineitem"));
DataSet<Customer> customers = getCustomerDataSet(env, params.get("customer"));
DataSet<Order> orders = getOrdersDataSet(env, params.get("orders"));
// Filter market segment "AUTOMOBILE"
customers = customers.filter(new FilterFunction<Customer>() {
@Override
public boolean filter(Customer c) {
return c.getMktsegment().equals("AUTOMOBILE");
}
});
// Filter all Orders with o_orderdate < 12.03.1995
orders = orders.filter(new FilterFunction<Order>() {
private final DateFormat format = new SimpleDateFormat("yyyy-MM-dd");
private final Date date = format.parse("1995-03-12");
@Override
public boolean filter(Order o) throws ParseException {
return format.parse(o.getOrderdate()).before(date);
}
});
// Filter all Lineitems with l_shipdate > 12.03.1995
lineitems = lineitems.filter(new FilterFunction<Lineitem>() {
private final DateFormat format = new SimpleDateFormat("yyyy-MM-dd");
private final Date date = format.parse("1995-03-12");
@Override
public boolean filter(Lineitem l) throws ParseException {
return format.parse(l.getShipdate()).after(date);
}
});
// Join customers with orders and package them into a ShippingPriorityItem
DataSet<ShippingPriorityItem> customerWithOrders = customers.join(orders).where(0).equalTo(1).with(new JoinFunction<Customer, Order, ShippingPriorityItem>() {
@Override
public ShippingPriorityItem m1(Customer c, Order o) {
return new ShippingPriorityItem(o.getOrderKey(), 0.0, o.getOrderdate(), o.getShippriority());
}
});
// Join the last join result with Lineitems
// Group by l_orderkey, o_orderdate and o_shippriority and compute revenue sum
DataSet<ShippingPriorityItem> result = customerWithOrders.join(lineitems).where(0).equalTo(0).with(new JoinFunction<ShippingPriorityItem, Lineitem, ShippingPriorityItem>() {
@Override
public ShippingPriorityItem join(ShippingPriorityItem i, Lineitem l) {
i.setRevenue(l.getExtendedprice() * (1 - l.getDiscount()));
return i;
}
}).groupBy(0, 2, 3).aggregate(Aggregations.SUM, 1);
// emit result
if (params.has("output")) {result.writeAsCsv(params.get("output"), "\n", "|");
// execute program
env.execute("TPCH Query 3 Example");
} else {
System.out.println("Printing result to stdout. Use --output to specify output path.");
result.print();
}
}
| 3.26 |
flink_TPCHQuery3_getLineitemDataSet_rdh
|
// *************************************************************************
// UTIL METHODS
// *************************************************************************
private static DataSet<Lineitem> getLineitemDataSet(ExecutionEnvironment env, String lineitemPath) {
return env.readCsvFile(lineitemPath).fieldDelimiter("|").includeFields("1000011000100000").tupleType(TPCHQuery3.Lineitem.class);
}
| 3.26 |
flink_CharVarCharTrimPadCastRule_stringExceedsLength_rdh
|
// ---------------
// Shared methods
// ---------------
static String stringExceedsLength(String strTerm, int targetLength) {
return (methodCall(strTerm, "length") + " > ") + targetLength;
}
| 3.26 |
flink_HiveParserExpressionWalker_walk_rdh
|
/**
* walk the current operator and its descendants.
*/
protected void walk(Node nd) throws SemanticException {
// Push the node in the stack
opStack.push(nd);
// While there are still nodes to dispatch...
while (!opStack.empty()) {
Node node = opStack.peek();
if ((node.getChildren() == null) || getDispatchedList().containsAll(node.getChildren())) {
// Dispatch current node
if (!getDispatchedList().contains(node)) {
dispatch(node, opStack);
opQueue.add(node);
}
opStack.pop();
continue;
}
// Add a single child and restart the loop
for (Node childNode : node.getChildren()) {
if (!getDispatchedList().contains(childNode)) {
if (shouldByPass(childNode, node)) {
retMap.put(childNode, null);
} else {
opStack.push(childNode);
}
break;
}
}
} // end while
}
| 3.26 |
flink_HiveParserExpressionWalker_shouldByPass_rdh
|
/**
* We should bypass subquery since we have already processed and created logical plan (in
* genLogicalPlan) for subquery at this point. SubQueryExprProcessor will use generated plan and
* creates appropriate ExprNodeSubQueryDesc.
*/
private boolean shouldByPass(Node childNode, Node parentNode) {
if ((parentNode instanceof HiveParserASTNode) && (((HiveParserASTNode) (parentNode)).getType() == HiveASTParser.TOK_SUBQUERY_EXPR)) {
HiveParserASTNode parentOp = ((HiveParserASTNode) (parentNode));
// subquery either in WHERE <LHS> IN <SUBQUERY> form OR WHERE EXISTS <SUBQUERY> form
// in first case LHS should not be bypassed
assert (parentOp.getChildCount() == 2) || (parentOp.getChildCount() == 3);
return (parentOp.getChildCount() != 3) || (childNode != parentOp.getChild(2));
}
return false;
}
| 3.26 |
flink_HiveParserUnparseTranslator_addIdentifierTranslation_rdh
|
/**
* Register a translation for an identifier.
*/
public void addIdentifierTranslation(HiveParserASTNode identifier) {
if (!enabled) {
return;
}
assert identifier.getToken().getType() == HiveASTParser.Identifier;
String replacementText = identifier.getText();
replacementText = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(replacementText);
replacementText = HiveUtils.unparseIdentifier(replacementText, conf);
addTranslation(identifier, replacementText);
}
| 3.26 |
flink_HiveParserUnparseTranslator_addCopyTranslation_rdh
|
/**
* Register a "copy" translation in which a node will be translated into whatever the
* translation turns out to be for another node (after previously registered translations have
* already been performed). Deferred translations are performed in the order they are
* registered, and follow the same rules regarding overlap as non-copy translations.
*
* @param targetNode
* node whose subtree is to be replaced
* @param sourceNode
* the node providing the replacement text
*/
public void addCopyTranslation(HiveParserASTNode targetNode, HiveParserASTNode sourceNode) {
if (!enabled) {
return;
}
if (targetNode.getOrigin() != null) {
return;
}
CopyTranslation copyTranslation = new CopyTranslation();
copyTranslation.targetNode = targetNode;
copyTranslation.sourceNode = sourceNode;
copyTranslations.add(copyTranslation);
}
| 3.26 |
flink_LastValueAggFunction_getArgumentDataTypes_rdh
|
// --------------------------------------------------------------------------------------------
// Planning
// --------------------------------------------------------------------------------------------
@Override
public List<DataType> getArgumentDataTypes() {
return Collections.singletonList(valueDataType);
}
| 3.26 |
flink_LastValueAggFunction_createAccumulator_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public RowData createAccumulator() {
GenericRowData acc = new GenericRowData(2);
acc.setField(0, null);
acc.setField(1, Long.MIN_VALUE);
return acc;
}
| 3.26 |
flink_IntMaximum_add_rdh
|
// ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(int value) {
this.max = Math.max(this.max, value);
}
| 3.26 |
flink_IntMaximum_toString_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "IntMaximum " + this.max;
}
| 3.26 |
flink_SourceTestSuiteBase_addCollectSink_rdh
|
/**
* Add a collect sink in the job.
*/
protected CollectIteratorBuilder<T> addCollectSink(DataStream<T> stream) {
TypeSerializer<T> serializer = stream.getType().createSerializer(stream.getExecutionConfig());
String accumulatorName = "dataStreamCollect_" + UUID.randomUUID();
CollectSinkOperatorFactory<T> factory = new CollectSinkOperatorFactory<>(serializer, accumulatorName);
CollectSinkOperator<T> operator = ((CollectSinkOperator<T>) (factory.getOperator()));
CollectStreamSink<T> sink = new CollectStreamSink<>(stream, factory);
sink.name("Data stream collect sink");
stream.getExecutionEnvironment().addOperator(sink.getTransformation());
return new CollectIteratorBuilder<>(operator, serializer, accumulatorName, stream.getExecutionEnvironment().getCheckpointConfig());
}
| 3.26 |
flink_SourceTestSuiteBase_testMultipleSplits_rdh
|
/**
* Test connector source with multiple splits in the external system
*
* <p>This test will create 4 splits in the external system, write test data to all splits, and
* consume back via a Flink job with 4 parallelism.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*
* <p>A bounded source is required for this test.
*/
@TestTemplate
@DisplayName("Test source with multiple splits")
public void testMultipleSplits(TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
// Step 1: Preparation
TestingSourceSettings sourceSettings = TestingSourceSettings.builder().setBoundedness(Boundedness.BOUNDED).setCheckpointingMode(semantic).build();
TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder().setConnectorJarPaths(externalContext.getConnectorJarPaths()).build();
Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
// Step 2: Write test data to external system
int splitNumber = 4;
List<List<T>> testRecordsLists = new ArrayList<>();
for (int v14 = 0; v14 < splitNumber; v14++) {
testRecordsLists.add(generateAndWriteTestData(v14, externalContext, sourceSettings));
}
// Step 3: Build and execute Flink job
StreamExecutionEnvironment v15 = testEnv.createExecutionEnvironment(envOptions);
DataStreamSource<T> v16 = v15.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source").setParallelism(splitNumber);
CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(v16);
JobClient jobClient = submitJob(v15, "Source Multiple Split Test");
// Step 4: Validate test data
try (CloseableIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
// Check test result
LOG.info("Checking test results");
checkResultWithSemantic(resultIterator, testRecordsLists, semantic, null);
}
}
| 3.26 |
flink_SourceTestSuiteBase_generateAndWriteTestData_rdh
|
// ----------------------------- Helper Functions ---------------------------------
/**
* Generate a set of test records and write it to the given split writer.
*
* @param externalContext
* External context
* @return List of generated test records
*/
protected List<T> generateAndWriteTestData(int splitIndex, DataStreamSourceExternalContext<T> externalContext, TestingSourceSettings testingSourceSettings) {
List<T> testRecords = externalContext.generateTestData(testingSourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
LOG.info("Writing {} records for split {} to external system", testRecords.size(), splitIndex);
externalContext.createSourceSplitDataWriter(testingSourceSettings).writeRecords(testRecords);
return testRecords;
}
| 3.26 |
flink_SourceTestSuiteBase_testSourceMetrics_rdh
|
/**
* Test connector source metrics.
*
* <p>This test will create 4 splits in the external system first, write test data to all splits
* and consume back via a Flink job with parallelism 4. Then read and compare the metrics.
*
* <p>Now test: numRecordsIn
*/
@TestTemplate
@DisplayName("Test source metrics")
public void testSourceMetrics(TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
TestingSourceSettings sourceSettings = TestingSourceSettings.builder().setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED).setCheckpointingMode(semantic).build();
TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder().setConnectorJarPaths(externalContext.getConnectorJarPaths()).build();
final int splitNumber = 4;
final List<List<T>> testRecordCollections = new ArrayList<>();
for (int i = 0; i < splitNumber; i++) {
testRecordCollections.add(generateAndWriteTestData(i, externalContext, sourceSettings));
}
// make sure use different names when executes multi times
String sourceName = "metricTestSource" +
testRecordCollections.hashCode();final StreamExecutionEnvironment env = testEnv.createExecutionEnvironment(envOptions);
final DataStreamSource<T> dataStreamSource = env.fromSource(tryCreateSource(externalContext, sourceSettings), WatermarkStrategy.noWatermarks(), sourceName).setParallelism(splitNumber);
dataStreamSource.sinkTo(new DiscardingSink<>());
final JobClient jobClient = env.executeAsync("Metrics Test");
final MetricQuerier queryRestClient = new MetricQuerier(new Configuration());
final ExecutorService executorService = Executors.newCachedThreadPool();
try {
waitForAllTaskRunning(() -> getJobDetails(new RestClient(new Configuration(), executorService), testEnv.getRestEndpoint(), jobClient.getJobID()));
waitUntilCondition(() -> {
// test metrics
try {
return checkSourceMetrics(queryRestClient, testEnv, jobClient.getJobID(), sourceName, getTestDataSize(testRecordCollections));
} catch (Exception e) {
// skip the failed assertion and retry
return false;
}
});
} finally {
// Clean up
executorService.shutdown();
killJob(jobClient);
}
}
| 3.26 |
flink_SourceTestSuiteBase_generateTestDataForWriter_rdh
|
/**
* Generate a set of test records and write them to the given split writer.
*
* @param externalContext
* External context
* @param splitIndex
* the split index
* @param writer
* the writer to send data
* @return List of generated test records
*/
protected List<T> generateTestDataForWriter(DataStreamSourceExternalContext<T> externalContext, TestingSourceSettings sourceSettings, int splitIndex, ExternalSystemSplitDataWriter<T> writer) {
final List<T> testRecordCollection = externalContext.generateTestData(sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
LOG.debug("Writing {} records to external system", testRecordCollection.size());
writer.writeRecords(testRecordCollection);
return testRecordCollection;
}
| 3.26 |
flink_SourceTestSuiteBase_getTestDataSize_rdh
|
/**
* Get the size of test data.
*
* @param collections
* test data
* @return the size of test data
*/
protected int getTestDataSize(List<List<T>> collections) {
int v78 = 0;
for (Collection<T> collection : collections) {
v78 += collection.size();
}
return v78;
}
| 3.26 |
flink_SourceTestSuiteBase_checkSourceMetrics_rdh
|
/**
* Compare the metrics.
*/
private boolean checkSourceMetrics(MetricQuerier queryRestClient, TestEnvironment testEnv, JobID jobId, String sourceName, long allRecordSize) throws Exception {
Double sumNumRecordsIn = queryRestClient.getAggregatedMetricsByRestAPI(testEnv.getRestEndpoint(), jobId, sourceName, MetricNames.IO_NUM_RECORDS_IN, null);
return Precision.equals(allRecordSize, sumNumRecordsIn);
}
| 3.26 |
flink_SourceTestSuiteBase_testScaleDown_rdh
|
/**
* Test connector source restart from a savepoint with a lower parallelism.
*
* <p>This test will create 4 splits in the external system first, write test data to all splits
* and consume back via a Flink job with parallelism 4. Then stop the job with savepoint,
* restart the job from the checkpoint with a lower parallelism 2. After the job has been
* running, add some extra data to the source and compare the result.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*/
@TestTemplate
@DisplayName("Test source restarting with a lower parallelism")
public void testScaleDown(TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 4, 4, 2);
}
| 3.26 |
flink_SourceTestSuiteBase_checkResultWithSemantic_rdh
|
/**
* Compare the test data with the result.
*
* <p>If the source is bounded, limit should be null.
*
* @param resultIterator
* the data read from the job
* @param testData
* the test data
* @param semantic
* the supported semantic, see {@link CheckpointingMode}
* @param limit
* expected number of the data to read from the job
*/
protected void checkResultWithSemantic(CloseableIterator<T> resultIterator, List<List<T>> testData, CheckpointingMode semantic, Integer limit) {
if (limit != null) {
Runnable runnable = () -> CollectIteratorAssertions.assertThat(resultIterator).withNumRecordsLimit(limit).matchesRecordsFromSource(testData, semantic);
assertThatFuture(runAsync(runnable)).eventuallySucceeds();
} else {
CollectIteratorAssertions.assertThat(resultIterator).matchesRecordsFromSource(testData, semantic);
}
}
| 3.26 |
flink_SourceTestSuiteBase_testSavepoint_rdh
|
/**
* Test connector source restart from a savepoint.
*
* <p>This test will create 4 splits in the external system first, write test data to all
* splits, and consume back via a Flink job. Then stop the job with savepoint, restart the job
* from the checkpoint. After the job has been running, add some extra data to the source and
* compare the result.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*/
@TestTemplate
@DisplayName("Test source restarting from a savepoint")
public void testSavepoint(TestEnvironment testEnv,
DataStreamSourceExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 4, 4, 4);
}
| 3.26 |
flink_SourceTestSuiteBase_testSourceSingleSplit_rdh
|
/**
* Test connector source with only one split in the external system.
*
* <p>This test will create one split in the external system, write test data into it, and
* consume back via a Flink job with 1 parallelism.
*
* <p>The number and order of records consumed by Flink need to be identical to the test data
* written to the external system in order to pass this test.
*
* <p>A bounded source is required for this test.
*/
@TestTemplate
@DisplayName("Test source with single split")
public void testSourceSingleSplit(TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
// Step 1: Preparation
TestingSourceSettings sourceSettings = TestingSourceSettings.builder().setBoundedness(Boundedness.BOUNDED).setCheckpointingMode(semantic).build();
TestEnvironmentSettings envSettings = TestEnvironmentSettings.builder().setConnectorJarPaths(externalContext.getConnectorJarPaths()).build();
Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
// Step 2: Write test data to external system
List<T> testRecords = generateAndWriteTestData(0, externalContext, sourceSettings);
// Step 3: Build and execute Flink job
StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envSettings);
DataStreamSource<T> stream = execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source").setParallelism(1);
CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
JobClient jobClient = submitJob(execEnv, "Source Single Split Test");
// Step 4: Validate test data
try (CollectResultIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
// Check test result
LOG.info("Checking test results");
checkResultWithSemantic(resultIterator, singletonList(testRecords), semantic, null);
}
// Step 5: Clean up
waitForJobStatus(jobClient, singletonList(JobStatus.FINISHED));
}
| 3.26 |
flink_SourceTestSuiteBase_testScaleUp_rdh
|
/**
* Test connector source restart from a savepoint with a higher parallelism.
*
* <p>This test will create 4 splits in the external system first, write test data to all splits
* and consume back via a Flink job with parallelism 2. Then stop the job with savepoint,
* restart the job from the checkpoint with a higher parallelism 4. After the job has been
* running, add some extra data to the source and compare the result.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*/
@TestTemplate
@DisplayName("Test source restarting with a higher parallelism")
public void testScaleUp(TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 4, 2, 4);
}
| 3.26 |
flink_TieredStorageResultSubpartitionView_readNettyPayload_rdh
|
// -------------------------------
// Internal Methods
// -------------------------------
private Optional<Buffer> readNettyPayload(NettyPayloadManager nettyPayloadManager) throws IOException {
NettyPayload nettyPayload = nettyPayloadManager.poll();
if (nettyPayload == null) {
return Optional.empty();
} else {
checkState(nettyPayload.getSegmentId() == (-1));
Optional<Throwable> error = nettyPayload.getError();
if (error.isPresent()) {
m0();
throw new IOException(error.get());
} else {
return nettyPayload.getBuffer();
}
}
}
| 3.26 |
flink_CompressionUtils_extractTarFileUsingTar_rdh
|
// See
// https://github.com/apache/hadoop/blob/7f93349ee74da5f35276b7535781714501ab2457/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
private static void extractTarFileUsingTar(String inFilePath, String targetDirPath, boolean gzipped) throws IOException {
inFilePath = makeSecureShellPath(inFilePath);
targetDirPath = makeSecureShellPath(targetDirPath);
String untarCommand = (gzipped) ? String.format("gzip -dc '%s' | (cd '%s' && tar -xf -)", inFilePath, targetDirPath)
: String.format("cd '%s' && tar -xf '%s'", targetDirPath, inFilePath);
Process process = new ProcessBuilder("bash", "-c", untarCommand).start();
int exitCode = 0;
try {
exitCode = process.waitFor();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException("Interrupted when untarring file " + inFilePath);
}
if (exitCode != 0) {
throw new IOException((("Error untarring file " + inFilePath) + ". Tar process exited with exit code ") + exitCode);
}
}
| 3.26 |
flink_CompressionUtils_extractTarFileUsingJava_rdh
|
// Follow the pattern suggested in
// https://commons.apache.org/proper/commons-compress/examples.html
private static void extractTarFileUsingJava(String inFilePath, String targetDirPath, boolean gzipped) throws IOException {
try (InputStream fi = Files.newInputStream(Paths.get(inFilePath));
        InputStream bi = new BufferedInputStream(fi);
        final TarArchiveInputStream tai = new TarArchiveInputStream(gzipped ? new GzipCompressorInputStream(bi) : bi)) {
final File targetDir = new File(targetDirPath);
TarArchiveEntry entry;
while ((entry = tai.getNextTarEntry()) != null) {
unpackEntry(tai, entry, targetDir);
}
}
}
| 3.26 |
flink_PersistentMetadataCheckpointStorageLocation_createMetadataOutputStream_rdh
|
// ------------------------------------------------------------------------
@Override
public CheckpointMetadataOutputStream createMetadataOutputStream() throws IOException {
return new FsCheckpointMetadataOutputStream(fileSystem,
metadataFilePath, checkpointDirectory);
}
| 3.26 |
flink_PatternStream_inEventTime_rdh
|
/**
* Sets the time characteristic to event time.
*/
public PatternStream<T> inEventTime() {
return new PatternStream<>(builder.inEventTime());
}
/**
* Applies a process function to the detected pattern sequence. For each pattern sequence the
* provided {@link PatternProcessFunction} is called. In order to process timed out partial
* matches as well one can use {@link TimedOutPartialMatchHandler} as additional interface.
*
* @param patternProcessFunction
* The pattern process function which is called for each detected
* pattern sequence.
* @param <R>
* Type of the resulting elements
* @return {@link DataStream}
| 3.26 |
flink_PatternStream_inProcessingTime_rdh
|
/**
* Sets the time characteristic to processing time.
*/
public PatternStream<T> inProcessingTime() {
return new PatternStream<>(builder.inProcessingTime());
}
| 3.26 |
flink_PatternStream_sideOutputLateData_rdh
|
/**
* Send late arriving data to the side output identified by the given {@link OutputTag}. A
* record is considered late after the watermark has passed its timestamp.
*
* <p>You can get the stream of late data using {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the pattern processing operations.
*/
public PatternStream<T> sideOutputLateData(OutputTag<T> lateDataOutputTag) {
return new PatternStream<>(builder.withLateDataOutputTag(lateDataOutputTag));
}
| 3.26 |
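The PatternStream rows above show the time-characteristic and late-data configuration methods in isolation. A minimal sketch of how they chain together in a CEP job, assuming the flink-cep dependency is on the classpath; the event type, input elements, pattern name, and side-output tag name are all placeholders, not part of the snippets above.

```java
import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.OutputTag;

public class PatternStreamConfigSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> input = env.fromElements("a", "b", "c"); // placeholder input

        // Records arriving behind the watermark are routed to this side output.
        OutputTag<String> lateTag = new OutputTag<String>("late-events") {};

        PatternStream<String> patternStream =
                CEP.pattern(input, Pattern.<String>begin("start"))
                        .inProcessingTime()           // switch the time characteristic
                        .sideOutputLateData(lateTag); // register the late-data side output
    }
}
```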
flink_TriFunctionWithException_unchecked_rdh
|
/**
* Converts a {@link TriFunctionWithException} into a {@link TriFunction}.
*
* @param triFunctionWithException
* function with exception to convert into a function
* @param <A>
* first input type
* @param <B>
* second input type
* @param <C>
* third input type
* @param <D>
* output type
* @return {@link TriFunction} which throws all checked exceptions as unchecked exceptions.
*/
static <A, B, C, D> TriFunction<A, B, C, D> unchecked(TriFunctionWithException<A, B, C, D, ?> triFunctionWithException) {
return (A a, B b, C c) -> {
try {
return triFunctionWithException.apply(a, b, c);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
// we need this to appease the compiler :-(
return null;
}
};
}
| 3.26 |
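A short usage sketch for the unchecked(...) conversion above: a tri-function that declares a checked exception is wrapped so callers can use it where a plain TriFunction is expected. The package names are assumed to be org.apache.flink.util.function; the lambda and its message are illustrative only.

```java
import org.apache.flink.util.function.TriFunction;
import org.apache.flink.util.function.TriFunctionWithException;

public class UncheckedTriFunctionSketch {
    public static void main(String[] args) {
        // A tri-function that declares a checked exception.
        TriFunctionWithException<Integer, Integer, Integer, Integer, Exception> throwingSum =
                (a, b, c) -> {
                    if (a == null) {
                        throw new Exception("a must not be null");
                    }
                    return a + b + c;
                };

        // unchecked(...) wraps it so callers do not have to handle the checked exception.
        TriFunction<Integer, Integer, Integer, Integer> sum =
                TriFunctionWithException.unchecked(throwingSum);

        System.out.println(sum.apply(1, 2, 3)); // prints 6
    }
}
```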
flink_DataExchangeMode_getForForwardExchange_rdh
|
// ------------------------------------------------------------------------
public static DataExchangeMode getForForwardExchange(ExecutionMode mode) {
return FORWARD[mode.ordinal()];
}
| 3.26 |
flink_DataExchangeMode_select_rdh
|
/**
* Computes the mode of data exchange to be used for a given execution mode and ship strategy.
* The type of the data exchange depends also on whether this connection has been identified to
* require pipeline breaking for deadlock avoidance.
*
* <ul>
* <li>If the connection is set to be pipeline breaking, this returns the pipeline breaking
* variant of the execution mode {@link org.apache.flink.runtime.io.network.DataExchangeMode#getPipelineBreakingExchange(org.apache.flink.api.common.ExecutionMode)}.
* <li>If the data exchange is a simple FORWARD (one-to-one communication), this returns
* {@link org.apache.flink.runtime.io.network.DataExchangeMode#getForForwardExchange(org.apache.flink.api.common.ExecutionMode)}.
* <li>If otherwise, this returns {@link org.apache.flink.runtime.io.network.DataExchangeMode#getForShuffleOrBroadcast(org.apache.flink.api.common.ExecutionMode)}.
* </ul>
*
* @param shipStrategy
* The ship strategy (FORWARD, PARTITION, BROADCAST, ...) of the runtime
* data exchange.
* @return The data exchange mode for the connection, given the concrete ship strategy.
*/
public static DataExchangeMode select(ExecutionMode executionMode, ShipStrategyType shipStrategy, boolean breakPipeline) {
if ((shipStrategy == null) || (shipStrategy == ShipStrategyType.NONE)) {
throw new IllegalArgumentException("shipStrategy may not be null or NONE");
}
if (executionMode == null) {
throw new IllegalArgumentException("executionMode may not be null");
}
if (breakPipeline) {
return getPipelineBreakingExchange(executionMode);
} else if (shipStrategy == ShipStrategyType.FORWARD) {
return getForForwardExchange(executionMode);
} else {
return getForShuffleOrBroadcast(executionMode);
}
}
| 3.26 |
flink_DataSetUtils_partitionByRange_rdh
|
/**
* Range-partitions a DataSet using the specified key selector function.
*/
public static <T, K extends Comparable<K>> PartitionOperator<T> partitionByRange(DataSet<T> input, DataDistribution distribution, KeySelector<T, K> keyExtractor) {
final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, input.getType());
return new PartitionOperator<>(input, PartitionMethod.RANGE, new Keys.SelectorFunctionKeys<>(input.clean(keyExtractor), input.getType(), keyType), distribution,
Utils.getCallLocationName());
}
| 3.26 |
flink_DataSetUtils_sample_rdh
|
/**
* Generate a sample of DataSet by the probability fraction of each element.
*
* @param withReplacement
* Whether element can be selected more than once.
* @param fraction
* Probability that each element is chosen, should be [0,1] without replacement,
* and [0, ∞) with replacement. While fraction is larger than 1, the elements are expected
* to be selected multi times into sample on average.
* @param seed
* random number generator seed.
* @return The sampled DataSet
*/
public static <T> MapPartitionOperator<T, T> sample(DataSet<T> input, final boolean withReplacement, final double fraction, final long seed) {
return input.mapPartition(new SampleWithFraction<T>(withReplacement, fraction, seed));
}
| 3.26 |
flink_DataSetUtils_zipWithIndex_rdh
|
/**
* Method that assigns a unique {@link Long} value to all elements in the input data set. The
* generated values are consecutive.
*
* @param input
* the input data set
* @return a data set of tuple 2 consisting of consecutive ids and initial values.
*/
public static <T> DataSet<Tuple2<Long, T>> zipWithIndex(DataSet<T> input) {
DataSet<Tuple2<Integer, Long>> elementCount = countElementsPerPartition(input);
return input.mapPartition(new RichMapPartitionFunction<T, Tuple2<Long, T>>() {
long start = 0;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
List<Tuple2<Integer, Long>> offsets = getRuntimeContext().getBroadcastVariableWithInitializer("counts", new BroadcastVariableInitializer<Tuple2<Integer, Long>, List<Tuple2<Integer, Long>>>() {
@Override
public List<Tuple2<Integer, Long>> initializeBroadcastVariable(Iterable<Tuple2<Integer, Long>> data) {
// sort the list by task id to
// calculate the correct offset
List<Tuple2<Integer, Long>> sortedData = new ArrayList<>();
for (Tuple2<Integer, Long> datum : data) {
sortedData.add(datum);
}
Collections.sort(sortedData, new Comparator<Tuple2<Integer, Long>>() {
@Override
public int compare(Tuple2<Integer, Long> o1, Tuple2<Integer, Long> o2) {
return o1.f0.compareTo(o2.f0);
}
});
return sortedData;
}
});
// compute the offset for each partition
for (int i = 0; i < getRuntimeContext().getIndexOfThisSubtask(); i++) {
start += offsets.get(i).f1;
}
}
@Override
public void mapPartition(Iterable<T> values, Collector<Tuple2<Long, T>> out) throws Exception {
for (T value : values) {
out.collect(new Tuple2<>(start++, value));
}
}
}).withBroadcastSet(elementCount, "counts");
}
| 3.26 |
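The offset computation in zipWithIndex's open() boils down to: sort the (subtask index, count) pairs broadcast from countElementsPerPartition and sum the counts of all subtasks with a smaller index. A plain-Java sketch of that arithmetic, with made-up counts; it is not Flink code, only the numbering scheme.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ZipWithIndexOffsetSketch {
    public static void main(String[] args) {
        // (subtask index, element count) pairs, as broadcast from countElementsPerPartition.
        List<long[]> counts = new ArrayList<>();
        counts.add(new long[] {2, 5}); // subtask 2 holds 5 elements
        counts.add(new long[] {0, 3}); // subtask 0 holds 3 elements
        counts.add(new long[] {1, 4}); // subtask 1 holds 4 elements

        // Sort by subtask index so offsets are assigned in task order.
        counts.sort(Comparator.comparingLong(t -> t[0]));

        int thisSubtask = 2;
        long start = 0;
        for (int i = 0; i < thisSubtask; i++) {
            start += counts.get(i)[1];
        }
        // Subtask 2 starts numbering at 3 + 4 = 7; its elements get ids 7, 8, 9, ...
        System.out.println("first index for subtask " + thisSubtask + ": " + start);
    }
}
```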
flink_DataSetUtils_zipWithUniqueId_rdh
|
/**
* Method that assigns a unique {@link Long} value to all elements in the input data set as
* described below.
*
* <ul>
* <li>a map function is applied to the input data set
* <li>each map task holds a counter c which is increased for each record
* <li>c is shifted by n bits where n = log2(number of parallel tasks)
* <li>to create a unique ID among all tasks, the task id is added to the counter
* <li>for each record, the resulting counter is collected
* </ul>
*
* @param input
* the input data set
* @return a data set of tuple 2 consisting of ids and initial values.
*/
public static <T> DataSet<Tuple2<Long, T>> zipWithUniqueId(DataSet<T> input) {
return input.mapPartition(new RichMapPartitionFunction<T, Tuple2<Long, T>>() {
long maxBitSize = getBitSize(Long.MAX_VALUE);
long shifter = 0;
long start = 0;
long taskId = 0;
long label = 0;
@Override
public void m0(OpenContext openContext) throws Exception {
super.open(openContext);
shifter = getBitSize(getRuntimeContext().getNumberOfParallelSubtasks() - 1);
taskId = getRuntimeContext().getIndexOfThisSubtask();
}
@Override
public void mapPartition(Iterable<T> values, Collector<Tuple2<Long, T>> out) throws Exception {
for (T value : values) {
label = (start << shifter) + taskId;
if ((getBitSize(start) + shifter) < maxBitSize) {
out.collect(new Tuple2<>(label, value));
start++;
} else {
throw new Exception("Exceeded Long value range while generating labels");
}
}
}
});
}
| 3.26 |
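The zipWithUniqueId javadoc describes the label scheme (shift a per-task counter by the bit width of the task-id space, then add the task id). A self-contained sketch of that computation, reusing the getBitSize logic from the snippet; parallelism and counter values are example numbers.

```java
public class ZipWithUniqueIdLabelSketch {
    // Mirrors DataSetUtils.getBitSize(long) from the snippets above.
    static int getBitSize(long value) {
        if (value > Integer.MAX_VALUE) {
            return 64 - Integer.numberOfLeadingZeros((int) (value >> 32));
        } else {
            return 32 - Integer.numberOfLeadingZeros((int) value);
        }
    }

    public static void main(String[] args) {
        int parallelism = 4;                       // 4 parallel subtasks
        int shifter = getBitSize(parallelism - 1); // bits needed for task ids -> 2
        for (int taskId = 0; taskId < parallelism; taskId++) {
            for (long counter = 0; counter < 3; counter++) {
                long label = (counter << shifter) + taskId;
                System.out.printf("task %d, counter %d -> id %d%n", taskId, counter, label);
            }
        }
        // task 0 emits 0, 4, 8, ...; task 1 emits 1, 5, 9, ...; ids are unique but not consecutive.
    }
}
```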
flink_DataSetUtils_countElementsPerPartition_rdh
|
/**
* Method that goes over all the elements in each partition in order to retrieve the total
* number of elements.
*
* @param input
* the DataSet received as input
* @return a data set containing tuples of subtask index, number of elements mappings.
*/
public static <T> DataSet<Tuple2<Integer, Long>> countElementsPerPartition(DataSet<T> input) {
return input.mapPartition(new RichMapPartitionFunction<T, Tuple2<Integer, Long>>() {
@Override
public void mapPartition(Iterable<T> values, Collector<Tuple2<Integer, Long>> out) throws Exception {
long counter = 0;
for (T value : values) {
counter++;
}
out.collect(new Tuple2<>(getRuntimeContext().getIndexOfThisSubtask(), counter));
}
});
}
| 3.26 |
flink_DataSetUtils_getBitSize_rdh
|
// *************************************************************************
// UTIL METHODS
// *************************************************************************
public static int getBitSize(long value) {
if (value > Integer.MAX_VALUE) {
return 64 - Integer.numberOfLeadingZeros(((int) (value >> 32)));
} else {
return 32 - Integer.numberOfLeadingZeros(((int) (value)));
}
}
| 3.26 |
flink_DataSetUtils_summarize_rdh
|
// --------------------------------------------------------------------------------------------
// Summarize
// --------------------------------------------------------------------------------------------
/**
* Summarize a DataSet of Tuples by collecting single pass statistics for all columns.
*
* <p>Example usage:
*
* <pre>{@code DataSet<Tuple3<Double, String, Boolean>> input = // [...]
* Tuple3<NumericColumnSummary,StringColumnSummary, BooleanColumnSummary> summary = DataSetUtils.summarize(input)
*
* summary.f0.getStandardDeviation()
* summary.f1.getMaxLength()}</pre>
*
* @return the summary as a Tuple the same width as input rows
*/
public static <R extends Tuple, T extends Tuple> R summarize(DataSet<T> input) throws Exception {
if (!input.getType().isTupleType()) {
throw new IllegalArgumentException("summarize() is only implemented for DataSet's of Tuples");
}
final TupleTypeInfoBase<?> inType = ((TupleTypeInfoBase<?>) (input.getType()));
DataSet<TupleSummaryAggregator<R>> result = input.mapPartition(new MapPartitionFunction<T, TupleSummaryAggregator<R>>() {
@Override
public void mapPartition(Iterable<T> values, Collector<TupleSummaryAggregator<R>> out) throws Exception {
TupleSummaryAggregator<R> aggregator = SummaryAggregatorFactory.create(inType);
for (Tuple value : values) {
aggregator.aggregate(value);
}
out.collect(aggregator);
}
}).reduce(new ReduceFunction<TupleSummaryAggregator<R>>() {
@Override
public TupleSummaryAggregator<R> reduce(TupleSummaryAggregator<R> agg1, TupleSummaryAggregator<R> agg2) throws Exception {
agg1.combine(agg2);
return agg1;
}
});
return result.collect().get(0).result();
}
| 3.26 |
flink_DataSetUtils_sampleWithSize_rdh
|
/**
* Generate a sample of DataSet which contains fixed size elements.
*
* <p><strong>NOTE:</strong> Sample with fixed size is not as efficient as sample with fraction,
* use sample with fraction unless you need exact precision.
*
* @param withReplacement
* Whether element can be selected more than once.
* @param numSamples
* The expected sample size.
* @param seed
* Random number generator seed.
* @return The sampled DataSet
*/
public static <T> DataSet<T> sampleWithSize(DataSet<T> input, final boolean withReplacement, final int numSamples, final long seed) {
SampleInPartition<T> sampleInPartition = new SampleInPartition<>(withReplacement, numSamples, seed);
MapPartitionOperator mapPartitionOperator = input.mapPartition(sampleInPartition);
// There is no previous group, so the parallelism of GroupReduceOperator is always 1.
String callLocation = Utils.getCallLocationName();
SampleInCoordinator<T> sampleInCoordinator = new SampleInCoordinator<>(withReplacement, numSamples, seed);
return new GroupReduceOperator<>(mapPartitionOperator, input.getType(), sampleInCoordinator, callLocation);
}
| 3.26 |
flink_FlinkZooKeeperQuorumPeer_setRequiredProperties_rdh
|
/**
* Sets required properties to reasonable defaults and logs it.
*/
private static void setRequiredProperties(Properties zkProps) {
// Set default client port
if (zkProps.getProperty("clientPort") == null) {
zkProps.setProperty("clientPort", String.valueOf(DEFAULT_ZOOKEEPER_CLIENT_PORT));
LOG.warn("No 'clientPort' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_CLIENT_PORT);
}
// Set default init limit
if (zkProps.getProperty("initLimit") == null) {
zkProps.setProperty("initLimit", String.valueOf(DEFAULT_ZOOKEEPER_INIT_LIMIT));
LOG.warn("No 'initLimit' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_INIT_LIMIT);
}
// Set default sync limit
if (zkProps.getProperty("syncLimit") == null) {
zkProps.setProperty("syncLimit", String.valueOf(DEFAULT_ZOOKEEPER_SYNC_LIMIT)); LOG.warn("No 'syncLimit' configured. Set to '{}'.", DEFAULT_ZOOKEEPER_SYNC_LIMIT);
}
// Set default data dir
if (zkProps.getProperty("dataDir") == null) {
String dataDir = String.format("%s/%s/zookeeper", System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
zkProps.setProperty("dataDir", dataDir);
LOG.warn("No 'dataDir' configured. Set to '{}'.", dataDir);
}
int peerPort = DEFAULT_ZOOKEEPER_PEER_PORT;
int leaderPort = DEFAULT_ZOOKEEPER_LEADER_PORT;
// Set peer and leader ports if none given, because ZooKeeper complains if multiple
// servers are configured, but no ports are given.
for (Map.Entry<Object, Object> entry : zkProps.entrySet()) {
String key = ((String) (entry.getKey()));
if (entry.getKey().toString().startsWith("server.")) {
String value = ((String) (entry.getValue()));
String[] parts = value.split(":");
if (parts.length == 1) {
String address = String.format("%s:%d:%d", parts[0], peerPort, leaderPort);
zkProps.setProperty(key, address);
LOG.info("Set peer and leader port of '{}': '{}' => '{}'.", key, value, address);
} else if (parts.length == 2) {
String address = String.format("%s:%d:%d", parts[0], Integer.valueOf(parts[1]), leaderPort);
zkProps.setProperty(key, address);
LOG.info("Set peer port of '{}': '{}' => '{}'.", key, value, address);
}
}
}
}
/**
* Write 'myid' file to the 'dataDir' in the given ZooKeeper configuration.
*
* <blockquote>
*
* Every machine that is part of the ZooKeeper ensemble should know about every other machine in
* the ensemble. You accomplish this with the series of lines of the form
* server.id=host:port:port. The parameters host and port are straightforward. You attribute the
* server id to each machine by creating a file named myid, one for each server, which resides
* in that server's data directory, as specified by the configuration file parameter dataDir.
*
* </blockquote>
*
* @param zkProps
* ZooKeeper configuration.
* @param id
* The ID of this {@link QuorumPeer}
| 3.26 |
flink_FlinkZooKeeperQuorumPeer_main_rdh
|
// ------------------------------------------------------------------------
public static void main(String[] args) {
try {
// startup checks and logging
EnvironmentInformation.logEnvironmentInfo(LOG, "ZooKeeper Quorum Peer", args);
final ParameterTool params = ParameterTool.fromArgs(args);
final String zkConfigFile = params.getRequired("zkConfigFile");
final int peerId = params.getInt("peerId");
// Run quorum peer
runFlinkZkQuorumPeer(zkConfigFile, peerId);
} catch (Throwable t) {
LOG.error("Error running ZooKeeper quorum peer: " + t.getMessage(),
t);
System.exit(-1);
}
}
| 3.26 |
flink_FlinkZooKeeperQuorumPeer_runFlinkZkQuorumPeer_rdh
|
// ------------------------------------------------------------------------
/**
* Runs a ZooKeeper {@link QuorumPeer} if further peers are configured or a single {@link ZooKeeperServer} if no further peers are configured.
*
* @param zkConfigFile
* ZooKeeper config file 'zoo.cfg'
* @param peerId
* ID for the 'myid' file
*/
public static void runFlinkZkQuorumPeer(String zkConfigFile, int peerId) throws Exception {
Properties zkProps = new Properties();
try (InputStream inStream = new FileInputStream(new File(zkConfigFile))) {
zkProps.load(inStream);
}
LOG.info("Configuration: " + zkProps);
// Set defaults for required properties
setRequiredProperties(zkProps);
// Write peer id to myid file
writeMyIdToDataDir(zkProps, peerId);
// The myid file needs to be written before creating the instance. Otherwise, this
// will fail.
QuorumPeerConfig v5 = new QuorumPeerConfig();
v5.parseProperties(zkProps);
if (v5.isDistributed()) {
// Run quorum peer
LOG.info("Running distributed ZooKeeper quorum peer (total peers: {}).", v5.getServers().size());
QuorumPeerMain qp = new QuorumPeerMain();
qp.runFromConfig(v5);
} else {
// Run standalone
LOG.info("Running standalone ZooKeeper quorum peer.");
ZooKeeperServerMain zk = new ZooKeeperServerMain();
ServerConfig sc = new ServerConfig();
sc.readFrom(v5);
zk.runFromConfig(sc);
}
}
| 3.26 |
flink_AbstractStreamingWriter_commitUpToCheckpoint_rdh
|
/**
* Commit up to this checkpoint id.
*/
protected void commitUpToCheckpoint(long checkpointId) throws Exception {
helper.commitUpToCheckpoint(checkpointId);
}
| 3.26 |
flink_ContaineredTaskManagerParameters_create_rdh
|
// ------------------------------------------------------------------------
// Factory
// ------------------------------------------------------------------------
/**
* Computes the parameters to be used to start a TaskManager Java process.
*
* @param config
* The Flink configuration.
* @param taskExecutorProcessSpec
* The resource specifics of the task executor.
* @return The parameters to start the TaskManager processes with.
*/
public static ContaineredTaskManagerParameters create(Configuration config, TaskExecutorProcessSpec taskExecutorProcessSpec) {
// obtain the additional environment variables from the configuration
final HashMap<String, String> envVars = new HashMap<>();
final String prefix = ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX;
for (String key : config.keySet()) {
if (key.startsWith(prefix) && (key.length() > prefix.length())) {
// remove prefix
String envVarKey = key.substring(prefix.length());
envVars.put(envVarKey, config.getString(key, null));
}
}
// done
return new ContaineredTaskManagerParameters(taskExecutorProcessSpec, envVars);
}
| 3.26 |
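The loop in create(...) turns every config key under the containerized TaskManager environment prefix into an environment variable. A minimal plain-Java sketch of the same prefix stripping; the prefix literal and the keys are illustrative assumptions, the real value comes from ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX.

```java
import java.util.HashMap;
import java.util.Map;

public class EnvPrefixSketch {
    public static void main(String[] args) {
        // Illustrative prefix; the real constant lives in ResourceManagerOptions.
        String prefix = "containerized.taskmanager.env.";

        Map<String, String> config = new HashMap<>();
        config.put("containerized.taskmanager.env.JAVA_HOME", "/opt/jdk");
        config.put("taskmanager.numberOfTaskSlots", "2"); // not exported

        Map<String, String> envVars = new HashMap<>();
        for (Map.Entry<String, String> entry : config.entrySet()) {
            String key = entry.getKey();
            if (key.startsWith(prefix) && key.length() > prefix.length()) {
                envVars.put(key.substring(prefix.length()), entry.getValue());
            }
        }
        System.out.println(envVars); // {JAVA_HOME=/opt/jdk}
    }
}
```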
flink_ContaineredTaskManagerParameters_toString_rdh
|
// ------------------------------------------------------------------------
@Override
public String toString() {
return (((("TaskManagerParameters {" + "taskExecutorProcessSpec=") + taskExecutorProcessSpec) + ", taskManagerEnv=") + taskManagerEnv) + '}';
}
| 3.26 |
flink_ContaineredTaskManagerParameters_getTaskExecutorProcessSpec_rdh
|
// ------------------------------------------------------------------------
public TaskExecutorProcessSpec getTaskExecutorProcessSpec() {
return taskExecutorProcessSpec;
}
| 3.26 |
flink_SlidingProcessingTimeWindows_m1_rdh
|
/**
* Creates a new {@code SlidingProcessingTimeWindows} {@link WindowAssigner} that assigns
* elements to time windows based on the current processing time and offset.
*
* <p>For example, if you want to window a stream by hour, but the window begins at the 15th minute
* of each hour, you can use {@code of(Time.hours(1),Time.minutes(15))}; then you will get time
* windows that start at 0:15:00, 1:15:00, 2:15:00, etc.
*
* <p>If, instead, you live somewhere that does not use UTC±00:00 time, such as China, which uses
* UTC+08:00, and you want a time window with a size of one day whose windows begin at every
* 00:00:00 of local time, you may use {@code of(Time.days(1),Time.hours(-8))}. The offset
* parameter is {@code Time.hours(-8)} since UTC+08:00 is 8 hours ahead of UTC time.
*
* @param size
* The size of the generated windows.
* @param slide
* The slide interval of the generated windows.
* @param offset
* The offset which window start would be shifted by.
* @return The time policy.
*/
public static SlidingProcessingTimeWindows m1(Time size, Time slide, Time offset) {
return new SlidingProcessingTimeWindows(size.toMilliseconds(), slide.toMilliseconds(), offset.toMilliseconds());
}
| 3.26 |
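A small sketch of building the offset-shifted assigner described in the javadoc above. The factory is named m1 in this snippet; this example assumes it corresponds to the public of(size, slide, offset) factory on SlidingProcessingTimeWindows, and only constructs the assigner rather than a full job.

```java
import org.apache.flink.streaming.api.windowing.assigners.SlidingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class OffsetWindowSketch {
    public static void main(String[] args) {
        // One-hour windows sliding every 10 minutes, shifted so each window starts at :15.
        SlidingProcessingTimeWindows assigner =
                SlidingProcessingTimeWindows.of(Time.hours(1), Time.minutes(10), Time.minutes(15));
        System.out.println(assigner);
    }
}
```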
flink_SlidingProcessingTimeWindows_m0_rdh
|
/**
* Creates a new {@code SlidingProcessingTimeWindows} {@link WindowAssigner} that assigns
* elements to sliding time windows based on the current processing time.
*
* @param size
* The size of the generated windows.
* @param slide
* The slide interval of the generated windows.
* @return The time policy.
*/
public static SlidingProcessingTimeWindows m0(Time size, Time slide) {
return new SlidingProcessingTimeWindows(size.toMilliseconds(), slide.toMilliseconds(), 0);
}
| 3.26 |
flink_FileSourceRecordEmitter_emitRecord_rdh
|
/**
* The {@link RecordEmitter} implementation for {@link FileSourceReader}.
*
* <p>This updates the {@link FileSourceSplit} for every emitted record. Because the {@link FileSourceSplit} points to the position from where to start reading (after recovery), the current
* offset and records-to-skip need to always point to the record after the emitted record.
*/
@Internal
final class FileSourceRecordEmitter<T, SplitT extends FileSourceSplit> implements RecordEmitter<RecordAndPosition<T>, T, FileSourceSplitState<SplitT>> {
@Override
public void emitRecord(final RecordAndPosition<T> element, final SourceOutput<T> output, final FileSourceSplitState<SplitT> splitState) {
output.collect(element.getRecord());
splitState.setPosition(element.getOffset(), element.getRecordSkipCount());
}
| 3.26 |
flink_TimestampsAndWatermarksOperator_m0_rdh
|
/**
* Override the base implementation to completely ignore statuses propagated from upstream.
*/
@Override
public void m0(WatermarkStatus watermarkStatus) throws Exception {
}
| 3.26 |
flink_TimestampsAndWatermarksOperator_processWatermark_rdh
|
/**
* Override the base implementation to completely ignore watermarks propagated from upstream,
* except for the "end of time" watermark.
*/
@Override
public void processWatermark(Watermark mark) throws Exception {
// if we receive a Long.MAX_VALUE watermark we forward it since it is used
// to signal the end of input and to not block watermark progress downstream
if (mark.getTimestamp() == Long.MAX_VALUE) {
wmOutput.emitWatermark(Watermark.MAX_WATERMARK);
}
}
| 3.26 |
flink_CompensatedSum_delta_rdh
|
/**
* The correction term.
*/
public double delta() {
return delta;
}
| 3.26 |
flink_CompensatedSum_value_rdh
|
/**
* The value of the sum.
*/
public double value() {
return value;
}
| 3.26 |
flink_CompensatedSum_add_rdh
|
/**
* Increments the Kahan sum by adding two sums, and updating the correction term for reducing
* numeric errors.
*/
public CompensatedSum add(CompensatedSum other) {
double correctedSum = other.value() + (delta + other.delta());
double updatedValue = value + correctedSum;
double updatedDelta = correctedSum - (updatedValue - value);
return new CompensatedSum(updatedValue, updatedDelta);
}
| 3.26 |
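The delta field in CompensatedSum is the running Kahan correction term. A self-contained sketch (not part of the Flink API) of the same update rule with a plain double operand, showing why the correction matters when adding many values that are individually too small to move the running sum.

```java
public class KahanSumSketch {
    public static void main(String[] args) {
        // Start from a large value, then add many values that are individually
        // lost to rounding in plain double arithmetic.
        double naive = 1.0;

        double value = 1.0; // running compensated sum
        double delta = 0.0; // correction term, as in CompensatedSum

        for (int i = 0; i < 10_000_000; i++) {
            double x = 1e-16;
            naive += x; // rounds back to 1.0 every time

            // Same update rule as CompensatedSum.add.
            double correctedSum = x + delta;
            double updatedValue = value + correctedSum;
            delta = correctedSum - (updatedValue - value);
            value = updatedValue;
        }

        System.out.println("naive:       " + naive); // 1.0
        System.out.println("compensated: " + value); // close to 1.000000001
    }
}
```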
flink_SlotProfile_getPreferredLocations_rdh
|
/**
* Returns the preferred locations for the slot.
*/
public Collection<TaskManagerLocation> getPreferredLocations() {
return preferredLocations;
}
| 3.26 |
flink_SlotProfile_getReservedAllocations_rdh
|
/**
* Returns a set of all reserved allocation ids from the execution graph. It will be used by
* {@link PreviousAllocationSlotSelectionStrategy} to support local recovery. In this case, a vertex
* cannot take a reserved allocation unless it exactly prefers that allocation.
*
* <p>This is optional and can be empty if unused.
*/
public Set<AllocationID> getReservedAllocations() {
return reservedAllocations;
}
| 3.26 |
flink_SlotProfile_priorAllocation_rdh
|
/**
* Returns a slot profile for the given resource profile, prior allocations and all prior
* allocation ids from the whole execution graph.
*
* @param taskResourceProfile
* specifying the required resources for the task slot
* @param physicalSlotResourceProfile
* specifying the required resources for the physical slot to
* host this task slot
* @param preferredLocations
* specifying the preferred locations
* @param priorAllocations
* specifying the prior allocations
* @param reservedAllocations
* specifying all reserved allocations
* @return Slot profile with all the given information
*/
public static SlotProfile priorAllocation(final ResourceProfile taskResourceProfile, final ResourceProfile physicalSlotResourceProfile, final Collection<TaskManagerLocation> preferredLocations, final Collection<AllocationID> priorAllocations, final Set<AllocationID> reservedAllocations) {
return new SlotProfile(taskResourceProfile, physicalSlotResourceProfile, preferredLocations, priorAllocations, reservedAllocations);
}
| 3.26 |
flink_SlotProfile_getPhysicalSlotResourceProfile_rdh
|
/**
* Returns the desired resource profile for the physical slot to host this task slot.
*/
public ResourceProfile getPhysicalSlotResourceProfile() {
return physicalSlotResourceProfile;
}
| 3.26 |
flink_SlotProfile_getTaskResourceProfile_rdh
|
/**
* Returns the desired resource profile for the task slot.
*/
public ResourceProfile getTaskResourceProfile() {
return taskResourceProfile;
}
| 3.26 |
flink_SlotProfile_getPreferredAllocations_rdh
|
/**
* Returns the desired allocation ids for the slot.
*/
public Collection<AllocationID> getPreferredAllocations() {
return preferredAllocations;
}
| 3.26 |
flink_LongMinimum_add_rdh
|
// ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(long value) {
this.min = Math.min(this.min, value);
}
| 3.26 |
flink_LongMinimum_toString_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "LongMinimum " + this.min;
}
| 3.26 |
flink_StreamingSemiAntiJoinOperator_processElement1_rdh
|
/**
* Process an input element and output incremental joined records, retraction messages will be
* sent in some scenarios.
*
* <p>Following is the pseudo code to describe the core logic of this method.
*
* <pre>
* if there is no matched rows on the other side
* if anti join, send input record
* if there are matched rows on the other side
* if semi join, send input record
* if the input record is accumulate, state.add(record, matched size)
* if the input record is retract, state.retract(record)
* </pre>
*/
@Override
public void processElement1(StreamRecord<RowData> element) throws Exception {
RowData input = element.getValue();
AssociatedRecords associatedRecords = AssociatedRecords.of(input, true, rightRecordStateView, joinCondition);
if (associatedRecords.isEmpty()) {
if (isAntiJoin) {
collector.collect(input);
}
} else {
// there are matched rows on the other side
if (!isAntiJoin) {
collector.collect(input);
}
}
if (RowDataUtil.isAccumulateMsg(input)) {
// erase RowKind for state updating
input.setRowKind(RowKind.INSERT);
leftRecordStateView.addRecord(input, associatedRecords.size());
} else {
// input is retract
// erase RowKind for state updating
input.setRowKind(RowKind.INSERT);
leftRecordStateView.retractRecord(input);
}
}
| 3.26 |
flink_StreamingSemiAntiJoinOperator_processElement2_rdh
|
/**
* Process an input element and output incremental joined records, retraction messages will be
* sent in some scenarios.
*
* <p>Following is the pseudo code to describe the core logic of this method.
*
* <p>Note: "+I" represents "INSERT", "-D" represents "DELETE", "+U" represents "UPDATE_AFTER",
* "-U" represents "UPDATE_BEFORE".
*
* <pre>
* if input record is accumulate
* | state.add(record)
* | if there is no matched rows on the other side, skip
* | if there are matched rows on the other side
* | | if the matched num in the matched rows == 0
* | | if anti join, send -D[other]s
* | | if semi join, send +I/+U[other]s (using input RowKind)
* | | if the matched num in the matched rows > 0, skip
* | | otherState.update(other, old+1)
* | endif
* endif
* if input record is retract
* | state.retract(record)
* | if there is no matched rows on the other side, skip
* | if there are matched rows on the other side
* | | if the matched num in the matched rows == 0, this should never happen!
* | | if the matched num in the matched rows == 1
* | | if semi join, send -D/-U[other] (using input RowKind)
* | | if anti join, send +I[other]
* | | if the matched num in the matched rows > 1, skip
* | | otherState.update(other, old-1)
* | endif
* endif
* </pre>
*/
@Override
public void processElement2(StreamRecord<RowData> element) throws Exception {
RowData input = element.getValue();
boolean isAccumulateMsg = RowDataUtil.isAccumulateMsg(input);
RowKind inputRowKind = input.getRowKind();
input.setRowKind(RowKind.INSERT); // erase RowKind for later state updating
AssociatedRecords associatedRecords = AssociatedRecords.of(input, false, leftRecordStateView, joinCondition);
if (isAccumulateMsg) {
// record is accumulate
rightRecordStateView.addRecord(input);
if (!associatedRecords.isEmpty()) {
// there are matched rows on the other side
for (OuterRecord outerRecord : associatedRecords.getOuterRecords()) {
RowData other = outerRecord.record;
if (outerRecord.numOfAssociations == 0) {
if (isAntiJoin) {
// send -D[other]
other.setRowKind(RowKind.DELETE);
} else {
// send +I/+U[other] (using input RowKind)
                            other.setRowKind(inputRowKind);
                        }
collector.collect(other);
// set header back to INSERT, because we will update the other row to state
other.setRowKind(RowKind.INSERT);
}// ignore when number > 0
leftRecordStateView.updateNumOfAssociations(other, outerRecord.numOfAssociations + 1);
}
}// ignore when associated number == 0
} else {
// retract input
rightRecordStateView.retractRecord(input);
if (!associatedRecords.isEmpty()) {
// there are matched rows on the other side
for (OuterRecord outerRecord : associatedRecords.getOuterRecords()) {
RowData other = outerRecord.record;
if (outerRecord.numOfAssociations == 1) {
                        if (!isAntiJoin) {
// send -D/-U[other] (using input RowKind)
other.setRowKind(inputRowKind);
} else {
// send +I[other]
other.setRowKind(RowKind.INSERT);
}
collector.collect(other);
// set RowKind back, because we will update the other row to state
other.setRowKind(RowKind.INSERT);
}// ignore when number > 0
leftRecordStateView.updateNumOfAssociations(other, outerRecord.numOfAssociations - 1);
}
}// ignore when associated number == 0
}
}
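
    // Hypothetical helper (not part of the operator) condensing the per-matched-row decision
    // above: given the kind of the arriving right-side record and the current association count
    // of a stored left row, return the RowKind to emit for that left row, or null to emit nothing.
    static RowKind kindToEmitForLeftRow(
            boolean isAntiJoin, boolean rightIsAccumulate, int associations, RowKind inputRowKind) {
        if (rightIsAccumulate && associations == 0) {
            // first match appears: anti join retracts the previously emitted row, semi join emits it
            return isAntiJoin ? RowKind.DELETE : inputRowKind;
        }
        if (!rightIsAccumulate && associations == 1) {
            // last match disappears: semi join retracts, anti join (re-)emits
            return isAntiJoin ? RowKind.INSERT : inputRowKind;
        }
        return null; // other association counts do not change what is visible downstream
    }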
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getColumnExprProcessor_rdh
|
/**
* Factory method to get ColumnExprProcessor.
*/
public HiveParserTypeCheckProcFactory.ColumnExprProcessor getColumnExprProcessor() {
return new HiveParserTypeCheckProcFactory.ColumnExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getFuncExprNodeDescWithUdfData_rdh
|
/**
* This function create an ExprNodeDesc for a UDF function given the children (arguments).
* It will insert implicit type conversion functions if necessary. Currently this is only
* used to handle CAST with hive UDFs. So no need to check flink functions.
*/
public static ExprNodeDesc getFuncExprNodeDescWithUdfData(String udfName, TypeInfo typeInfo, ExprNodeDesc... children) throws UDFArgumentException {
FunctionInfo fi;
try {
fi = HiveParserUtils.getFunctionInfo(udfName);
} catch (SemanticException e) {
throw new UDFArgumentException(e);
}
if (fi == null) {
throw new UDFArgumentException(udfName + " not found.");
}
GenericUDF genericUDF = fi.getGenericUDF();
if (genericUDF == null) {
throw new UDFArgumentException(udfName + " is an aggregation function or a table function.");
}
// Add udfData to UDF if necessary
if (typeInfo != null) {
if (genericUDF instanceof SettableUDF) {
((SettableUDF) (genericUDF)).setTypeInfo(typeInfo);
}
}
List<ExprNodeDesc> childrenList = new ArrayList<>(children.length);
childrenList.addAll(Arrays.asList(children));
return ExprNodeGenericFuncDesc.newInstance(genericUDF, udfName, childrenList);
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_convert_rdh
|
// temporary type-safe casting
private static Map<HiveParserASTNode, ExprNodeDesc> convert(Map<Node, Object> outputs) {
Map<HiveParserASTNode, ExprNodeDesc> converted = new LinkedHashMap<>();
for (Map.Entry<Node, Object> v13 : outputs.entrySet()) {
            if ((v13.getKey() instanceof HiveParserASTNode)
                    && ((v13.getValue() == null) || (v13.getValue() instanceof ExprNodeDesc))) {
                converted.put((HiveParserASTNode) v13.getKey(), (ExprNodeDesc) v13.getValue());
} else {
LOG.warn("Invalid type entry " + v13);
}
}
return converted;
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getDateTimeExprProcessor_rdh
|
/**
* Factory method to get DateExprProcessor.
*/
    public HiveParserTypeCheckProcFactory.DateTimeExprProcessor getDateTimeExprProcessor() {
        return new HiveParserTypeCheckProcFactory.DateTimeExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getNumExprProcessor_rdh
|
/**
* Factory method to get NumExprProcessor.
*/
public HiveParserTypeCheckProcFactory.NumExprProcessor getNumExprProcessor() {
return new HiveParserTypeCheckProcFactory.NumExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_convertSqlOperator_rdh
|
// try to create an ExprNodeDesc with a SqlOperator
private ExprNodeDesc convertSqlOperator(String funcText, List<ExprNodeDesc> children, HiveParserTypeCheckCtx ctx) throws SemanticException {
SqlOperator sqlOperator = HiveParserUtils.getSqlOperator(funcText, ctx.getSqlOperatorTable(), SqlFunctionCategory.USER_DEFINED_FUNCTION);
        if (sqlOperator == null) {
            return null;
}
        List<RelDataType> relDataTypes =
                children.stream()
                        .map(ExprNodeDesc::getTypeInfo)
                        .map(t -> {
                            try {
                                return HiveParserTypeConverter.convert(t, ctx.getTypeFactory());
                            } catch (SemanticException e) {
                                throw new FlinkHiveException(e);
                            }
                        })
                        .collect(Collectors.toList());
List<RexNode> operands = new ArrayList<>(children.size());
for (ExprNodeDesc child : children) {
if (child instanceof ExprNodeConstantDesc) {
operands.add(HiveParserRexNodeConverter.convertConstant(((ExprNodeConstantDesc) (child)), ctx.getCluster()));
}
else {
operands.add(null);
}
}
TypeInfo returnType = HiveParserTypeConverter.convert(HiveParserUtils.inferReturnTypeForOperandsTypes(sqlOperator, relDataTypes, operands, ctx.getTypeFactory()));
return new SqlOperatorExprNodeDesc(funcText, sqlOperator, children, returnType);
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_processGByExpr_rdh
|
/**
* Function to do groupby subexpression elimination. This is called by all the processors
* initially. As an example, consider the query select a+b, count(1) from T group by a+b; Then
* a+b is already precomputed in the group by operators key, so we substitute a+b in the select
* list with the internal column name of the a+b expression that appears in the in input row
* resolver.
*
* @param nd
* The node that is being inspected.
* @param procCtx
* The processor context.
* @return exprNodeColumnDesc.
     */
    public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) throws SemanticException {
// We recursively create the exprNodeDesc. Base cases: when we encounter
// a column ref, we convert that into an exprNodeColumnDesc; when we
// encounter
// a constant, we convert that into an exprNodeConstantDesc. For others we
// just
// build the exprNodeFuncDesc with recursively built children.
HiveParserASTNode expr = ((HiveParserASTNode) (nd));
HiveParserTypeCheckCtx ctx = ((HiveParserTypeCheckCtx) (procCtx));
// bypass only if outerRR is not null. Otherwise we need to look for expressions in outerRR
// for
// subqueries e.g. select min(b.value) from table b group by b.key
// having key in (select .. where a = min(b.value)
if ((!ctx.isUseCaching()) && (ctx.getOuterRR() == null)) {
return null;
}
HiveParserRowResolver input = ctx.getInputRR();
ExprNodeDesc desc = null;
if ((input == null) || (!ctx.getAllowGBExprElimination())) {
return null;
}
// If the current subExpression is pre-calculated, as in Group-By etc.
ColumnInfo colInfo = input.getExpression(expr);
// try outer row resolver
HiveParserRowResolver v5 = ctx.getOuterRR();
if ((colInfo == null) && (v5 != null)) {
colInfo = v5.getExpression(expr);
}
if (colInfo != null) {
desc = new ExprNodeColumnDesc(colInfo);
HiveParserASTNode source = input.getExpressionSource(expr);
            if (source != null) {
ctx.getUnparseTranslator().addCopyTranslation(expr, source);
}
return desc;
}
return desc;
}
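
    // Simplified illustration (hypothetical text-keyed lookup; the real code resolves against
    // the row resolver by AST node) of the substitution described above: if "a + b" was already
    // materialized by the group-by stage, reuse its internal column name instead of recomputing.
    static String resolveSelectExpr(String exprText, Map<String, String> groupByInternalColumns) {
        return groupByInternalColumns.getOrDefault(exprText, exprText);
    }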
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getNullExprProcessor_rdh
|
/**
* Factory method to get NullExprProcessor.
*/
public HiveParserTypeCheckProcFactory.NullExprProcessor getNullExprProcessor() {
return new HiveParserTypeCheckProcFactory.NullExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getStrExprProcessor_rdh
|
/**
* Factory method to get StrExprProcessor.
*/
public HiveParserTypeCheckProcFactory.StrExprProcessor getStrExprProcessor() {
return new HiveParserTypeCheckProcFactory.StrExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getBoolExprProcessor_rdh
|
/**
* Factory method to get BoolExprProcessor.
*/
public HiveParserTypeCheckProcFactory.BoolExprProcessor getBoolExprProcessor() {
return new HiveParserTypeCheckProcFactory.BoolExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getIntervalExprProcessor_rdh
|
/**
* Factory method to get IntervalExprProcessor.
*/
    public HiveParserTypeCheckProcFactory.IntervalExprProcessor getIntervalExprProcessor() {
        return new HiveParserTypeCheckProcFactory.IntervalExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_getDefaultExprProcessor_rdh
|
/**
* Factory method to get DefaultExprProcessor.
*/
public HiveParserTypeCheckProcFactory.DefaultExprProcessor getDefaultExprProcessor() {
return new HiveParserTypeCheckProcFactory.DefaultExprProcessor();
}
| 3.26 |
flink_HiveParserTypeCheckProcFactory_isDescendant_rdh
|
// Returns true if des is a descendant of ans (ancestor)
private boolean isDescendant(Node ans, Node des) {
        if (ans.getChildren() == null) {
            return false;
}
for (Node v120 : ans.getChildren()) {
if (v120 == des) {
return true;
}
            if (isDescendant(v120, des)) {
                return true;
            }
}
return false;
}
| 3.26 |
flink_FlinkBushyJoinReorderRule_findBestOrder_rdh
|
/**
* Find best join reorder using bushy join reorder strategy. We will first try to reorder all
* the inner join type input factors in the multiJoin. Then, we will add all outer join factors
* to the top of reordered join tree generated by the first step. If there are factors, which
* join condition is true, we will add these factors to the top in the final step.
*/
private static RelNode findBestOrder(RelBuilder relBuilder, LoptMultiJoin multiJoin) {
// Reorder all the inner join type input factors in the multiJoin.
        List<Map<Set<Integer>, JoinPlan>> foundPlansForInnerJoin =
                reorderInnerJoin(relBuilder, multiJoin);
        Map<Set<Integer>, JoinPlan> lastLevelOfInnerJoin =
                foundPlansForInnerJoin.get(foundPlansForInnerJoin.size() - 1);
JoinPlan bestPlanForInnerJoin = getBestPlan(lastLevelOfInnerJoin);
JoinPlan containOuterJoinPlan;
// Add all outer join factors in the multiJoin (including left/right/full) on the
// top of tree if outer join condition exists in multiJoin.
if (multiJoin.getMultiJoinRel().isFullOuterJoin() || outerJoinConditionExists(multiJoin)) {
containOuterJoinPlan = addOuterJoinToTop(bestPlanForInnerJoin, multiJoin, relBuilder);
}
else {
containOuterJoinPlan = bestPlanForInnerJoin;
}
JoinPlan finalPlan;
// Add all cross join factors whose join condition is true to the top.
if (containOuterJoinPlan.factorIds.size() != multiJoin.getNumJoinFactors()) {
finalPlan = addCrossJoinToTop(containOuterJoinPlan, multiJoin, relBuilder);
} else {
finalPlan = containOuterJoinPlan;
}
final List<String> fieldNames = multiJoin.getMultiJoinRel().getRowType().getFieldNames();
return createTopProject(relBuilder, multiJoin, finalPlan, fieldNames);
}
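
    // Schematic sketch (hypothetical generic helper, not the rule's API) of the three-stage
    // composition described above: start from the cheapest inner-join order, stack outer-join
    // factors on top if any exist, then stack the remaining true-condition cross joins.
    static <P> P composePlan(
            P bestInnerJoinPlan,
            java.util.function.UnaryOperator<P> addOuterJoinsToTop,
            java.util.function.UnaryOperator<P> addCrossJoinsToTop,
            boolean hasOuterJoins,
            boolean factorsRemaining) {
        P plan = hasOuterJoins ? addOuterJoinsToTop.apply(bestInnerJoinPlan) : bestInnerJoinPlan;
        return factorsRemaining ? addCrossJoinsToTop.apply(plan) : plan;
    }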
| 3.26 |
flink_FlinkBushyJoinReorderRule_foundNextLevel_rdh
|
/**
* Found possible join plans for the next level based on the found plans in the prev levels.
*/
private static Map<Set<Integer>, JoinPlan> foundNextLevel(RelBuilder relBuilder, List<Map<Set<Integer>, JoinPlan>> foundPlans, LoptMultiJoin multiJoin) {
Map<Set<Integer>, JoinPlan> currentLevelJoinPlanMap = new LinkedHashMap<>();
        int foundPlansLevel = foundPlans.size() - 1;
        int joinLeftSideLevel = 0;
int joinRightSideLevel = foundPlansLevel;
while (joinLeftSideLevel <= joinRightSideLevel) {
            List<JoinPlan> joinLeftSidePlans =
                    new ArrayList<>(foundPlans.get(joinLeftSideLevel).values());
int planSize = joinLeftSidePlans.size();
for (int i = 0; i < planSize; i++) {
JoinPlan joinLeftSidePlan = joinLeftSidePlans.get(i);
List<JoinPlan> joinRightSidePlans;
if (joinLeftSideLevel == joinRightSideLevel) {
// If left side level number equals right side level number. We can remove those
// top 'i' plans which already judged in right side plans to decrease search
// spaces.
joinRightSidePlans = new ArrayList<>(joinLeftSidePlans);
if (i > 0) {
joinRightSidePlans.subList(0, i).clear();
}
} else {
joinRightSidePlans = new ArrayList<>(foundPlans.get(joinRightSideLevel).values());
}
for (JoinPlan joinRightSidePlan : joinRightSidePlans) {
Optional<JoinPlan> newJoinPlan = buildInnerJoin(relBuilder, joinLeftSidePlan, joinRightSidePlan, multiJoin);
if (newJoinPlan.isPresent()) {
JoinPlan existingPlanInCurrentLevel = currentLevelJoinPlanMap.get(newJoinPlan.get().factorIds);
// check if it's the first plan for the factor set, or it's a better plan
// than the existing one due to lower cost.
if ((existingPlanInCurrentLevel == null) || newJoinPlan.get().betterThan(existingPlanInCurrentLevel)) {
currentLevelJoinPlanMap.put(newJoinPlan.get().factorIds, newJoinPlan.get());
}
}
}
}
joinLeftSideLevel++;
joinRightSideLevel--;
}
return currentLevelJoinPlanMap;
}
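
    // Illustrative, standalone sketch of the level pairing used above: plans of level l
    // (covering l + 1 factors) are combined with plans of level (lastLevel - l) so that every
    // result covers lastLevel + 2 factors, and each unordered pair of levels is visited once.
    static List<int[]> levelPairs(int lastLevel) {
        List<int[]> pairs = new ArrayList<>();
        for (int left = 0, right = lastLevel; left <= right; left++, right--) {
            pairs.add(new int[] {left, right});
        }
        return pairs; // e.g. lastLevel = 3 -> [0, 3], [1, 2]
    }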
| 3.26 |
flink_FlinkBushyJoinReorderRule_createTopProject_rdh
|
/**
* Creates the topmost projection that will sit on top of the selected join ordering. The
* projection needs to match the original join ordering. Also, places any post-join filters on
* top of the project.
*/
    private static RelNode createTopProject(
            RelBuilder relBuilder, LoptMultiJoin multiJoin, JoinPlan finalPlan, List<String> fieldNames) {
List<RexNode> newProjExprs = new ArrayList<>();
RexBuilder rexBuilder = multiJoin.getMultiJoinRel().getCluster().getRexBuilder();
List<Integer> newJoinOrder = new ArrayList<>(finalPlan.factorIds);
int nJoinFactors = multiJoin.getNumJoinFactors();
List<RelDataTypeField> fields = multiJoin.getMultiJoinFields();
// create a mapping from each factor to its field offset in the join
// ordering
        final Map<Integer, Integer> factorToOffsetMap = new HashMap<>();
for (int pos = 0, fieldStart = 0; pos < nJoinFactors; pos++) {
factorToOffsetMap.put(newJoinOrder.get(pos), fieldStart);
fieldStart += multiJoin.getNumFieldsInJoinFactor(newJoinOrder.get(pos));
}
        for (int currFactor = 0; currFactor < nJoinFactors; currFactor++) {
// if the factor is the right factor in a removable self-join,
// then where possible, remap references to the right factor to
// the corresponding reference in the left factor
Integer leftFactor = null;
if (multiJoin.isRightFactorInRemovableSelfJoin(currFactor)) {
leftFactor = multiJoin.getOtherSelfJoinFactor(currFactor);
}
for (int fieldPos = 0; fieldPos < multiJoin.getNumFieldsInJoinFactor(currFactor); fieldPos++) {
int newOffset = requireNonNull(factorToOffsetMap.get(currFactor), () -> "factorToOffsetMap.get(currFactor)") + fieldPos;
if (leftFactor != null) {
Integer leftOffset = multiJoin.getRightColumnMapping(currFactor, fieldPos);
                    if (leftOffset != null) {
                        newOffset = requireNonNull(factorToOffsetMap.get(leftFactor),
                                "factorToOffsetMap.get(leftFactor)") + leftOffset;
}
}
                newProjExprs.add(rexBuilder.makeInputRef(fields.get(newProjExprs.size()).getType(), newOffset));
            }
}
relBuilder.clear();
relBuilder.push(finalPlan.relNode);
relBuilder.project(newProjExprs, fieldNames);
// Place the post-join filter (if it exists) on top of the final projection.
RexNode postJoinFilter = multiJoin.getMultiJoinRel().getPostJoinFilter();
if (postJoinFilter != null) {
relBuilder.filter(postJoinFilter);
}
return relBuilder.build();
}
| 3.26 |
flink_FlinkBushyJoinReorderRule_reorderInnerJoin_rdh
|
/**
* Reorder all the inner join type input factors in the multiJoin.
*
* <p>The result contains the selected join order of each layer and is stored in a HashMap. The
* number of layers is equals to the number of inner join type input factors in the multiJoin.
* E.g. for inner join case ((A IJ B) IJ C):
*
* <p>The stored HashMap of first layer in the result list is: [(Set(0), JoinPlan(Set(0), A)),
* (Set(1), JoinPlan(Set(1), B)), (Set(2), JoinPlan(Set(2), C))].
*
* <p>The stored HashMap of second layer is [(Set(0, 1), JoinPlan(Set(0, 1), (A J B))), (Set(0,
* 2), JoinPlan(Set(0, 2), (A J C))), (Set(1, 2), JoinPlan(Set(1, 2), (B J C)))].
*
* <p>The stored HashMap of third layer is [(Set(1, 0, 2), JoinPlan(Set(1, 0, 2), ((B J A) J
* C)))].
*/
private static List<Map<Set<Integer>, JoinPlan>> reorderInnerJoin(RelBuilder relBuilder, LoptMultiJoin multiJoin) {
int numJoinFactors = multiJoin.getNumJoinFactors();
List<Map<Set<Integer>, JoinPlan>> foundPlans = new ArrayList<>();
// First, we put all join factors in MultiJoin into level 0.
Map<Set<Integer>, JoinPlan> firstLevelJoinPlanMap = new LinkedHashMap<>();
        for (int i = 0; i < numJoinFactors; i++) {
if (!multiJoin.isNullGenerating(i)) {
Set<Integer> set1 = new HashSet<>();
                Set<Integer> set2 = new LinkedHashSet<>();
set1.add(i);
set2.add(i);
RelNode joinFactor = multiJoin.getJoinFactor(i);
firstLevelJoinPlanMap.put(set1, new JoinPlan(set2, joinFactor));
}
}
foundPlans.add(firstLevelJoinPlanMap);
// If multiJoin is full outer join, we will reorder it in method addOuterJoinToTop().
if (multiJoin.getMultiJoinRel().isFullOuterJoin()) {
return foundPlans;
}
// Build plans for next levels until the found plans size equals the number of join factors,
// or no possible plan exists for next level.
while (foundPlans.size() < numJoinFactors) {
            Map<Set<Integer>, JoinPlan> nextLevelJoinPlanMap =
                    foundNextLevel(relBuilder, new ArrayList<>(foundPlans), multiJoin);
            if (nextLevelJoinPlanMap.size() == 0) {
                break;
}
foundPlans.add(nextLevelJoinPlanMap);
}
        return foundPlans;
}
| 3.26 |
flink_FlinkBushyJoinReorderRule_getBestPlan_rdh
|
/**
* Get the best plan for current level by comparing cost.
*/
private static JoinPlan getBestPlan(Map<Set<Integer>, JoinPlan> levelPlan) {
JoinPlan bestPlan = null;
for (Map.Entry<Set<Integer>, JoinPlan> entry : levelPlan.entrySet()) {
            if ((bestPlan == null) || entry.getValue().betterThan(bestPlan)) {
bestPlan = entry.getValue();
}
}
return bestPlan;
}
| 3.26 |
flink_RowtimeValidator_getRowtimeComponents_rdh
|
// utilities
    public static Optional<Tuple2<TimestampExtractor, WatermarkStrategy>> getRowtimeComponents(
            DescriptorProperties properties, String prefix) {
// create timestamp extractor
TimestampExtractor extractor;
Optional<String> t = properties.getOptionalString(prefix + ROWTIME_TIMESTAMPS_TYPE);
if (!t.isPresent()) {
return Optional.empty();
}
switch (t.get()) {
case ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD :
                String field = properties.getString(prefix + ROWTIME_TIMESTAMPS_FROM);
extractor = new ExistingField(field);
break;
case ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_SOURCE :
extractor = StreamRecordTimestamp.INSTANCE;
break;
case ROWTIME_TIMESTAMPS_TYPE_VALUE_CUSTOM :
                Class<TimestampExtractor> clazz = properties.getClass(prefix + ROWTIME_TIMESTAMPS_CLASS, TimestampExtractor.class);
extractor = EncodingUtils.decodeStringToObject(properties.getString(prefix + ROWTIME_TIMESTAMPS_SERIALIZED), clazz);
break;
default :
throw new ValidationException("Unsupported rowtime timestamps type: " + t.get());
}
// create watermark strategy
        WatermarkStrategy v10;
        String s = properties.getString(prefix + ROWTIME_WATERMARKS_TYPE);
        switch (s) {
            case ROWTIME_WATERMARKS_TYPE_VALUE_PERIODIC_ASCENDING :
v10 = new AscendingTimestamps();
break;
case ROWTIME_WATERMARKS_TYPE_VALUE_PERIODIC_BOUNDED :
long delay = properties.getLong(prefix + ROWTIME_WATERMARKS_DELAY);
v10 = new BoundedOutOfOrderTimestamps(delay);
break;
case ROWTIME_WATERMARKS_TYPE_VALUE_FROM_SOURCE :
v10 = PreserveWatermarks.INSTANCE;
break;
case ROWTIME_WATERMARKS_TYPE_VALUE_CUSTOM :
Class<WatermarkStrategy> clazz = properties.getClass(prefix + ROWTIME_WATERMARKS_CLASS, WatermarkStrategy.class);
v10 = EncodingUtils.decodeStringToObject(properties.getString(prefix + ROWTIME_WATERMARKS_SERIALIZED), clazz);
break;
default :
                throw new RuntimeException("Unsupported rowtime watermarks type: " + s);
}
return Optional.of(new Tuple2<>(extractor, v10));
}
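
    // Hypothetical example of the property layout this method consumes; the key and value
    // strings below are spelled out for illustration and stand in for the ROWTIME_* constants
    // referenced above, so the exact spelling may differ from the real constant values.
    static Map<String, String> exampleRowtimeProperties(String prefix) {
        Map<String, String> props = new HashMap<>();
        props.put(prefix + "timestamps.type", "from-field");       // ROWTIME_TIMESTAMPS_TYPE
        props.put(prefix + "timestamps.from", "ts");               // ROWTIME_TIMESTAMPS_FROM
        props.put(prefix + "watermarks.type", "periodic-bounded"); // ROWTIME_WATERMARKS_TYPE
        props.put(prefix + "watermarks.delay", "5000");            // ROWTIME_WATERMARKS_DELAY
        return props;
    }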
| 3.26 |
flink_PlanProjectOperator_map_rdh
|
// TODO We should use code generation for this.
@SuppressWarnings("unchecked")
@Override
    public R map(Tuple inTuple) throws Exception {
        for (int i = 0; i < fields.length; i++) {
            f2.setField(inTuple.getField(f1[i]), i);
}
return ((R) (f2));
}
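
    // Plain-Java sketch of the projection performed above: copy the selected input positions
    // into a fresh output row in the requested order (arrays stand in for Tuple fields).
    static Object[] projectFields(Object[] inputRow, int[] fieldIndexes) {
        Object[] output = new Object[fieldIndexes.length];
        for (int i = 0; i < fieldIndexes.length; i++) {
            output[i] = inputRow[fieldIndexes[i]];
        }
        return output;
    }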
| 3.26 |
flink_PartitionOperatorBase_getPartitionMethod_rdh
|
// --------------------------------------------------------------------------------------------
    public PartitionMethod getPartitionMethod() {
        return this.partitionMethod;
}
| 3.26 |
flink_PartitionOperatorBase_executeOnCollections_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected List<IN> executeOnCollections(List<IN> inputData, RuntimeContext runtimeContext, ExecutionConfig executionConfig) {
return inputData;
}
| 3.26 |
flink_TaskStatsRequestCoordinator_handleSuccessfulResponse_rdh
|
/**
* Handles the successfully returned tasks stats response by collecting the corresponding
* subtask samples.
*
* @param requestId
* ID of the request.
* @param executionIds
* ID of the sampled task.
* @param result
* Result of stats request returned by an individual task.
* @throws IllegalStateException
* If unknown request ID and not recently finished or cancelled
* sample.
*/
    public void handleSuccessfulResponse(
            int requestId, ImmutableSet<ExecutionAttemptID> executionIds, T result) {
synchronized(lock) {
if (isShutDown) {
return;
}
final String ids = executionIds.stream().map(ExecutionAttemptID::toString).collect(Collectors.joining(", "));
            if (log.isDebugEnabled()) {
log.debug("Collecting stats sample {} of tasks {}", requestId, ids);
}
PendingStatsRequest<T, V> pending = pendingRequests.get(requestId);
if (pending != null) {
pending.m0(executionIds, result);
// Publish the sample
if (pending.isComplete()) {
pendingRequests.remove(requestId);
rememberRecentRequestId(requestId);
pending.completePromiseAndDiscard();
}
} else if (recentPendingRequestIds.contains(requestId)) {
if (log.isDebugEnabled()) {
log.debug("Received late stats sample {} of tasks {}", requestId, ids);
}
} else if (log.isDebugEnabled()) {
log.debug(String.format("Unknown request ID %d.", requestId));
}
}
}
| 3.26 |
flink_TaskStatsRequestCoordinator_shutDown_rdh
|
/**
* Shuts down the coordinator.
*
* <p>After shut down, no further operations are executed.
*/
public void shutDown() {
synchronized(lock) {
if (!isShutDown) {
log.info("Shutting down task stats request coordinator.");
                for (PendingStatsRequest<T, V> pending : pendingRequests.values()) {
                    pending.discard(new RuntimeException("Shut down"));
}
pendingRequests.clear();
recentPendingRequestIds.clear();
isShutDown = true;
}
        }
    }
| 3.26 |
flink_TaskStatsRequestCoordinator_m0_rdh
|
/**
* Collects result from one of the tasks.
*
* @param executionId
* ID of the Task.
* @param taskStatsResult
* Result of the stats sample from the Task.
     */
    protected void m0(ImmutableSet<ExecutionAttemptID> executionId, T taskStatsResult) {
checkDiscarded();
if (pendingTasks.remove(executionId)) {
statsResultByTaskGroup.put(executionId, taskStatsResult);
} else if (isComplete()) {
throw new IllegalStateException("Completed");
} else {
throw new IllegalArgumentException("Unknown task " + executionId);
}
}
| 3.26 |
flink_TaskStatsRequestCoordinator_handleFailedResponse_rdh
|
/**
* Handles the failed stats response by canceling the corresponding unfinished pending request.
*
* @param requestId
* ID of the request to cancel.
* @param cause
* Cause of the cancelling (can be <code>null</code>).
*/
    public void handleFailedResponse(int requestId, @Nullable Throwable cause) {
synchronized(lock) {
if (isShutDown) {
return;
            }
            PendingStatsRequest<T, V> pendingRequest = pendingRequests.remove(requestId);
            if (pendingRequest != null) {
                log.info("Cancelling request {}", requestId, cause);
                pendingRequest.discard(cause);
rememberRecentRequestId(requestId);
}
}
}
| 3.26 |
flink_KvStateInfo_getKeySerializer_rdh
|
/**
*
* @return The serializer for the key the state is associated to.
*/
public TypeSerializer<K> getKeySerializer() {
return keySerializer;
}
| 3.26 |
flink_KvStateInfo_duplicate_rdh
|
/**
* Creates a deep copy of the current {@link KvStateInfo} by duplicating all the included
* serializers.
*
* <p>This method assumes correct implementation of the {@link TypeSerializer#duplicate()}
* method of the included serializers.
*/
public KvStateInfo<K, N, V> duplicate() {
final TypeSerializer<K> dupKeySerializer = keySerializer.duplicate();
final TypeSerializer<N> dupNamespaceSerializer = namespaceSerializer.duplicate();
final TypeSerializer<V> dupSVSerializer = stateValueSerializer.duplicate();
if (((dupKeySerializer == keySerializer) && (dupNamespaceSerializer == namespaceSerializer)) && (dupSVSerializer == stateValueSerializer)) {
return this;
}
return new KvStateInfo<>(dupKeySerializer, dupNamespaceSerializer, dupSVSerializer);
}
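
    // Minimal sketch (hypothetical generic helper) of the same defensive-copy pattern: duplicate
    // every part and only hand out a new container when at least one duplicate is a different
    // instance, i.e. when some serializer is actually stateful.
    static <T> List<T> duplicateAll(List<T> parts, java.util.function.UnaryOperator<T> duplicator) {
        List<T> copies = new ArrayList<>(parts.size());
        boolean allSame = true;
        for (T part : parts) {
            T copy = duplicator.apply(part);
            allSame &= (copy == part);
            copies.add(copy);
        }
        return allSame ? parts : copies;
    }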
| 3.26 |
flink_KvStateInfo_getStateValueSerializer_rdh
|
/**
*
* @return The serializer for the values kept in the state.
*/
public TypeSerializer<V> getStateValueSerializer() {
return stateValueSerializer;
}
| 3.26 |
flink_KMeansDataGenerator_main_rdh
|
/**
* Main method to generate data for the {@link KMeans} example program.
*
     * <p>The generator creates two files:
*
* <ul>
* <li><code>< output-path >/points</code> for the data points
* <li><code>< output-path >/centers</code> for the cluster centers
* </ul>
*
* @param args
* <ol>
* <li>Int: Number of data points
* <li>Int: Number of cluster centers
* <li><b>Optional</b> String: Output path, default value is {tmp.dir}
* <li><b>Optional</b> Double: Standard deviation of data points
* <li><b>Optional</b> Double: Value range of cluster centers
* <li><b>Optional</b> Long: Random seed
* </ol>
* @throws IOException
*/
    public static void main(String[] args) throws IOException {
        // check parameter count
        if (args.length < 2) {
            System.out.println("KMeansDataGenerator -points <num> -k <num clusters> [-output <output-path>] [-stddev <relative stddev>] [-range <centroid range>] [-seed <seed>]");
            System.exit(1);
}
// parse parameters
final ParameterTool params = ParameterTool.fromArgs(args);
final int numDataPoints = params.getInt("points");final int k = params.getInt("k");
final String v3 = params.get("output", System.getProperty("java.io.tmpdir"));
final double stddev = params.getDouble("stddev", RELATIVE_STDDEV);
final double range = params.getDouble("range", DEFAULT_VALUE_RANGE);
final long firstSeed = params.getLong("seed", DEFAULT_SEED);
        final double absoluteStdDev = stddev * range;
final Random random = new Random(firstSeed);
// the means around which data points are distributed
final double[][] means = uniformRandomCenters(random, k, f0, range);
// write the points out
BufferedWriter pointsOut = null;
try {
pointsOut = new BufferedWriter(new FileWriter(new File((v3 + "/") + POINTS_FILE)));
            StringBuilder buffer = new StringBuilder();
double[] point = new double[f0];
int nextCentroid = 0;
for (int i = 1; i <= numDataPoints; i++) {
// generate a point for the current centroid
double[] centroid = means[nextCentroid];
                for (int d = 0; d < f0; d++) {
                    point[d] = (random.nextGaussian() * absoluteStdDev) + centroid[d];
}
writePoint(point, buffer, pointsOut);
nextCentroid = (nextCentroid + 1) % k;
}
} finally {
if (pointsOut != null) {
pointsOut.close();
}
}
// write the uniformly distributed centers to a file
        BufferedWriter centersOut = null;
        try {
centersOut = new BufferedWriter(new FileWriter(new File((v3 + "/") + CENTERS_FILE)));
StringBuilder buffer = new StringBuilder();
double[][] centers = uniformRandomCenters(random, k, f0, range);
for (int i = 0; i < k; i++) {
writeCenter(i + 1, centers[i], buffer, centersOut);
}
} finally {
if (centersOut != null) {
centersOut.close();
}
}
System.out.println((((("Wrote " + numDataPoints) + " data points to ") + v3) + "/") + POINTS_FILE);
System.out.println((((("Wrote " + k) + " cluster centers to ") + v3) + "/") + CENTERS_FILE);
}
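
    // Standalone sketch of the point generation above: an emitted point is a chosen centroid
    // plus per-dimension Gaussian noise scaled by the absolute standard deviation.
    static double[] gaussianPoint(double[] centroid, double absoluteStdDev, Random random) {
        double[] point = new double[centroid.length];
        for (int d = 0; d < centroid.length; d++) {
            point[d] = centroid[d] + random.nextGaussian() * absoluteStdDev;
        }
        return point;
    }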
| 3.26 |
flink_MetadataV2Serializer_serializeOperatorState_rdh
|
// ------------------------------------------------------------------------
// version-specific serialization
// ------------------------------------------------------------------------
@Override
protected void serializeOperatorState(OperatorState operatorState, DataOutputStream dos) throws IOException {
checkState(!operatorState.isFullyFinished(), "Could not support finished Operator state in state serializers.");
// Operator ID
dos.writeLong(operatorState.getOperatorID().getLowerPart());
dos.writeLong(operatorState.getOperatorID().getUpperPart());
// Parallelism
int parallelism = operatorState.getParallelism();
dos.writeInt(parallelism);
dos.writeInt(operatorState.getMaxParallelism());
// this field was "chain length" before Flink 1.3, and it is still part
// of the format, despite being unused
dos.writeInt(1);
// Sub task states
Map<Integer, OperatorSubtaskState> subtaskStateMap = operatorState.getSubtaskStates();
dos.writeInt(subtaskStateMap.size());
for (Map.Entry<Integer, OperatorSubtaskState> entry : subtaskStateMap.entrySet()) {
dos.writeInt(entry.getKey());
serializeSubtaskState(entry.getValue(), dos);
}
}
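
    // Hypothetical reader sketch mirroring the field order written above: operator id as two
    // longs, parallelism, max parallelism, the unused legacy "chain length" int, then the number
    // of subtask entries (each entry being an int key followed by a serialized subtask state).
    static void readOperatorStateHeader(DataInputStream dis) throws IOException {
        long operatorIdLower = dis.readLong();
        long operatorIdUpper = dis.readLong();
        int parallelism = dis.readInt();
        int maxParallelism = dis.readInt();
        dis.readInt(); // legacy chain length, always written as 1
        int numSubtaskStates = dis.readInt();
        // the numSubtaskStates (key, subtask state) pairs would be deserialized here
    }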
| 3.26 |