name | code_snippet | score
---|---|---
flink_WindowReader_reduce_rdh
|
/**
* Reads window state generated using a {@link ReduceFunction}.
*
* @param uid
* The uid of the operator.
* @param function
* The reduce function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param reduceType
* The type information of the reduce function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the reduce function.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataSource<OUT> reduce(String uid, ReduceFunction<T> function, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> reduceType, TypeInformation<OUT> outputType) throws IOException
{
WindowReaderOperator<?, K, T, W, OUT> operator = WindowReaderOperator.reduce(function, readerFunction, keyType, windowSerializer, reduceType);
return readWindowOperator(uid, outputType, operator);
}
| 3.26 |
flink_WindowReader_aggregate_rdh
|
/**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid
* The uid of the operator.
* @param aggregateFunction
* The aggregate function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param accType
* The type information of the accumulator function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the values that are aggregated.
* @param <ACC>
* The type of the accumulator (intermediate aggregate state).
* @param <R>
* The type of the aggregated result.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
*/
public <K, T, ACC, R, OUT> DataSource<OUT> aggregate(String uid, AggregateFunction<T, ACC, R> aggregateFunction, WindowReaderFunction<R, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<ACC> accType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, R, W, OUT> v1 = WindowReaderOperator.aggregate(aggregateFunction, readerFunction, keyType, windowSerializer, accType);
return readWindowOperator(uid, outputType, v1);
}
| 3.26 |
flink_MultipleFuturesAvailabilityHelper_anyOf_rdh
|
/**
* Combine {@code availabilityFuture} using anyOf logic with other previously registered
* futures.
*/
public void anyOf(final int idx, CompletableFuture<?> availabilityFuture) {
if ((futuresToCombine[idx] == null) || futuresToCombine[idx].isDone()) {
futuresToCombine[idx] = availabilityFuture;
assertNoException(availabilityFuture.thenRun(this::notifyCompletion));
}
}
| 3.26 |
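For illustration, here is a minimal self-contained sketch of the anyOf-style registration pattern shown above: a slot is only (re)registered when it is empty or already done, and every registered future triggers a shared completion callback. The class, field names, and notifyCompletion logic below are hypothetical stand-ins, not Flink's actual MultipleFuturesAvailabilityHelper.

import java.util.concurrent.CompletableFuture;

// Hypothetical stand-in for the availability helper described above.
class AnyOfHelperSketch {
    private final CompletableFuture<?>[] slots;
    private volatile CompletableFuture<Void> available = new CompletableFuture<>();

    AnyOfHelperSketch(int size) {
        this.slots = new CompletableFuture<?>[size];
    }

    void anyOf(int idx, CompletableFuture<?> future) {
        // Only register a new future when the slot is unused or its previous future is done.
        if (slots[idx] == null || slots[idx].isDone()) {
            slots[idx] = future;
            future.thenRun(this::notifyCompletion); // complete the combined future once any slot completes
        }
    }

    CompletableFuture<?> getAvailableFuture() {
        return available;
    }

    private void notifyCompletion() {
        available.complete(null);
    }

    public static void main(String[] args) {
        AnyOfHelperSketch helper = new AnyOfHelperSketch(2);
        CompletableFuture<Void> f0 = new CompletableFuture<>();
        helper.anyOf(0, f0);
        f0.complete(null);
        System.out.println(helper.getAvailableFuture().isDone()); // true
    }
}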
flink_MultipleFuturesAvailabilityHelper_getAvailableFuture_rdh
|
/**
*
* @return combined future using anyOf logic
*/
public CompletableFuture<?> getAvailableFuture() {
return availableFuture;
}
| 3.26 |
flink_RouteResult_params_rdh
|
/**
* Extracts all params in {@code pathParams} and {@code queryParams} matching the name.
*
* @return Unmodifiable list; the list is empty if there's no match
*/
public List<String> params(String name) {
    List<String> values = queryParams.get(name);
    String value = pathParams.get(name);
    if (values == null) {
        return value == null ? Collections.<String>emptyList() : Collections.singletonList(value);
    }
    if (value == null) {
        return Collections.unmodifiableList(values);
    } else {
        List<String> aggregated = new ArrayList<String>(values.size() + 1);
        aggregated.addAll(values);
        aggregated.add(value);
        return Collections.unmodifiableList(aggregated);
    }
}
| 3.26 |
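As a worked illustration of the merge semantics documented above (query values first, then the path value, returned as an unmodifiable list), here is a self-contained sketch on plain maps. The RouteResult class itself is not constructed here; only its documented behavior is reproduced.

import java.util.*;

public class ParamsMergeSketch {
    // Reproduces the documented behavior: query values first, then the path value, unmodifiable result.
    static List<String> params(Map<String, String> pathParams,
                               Map<String, List<String>> queryParams,
                               String name) {
        List<String> values = queryParams.get(name);
        String value = pathParams.get(name);
        if (values == null) {
            return value == null ? Collections.<String>emptyList() : Collections.singletonList(value);
        }
        if (value == null) {
            return Collections.unmodifiableList(values);
        }
        List<String> aggregated = new ArrayList<>(values.size() + 1);
        aggregated.addAll(values);
        aggregated.add(value);
        return Collections.unmodifiableList(aggregated);
    }

    public static void main(String[] args) {
        Map<String, String> path = Map.of("id", "42");
        Map<String, List<String>> query = Map.of("id", List.of("1", "2"));
        System.out.println(params(path, query, "id")); // [1, 2, 42]
    }
}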
flink_RouteResult_uri_rdh
|
/**
* Returns the original request URI.
*/
public String uri() {
return uri;
}
| 3.26 |
flink_RouteResult_queryParams_rdh
|
/**
* Returns all params in the query part of the request URI.
*/
public Map<String, List<String>> queryParams() {
return queryParams;
}
| 3.26 |
flink_RouteResult_param_rdh
|
/**
* Extracts the param in {@code pathParams} first, then falls back to the first matching param
* in {@code queryParams}.
*
* @return {@code null} if there's no match
*/
public String param(String name) {
String pathValue = pathParams.get(name);
return pathValue == null ? queryParam(name) : pathValue;
}
| 3.26 |
flink_RouteResult_queryParam_rdh
|
// ----------------------------------------------------------------------------
// Utilities to get params.
/**
* Extracts the first matching param in {@code queryParams}.
*
* @return {@code null} if there's no match
*/
public String queryParam(String name) {
List<String> values = queryParams.get(name);
return values == null ? null : values.get(0);
}
| 3.26 |
flink_RouteResult_pathParams_rdh
|
/**
* Returns all params embedded in the request path.
*/
public Map<String, String> pathParams() {
return pathParams;
}
| 3.26 |
flink_ShadeParser_parseShadeOutput_rdh
|
/**
* Parses the output of a Maven build where {@code shade:shade} was used, and returns a set of
* bundled dependencies for each module.
*
* <p>The returned dependencies will NEVER contain the scope or optional flag.
*
* <p>This method only considers the {@code shade-flink} and {@code shade-dist} executions,
* because all artifacts we produce that are either published or referenced are created by these
* executions. In other words, all artifacts from other executions are only used internally by
* the module that created them.
*/
public static Map<String, Set<Dependency>> parseShadeOutput(Path buildOutput) throws IOException {
    try (Stream<String> lines = Files.lines(buildOutput)) {
        return parseShadeOutput(lines);
    }
}
| 3.26 |
flink_MapPartitionNode_computeOperatorSpecificDefaultEstimates_rdh
|
/**
* Computes the estimates for the MapPartition operator. We assume that by default, Map takes
* one value and transforms it into another value. The cardinality consequently stays the same.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
    // we really cannot make any estimates here
}
| 3.26 |
flink_SessionDispatcherLeaderProcess_create_rdh
|
// ---------------------------------------------------------------
// Factory methods
// ---------------------------------------------------------------
public static SessionDispatcherLeaderProcess create(
        UUID leaderSessionId,
        DispatcherGatewayServiceFactory dispatcherFactory,
        JobGraphStore jobGraphStore,
        JobResultStore jobResultStore,
        Executor ioExecutor,
        FatalErrorHandler fatalErrorHandler) {
    return new SessionDispatcherLeaderProcess(
            leaderSessionId, dispatcherFactory, jobGraphStore, jobResultStore, ioExecutor, fatalErrorHandler);
}
| 3.26 |
flink_CatalogSourceTable_createAnonymous_rdh
|
/**
* Create a {@link CatalogSourceTable} from an anonymous {@link ContextResolvedTable}. This is
* required to manually create a preparing table skipping the calcite catalog resolution.
*/
public static CatalogSourceTable createAnonymous(FlinkRelBuilder relBuilder, ContextResolvedTable contextResolvedTable, boolean isBatchMode) {
Preconditions.checkArgument(contextResolvedTable.isAnonymous(), "ContextResolvedTable must be anonymous");
// Statistics are unknown for anonymous tables
// Look at DatabaseCalciteSchema#getStatistic for more details
FlinkStatistic flinkStatistic = FlinkStatistic.unknown(contextResolvedTable.getResolvedSchema()).build();
CatalogSchemaTable catalogSchemaTable = new CatalogSchemaTable(contextResolvedTable, flinkStatistic, !isBatchMode);
return new CatalogSourceTable(relBuilder.getRelOptSchema(), contextResolvedTable.getIdentifier().toList(), catalogSchemaTable.getRowType(relBuilder.getTypeFactory()), catalogSchemaTable);
}
| 3.26 |
flink_SinkTestSuiteBase_compareSinkMetrics_rdh
|
/**
* Compares the expected record count against the value of the given sink metric.
*/
private boolean compareSinkMetrics(MetricQuerier metricQuerier, TestEnvironment testEnv, DataStreamSinkExternalContext<T> context, JobID jobId, String sinkName, String metricsName, long expectedSize) throws Exception {
double sumNumRecordsOut = metricQuerier.getAggregatedMetricsByRestAPI(testEnv.getRestEndpoint(), jobId, sinkName, metricsName, getSinkMetricFilter(context));
if (Precision.equals(expectedSize, sumNumRecordsOut)) {
return true;
} else {
LOG.info("expected:<{}> but was <{}>({})", expectedSize, sumNumRecordsOut, metricsName);
return false;
}
}
| 3.26 |
flink_SinkTestSuiteBase_generateTestData_rdh
|
// ----------------------------- Helper Functions ---------------------------------
/**
* Generate a set of test records.
*
* @param testingSinkSettings
* sink settings
* @param externalContext
* External context
* @return Collection of generated test records
*/
protected List<T> generateTestData(TestingSinkSettings testingSinkSettings, DataStreamSinkExternalContext<T> externalContext) {
return externalContext.generateTestData(testingSinkSettings, ThreadLocalRandom.current().nextLong());
}
| 3.26 |
flink_SinkTestSuiteBase_testBasicSink_rdh
|
// ----------------------------- Basic test cases ---------------------------------
/**
* Test DataStream connector sink.
*
* <p>The following tests will create a sink in the external system, generate a collection of
* test data and write them to this sink by the Flink Job.
*
* <p>In order to pass these tests, the number of records produced by Flink needs to equal the
* number of generated test records, and the records in the sink are compared to the test data
* under the configured delivery semantics. There is no requirement on record order.
*/
@TestTemplate
@DisplayName("Test data stream sink")
public void testBasicSink(TestEnvironment testEnv, DataStreamSinkExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
TestingSinkSettings sinkSettings = getTestingSinkSettings(semantic);
final List<T> testRecords = generateTestData(sinkSettings, externalContext);
// Build and execute Flink job
StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(TestEnvironmentSettings.builder().setConnectorJarPaths(externalContext.getConnectorJarPaths()).build());
execEnv.enableCheckpointing(50);
DataStream<T> dataStream = execEnv.fromCollection(testRecords).name("sourceInSinkTest").setParallelism(1).returns(externalContext.getProducedType());
tryCreateSink(dataStream, externalContext, sinkSettings).setParallelism(1).name("sinkInSinkTest");
final JobClient jobClient = execEnv.executeAsync("DataStream Sink Test");
waitForJobStatus(jobClient, Collections.singletonList(JobStatus.FINISHED));
// Check test result
checkResultWithSemantic(externalContext.createSinkDataReader(sinkSettings), testRecords, semantic);
}
| 3.26 |
flink_SinkTestSuiteBase_getSinkMetricFilter_rdh
|
/**
* Return the filter used to filter the sink metric.
*
* <ul>
* <li>Sink v1: return null.
* <li>Sink v2: return the "Writer" prefix in the `SinkTransformationTranslator`.
* </ul>
*/
private String getSinkMetricFilter(DataStreamSinkExternalContext<T> context) {
if (context instanceof DataStreamSinkV1ExternalContext) {
return null;
} else if (context instanceof DataStreamSinkV2ExternalContext) {
// See class `SinkTransformationTranslator`
return "Writer";
} else {
throw new IllegalArgumentException(String.format("Get unexpected sink context: %s", context.getClass()));
}
}
| 3.26 |
flink_SinkTestSuiteBase_testScaleUp_rdh
|
/**
* Test connector sink restart from a completed savepoint with a higher parallelism.
*
* <p>This test will create a sink in the external system, generate a collection of test data
* and write half of it to the sink via a Flink job with parallelism 2. It then stops the job and
* restarts the same job from the completed savepoint with a higher parallelism of 4. Once the job
* is running again, the other half is written to the sink and the result is compared.
*
* <p>In order to pass this test, the number of records produced by Flink needs to equal the number
* of generated test records, and the records in the sink are compared to the test data under the
* configured delivery semantic. There is no requirement on record order.
*/
@TestTemplate
@DisplayName("Test sink restarting with a higher parallelism")
public void testScaleUp(TestEnvironment testEnv, DataStreamSinkExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 2, 4);
}
| 3.26 |
flink_SinkTestSuiteBase_testMetrics_rdh
|
/**
* Test connector sink metrics.
*
* <p>This test will create a sink in the external system, generate test data and write them to
* the sink via a Flink job. Then read and compare the metrics.
*
* <p>Currently tested metric: numRecordsOut
*/
@TestTemplate
@DisplayName("Test sink metrics")
public void testMetrics(TestEnvironment testEnv, DataStreamSinkExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
TestingSinkSettings sinkSettings = getTestingSinkSettings(semantic);
int parallelism = 1;
final List<T> testRecords = generateTestData(sinkSettings, externalContext);
// make sure a different sink name is used when the test executes multiple times
String sinkName = "metricTestSink" + testRecords.hashCode();
final StreamExecutionEnvironment env = testEnv.createExecutionEnvironment(TestEnvironmentSettings.builder().setConnectorJarPaths(externalContext.getConnectorJarPaths()).build());
env.enableCheckpointing(50);
DataStreamSource<T> source = env.fromSource(new FromElementsSource<>(Boundedness.CONTINUOUS_UNBOUNDED, testRecords, testRecords.size()), WatermarkStrategy.noWatermarks(), "metricTestSource").setParallelism(1);
DataStream<T> dataStream = source.returns(externalContext.getProducedType());
tryCreateSink(dataStream, externalContext, sinkSettings).name(sinkName).setParallelism(parallelism);
final JobClient jobClient = env.executeAsync("Metrics Test");
final MetricQuerier queryRestClient = new MetricQuerier(new Configuration());
final ExecutorService executorService = Executors.newCachedThreadPool();
try {
waitForAllTaskRunning(() -> getJobDetails(new RestClient(new Configuration(), executorService), testEnv.getRestEndpoint(), jobClient.getJobID()));
waitUntilCondition(() -> {
// test metrics
try {
return compareSinkMetrics(queryRestClient, testEnv, externalContext, jobClient.getJobID(), sinkName, MetricNames.NUM_RECORDS_SEND, testRecords.size());
} catch (Exception e) {
// skip failed assert try
return false;
}
});
} finally {
// Clean up
executorService.shutdown();
killJob(jobClient);
}
}
| 3.26 |
flink_SinkTestSuiteBase_pollAndAppendResultData_rdh
|
/**
* Poll records from the sink.
*
* @param result
* The list to which polled records are appended
* @param reader
* The sink reader
* @param expected
* The expected records, used to decide when to stop polling
* @param retryTimes
* The maximum number of polling attempts
* @param semantic
* The delivery semantic
* @return Collection of records in the Sink
*/
private List<T> pollAndAppendResultData(List<T> result, ExternalSystemDataReader<T> reader, List<T> expected, int retryTimes, CheckpointingMode semantic) {
long timeoutMs = 1000L;
int retryIndex = 0;
while (((retryIndex++) < retryTimes) && (!checkGetEnoughRecordsWithSemantic(expected, result, semantic))) {
result.addAll(reader.poll(Duration.ofMillis(timeoutMs)));
}
return result;
}
| 3.26 |
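The polling loop above is a bounded retry: keep draining the reader until either the retry budget is exhausted or the semantic-specific stop condition holds. Below is a minimal self-contained sketch of that pattern; the reader, records, and stop condition are stand-ins, not the connector testing framework's types.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;
import java.util.function.Supplier;

public class BoundedPollSketch {
    // Polls batches from `poll` until `enough` is satisfied or `retryTimes` attempts are used up.
    static <T> List<T> pollUntil(Supplier<List<T>> poll, Predicate<List<T>> enough, int retryTimes) {
        List<T> result = new ArrayList<>();
        int retryIndex = 0;
        while (retryIndex++ < retryTimes && !enough.test(result)) {
            result.addAll(poll.get());
        }
        return result;
    }

    public static void main(String[] args) {
        // A fake reader that returns one record per poll.
        ArrayDeque<String> source = new ArrayDeque<>(List.of("a", "b", "c"));
        List<String> got = pollUntil(
                () -> source.isEmpty() ? List.<String>of() : List.of(source.poll()),
                r -> r.size() >= 3,
                30);
        System.out.println(got); // [a, b, c]
    }
}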
flink_SinkTestSuiteBase_checkResultWithSemantic_rdh
|
/**
* Compare the test data with actual data in given semantic.
*
* @param reader
* the data reader for the sink
* @param testData
* the test data
* @param semantic
* the supported semantic, see {@link CheckpointingMode}
*/
protected void checkResultWithSemantic(ExternalSystemDataReader<T> reader, List<T> testData, CheckpointingMode semantic) throws Exception {
final ArrayList<T> result = new ArrayList<>();
waitUntilCondition(() -> {
pollAndAppendResultData(result, reader, testData, 30, semantic);
try {
CollectIteratorAssertions.assertThat(sort(result).iterator()).matchesRecordsFromSource(Arrays.asList(sort(testData)), semantic);
return true;
} catch (Throwable t) {
return false;
}
});
}
| 3.26 |
flink_SinkTestSuiteBase_testScaleDown_rdh
|
/**
* Test connector sink restart from a completed savepoint with a lower parallelism.
*
* <p>This test will create a sink in the external system, generate a collection of test data
* and write half of it to the sink via a Flink job with parallelism 4. It then stops the job and
* restarts the same job from the completed savepoint with a lower parallelism of 2. Once the job
* is running again, the other half is written to the sink and the result is compared.
*
* <p>In order to pass this test, the number of records produced by Flink needs to equal the number
* of generated test records, and the records in the sink are compared to the test data under the
* configured delivery semantic. There is no requirement on record order.
*/
@TestTemplate
@DisplayName("Test sink restarting with a lower parallelism")
public void testScaleDown(TestEnvironment testEnv, DataStreamSinkExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 4, 2);
}
| 3.26 |
flink_SinkTestSuiteBase_checkGetEnoughRecordsWithSemantic_rdh
|
/**
* Check whether the polling should stop.
*
* @param expected
* The expected records, used to decide when to stop polling
* @param result
* The records that have been read
* @param semantic
* The delivery semantic
* @return Whether the polling should stop
*/
private boolean checkGetEnoughRecordsWithSemantic(List<T> expected, List<T> result, CheckpointingMode semantic) {
    checkNotNull(expected);
    checkNotNull(result);
    if (EXACTLY_ONCE.equals(semantic)) {
        return expected.size() <= result.size();
    } else if (AT_LEAST_ONCE.equals(semantic)) {
        Set<Integer> matchedIndex = new HashSet<>();
        for (T record : expected) {
            int before = matchedIndex.size();
            for (int i = 0; i < result.size(); i++) {
                if (matchedIndex.contains(i)) {
                    continue;
                }
                if (record.equals(result.get(i))) {
                    matchedIndex.add(i);
                    break;
                }
            }
            // the expected record was not found in the result
            if (before == matchedIndex.size()) {
                return false;
            }
        }
        return true;
    }
    throw new IllegalStateException(String.format("%s delivery guarantee doesn't support test.", semantic.name()));
}
| 3.26 |
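For illustration, the at-least-once branch above is essentially a multiset-containment check: every expected record must be matched to a distinct, not-yet-used index in the result. A small self-contained sketch of that check on plain strings (the names and data here are made up):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ContainmentCheckSketch {
    // Returns true if every element of `expected` can be matched to a distinct element of `result`.
    static <T> boolean containsAsMultiset(List<T> expected, List<T> result) {
        Set<Integer> used = new HashSet<>();
        for (T record : expected) {
            boolean matched = false;
            for (int i = 0; i < result.size(); i++) {
                if (!used.contains(i) && record.equals(result.get(i))) {
                    used.add(i);
                    matched = true;
                    break;
                }
            }
            if (!matched) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(containsAsMultiset(List.of("a", "a"), List.of("a", "b", "a"))); // true
        System.out.println(containsAsMultiset(List.of("a", "a"), List.of("a", "b")));      // false
    }
}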
flink_ThriftObjectConversions_toTRowSet_rdh
|
/**
* Similar to {@link SerDeUtils#toThriftPayload(Object, ObjectInspector, int)}, which converts the
* returned Rows to a JSON string. The only difference is that the current implementation also
* keeps the type for primitive types.
*/
public static TRowSet toTRowSet(TProtocolVersion version, ResolvedSchema schema, List<RowData> data) {
for (RowData row : data) {
if (row.getRowKind() != RowKind.INSERT) {
throw new UnsupportedOperationException("HiveServer2 Endpoint only supports to serialize the INSERT-ONLY RowData.");
}
}
List<RowData.FieldGetter> fieldGetters = new ArrayList<>();
for (int i = 0; i < schema.getColumnCount(); i++) {
fieldGetters.add(RowData.createFieldGetter(schema.getColumnDataTypes().get(i).getLogicalType(), i));
}
List<LogicalType> fieldTypes = schema.getColumnDataTypes().stream().map(DataType::getLogicalType).collect(Collectors.toList());
if (version.getValue() < HIVE_CLI_SERVICE_PROTOCOL_V6.getVersion().getValue()) {
return toRowBasedSet(fieldTypes, fieldGetters, data);
} else {
return toColumnBasedSet(fieldTypes, fieldGetters, data);
}
}
| 3.26 |
flink_ThriftObjectConversions_toFetchOrientation_rdh
|
// --------------------------------------------------------------------------------------------
// Statement related conversions
// --------------------------------------------------------------------------------------------
public static FetchOrientation toFetchOrientation(TFetchOrientation fetchOrientation) {
switch (fetchOrientation) {
case FETCH_PRIOR :
return FetchOrientation.FETCH_PRIOR;
case FETCH_NEXT :
return FetchOrientation.FETCH_NEXT;
default :
throw new UnsupportedOperationException(String.format("Unsupported fetch orientation: %s.", fetchOrientation));
}
}
/**
* Similar logic in the {@code org.apache.hive.service.cli.ColumnDescriptor}
| 3.26 |
flink_ThriftObjectConversions_hasTypeQualifiers_rdh
|
/**
* Only the type that has length, precision or scale has {@link TTypeQualifiers}.
*/
private static boolean hasTypeQualifiers(LogicalType type) {
switch (type.getTypeRoot()) {
case DECIMAL :
case CHAR :
case VARCHAR :
return true;
default :
return false;
}
}
| 3.26 |
flink_ThriftObjectConversions_toTOperationHandle_rdh
|
// --------------------------------------------------------------------------------------------
// Flink SessionHandle && OperationHandle from/to Hive OperationHandle
// --------------------------------------------------------------------------------------------
/**
* Convert {@link SessionHandle} and {@link OperationHandle} to {@link TOperationHandle}.
*
* <p>Hive uses {@link TOperationHandle} to retrieve the {@code Operation}-related information.
* However, the SqlGateway uses {@link SessionHandle} and {@link OperationHandle} to identify the
* operation. Therefore, the {@link TOperationHandle} needs to contain both the {@link SessionHandle}
* and the {@link OperationHandle}.
*
* <p>Currently all operations in the {@link SqlGatewayService} have data. Therefore,
* {@code TOperationHandle#hasResultSet} is set to true.
*/
public static TOperationHandle toTOperationHandle(SessionHandle sessionHandle, OperationHandle operationHandle, TOperationType operationType) {
return new TOperationHandle(toTHandleIdentifier(operationHandle.getIdentifier(), sessionHandle.getIdentifier()), operationType, true);
}
| 3.26 |
flink_ThriftObjectConversions_toFlinkTableKinds_rdh
|
// --------------------------------------------------------------------------------------------
// Catalog API related conversions
// --------------------------------------------------------------------------------------------
/**
* Counterpart of the {@code org.apache.hive.service.cli.operation.TableTypeMapping}.
*/
public static Set<TableKind> toFlinkTableKinds(@Nullable List<String> tableTypes) {
Set<TableKind> tableKinds = new HashSet<>();
if ((tableTypes == null) || tableTypes.isEmpty()) {
tableKinds.addAll(Arrays.asList(TableKind.values()));
return tableKinds;
}
for (String tableType : tableTypes) {
if (!TABLE_TYPE_MAPPINGS.containsKey(tableType)) {
throw new UnsupportedOperationException(String.format("Can not find the mapping from the TableType '%s' to the Flink TableKind. Please remove it from the specified tableTypes.", tableType));
}
tableKinds.add(TABLE_TYPE_MAPPINGS.get(tableType));
}
return tableKinds;
}
| 3.26 |
flink_ThriftObjectConversions_toTSessionHandle_rdh
|
// --------------------------------------------------------------------------------------------
// Flink SessionHandle from/to Hive SessionHandle
// --------------------------------------------------------------------------------------------
public static TSessionHandle toTSessionHandle(SessionHandle sessionHandle) {
return new TSessionHandle(toTHandleIdentifier(sessionHandle.getIdentifier(), SECRET_ID));
}
| 3.26 |
flink_ThriftObjectConversions_toTTypeQualifiers_rdh
|
/**
* Create {@link TTypeQualifiers} from {@link LogicalType}. The logic is almost same in the
* {@code org.apache.hive.service.cli#toTTypeQualifiers}.
*/
private static TTypeQualifiers toTTypeQualifiers(LogicalType type) {
Map<String, TTypeQualifierValue> qualifiers = new HashMap<>();
switch (type.getTypeRoot()) {
case DECIMAL :
qualifiers.put(TCLIServiceConstants.PRECISION, TTypeQualifierValue.i32Value(((DecimalType) (type)).getPrecision()));
qualifiers.put(TCLIServiceConstants.SCALE, TTypeQualifierValue.i32Value(((DecimalType) (type)).getScale()));
break;
case VARCHAR :
qualifiers.put(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH, TTypeQualifierValue.i32Value(((VarCharType) (type)).getLength()));
break;
case CHAR :
qualifiers.put(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH, TTypeQualifierValue.i32Value(((CharType) (type)).getLength()));
break;
}
return new TTypeQualifiers(qualifiers);
}
| 3.26 |
flink_ThriftObjectConversions_toTHandleIdentifier_rdh
|
// --------------------------------------------------------------------------------------------
private static THandleIdentifier toTHandleIdentifier(UUID publicId, UUID secretId) {
byte[] guid = new byte[16];
byte[] secret = new byte[16];
ByteBuffer guidBB = ByteBuffer.wrap(guid);
ByteBuffer secretBB = ByteBuffer.wrap(secret);
guidBB.putLong(publicId.getMostSignificantBits());
guidBB.putLong(publicId.getLeastSignificantBits());
secretBB.putLong(secretId.getMostSignificantBits());
secretBB.putLong(secretId.getLeastSignificantBits());
return new THandleIdentifier(ByteBuffer.wrap(guid), ByteBuffer.wrap(secret));
}
| 3.26 |
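As an illustration of the packing above, here is a self-contained round trip that writes a UUID into a 16-byte array via ByteBuffer and reads it back. This is just the general java.nio pattern, not Flink or Hive code.

import java.nio.ByteBuffer;
import java.util.UUID;

public class UuidBytesSketch {
    // Packs a UUID into 16 bytes: most significant 8 bytes first, then the least significant 8.
    static byte[] toBytes(UUID id) {
        ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
        bb.putLong(id.getMostSignificantBits());
        bb.putLong(id.getLeastSignificantBits());
        return bb.array();
    }

    // Reverses the packing.
    static UUID fromBytes(byte[] bytes) {
        ByteBuffer bb = ByteBuffer.wrap(bytes);
        return new UUID(bb.getLong(), bb.getLong());
    }

    public static void main(String[] args) {
        UUID original = UUID.randomUUID();
        UUID roundTripped = fromBytes(toBytes(original));
        System.out.println(original.equals(roundTripped)); // true
    }
}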
flink_StreamTaskCancellationContext_alwaysRunning_rdh
|
/**
* Factory for a context that always returns {@code false} when {@link #isCancelled()} is
* called.
*
* @return context
*/
static StreamTaskCancellationContext alwaysRunning() {
return () -> false;
}
| 3.26 |
flink_UpsertTestFileUtil_getNumberOfRecords_rdh
|
/**
* Returns the total number of records written using the {@link UpsertTestSinkWriter} to the
* given File.
*
* @param file
* The File to read from
* @return the number of records
* @throws IOException
*/
public static int getNumberOfRecords(File file) throws IOException {
checkNotNull(file);
FileInputStream fs = new FileInputStream(file);
BufferedInputStream bis = new BufferedInputStream(fs);
return getNumberOfRecords(bis);
}
/**
* Reads records that were written using the {@link UpsertTestSinkWriter} from the given
* InputStream and converts them using the provided {@link DeserializationSchema}
| 3.26 |
flink_UpsertTestFileUtil_writeRecords_rdh
|
/**
* Writes a Map of records serialized by the {@link UpsertTestSinkWriter} to the given
* BufferedOutputStream.
*
* @param bos
* the BufferedOutputStream to write to
* @param records
* the Map of records created by the UpsertTestSinkWriter
* @throws IOException
*/
public static void writeRecords(BufferedOutputStream bos, Map<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> records) throws IOException {
checkNotNull(bos);
for (Map.Entry<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> record : records.entrySet()) {
byte[] key = record.getKey().array();
byte[] v2 = record.getValue().array();
bos.write(MAGIC_BYTE);
bos.write(key.length);
bos.write(key);
bos.write(v2.length);
bos.write(v2);
}
bos.flush();
}
| 3.26 |
flink_UpsertTestFileUtil_readRecords_rdh
|
/**
* Reads records that were written using the {@link UpsertTestSinkWriter} from the given
* InputStream.
*
* @param bis
* The BufferedInputStream to read from
* @return Map containing the read ImmutableByteArrayWrapper key-value pairs
* @throws IOException
*/
private static Map<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> readRecords(BufferedInputStream bis) throws IOException {
checkNotNull(bis);
Map<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> records = new HashMap<>();
int magicByte;
while ((magicByte = bis.read()) != (-1)) {
if (magicByte != MAGIC_BYTE) {
throw new IOException("Data was serialized incorrectly or is corrupted.");
}
int keyLength = bis.read();
byte[] key = new byte[keyLength];
bis.read(key);
int valueLength = bis.read();
byte[] value = new byte[valueLength];
bis.read(value);
records.put(new ImmutableByteArrayWrapper(key), new ImmutableByteArrayWrapper(value));
}
return records;
}
| 3.26 |
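The write/read pair above uses a simple length-prefixed binary layout per record: a magic byte, a one-byte key length, the key bytes, a one-byte value length, the value bytes. Below is a self-contained round trip of that layout on in-memory streams; the magic byte value and the single-byte length limit of 255 bytes are illustrative assumptions of this sketch, not necessarily Flink's constants.

import java.io.*;
import java.util.LinkedHashMap;
import java.util.Map;

public class LengthPrefixedFormatSketch {
    private static final int MAGIC_BYTE = 13; // illustrative value for this sketch

    static byte[] write(Map<byte[], byte[]> records) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        for (Map.Entry<byte[], byte[]> e : records.entrySet()) {
            out.write(MAGIC_BYTE);
            out.write(e.getKey().length);   // single-byte length => keys/values up to 255 bytes
            out.write(e.getKey());
            out.write(e.getValue().length);
            out.write(e.getValue());
        }
        return out.toByteArray();
    }

    static Map<String, String> read(byte[] data) throws IOException {
        Map<String, String> records = new LinkedHashMap<>();
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        int magic;
        while ((magic = in.read()) != -1) {
            if (magic != MAGIC_BYTE) {
                throw new IOException("corrupted record");
            }
            byte[] key = new byte[in.read()];
            in.readFully(key);              // readFully avoids short reads
            byte[] value = new byte[in.read()];
            in.readFully(value);
            records.put(new String(key), new String(value));
        }
        return records;
    }

    public static void main(String[] args) throws IOException {
        byte[] bytes = write(Map.of("k".getBytes(), "v".getBytes()));
        System.out.println(read(bytes)); // {k=v}
    }
}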
flink_ComponentClosingUtils_tryShutdownExecutorElegantly_rdh
|
/**
* A util method that tries to shut down an {@link ExecutorService} elegantly within the given
* timeout. If the executor has not been shut down before it hits timeout or the thread is
* interrupted when waiting for the termination, a forceful shutdown will be attempted on the
* executor.
*
* @param executor
* the {@link ExecutorService} to shut down.
* @param timeout
* the timeout duration.
* @return true if the given executor has been successfully closed, false otherwise.
*/
@SuppressWarnings("ResultOfMethodCallIgnored")
public static boolean tryShutdownExecutorElegantly(ExecutorService executor, Duration timeout) {
try {
executor.shutdown();
executor.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS);
} catch (InterruptedException ie) {
// Let it go.
}
if (!executor.isTerminated()) {
shutdownExecutorForcefully(executor, Duration.ZERO, false);
}
return executor.isTerminated();
}
| 3.26 |
flink_ComponentClosingUtils_shutdownExecutorForcefully_rdh
|
/**
* Shutdown the given executor forcefully within the given timeout.
*
* @param executor
* the executor to shut down.
* @param timeout
* the timeout duration.
* @param interruptable
* when set to true, the method can be interrupted. Each interruption to
* the thread results in another {@code ExecutorService.shutdownNow()} call to the shutting
* down executor.
* @return true if the given executor is terminated, false otherwise.
*/
public static boolean shutdownExecutorForcefully(ExecutorService executor, Duration timeout, boolean interruptable) {
Deadline deadline = Deadline.fromNowWithClock(timeout, clock);
boolean isInterrupted = false;
do {
executor.shutdownNow();
try {
executor.awaitTermination(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
isInterrupted = interruptable;
}
} while ((!isInterrupted) && deadline.hasTimeLeft() && (!executor.isTerminated()));
return executor.isTerminated();
}
| 3.26 |
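For context, here is a minimal stand-alone example of the same two-phase shutdown idea using only the JDK: attempt a graceful shutdown first, then fall back to shutdownNow if the executor has not terminated within the timeout. This is the standard ExecutorService pattern, simplified relative to the utilities above (no Deadline, no repeated shutdownNow loop).

import java.time.Duration;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class TwoPhaseShutdownSketch {
    static boolean shutdownGracefullyThenForcefully(ExecutorService executor, Duration timeout) {
        executor.shutdown(); // stop accepting new tasks, let running tasks finish
        try {
            if (!executor.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
                executor.shutdownNow(); // interrupt running tasks
            }
        } catch (InterruptedException ie) {
            executor.shutdownNow();
            Thread.currentThread().interrupt();
        }
        return executor.isTerminated();
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("task ran"));
        System.out.println(shutdownGracefullyThenForcefully(pool, Duration.ofSeconds(1)));
    }
}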
flink_ComponentClosingUtils_m0_rdh
|
// ========= Method visible for testing ========
@VisibleForTesting
static void m0(Clock clock) {
ComponentClosingUtils.clock = clock;
}
| 3.26 |
flink_AllocatedSlot_getTaskManagerId_rdh
|
/**
* Gets the ID of the TaskManager on which this slot was allocated.
*
* <p>This is equivalent to {@link #getTaskManagerLocation()}.{@link #getTaskManagerId()}.
*
* @return This slot's TaskManager's ID.
*/
public ResourceID getTaskManagerId() {
return getTaskManagerLocation().getResourceID();
}
| 3.26 |
flink_AllocatedSlot_isUsed_rdh
|
/**
* Returns true if this slot is being used (e.g. a logical slot is allocated from this slot).
*
* @return true if a logical slot is allocated from this slot, otherwise false
*/
public boolean isUsed() {
return payloadReference.get() != null;
}
| 3.26 |
flink_AllocatedSlot_getSlotId_rdh
|
// ------------------------------------------------------------------------
/**
* Gets the Slot's unique ID defined by its TaskManager.
*/
public SlotID getSlotId() {
return new SlotID(getTaskManagerId(), physicalSlotNumber);
}
| 3.26 |
flink_AllocatedSlot_hashCode_rdh
|
// ------------------------------------------------------------------------
/**
* This always returns a reference hash code.
*/
@Override
public final int hashCode() {
return super.hashCode();
}
| 3.26 |
flink_AllocatedSlot_releasePayload_rdh
|
/**
* Triggers the release of the assigned payload. If the payload could be released, then it is
* removed from the slot.
*
* @param cause
* of the release operation
*/
public void releasePayload(Throwable cause) {
final Payload payload = payloadReference.get();
if (payload != null) {
payload.release(cause);
payloadReference.set(null);
}
}
| 3.26 |
flink_GateNotificationHelper_notifyPriority_rdh
|
/**
* Must be called under lock to ensure integrity of priorityAvailabilityHelper.
*/
public void notifyPriority() {
toNotifyPriority = inputGate.priorityAvailabilityHelper.getUnavailableToResetAvailable();
}
| 3.26 |
flink_AsynchronousBlockWriterWithCallback_writeBlock_rdh
|
/**
* Issues an asynchronous write request to the writer.
*
* @param segment
* The segment to be written.
* @throws IOException
* Thrown, when the writer encounters an I/O error. Due to the asynchronous
* nature of the writer, the exception thrown here may have been caused by an earlier write
* request.
*/
@Override
public void writeBlock(MemorySegment segment) throws IOException {
addRequest(new SegmentWriteRequest(this, segment));
}
| 3.26 |
flink_TSetClientInfoReq_findByName_rdh
|
/**
* Find the _Fields constant that matches name, or null if it is not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
| 3.26 |
flink_TSetClientInfoReq_isSetConfiguration_rdh
|
/**
* Returns true if field configuration is set (has been assigned a value) and false otherwise
*/
public boolean isSetConfiguration() {
return this.configuration != null;
}
| 3.26 |
flink_TSetClientInfoReq_isSetSessionHandle_rdh
|
/**
* Returns true if field sessionHandle is set (has been assigned a value) and false otherwise
*/
public boolean isSetSessionHandle() {
return this.f0 != null;
}
| 3.26 |
flink_TSetClientInfoReq_findByThriftIdOrThrow_rdh
|
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) {
throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
}
return fields;
}
| 3.26 |
flink_TSetClientInfoReq_isSet_rdh
|
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SESSION_HANDLE:
return isSetSessionHandle();
case CONFIGURATION:
return isSetConfiguration();
}
throw new IllegalStateException();
}
| 3.26 |
flink_TSetClientInfoReq_findByThriftId_rdh
|
/**
* Find the _Fields constant that matches fieldId, or null if it is not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1 :
// SESSION_HANDLE
return SESSION_HANDLE;
case 2 :
// CONFIGURATION
return CONFIGURATION;
default :
return null;
}
}
| 3.26 |
flink_OneInputTransformationTranslator_translateForBatchInternal_rdh
|
/**
* A {@link TransformationTranslator} for the {@link OneInputTransformation}.
*
* @param <IN>
* The type of the elements in the input {@code Transformation} of the transformation to
* translate.
* @param <OUT>
* The type of the elements that result from the provided {@code OneInputTransformation}.
*/
@Internal
public final class OneInputTransformationTranslator<IN, OUT> extends AbstractOneInputTransformationTranslator<IN, OUT, OneInputTransformation<IN, OUT>> {
@Override
public Collection<Integer> translateForBatchInternal(final OneInputTransformation<IN, OUT> transformation, final Context context) {
KeySelector<IN, ?> keySelector = transformation.getStateKeySelector();
Collection<Integer> ids = translateInternal(transformation, transformation.getOperatorFactory(), transformation.getInputType(), keySelector, transformation.getStateKeyType(), context);
boolean isKeyed = keySelector != null;
if (isKeyed) {
BatchExecutionUtils.applyBatchExecutionSettings(transformation.getId(), context, InputRequirement.SORTED);
}
return ids;
}
| 3.26 |
flink_CatalogDatabaseImpl_m0_rdh
|
/**
* Get a map of properties associated with the database.
*/
public Map<String, String> m0() {
return properties;
}
| 3.26 |
flink_CatalogDatabaseImpl_getDetailedDescription_rdh
|
/**
* Get a detailed description of the database.
*
* @return an optional long description of the database
*/
public Optional<String> getDetailedDescription()
{
return Optional.ofNullable(comment);
}
| 3.26 |
flink_CatalogDatabaseImpl_copy_rdh
|
/**
* Get a deep copy of the CatalogDatabase instance.
*
* @return a copy of CatalogDatabase instance
*/
public CatalogDatabase copy() {
return copy(m0());
}
| 3.26 |
flink_CatalogDatabaseImpl_getDescription_rdh
|
/**
* Get a brief description of the database.
*
* @return an optional short description of the database
*/
public Optional<String> getDescription() {
return Optional.ofNullable(comment);
}
| 3.26 |
flink_BulkIterationBase_getNextPartialSolution_rdh
|
/**
*
* @return The operator representing the next partial solution.
*/
public Operator<T> getNextPartialSolution() {
return this.iterationResult;
}
| 3.26 |
flink_BulkIterationBase_getBroadcastInputs_rdh
|
/**
* The BulkIteration meta operator cannot have broadcast inputs.
*
* @return An empty map.
*/
public Map<String, Operator<?>> getBroadcastInputs() {
return Collections.emptyMap();
}
| 3.26 |
flink_BulkIterationBase_setTerminationCriterion_rdh
|
/**
*
* @param criterion
*/
public <X> void setTerminationCriterion(Operator<X> criterion) {
TypeInformation<X> type = criterion.getOperatorInfo().getOutputType();
FlatMapOperatorBase<X, X, TerminationCriterionMapper<X>> mapper = new FlatMapOperatorBase<X, X, TerminationCriterionMapper<X>>(new TerminationCriterionMapper<X>(), new UnaryOperatorInformation<X, X>(type, type), "Termination Criterion Aggregation Wrapper");
mapper.setInput(criterion);
this.terminationCriterion = mapper;
this.getAggregators().registerAggregationConvergenceCriterion(TERMINATION_CRITERION_AGGREGATOR_NAME, new TerminationCriterionAggregator(), new TerminationCriterionAggregationConvergence());
}
| 3.26 |
flink_BulkIterationBase_setBroadcastVariable_rdh
|
/**
* The BulkIteration meta operator cannot have broadcast inputs. This method always throws an
* exception.
*
* @param name
* Ignored.
* @param root
* Ignored.
*/
public void setBroadcastVariable(String name, Operator<?> root) {
throw new UnsupportedOperationException("The BulkIteration meta operator cannot have broadcast inputs.");
}
| 3.26 |
flink_BulkIterationBase_setBroadcastVariables_rdh
|
/**
* The BulkIteration meta operator cannot have broadcast inputs. This method always throws an
* exception.
*
* @param inputs
* Ignored
*/
public <X> void setBroadcastVariables(Map<String, Operator<X>> inputs) {
throw new UnsupportedOperationException("The BulkIteration meta operator cannot have broadcast inputs.");
}
| 3.26 |
flink_BulkIterationBase_getPartialSolution_rdh
|
// --------------------------------------------------------------------------------------------
/**
*
* @return The operator representing the partial solution.
*/
public Operator<T> getPartialSolution() {
return this.inputPlaceHolder;
}
| 3.26 |
flink_BulkIterationBase_getTerminationCriterion_rdh
|
/**
*
* @return The operator representing the termination criterion.
*/
public Operator<?> getTerminationCriterion() {
return this.terminationCriterion;
}
| 3.26 |
flink_BulkIterationBase_validate_rdh
|
/**
*
* @throws InvalidProgramException
*/
public void validate() throws InvalidProgramException {
if (this.input == null) {
throw new RuntimeException("Operator for initial partial solution is not set.");
}
if (this.iterationResult == null) {
throw new InvalidProgramException("Operator producing the next version of the partial " + "solution (iteration result) is not set.");
}
if ((this.terminationCriterion == null) && (this.numberOfIterations <= 0)) {
throw new InvalidProgramException("No termination condition is set " + "(neither fix number of iteration nor termination criterion).");
}
}
| 3.26 |
flink_BulkIterationBase_setNextPartialSolution_rdh
|
/**
*
* @param result
*/
public void setNextPartialSolution(Operator<T> result) {
if (result == null) {
throw new NullPointerException("Operator producing the next partial solution must not be null.");
}
this.iterationResult = result;
}
| 3.26 |
flink_BulkIterationBase_setMaximumNumberOfIterations_rdh
|
/**
*
* @param num
*/
public void setMaximumNumberOfIterations(int num) {
if (num < 1) {
throw new IllegalArgumentException("The number of iterations must be at least one.");
}
this.numberOfIterations = num;
}
| 3.26 |
flink_RocksDBMemoryConfiguration_validate_rdh
|
// ------------------------------------------------------------------------
/**
* Validates if the configured options are valid with respect to one another.
*/
public void validate() {
// As FLINK-15512 introduce a new mechanism to calculate the cache capacity,
// the relationship of write_buffer_manager_capacity and cache_capacity has changed to:
// write_buffer_manager_capacity / cache_capacity = 2 * writeBufferRatio / (3 -
// writeBufferRatio)
// we should ensure the sum of write buffer manager capacity and high priority pool less
// than cache capacity.
// TODO change the formula once FLINK-15532 resolved.
if (((writeBufferRatio != null) && (highPriorityPoolRatio != null)) && ((((2 * writeBufferRatio) / (3 - writeBufferRatio)) + highPriorityPoolRatio) >= 1.0)) {
throw new IllegalArgumentException(String.format("Invalid configuration: writeBufferRatio %s with highPriPoolRatio %s", writeBufferRatio, highPriorityPoolRatio));
}
}
| 3.26 |
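As a worked illustration of the check above: with the FLINK-15512 formula write_buffer_manager_capacity / cache_capacity = 2 * writeBufferRatio / (3 - writeBufferRatio), the validation requires 2 * writeBufferRatio / (3 - writeBufferRatio) + highPriorityPoolRatio < 1. A small self-contained sketch evaluating that condition for a couple of sample ratios (the sample values are made up):

public class RocksDbRatioCheckSketch {
    // Mirrors the validation: the write buffer manager share plus the high priority pool share
    // must stay strictly below the total cache capacity (normalized to 1.0).
    static boolean isValid(double writeBufferRatio, double highPriorityPoolRatio) {
        double writeBufferManagerShare = (2 * writeBufferRatio) / (3 - writeBufferRatio);
        return writeBufferManagerShare + highPriorityPoolRatio < 1.0;
    }

    public static void main(String[] args) {
        System.out.println(isValid(0.5, 0.1)); // true:  0.4 + 0.1 = 0.5 < 1.0
        System.out.println(isValid(0.9, 0.3)); // false: ~0.857 + 0.3 > 1.0
    }
}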
flink_RocksDBMemoryConfiguration_setWriteBufferRatio_rdh
|
/**
* Sets the fraction of the total memory to be used for write buffers. This only has an effect
* if either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)}
* is set.
*
* <p>See {@link RocksDBOptions#WRITE_BUFFER_RATIO} for details.
*/
public void setWriteBufferRatio(double writeBufferRatio) {
Preconditions.checkArgument((writeBufferRatio > 0) && (writeBufferRatio < 1.0), "Write Buffer ratio %s must be in (0, 1)", writeBufferRatio);
this.writeBufferRatio = writeBufferRatio;
}
| 3.26 |
flink_RocksDBMemoryConfiguration_setUseManagedMemory_rdh
|
// ------------------------------------------------------------------------
/**
* Configures RocksDB to use the managed memory of a slot. See {@link RocksDBOptions#USE_MANAGED_MEMORY} for details.
*/
public void setUseManagedMemory(boolean useManagedMemory) {
this.useManagedMemory = useManagedMemory;
}
| 3.26 |
flink_RocksDBMemoryConfiguration_getWriteBufferRatio_rdh
|
/**
* Gets the fraction of the total memory to be used for write buffers. This only has an effect
* if either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)}
* is set.
*
* <p>See {@link RocksDBOptions#WRITE_BUFFER_RATIO} for details.
*/
public double getWriteBufferRatio() {
return writeBufferRatio != null ? writeBufferRatio : RocksDBOptions.WRITE_BUFFER_RATIO.defaultValue();
}
| 3.26 |
flink_RocksDBMemoryConfiguration_fromOtherAndConfiguration_rdh
|
// ------------------------------------------------------------------------
/**
* Derives a RocksDBMemoryConfiguration from another object and a configuration. The values set
* on the other object take precedence, and the values from the configuration are used if no
* values are set on the other config object.
*/
public static RocksDBMemoryConfiguration fromOtherAndConfiguration(RocksDBMemoryConfiguration other, ReadableConfig config) {
final RocksDBMemoryConfiguration newConfig = new RocksDBMemoryConfiguration();
newConfig.useManagedMemory = (other.useManagedMemory != null) ? other.useManagedMemory : config.get(RocksDBOptions.USE_MANAGED_MEMORY);
newConfig.fixedMemoryPerSlot = (other.fixedMemoryPerSlot != null) ? other.fixedMemoryPerSlot : config.get(RocksDBOptions.FIX_PER_SLOT_MEMORY_SIZE);
newConfig.writeBufferRatio = (other.writeBufferRatio != null) ? other.writeBufferRatio : config.get(RocksDBOptions.WRITE_BUFFER_RATIO);
newConfig.highPriorityPoolRatio = (other.highPriorityPoolRatio != null) ? other.highPriorityPoolRatio : config.get(RocksDBOptions.HIGH_PRIORITY_POOL_RATIO);
newConfig.usePartitionedIndexFilters = (other.usePartitionedIndexFilters != null) ? other.usePartitionedIndexFilters : config.get(RocksDBOptions.USE_PARTITIONED_INDEX_FILTERS);
return newConfig;
}
| 3.26 |
flink_RocksDBMemoryConfiguration_getHighPriorityPoolRatio_rdh
|
/**
* Gets the fraction of the total memory to be used for high priority blocks like indexes,
* dictionaries, etc. This only has an effect if either {@link #setUseManagedMemory(boolean)} or
* {@link #setFixedMemoryPerSlot(MemorySize)} is set.
*
* <p>See {@link RocksDBOptions#HIGH_PRIORITY_POOL_RATIO} for details.
*/
public double getHighPriorityPoolRatio() {
return highPriorityPoolRatio != null ? highPriorityPoolRatio : RocksDBOptions.HIGH_PRIORITY_POOL_RATIO.defaultValue();
}
/**
* Gets whether the state backend is configured to use partitioned index/filters for RocksDB.
*
* <p>See {@link RocksDBOptions#USE_PARTITIONED_INDEX_FILTERS}
| 3.26 |
flink_RocksDBMemoryConfiguration_setHighPriorityPoolRatio_rdh
|
/**
* Sets the fraction of the total memory to be used for high priority blocks like indexes,
* dictionaries, etc. This only has an effect if either {@link #setUseManagedMemory(boolean)} or
* {@link #setFixedMemoryPerSlot(MemorySize)} is set.
*
* <p>See {@link RocksDBOptions#HIGH_PRIORITY_POOL_RATIO} for details.
*/
public void setHighPriorityPoolRatio(double highPriorityPoolRatio) {
Preconditions.checkArgument((highPriorityPoolRatio > 0) && (highPriorityPoolRatio < 1.0), "High priority pool ratio %s must be in (0, 1)", highPriorityPoolRatio);
this.highPriorityPoolRatio = highPriorityPoolRatio;
}
| 3.26 |
flink_RocksDBMemoryConfiguration_m0_rdh
|
/**
* Gets whether the state backend is configured to use a fixed amount of memory shared between
* all RocksDB instances (in all tasks and operators) of a slot. See {@link RocksDBOptions#FIX_PER_SLOT_MEMORY_SIZE} for details.
*/
public boolean m0() {
return fixedMemoryPerSlot != null;
}
| 3.26 |
flink_RocksDBMemoryConfiguration_setFixedMemoryPerSlot_rdh
|
/**
* Configures RocksDB to use a fixed amount of memory shared between all instances (operators)
* in a slot. See {@link #setFixedMemoryPerSlot(MemorySize)} for details.
*/
public void setFixedMemoryPerSlot(String totalMemoryPerSlotStr) {
setFixedMemoryPerSlot(MemorySize.parse(totalMemoryPerSlotStr));
}
| 3.26 |
flink_RocksDBMemoryConfiguration_isUsingManagedMemory_rdh
|
/**
* Gets whether the state backend is configured to use the managed memory of a slot for RocksDB.
* See {@link RocksDBOptions#USE_MANAGED_MEMORY} for details.
*/
public boolean isUsingManagedMemory() {
return useManagedMemory != null ? useManagedMemory : RocksDBOptions.USE_MANAGED_MEMORY.defaultValue();
}
| 3.26 |
flink_GenericCsvInputFormat_getNumberOfFieldsTotal_rdh
|
// --------------------------------------------------------------------------------------------
public int getNumberOfFieldsTotal() {
return this.fieldIncluded.length;
}
| 3.26 |
flink_GenericCsvInputFormat_initializeSplit_rdh
|
// --------------------------------------------------------------------------------------------
// Runtime methods
// --------------------------------------------------------------------------------------------
@Override
protected void initializeSplit(FileInputSplit split, Long offset) throws IOException {
    super.initializeSplit(split, offset);
    // instantiate the parsers
    FieldParser<?>[] parsers = new FieldParser<?>[fieldTypes.length];
    for (int i = 0; i < fieldTypes.length; i++) {
        if (fieldTypes[i] != null) {
            Class<? extends FieldParser<?>> parserType = FieldParser.getParserForType(fieldTypes[i]);
            if (parserType == null) {
                throw new RuntimeException("No parser available for type '" + fieldTypes[i].getName() + "'.");
            }
            FieldParser<?> p = InstantiationUtil.instantiate(parserType, FieldParser.class);
            p.setCharset(getCharset());
            if (this.quotedStringParsing) {
                if (p instanceof StringParser) {
                    ((StringParser) p).enableQuotedStringParsing(this.quoteCharacter);
                } else if (p instanceof StringValueParser) {
                    ((StringValueParser) p).enableQuotedStringParsing(this.quoteCharacter);
                }
            }
            parsers[i] = p;
        }
    }
    this.fieldParsers = parsers;
    // skip the first line, if we are at the beginning of a file and have the option set
    if (this.skipFirstLineAsHeader && (((offset == null) && (split.getStart() == 0)) || ((offset != null) && (offset == 0)))) {
        readLine(); // read and ignore
    }
}
| 3.26 |
flink_FlinkHintStrategies_createHintStrategyTable_rdh
|
/**
* Customize the {@link HintStrategyTable} which contains hint strategies supported by Flink.
*/
public static HintStrategyTable createHintStrategyTable() {
    return HintStrategyTable.builder()
            // Configure to always throw when we encounter any hint errors
            // (either the non-registered hint or the hint format).
            .errorHandler(Litmus.THROW)
            .hintStrategy(FlinkHints.HINT_NAME_OPTIONS, HintStrategy.builder(HintPredicates.TABLE_SCAN).optionChecker(OPTIONS_KV_OPTION_CHECKER).build())
            .hintStrategy(FlinkHints.HINT_NAME_JSON_AGGREGATE_WRAPPED, HintStrategy.builder(HintPredicates.AGGREGATE).excludedRules(WrapJsonAggFunctionArgumentsRule.INSTANCE).build())
            // internal join hint used for alias
            // currently, only correlate&join hints care about query block alias
            .hintStrategy(FlinkHints.HINT_ALIAS, HintStrategy.builder(HintPredicates.or(HintPredicates.CORRELATE, HintPredicates.JOIN)).optionChecker(fixedSizeListOptionChecker(1)).build())
            .hintStrategy(JoinStrategy.BROADCAST.getJoinHintName(), HintStrategy.builder(HintPredicates.JOIN).optionChecker(NON_EMPTY_LIST_OPTION_CHECKER).build())
            .hintStrategy(JoinStrategy.SHUFFLE_HASH.getJoinHintName(), HintStrategy.builder(HintPredicates.JOIN).optionChecker(NON_EMPTY_LIST_OPTION_CHECKER).build())
            .hintStrategy(JoinStrategy.SHUFFLE_MERGE.getJoinHintName(), HintStrategy.builder(HintPredicates.JOIN).optionChecker(NON_EMPTY_LIST_OPTION_CHECKER).build())
            .hintStrategy(JoinStrategy.NEST_LOOP.getJoinHintName(), HintStrategy.builder(HintPredicates.JOIN).optionChecker(NON_EMPTY_LIST_OPTION_CHECKER).build())
            // TODO semi/anti join with CORRELATE is not supported
            .hintStrategy(JoinStrategy.LOOKUP.getJoinHintName(), HintStrategy.builder(HintPredicates.or(HintPredicates.CORRELATE, HintPredicates.JOIN)).optionChecker(LOOKUP_NON_EMPTY_KV_OPTION_CHECKER).build())
            .build();
}
| 3.26 |
flink_TableSink_m0_rdh
|
/**
* Returns the data type consumed by this {@link TableSink}.
*
* @return The data type expected by this {@link TableSink}.
*/
default DataType m0() {
final TypeInformation<T> legacyType = getOutputType();
if (legacyType == null) {
throw new TableException("Table sink does not implement a consumed data type.");
}
return fromLegacyInfoToDataType(legacyType);
}
/**
*
* @deprecated This method will be removed in future versions as it uses the old type system. It
is recommended to use {@link #getConsumedDataType()} instead which uses the new type
system based on {@link DataTypes}
| 3.26 |
flink_TableSink_getFieldNames_rdh
|
/**
*
* @deprecated Use the field names of {@link #getTableSchema()} instead.
*/
@Deprecated
default String[] getFieldNames() {
return null;
}
| 3.26 |
flink_TableSink_getFieldTypes_rdh
|
/**
*
* @deprecated Use the field types of {@link #getTableSchema()} instead.
*/
@Deprecated
default TypeInformation<?>[] getFieldTypes() {
return null;
}
/**
* Returns a copy of this {@link TableSink} configured with the field names and types of the
* table to emit.
*
* @param fieldNames
* The field names of the table to emit.
* @param fieldTypes
* The field types of the table to emit.
* @return A copy of this {@link TableSink}
| 3.26 |
flink_InputGateMetrics_refreshAndGetMin_rdh
|
/**
* Iterates over all input channels and collects the minimum number of queued buffers in a
* channel in a best-effort way.
*
* @return minimum number of queued buffers per channel (<tt>0</tt> if no channels exist)
*/
int refreshAndGetMin() {
int min = Integer.MAX_VALUE;
Collection<InputChannel> v4 = inputGate.getInputChannels().values();
for (InputChannel channel : v4) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = ((RemoteInputChannel) (channel));
int size = rc.unsynchronizedGetNumberOfQueuedBuffers();
min = Math.min(min, size);
}
}
if (min == Integer.MAX_VALUE) {
// in case all channels are local, or the channel collection
// was empty
return 0;
}
return min;
}
| 3.26 |
flink_InputGateMetrics_refreshAndGetMax_rdh
|
/**
* Iterates over all input channels and collects the maximum number of queued buffers in a
* channel in a best-effort way.
*
* @return maximum number of queued buffers per channel
*/
int refreshAndGetMax() {
int max = 0;
for (InputChannel v9 : inputGate.getInputChannels().values()) {
if (v9 instanceof RemoteInputChannel) {
RemoteInputChannel rc = ((RemoteInputChannel) (v9));
int size = rc.unsynchronizedGetNumberOfQueuedBuffers();
max = Math.max(max, size);
}
}
return max;
}
| 3.26 |
flink_InputGateMetrics_refreshAndGetTotal_rdh
|
// ------------------------------------------------------------------------
// these methods are package private to make access from the nested classes faster
/**
* Iterates over all input channels and collects the total number of queued buffers in a
* best-effort way.
*
* @return total number of queued buffers
*/
long refreshAndGetTotal() {
long v0 = 0;
for (InputChannel channel : inputGate.getInputChannels().values()) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = ((RemoteInputChannel) (channel));
v0 += rc.unsynchronizedGetNumberOfQueuedBuffers();
}
}
return v0;
}
| 3.26 |
flink_InputGateMetrics_refreshAndGetAvg_rdh
|
/**
* Iterates over all input channels and collects the average number of queued buffers in a
* channel in a best-effort way.
*
* @return average number of queued buffers per channel
*/
float refreshAndGetAvg() {
long total = 0;
int count = 0;
for (InputChannel channel : inputGate.getInputChannels().values()) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = ((RemoteInputChannel) (channel));
int size = rc.unsynchronizedGetNumberOfQueuedBuffers();
total += size;
++count;
}
}
return count == 0 ? 0 : total / ((float) (count));
}
| 3.26 |
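For illustration, each of the refresh methods above makes a single best-effort pass over the channels; the same statistics can also be gathered in one combined pass, as in this self-contained sketch over a plain list of queue sizes (the data is made up):

import java.util.List;

public class QueueStatsSketch {
    // Computes total, min, max and average queue length in a single pass.
    static void printStats(List<Integer> queueSizes) {
        long total = 0;
        int min = Integer.MAX_VALUE;
        int max = 0;
        for (int size : queueSizes) {
            total += size;
            min = Math.min(min, size);
            max = Math.max(max, size);
        }
        int count = queueSizes.size();
        float avg = count == 0 ? 0 : total / (float) count;
        System.out.printf("total=%d min=%d max=%d avg=%.2f%n", total, count == 0 ? 0 : min, max, avg);
    }

    public static void main(String[] args) {
        printStats(List.of(3, 0, 7, 2)); // total=12 min=0 max=7 avg=3.00
    }
}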
flink_InputGateMetrics_getTotalQueueLenGauge_rdh
|
// ------------------------------------------------------------------------
// Gauges to access the stats
// ------------------------------------------------------------------------
private Gauge<Long> getTotalQueueLenGauge() {
return new Gauge<Long>() {
@Override
public Long getValue() {
return refreshAndGetTotal();
}
};
}
| 3.26 |
flink_InputGateMetrics_registerQueueLengthMetrics_rdh
|
// ------------------------------------------------------------------------
// Static access
// ------------------------------------------------------------------------
public static void registerQueueLengthMetrics(MetricGroup parent, SingleInputGate[] gates) {
for (int i = 0; i < gates.length; i++) {
InputGateMetrics metrics = new InputGateMetrics(gates[i]);
MetricGroup group = parent.addGroup(i);
group.gauge("totalQueueLen", metrics.getTotalQueueLenGauge());
group.gauge("minQueueLen", metrics.getMinQueueLenGauge());
group.gauge("maxQueueLen", metrics.getMaxQueueLenGauge());
group.gauge("avgQueueLen", metrics.getAvgQueueLenGauge());
}
}
| 3.26 |
flink_FileInputFormat_createInputSplits_rdh
|
/**
* Computes the input splits for the file. By default, one file block is one split. If more
* splits are requested than blocks are available, then a split may be a fraction of a block and
* splits may cross block boundaries.
*
* @param minNumSplits
* The minimum desired number of file splits.
* @return The computed file splits.
* @see org.apache.flink.api.common.io.InputFormat#createInputSplits(int)
*/
@Override
public FileInputSplit[] createInputSplits(int minNumSplits) throws IOException {
if (minNumSplits < 1) {
throw new IllegalArgumentException("Number of input splits has to be at least 1.");
}
// take the desired number of splits into account
minNumSplits = Math.max(minNumSplits, this.numSplits);
final List<FileInputSplit> inputSplits = new ArrayList<FileInputSplit>(minNumSplits);
// get all the files that are involved in the splits
List<FileStatus> files = new ArrayList<>();
long totalLength = 0;
for (Path path : getFilePaths()) {
final FileSystem fs = path.getFileSystem();
final FileStatus pathFile = fs.getFileStatus(path);
if (pathFile.isDir()) {
totalLength += addFilesInDir(path, files, true);
} else {
testForUnsplittable(pathFile);
files.add(pathFile);
totalLength += pathFile.getLen();
}
}
// returns if unsplittable
if (unsplittable) {
int splitNum = 0;
for (final FileStatus file : files) {
final FileSystem fs = file.getPath().getFileSystem();
final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, file.getLen());
Set<String> hosts = new HashSet<String>();
for (BlockLocation block : blocks) {
hosts.addAll(Arrays.asList(block.getHosts()));
}
long len = file.getLen();
if (testForUnsplittable(file)) {
len = READ_WHOLE_SPLIT_FLAG;
}
FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), 0, len, hosts.toArray(new String[hosts.size()]));
inputSplits.add(fis);
}
return inputSplits.toArray(new FileInputSplit[inputSplits.size()]);
}
final long maxSplitSize = (totalLength / minNumSplits) + ((totalLength % minNumSplits) == 0 ? 0 : 1);
// now that we have the files, generate the splits
int splitNum = 0;
for (final FileStatus file : files) {
final FileSystem fs = file.getPath().getFileSystem();
final long len = file.getLen();
final long blockSize = file.getBlockSize();
final long minSplitSize;
if (this.minSplitSize <= blockSize) {
minSplitSize = this.minSplitSize;
} else {
if (LOG.isWarnEnabled()) {
LOG.warn(((("Minimal split size of " + this.minSplitSize) + " is larger than the block size of ") + blockSize) + ". Decreasing minimal split size to block size.");
}
minSplitSize = blockSize;
}
final long splitSize = Math.max(minSplitSize, Math.min(maxSplitSize, blockSize));
final long halfSplit = splitSize >>> 1;
final long maxBytesForLastSplit = ((long) (splitSize * MAX_SPLIT_SIZE_DISCREPANCY));
if (len > 0) {
// get the block locations and make sure they are in order with respect to their
// offset
final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, len);
Arrays.sort(blocks);
long bytesUnassigned = len;
long position = 0;
int blockIndex = 0;
while (bytesUnassigned > maxBytesForLastSplit) {
// get the block containing the majority of the data
blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
// create a new split
FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), position, splitSize, blocks[blockIndex].getHosts());
inputSplits.add(fis);
// adjust the positions
position += splitSize;
bytesUnassigned -= splitSize;
}
// assign the last split
if (bytesUnassigned > 0) {
blockIndex = getBlockIndexForPosition(blocks, position, halfSplit, blockIndex);
final FileInputSplit fis = new FileInputSplit(
        splitNum++, file.getPath(), position, bytesUnassigned, blocks[blockIndex].getHosts());
inputSplits.add(fis);
}
} else {
// special case with a file of zero bytes size
final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, 0);
String[] hosts;
if (blocks.length > 0) {
hosts = blocks[0].getHosts();
} else {
hosts = new String[0];
}
final FileInputSplit fis = new FileInputSplit(splitNum++, file.getPath(), 0, 0, hosts);
inputSplits.add(fis);
}
}
return inputSplits.toArray(new FileInputSplit[inputSplits.size()]);
}
| 3.26 |
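A short sketch of driving the split computation above from user code; the path and the requested split count are illustrative, and TextInputFormat stands in for any concrete FileInputFormat subclass:

import java.io.IOException;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileInputSplit;
import org.apache.flink.core.fs.Path;

public class SplitInspection {
    public static void main(String[] args) throws IOException {
        TextInputFormat format = new TextInputFormat(new Path("file:///tmp/input"));
        format.configure(new Configuration());
        // ask for at least 8 splits; large files are cut at roughly block-aligned boundaries,
        // while unsplittable (e.g. compressed) files become exactly one split each
        FileInputSplit[] splits = format.createInputSplits(8);
        for (FileInputSplit split : splits) {
            System.out.println(split.getPath() + " start=" + split.getStart() + " len=" + split.getLength());
        }
    }
}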
flink_FileInputFormat_acceptFile_rdh
|
/**
* A simple hook to filter files and directories from the input. The method may be overridden.
* Hadoop's FileInputFormat has a similar mechanism and applies the same filters by default.
*
* @param fileStatus
* The file status to check.
* @return true, if the given file or directory is accepted
*/
public boolean acceptFile(FileStatus fileStatus) {
final String name = fileStatus.getPath().getName();
return !name.startsWith("_")
        && !name.startsWith(".")
        && !filesFilter.filterPath(fileStatus.getPath());
}
| 3.26 |
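Because acceptFile is public, a concrete format can tighten the default filter. A sketch assuming a hypothetical TextInputFormat subclass that only wants .csv files; directories are still accepted so that nested enumeration keeps descending into them:

import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.Path;

public class CsvOnlyTextInputFormat extends TextInputFormat {

    public CsvOnlyTextInputFormat(Path filePath) {
        super(filePath);
    }

    @Override
    public boolean acceptFile(FileStatus fileStatus) {
        // keep the default "_", "." and filesFilter rules, then additionally require a .csv suffix
        return super.acceptFile(fileStatus)
                && (fileStatus.isDir() || fileStatus.getPath().getName().endsWith(".csv"));
    }
}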
flink_FileInputFormat_getSplitStart_rdh
|
// --------------------------------------------------------------------------------------------
// Getting information about the split that is currently open
// --------------------------------------------------------------------------------------------
/**
* Gets the start of the current split.
*
* @return The start of the split.
*/
public long getSplitStart() {
return splitStart;
}
| 3.26 |
flink_FileInputFormat_abortWait_rdh
|
/**
* Double checked procedure setting the abort flag and closing the stream.
*/
private void abortWait() {
this.aborted = true;
final FSDataInputStream inStream = this.fdis;
this.fdis = null;
if (inStream != null) {
    try {
        inStream.close();
    } catch (Throwable t) {
        // best effort: the stream is being torn down during an abort, so close failures are ignored
    }
}
}
| 3.26 |
flink_FileInputFormat_configure_rdh
|
// --------------------------------------------------------------------------------------------
// Pre-flight: Configuration, Splits, Sampling
// --------------------------------------------------------------------------------------------
/**
* Configures the file input format by reading the file path from the configuration.
*
* @see org.apache.flink.api.common.io.InputFormat#configure(org.apache.flink.configuration.Configuration)
*/
@Override
public void configure(Configuration parameters) {
if (getFilePaths().length == 0) {
// file path was not specified yet. Try to set it from the parameters.
String filePath = parameters.getString(FILE_PARAMETER_KEY, null);
if (filePath == null) {
throw new IllegalArgumentException("File path was not specified in input format or configuration.");
} else {
setFilePath(filePath);
}
}
if (!this.enumerateNestedFiles) {
this.enumerateNestedFiles = parameters.getBoolean(ENUMERATE_NESTED_FILES_FLAG, false);
}
}
| 3.26 |
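The same settings can come from a Configuration instead of setters. A sketch; the two key strings are assumed values of FILE_PARAMETER_KEY and ENUMERATE_NESTED_FILES_FLAG and should be checked against the constants in the class, and the path key is only consulted when no path was set programmatically on the format:

import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;

public class ConfigureFromParameters {
    public static void main(String[] args) {
        Configuration parameters = new Configuration();
        parameters.setString("input.file.path", "file:///tmp/data");   // assumed FILE_PARAMETER_KEY
        parameters.setBoolean("recursive.file.enumeration", true);     // assumed ENUMERATE_NESTED_FILES_FLAG

        TextInputFormat format = new TextInputFormat(new Path("file:///tmp/data"));
        // picks up the nested-enumeration flag; the path was already set via the constructor
        format.configure(parameters);
    }
}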
flink_FileInputFormat_setFilePaths_rdh
|
/**
* Sets multiple paths of files to be read.
*
* @param filePaths
* The paths of the files to read.
*/
public void setFilePaths(Path... filePaths) {
if ((!supportsMultiPaths()) && (filePaths.length > 1)) {
throw new UnsupportedOperationException("Multiple paths are not supported by this FileInputFormat.");
}
if (filePaths.length < 1) {
throw new IllegalArgumentException("At least one file path must be specified.");
}
if (filePaths.length == 1) {
// set for backwards compatibility
this.filePath = filePaths[0];
} else {
    // clear file path in case it had been set before
    this.filePath = null;
}
this.filePaths = filePaths;
}
| 3.26 |
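A sketch of reading from several paths at once; the paths are illustrative, and it assumes the concrete format overrides supportsMultiPaths() to return true (delimited formats such as TextInputFormat do in recent Flink versions, which is worth verifying for other formats):

import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;

public class MultiPathSetup {
    public static void main(String[] args) {
        TextInputFormat format = new TextInputFormat(new Path("file:///data/2024-01"));
        // replaces the single constructor path; throws if the format does not support multiple paths
        format.setFilePaths(
                new Path("file:///data/2024-01"),
                new Path("file:///data/2024-02"));
    }
}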
flink_FileInputFormat_setFilePath_rdh
|
/**
* Sets a single path of a file to be read.
*
* @param filePath
* The path of the file to read.
*/
public void setFilePath(Path filePath) {
if (filePath == null) {
throw new IllegalArgumentException("File path must not be null.");
}
setFilePaths(filePath);
}
| 3.26 |
flink_FileInputFormat_initDefaultsFromConfiguration_rdh
|
/**
* Initialize defaults for input format. Needs to be a static method because it is configured
* for local cluster execution.
*
* @param configuration
* The configuration to load defaults from
*/
private static void initDefaultsFromConfiguration(Configuration configuration) {
final long to = configuration.getLong(ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY, ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
if (to < 0) {
LOG.error((("Invalid timeout value for filesystem stream opening: " + to) + ". Using default value of ") + ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
DEFAULT_OPENING_TIMEOUT = ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT;
} else if (to == 0) {
DEFAULT_OPENING_TIMEOUT = 300000; // 5 minutes
} else {
    DEFAULT_OPENING_TIMEOUT = to;
}
}
| 3.26 |
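The value read above is the filesystem stream-opening timeout in milliseconds, where 0 falls back to five minutes and negative values are replaced by the default. A minimal sketch of setting it in the cluster configuration, using the same ConfigConstants key the method reads:

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;

public class StreamOpeningTimeoutSetup {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // 30 second limit for opening an input stream before the attempt is aborted
        config.setLong(ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY, 30_000L);
    }
}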
flink_FileInputFormat_getSplitLength_rdh
|
/**
* Gets the length or remaining length of the current split.
*
* @return The length or remaining length of the current split.
*/
public long getSplitLength() {
return f0;
}
| 3.26 |
flink_FileInputFormat_getAverageRecordWidth_rdh
|
/**
* Gets the estimated average number of bytes per record.
*
* @return The average number of bytes per record.
* @see org.apache.flink.api.common.io.statistics.BaseStatistics#getAverageRecordWidth()
*/
@Override
public float getAverageRecordWidth() {
return this.avgBytesPerRecord;
}
| 3.26 |
flink_FileInputFormat_addFilesInDir_rdh
|
/**
* Enumerate all files in the directory and recursive if enumerateNestedFiles is true.
*
* @return the total length of accepted files.
*/
private long addFilesInDir(Path path, List<FileStatus> files, boolean logExcludedFiles) throws IOException {
final FileSystem fs = path.getFileSystem();
long length = 0;
for (FileStatus dir : fs.listStatus(path)) {
if (dir.isDir()) {
if (acceptFile(dir) && enumerateNestedFiles) {
length += addFilesInDir(dir.getPath(), files, logExcludedFiles);
} else if (logExcludedFiles && LOG.isDebugEnabled()) {
LOG.debug(("Directory " + dir.getPath().toString()) + " did not pass the file-filter and is excluded.");}
} else if (acceptFile(dir)) {
files.add(dir);
length += dir.getLen();
testForUnsplittable(dir);
} else if (logExcludedFiles && LOG.isDebugEnabled()) {
LOG.debug(("Directory " + dir.getPath().toString()) + " did not pass the file-filter and is excluded.");
}
}
return length;
}
| 3.26 |
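Directory recursion and the file filter consulted by acceptFile are both controlled from user code. A small sketch with illustrative paths:

import org.apache.flink.api.common.io.FilePathFilter;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;

public class NestedEnumerationSetup {
    public static void main(String[] args) {
        TextInputFormat format = new TextInputFormat(new Path("file:///data/logs"));
        // recurse into sub-directories instead of reading only the top level
        format.setNestedFileEnumeration(true);
        // default filter skips paths whose names start with "_" or "."
        format.setFilesFilter(FilePathFilter.createDefaultFilter());
    }
}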