name | code_snippet | score |
---|---|---|
flink_QueryableStateClient_shutdownAndHandle_rdh | /**
* Shuts down the client and returns a {@link CompletableFuture} that will be completed when the
* shutdown process is completed.
*
* <p>If an exception is thrown for any reason, then the returned future will be completed
* exceptionally with that exception.
*
* @return A {@link CompletableFuture} for further handling of the shutdown result.
*/
public CompletableFuture<?> shutdownAndHandle() {
return client.shutdown();
} | 3.26 |
flink_QueryableStateClient_setUserClassLoader_rdh | /**
* Replaces the existing {@link ClassLoader} (possibly {@code null}), with the provided one.
*
* @param userClassLoader
* The new {@code userClassLoader}.
* @return The old classloader, or {@code null} if none was specified.
*/
public ClassLoader setUserClassLoader(ClassLoader userClassLoader) {
ClassLoader prev = this.userClassLoader;
this.userClassLoader = userClassLoader;
return prev;
} | 3.26 |
flink_QueryableStateClient_getExecutionConfig_rdh | /**
* Gets the {@link ExecutionConfig}.
*/
public ExecutionConfig getExecutionConfig() {
return executionConfig;
} | 3.26 |
flink_QueryableStateClient_setExecutionConfig_rdh | /**
* Replaces the existing {@link ExecutionConfig} (possibly {@code null}), with the provided one.
*
* @param config
* The new {@code configuration}.
* @return The old configuration, or {@code null} if none was specified.
*/
public ExecutionConfig setExecutionConfig(ExecutionConfig config) {
ExecutionConfig prev = executionConfig;
this.executionConfig = config;
return prev;
} | 3.26 |
flink_StreamExecTemporalSort_createSortProcTime_rdh | /**
* Create Sort logic based on processing time.
*/
private Transformation<RowData> createSortProcTime(RowType inputType, Transformation<RowData> inputTransform, ExecNodeConfig config, ClassLoader classLoader) {
// if the order has secondary sorting fields in addition to the proctime
if (sortSpec.getFieldSize() > 1) {
// skip the first field which is the proctime field and would be ordered by timer.
SortSpec specExcludeTime = sortSpec.createSubSortSpec(1);
GeneratedRecordComparator rowComparator = ComparatorCodeGenerator.gen(config, classLoader, "ProcTimeSortComparator", inputType, specExcludeTime);
ProcTimeSortOperator sortOperator = new ProcTimeSortOperator(InternalTypeInfo.of(inputType), rowComparator);
OneInputTransformation<RowData, RowData> transform = ExecNodeUtil.createOneInputTransformation(inputTransform,
createTransformationMeta(TEMPORAL_SORT_TRANSFORMATION, config), sortOperator, InternalTypeInfo.of(inputType), inputTransform.getParallelism(), false);
// as input node is singleton exchange, its parallelism is 1.
if (inputsContainSingleton()) {
transform.setParallelism(1);
transform.setMaxParallelism(1);
}
EmptyRowDataKeySelector selector = EmptyRowDataKeySelector.INSTANCE;
transform.setStateKeySelector(selector);
transform.setStateKeyType(selector.getProducedType());
return transform;
} else {
// if the order is done only on proctime we only need to forward the elements
return inputTransform;
}
} | 3.26 |
flink_StreamExecTemporalSort_createSortRowTime_rdh | /**
* Create Sort logic based on row time.
*/
private Transformation<RowData> createSortRowTime(RowType inputType, Transformation<RowData> inputTransform, ExecNodeConfig config, ClassLoader classLoader) {
GeneratedRecordComparator rowComparator = null;
if (sortSpec.getFieldSize() > 1) {
// skip the first field which is the rowtime field and would be ordered by timer.
SortSpec specExcludeTime = sortSpec.createSubSortSpec(1);
rowComparator = ComparatorCodeGenerator.gen(config, classLoader, "RowTimeSortComparator", inputType, specExcludeTime);
}
RowTimeSortOperator sortOperator = new RowTimeSortOperator(InternalTypeInfo.of(inputType), sortSpec.getFieldSpec(0).getFieldIndex(), rowComparator);
OneInputTransformation<RowData, RowData> transform = ExecNodeUtil.createOneInputTransformation(inputTransform, createTransformationMeta(TEMPORAL_SORT_TRANSFORMATION, config), sortOperator, InternalTypeInfo.of(inputType), inputTransform.getParallelism(), false);
if (inputsContainSingleton()) {
transform.setParallelism(1);
transform.setMaxParallelism(1);
}
EmptyRowDataKeySelector selector = EmptyRowDataKeySelector.INSTANCE;
transform.setStateKeySelector(selector);
transform.setStateKeyType(selector.getProducedType());
return transform;
} | 3.26 |
flink_AbstractServerBase_m0_rdh | /**
* Starts the server by binding to the configured bind address (blocking).
*
* @throws Exception
* If something goes wrong during the bind operation.
*/
public void m0() throws Throwable {
Preconditions.checkState((f1 == null) && (serverShutdownFuture.get() == null), ((serverName + " is already running @ ") + f1) + ". ");
Iterator<Integer> portIterator = bindPortRange.iterator();
while (portIterator.hasNext() && (!attemptToBind(portIterator.next()))) {
}
if (f1 != null) {
f0.info("Started {} @ {}.", serverName, f1);
} else {
f0.info("Unable to start {}. All ports in provided range ({}) are occupied.", serverName, bindPortRange);
throw new FlinkRuntimeException(("Unable to start " + serverName) + ". All ports in provided range are occupied.");
}
} | 3.26 |
flink_AbstractServerBase_createQueryExecutor_rdh | /**
* Creates a thread pool for the query execution.
*
* @return Thread pool for query execution
*/
private ExecutorService createQueryExecutor() {
ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(("Flink " + getServerName()) + " Thread %d").build();
return Executors.newFixedThreadPool(numQueryThreads, threadFactory);
} | 3.26 |
flink_AbstractServerBase_getQueryExecutor_rdh | /**
* Returns the thread-pool responsible for processing incoming requests.
*/
protected ExecutorService getQueryExecutor() {
return queryExecutor;
} | 3.26 |
flink_AbstractServerBase_attemptToBind_rdh | /**
* Tries to start the server at the provided port.
*
* <p>This, in conjunction with {@link #start()}, tries to start the server on a free port among
* the port range provided at the constructor.
*
* @param port
* the port to try to bind the server to.
* @throws Exception
* If something goes wrong during the bind operation.
*/
private boolean attemptToBind(final int port) throws Throwable {
f0.debug("Attempting to start {} on port {}.", serverName, port);
this.queryExecutor = createQueryExecutor();
this.handler = initializeHandler();
final NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads);
final ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(("Flink " + serverName) + " EventLoop Thread %d").build();
final NioEventLoopGroup v5 = new NioEventLoopGroup(numEventLoopThreads, threadFactory);
this.bootstrap = new ServerBootstrap().localAddress(bindAddress, port).group(v5).channel(NioServerSocketChannel.class).option(ChannelOption.ALLOCATOR, bufferPool).childOption(ChannelOption.ALLOCATOR, bufferPool).childHandler(new ServerChannelInitializer<>(handler));
final int defaultHighWaterMark = 64 * 1024;// from DefaultChannelConfig (not exposed)
// noinspection ConstantConditions
// (ignore warning here to make this flexible in case the configuration values change)
if (LOW_WATER_MARK > defaultHighWaterMark) {
bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
} else {
// including (newHighWaterMark < defaultLowWaterMark)
bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
}
try {
final ChannelFuture future = bootstrap.bind().sync();
if (future.isSuccess()) {
final InetSocketAddress localAddress = ((InetSocketAddress) (future.channel().localAddress()));
f1 = new InetSocketAddress(localAddress.getAddress(), localAddress.getPort());
return true;
}
// the following throw is to bypass Netty's "optimization magic"
// and catch the bind exception.
// the exception is thrown by the sync() call above.
throw future.cause();
} catch (BindException e) {
f0.debug("Failed to start {} on port {}: {}.", serverName, port, e.getMessage());
try {
// we shutdown the server but we reset the future every time because in
// case of failure to bind, we will call attemptToBind() here, and not resetting
// the flag will interfere with future shutdown attempts.
shutdownServer().whenComplete((ignoredV, ignoredT) -> serverShutdownFuture.getAndSet(null)).get();
} catch (Exception r) {
// Here we were seeing this problem:
// https://github.com/netty/netty/issues/4357 if we do a get().
// this is why we now simply wait a bit so that everything is shut down.
f0.warn("Problem while shutting down {}: {}", serverName, r.getMessage());
}
}
// any other type of exception we let it bubble up.
return false;
}
/**
* Shuts down the server and all related thread pools.
*
* @return A {@link CompletableFuture} | 3.26 |
flink_AbstractServerBase_getServerAddress_rdh | /**
* Returns the address of this server.
*
* @return AbstractServerBase address
* @throws IllegalStateException
* If server has not been started yet
*/
public InetSocketAddress getServerAddress() {
Preconditions.checkState(f1 != null, ("Server " + serverName) + " has not been started.");
return f1;
} | 3.26 |
flink_RelativeFileStateHandle_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
}
if (!(o instanceof RelativeFileStateHandle)) {
return false;
}
RelativeFileStateHandle v0 = ((RelativeFileStateHandle) (o));
return super.equals(o) && relativePath.equals(v0.relativePath);
} | 3.26 |
flink_ListElement_list_rdh | /**
* Creates a list with blocks of text. For example:
*
* <pre>{@code .list(
* text("this is first element of list"),
* text("this is second element of list with a %s", link("https://link"))
* )}</pre>
*
* @param elements
* list of this list entries
* @return list representation
*/
public static ListElement list(InlineElement... elements) {
return new ListElement(Arrays.asList(elements));
} | 3.26 |
flink_TumbleWithSizeOnTime_as_rdh | /**
* Assigns an alias for this window that the following {@code groupBy()} and {@code select()}
* clause can refer to. {@code select()} statement can access window properties such as window
* start or end time.
*
* @param alias
* alias for this window
* @return this window
*/
public TumbleWithSizeOnTimeWithAlias as(String alias) {
return as(unresolvedRef(alias));
} | 3.26 |
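A minimal usage sketch for the aliased tumbling window above (illustrative only, not part of the dataset row; it assumes the Table API expression DSL `$`/`lit` from `org.apache.flink.table.api.Expressions` and hypothetical table/column names):

```java
import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.lit;

import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.Tumble;

public class TumbleAliasSketch {
    public static Table windowedSum(Table orders) {
        return orders
                // Tumble.over(...).on(...) yields the window; as("w") assigns the alias used below
                .window(Tumble.over(lit(10).minutes()).on($("rowtime")).as("w"))
                .groupBy($("w"), $("user"))
                // the alias gives access to window properties such as start/end in select()
                .select($("user"), $("w").start(), $("w").end(), $("amount").sum().as("total"));
    }
}
```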
flink_ContinuousFileSplitEnumerator_processDiscoveredSplits_rdh | // ------------------------------------------------------------------------
private void processDiscoveredSplits(Collection<FileSourceSplit> splits, Throwable error) {
if (error != null) {
LOG.error("Failed to enumerate files", error);
return;
}
final Collection<FileSourceSplit> newSplits = splits.stream().filter(split -> pathsAlreadyProcessed.add(split.path())).collect(Collectors.toList());
splitAssigner.addSplits(newSplits);
assignSplits();
} | 3.26 |
flink_Predicates_arePublicFinalOfTypeWithAnnotation_rdh | /**
* Tests that the field is {@code public final}, has the fully qualified type name of {@code fqClassName} and is annotated with the {@code annotationType}.
*/
public static DescribedPredicate<JavaField> arePublicFinalOfTypeWithAnnotation(String fqClassName, Class<? extends Annotation> annotationType) {
return arePublicFinalOfType(fqClassName).and(annotatedWith(annotationType));
} | 3.26 |
flink_Predicates_getClassSimpleNameFromFqName_rdh | /**
* Extracts the class name from the given fully qualified class name.
*
* <p>Example:
*
* <pre>
* getClassFromFqName("com.example.MyClass"); // Returns: "MyClass"
* </pre>
*/
public static String getClassSimpleNameFromFqName(String fqClassName) {
// Not using Preconditions to avoid adding non-test flink-core dependency
if (fqClassName == null) {
throw new NullPointerException("Fully qualified class name cannot be null");
}
if (fqClassName.trim().isEmpty()) {
throw new IllegalArgumentException("Fully qualified class name cannot be empty");
}
int lastDotIndex = fqClassName.lastIndexOf('.');
int lastDollarIndex = fqClassName.lastIndexOf('$');
int startIndex = Math.max(lastDotIndex, lastDollarIndex) + 1;
String className = fqClassName.substring(startIndex);
if (className.trim().isEmpty()) {
throw new IllegalArgumentException("Extracted class name is empty from: " + fqClassName);
}
return className;
} | 3.26 |
flink_Predicates_arePublicStaticOfType_rdh | /**
* Tests that the given field is {@code public static} and has the fully qualified type name of
* {@code fqClassName}.
*
* <p>Attention: changing the description will add a rule into the stored.rules.
*/
public static DescribedPredicate<JavaField> arePublicStaticOfType(String fqClassName) {
return areFieldOfType(fqClassName, JavaModifier.PUBLIC, JavaModifier.STATIC);
} | 3.26 |
flink_Predicates_arePublicStaticFinalOfType_rdh | /**
* Tests that the field is {@code public static final} and has the fully qualified type name of
* {@code fqClassName}.
*/
public static DescribedPredicate<JavaField> arePublicStaticFinalOfType(String fqClassName) {
return arePublicStaticOfType(fqClassName).and(isFinal());
} | 3.26 |
flink_Predicates_areFieldOfType_rdh | /**
* Tests that the field has the fully qualified type of {@code fqClassName} with the given
* modifiers.
*
* <p>Attention: changing the description will add a rule into the stored.rules.
*/
public static DescribedPredicate<JavaField> areFieldOfType(String fqClassName, JavaModifier... modifiers) {
return DescribedPredicate.describe(String.format("are %s, and of type %s", Arrays.stream(modifiers).map(JavaModifier::toString).map(String::toLowerCase).collect(Collectors.joining(", ")), getClassSimpleNameFromFqName(fqClassName)), field -> field.getModifiers().containsAll(Arrays.asList(modifiers)) && field.getRawType().getName().equals(fqClassName));
} | 3.26 |
flink_Predicates_containAnyFieldsInClassHierarchyThat_rdh | /**
*
* @return A {@link DescribedPredicate} returning true, if and only if a {@link JavaField} matching the predicate could be found in the {@link JavaClass}.
*/
public static DescribedPredicate<JavaClass> containAnyFieldsInClassHierarchyThat(DescribedPredicate<? super JavaField> predicate) {
return new ContainAnyFieldsThatPredicate<>("fields", JavaClass::getAllFields, predicate);
} | 3.26 |
flink_Predicates_arePublicStaticFinalOfTypeWithAnnotation_rdh | /**
* Tests that the field is {@code public static final}, has the fully qualified type name of
* {@code fqClassName} and is annotated with the {@code annotationType}.
*/
public static DescribedPredicate<JavaField> arePublicStaticFinalOfTypeWithAnnotation(String fqClassName, Class<? extends Annotation> annotationType) {
return arePublicStaticFinalOfType(fqClassName).and(annotatedWith(annotationType));
} | 3.26 |
flink_Predicates_exactlyOneOf_rdh | /**
* Returns a {@link DescribedPredicate} that returns true if one and only one of the given
* predicates match.
*/
@SafeVarargs
public static <T> DescribedPredicate<T> exactlyOneOf(final DescribedPredicate<? super T>... other) {
return DescribedPredicate.describe("only one of the following predicates match:\n" + Arrays.stream(other).map(dp -> ("* " + dp) + "\n").collect(Collectors.joining()), t -> Arrays.stream(other).map(dp -> dp.test(t)).reduce(false, Boolean::logicalXor));
} | 3.26 |
flink_Predicates_areStaticFinalOfTypeWithAnnotation_rdh | /**
* Tests that the field is {@code static final}, has the fully qualified type name of {@code fqClassName} and is annotated with the {@code annotationType}. It doesn't matter if public,
* private or protected.
*/
public static DescribedPredicate<JavaField> areStaticFinalOfTypeWithAnnotation(String fqClassName, Class<? extends Annotation> annotationType) {
return areFieldOfType(fqClassName, JavaModifier.STATIC, JavaModifier.FINAL).and(annotatedWith(annotationType));
} | 3.26 |
flink_Predicates_arePublicFinalOfType_rdh | /**
* Tests that the given field is {@code public final}, not {@code static} and has the given
* fully qualified type name of {@code fqClassName}.
*/
public static DescribedPredicate<JavaField> arePublicFinalOfType(String fqClassName) {
return is(ofType(fqClassName)).and(isPublic()).and(isFinal()).and(isNotStatic());
} | 3.26 |
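The field predicates above compose; a small hedged sketch (the `ConfigOption` class name is only an illustrative fully qualified name, and the import of Flink's `Predicates` utility itself is assumed):

```java
import com.tngtech.archunit.base.DescribedPredicate;
import com.tngtech.archunit.core.domain.JavaClass;
import com.tngtech.archunit.core.domain.JavaField;

public class PredicatesSketch {
    public static DescribedPredicate<JavaClass> holdsSomeConfigOptionField() {
        // a field that is either "public static final" or "public final" of the given type, but not both
        DescribedPredicate<JavaField> optionField = Predicates.exactlyOneOf(
                Predicates.arePublicStaticFinalOfType("org.apache.flink.configuration.ConfigOption"),
                Predicates.arePublicFinalOfType("org.apache.flink.configuration.ConfigOption"));
        // lift the field predicate to a class predicate over the whole class hierarchy
        return Predicates.containAnyFieldsInClassHierarchyThat(optionField);
    }
}
```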
flink_ParserImpl_parse_rdh | /**
* When parsing statement, it first uses {@link ExtendedParser} to parse statements. If {@link ExtendedParser} fails to parse statement, it uses the {@link CalciteParser} to parse
* statements.
*
* @param statement
* input statement.
* @return parsed operations.
*/
@Override
public List<Operation> parse(String statement) {
CalciteParser parser = calciteParserSupplier.get();
FlinkPlannerImpl planner = validatorSupplier.get();
Optional<Operation> command = EXTENDED_PARSER.parse(statement);
if (command.isPresent()) {
return Collections.singletonList(command.get());
}
// parse the sql query
// use parseSqlList here because we need to support statement end with ';' in sql client.
SqlNodeList sqlNodeList = parser.parseSqlList(statement);
List<SqlNode> parsed = sqlNodeList.getList();
Preconditions.checkArgument(parsed.size() == 1, "only single statement supported");
return Collections.singletonList(SqlNodeToOperationConversion.convert(planner, catalogManager, parsed.get(0)).orElseThrow(() -> new TableException("Unsupported query: " + statement)));
} | 3.26 |
flink_RichInputFormat_closeInputFormat_rdh | /**
* Closes this InputFormat instance. This method is called once per parallel instance. Resources
* allocated during {@link #openInputFormat()} should be closed in this method.
*
* @see InputFormat
* @throws IOException
* in case closing the resources failed
*/
@PublicEvolving
public void closeInputFormat() throws IOException {
// do nothing here, just for subclasses
} | 3.26 |
flink_RichInputFormat_openInputFormat_rdh | /**
* Opens this InputFormat instance. This method is called once per parallel instance. Resources
* should be allocated in this method. (e.g. database connections, cache, etc.)
*
* @see InputFormat
* @throws IOException
* in case allocating the resources failed.
*/
@PublicEvolving
public void openInputFormat() throws IOException {
// do nothing here, just for subclasses
} | 3.26 |
flink_PackagedProgramUtils_createJobGraph_rdh | /**
* Creates a {@link JobGraph} with a random {@link JobID} from the given {@link PackagedProgram}.
*
* @param packagedProgram
* to extract the JobGraph from
* @param configuration
* to use for the optimizer and job graph generator
* @param defaultParallelism
* for the JobGraph
* @param suppressOutput
* Whether to suppress stdout/stderr during interactive JobGraph creation.
* @return JobGraph extracted from the PackagedProgram
* @throws ProgramInvocationException
* if the JobGraph generation failed
*/
public static JobGraph createJobGraph(PackagedProgram packagedProgram, Configuration configuration, int defaultParallelism, boolean suppressOutput) throws ProgramInvocationException {
return createJobGraph(packagedProgram, configuration, defaultParallelism, null, suppressOutput);
} | 3.26 |
flink_MapOperatorBase_executeOnCollections_rdh | // --------------------------------------------------------------------------------------------
@Override
protected List<OUT> executeOnCollections(List<IN> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
MapFunction<IN, OUT> function = this.userFunction.getUserCodeObject();
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
ArrayList<OUT> result = new ArrayList<OUT>(inputData.size());
TypeSerializer<IN> inSerializer = getOperatorInfo().getInputType().createSerializer(executionConfig);
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
for (IN element : inputData) {
IN v5 = inSerializer.copy(element);
OUT out = function.map(v5);
result.add(outSerializer.copy(out));
}
FunctionUtils.closeFunction(function);
return result;
} | 3.26 |
flink_DoubleHashSet_contains_rdh | /**
* See {@link Double#equals(Object)}.
*/
public boolean contains(final double k) {
long longKey = Double.doubleToLongBits(k);
if (longKey == 0L) {
return this.containsZero;
} else {
double[] key = this.key;
long curr;
int v7;
if ((curr = Double.doubleToLongBits(key[v7 = ((int) (MurmurHashUtil.fmix(longKey))) & this.mask])) == 0L) {
return false;
} else if (longKey == curr) {
return true;
} else {
while ((curr = Double.doubleToLongBits(key[v7 = (v7 + 1) & this.mask])) != 0L) {
if (longKey == curr) {
return true;
}
}
return false;
}
}
} | 3.26 |
flink_DoubleHashSet_add_rdh | /**
* See {@link Double#equals(Object)}.
*/
public boolean add(final double k) {
long longKey = Double.doubleToLongBits(k);
if (longKey == 0L) {
if (this.containsZero) {
return false;
}
this.containsZero = true;
} else {
double[] key = this.key;
int pos;
long curr;
if ((curr = Double.doubleToLongBits(key[pos = ((int) (MurmurHashUtil.fmix(longKey))) & this.mask])) != 0L) {
if (curr == longKey) {
return false;
}
while ((curr = Double.doubleToLongBits(key[pos = (pos + 1) & this.mask])) != 0L) {
if (curr == longKey) {
return false;
}
}
}
key[pos] = k;
}
if ((this.size++) >= this.maxFill) {
this.rehash(OptimizableHashSet.arraySize(this.size + 1, this.f));
}
return true;
} | 3.26 |
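A short illustration of the bit-level membership semantics above (the expected-size constructor is an assumption; only `add`/`contains` are taken from the snippets):

```java
public class DoubleHashSetSketch {
    public static void main(String[] args) {
        DoubleHashSet set = new DoubleHashSet(16); // assumed constructor (expected size)
        System.out.println(set.add(1.5));          // true: stored via doubleToLongBits + hash probing
        System.out.println(set.add(0.0));          // true: the zero bit pattern is tracked via containsZero
        System.out.println(set.add(1.5));          // false: already present
        System.out.println(set.contains(0.0));     // true
        System.out.println(set.contains(-0.0));    // false: bit-level comparison, -0.0 != 0.0 (see Double#equals)
    }
}
```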
flink_MergingWindowSet_getStateWindow_rdh | /**
* Returns the state window for the given in-flight {@code Window}. The state window is the
* {@code Window} in which we keep the actual state of a given in-flight window. Windows might
* expand but we keep the original state window for keeping the elements of the window to avoid
* costly state juggling.
*
* @param window
* The window for which to get the state window.
*/
public W getStateWindow(W window) {
return mapping.get(window);
} | 3.26 |
flink_MergingWindowSet_persist_rdh | /**
* Persist the updated mapping to the given state if the mapping changed since initialization.
*/
public void persist() throws Exception {
if (!mapping.equals(initialMapping)) {
state.update(mapping.entrySet().stream().map(w -> new Tuple2<>(w.getKey(), w.getValue())).collect(Collectors.toList()));
}
} | 3.26 |
flink_MergingWindowSet_retireWindow_rdh | /**
* Removes the given window from the set of in-flight windows.
*
* @param window
* The {@code Window} to remove.
*/
public void retireWindow(W window) {
W removed = this.mapping.remove(window);
if (removed == null) {
throw new IllegalStateException(("Window " + window) + " is not in in-flight window set.");
}
}
/**
* Adds a new {@code Window} to the set of in-flight windows. It might happen that this triggers
* merging of previously in-flight windows. In that case, the provided {@link MergeFunction} is
* called.
*
* <p>This returns the window that is the representative of the added window after adding. This
* can either be the new window itself, if no merge occurred, or the newly merged window. Adding
* an element to a window or calling trigger functions should only happen on the returned
* representative. This way, we never have to deal with a new window that is immediately
* swallowed up by another window.
*
* <p>If the new window is merged, the {@code MergeFunction} callback arguments also don't
* contain the new window as part of the list of merged windows.
*
* @param newWindow
* The new {@code Window} to add.
* @param mergeFunction
* The callback to be invoked in case a merge occurs.
* @return The {@code Window} that the new {@code Window} ended up in. This can also be the new
{@code Window} | 3.26 |
flink_TriConsumerWithException_unchecked_rdh | /**
* Convert a {@link TriConsumerWithException} into a {@link TriConsumer}.
*
* @param triConsumerWithException
* TriConsumer with exception to convert into a {@link TriConsumer}.
* @param <A>
* first input type
* @param <B>
* second input type
* @param <C>
* third input type
* @return {@link TriConsumer} which rethrows all checked exceptions as unchecked.
*/
static <A, B, C> TriConsumer<A, B, C> unchecked(TriConsumerWithException<A, B, C, ?> triConsumerWithException) {
return (A a, B b, C c) -> {
try {
triConsumerWithException.accept(a, b, c);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
};
} | 3.26 |
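A hedged usage sketch for `unchecked` (the `org.apache.flink.util.function` package location is assumed; names are illustrative):

```java
import java.io.IOException;
import java.io.Writer;

import org.apache.flink.util.function.TriConsumer;
import org.apache.flink.util.function.TriConsumerWithException;

public class UncheckedTriConsumerSketch {
    public static void writeRecord(Writer out, String name, int id, long ts) {
        // the lambda is allowed to throw IOException ...
        TriConsumerWithException<String, Integer, Long, IOException> write =
                (n, i, t) -> out.write(n + "," + i + "," + t + "\n");
        // ... while the unchecked view fits APIs that expect a plain TriConsumer
        TriConsumer<String, Integer, Long> unchecked = TriConsumerWithException.unchecked(write);
        unchecked.accept(name, id, ts); // a thrown IOException is rethrown unchecked
    }
}
```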
flink_WritableComparator_checkKryoInitialized_rdh | // --------------------------------------------------------------------------------------------
private void checkKryoInitialized() {
if (this.kryo == null) {
this.kryo = new Kryo();
Kryo.DefaultInstantiatorStrategy instantiatorStrategy = new Kryo.DefaultInstantiatorStrategy();
instantiatorStrategy.setFallbackInstantiatorStrategy(new StdInstantiatorStrategy());
kryo.setInstantiatorStrategy(instantiatorStrategy);
this.kryo.setAsmEnabled(true);
this.kryo.register(type);
}
} | 3.26 |
flink_WritableComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_MetricOptions_forReporter_rdh | /**
* Returns a view over the given configuration via which options can be set/retrieved for the
* given reporter.
*
* <pre>
* Configuration config = ...
* MetricOptions.forReporter(config, "my_reporter")
* .set(MetricOptions.REPORTER_INTERVAL, Duration.ofSeconds(10))
* ...
* </pre>
*
* @param configuration
* backing configuration
* @param reporterName
* reporter name
* @return view over configuration
*/
@Experimental
public static Configuration forReporter(Configuration configuration, String reporterName) {
return new DelegatingConfiguration(configuration, (ConfigConstants.METRICS_REPORTER_PREFIX + reporterName) + ".");
} | 3.26 |
flink_OrInputTypeStrategy_commonMax_rdh | /**
* Returns the common maximum argument count or null if undefined.
*/
@Nullable
private static Integer commonMax(List<ArgumentCount> counts) {
// max=5, max=3, max=0 -> max=5
// max=5, max=3, max=0, max=null -> max=null
int commonMax = Integer.MIN_VALUE;
for (ArgumentCount count : counts) {
final Optional<Integer> max = count.getMaxCount();
if (!max.isPresent()) {
return null;
}
commonMax = Math.max(commonMax, max.get());
}
if (commonMax == Integer.MIN_VALUE) {
return null;
}
return commonMax;
} | 3.26 |
flink_OrInputTypeStrategy_commonMin_rdh | // --------------------------------------------------------------------------------------------
/**
* Returns the common minimum argument count or null if undefined.
*/
@Nullable
private static Integer commonMin(List<ArgumentCount> counts) {
// min=5, min=3, min=0 -> min=0
// min=5, min=3, min=0, min=null -> min=null
int commonMin = Integer.MAX_VALUE;
for (ArgumentCount count : counts) {
final Optional<Integer> min = count.getMinCount();
if (!min.isPresent()) {
return null;
}
commonMin = Math.min(commonMin, min.get());
}
if (commonMin == Integer.MAX_VALUE) {
return null;
}
return commonMin;
} | 3.26 |
flink_AsyncClient_query_rdh | /**
* A simple asynchronous client that simulates interacting with an unreliable external service.
*/
public class AsyncClient {
public CompletableFuture<String> query(int key) {
return CompletableFuture.supplyAsync(() -> {
long sleep = ((long) (ThreadLocalRandom.current().nextFloat() * 100));
try {
Thread.sleep(sleep);
} catch (InterruptedException e) {
throw new RuntimeException("AsyncClient was interrupted", e);
}
if (ThreadLocalRandom.current().nextFloat() < 0.001F) {
throw new RuntimeException("wahahahaha...");
} else {
return "key" + (key % 10);
}
});
} | 3.26 |
flink_CheckpointOptions_getCheckpointType_rdh | // ------------------------------------------------------------------------
/**
* Returns the type of checkpoint to perform.
*/
public SnapshotType getCheckpointType() {
return checkpointType;
} | 3.26 |
flink_CheckpointOptions_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
return Objects.hash(targetLocation, checkpointType, alignmentType, f0);
} | 3.26 |
flink_CheckpointOptions_getTargetLocation_rdh | /**
* Returns the target location for the checkpoint.
*/
public CheckpointStorageLocationReference getTargetLocation() {
return targetLocation;
} | 3.26 |
flink_ScalaFutureUtils_toJava_rdh | /**
* Converts a Scala {@link Future} to a {@link CompletableFuture}.
*
* @param scalaFuture
* to convert to a Java 8 CompletableFuture
* @param <T>
* type of the future value
* @param <U>
* type of the original future
* @return Java 8 CompletableFuture
*/
public static <T, U extends T> CompletableFuture<T> toJava(Future<U> scalaFuture) {
final CompletableFuture<T> result = new CompletableFuture<>();
scalaFuture.onComplete(new OnComplete<U>() {
@Override
public void onComplete(Throwable failure, U success) {
if (failure != null) {
result.completeExceptionally(failure);
} else {
result.complete(success);
}
}
}, DirectExecutionContext.INSTANCE);
return result;
} | 3.26 |
flink_ContextResolvedFunction_isTemporary_rdh | /**
*
* @return true if the function is temporary. An anonymous function is always temporary.
*/
public boolean isTemporary() {
return isTemporary;
} | 3.26 |
flink_HiveTableInputFormat_addSchemaToConf_rdh | // Hive readers may rely on the schema info in configuration
private void addSchemaToConf(JobConf jobConf) {
// set columns/types -- including partition cols
List<String> typeStrs = Arrays.stream(fieldTypes).map(t -> HiveTypeUtil.toHiveTypeInfo(t, true).toString()).collect(Collectors.toList());
jobConf.set(IOConstants.COLUMNS, String.join(",", fieldNames));
jobConf.set(IOConstants.COLUMNS_TYPES, String.join(",", typeStrs));
// set schema evolution -- excluding partition cols
int numNonPartCol = fieldNames.length - partitionKeys.size();
jobConf.set(SCHEMA_EVOLUTION_COLUMNS, String.join(",", Arrays.copyOfRange(fieldNames, 0, numNonPartCol)));
jobConf.set(SCHEMA_EVOLUTION_COLUMNS_TYPES, String.join(",", typeStrs.subList(0, numNonPartCol)));
// in older versions, parquet reader also expects the selected col indices in conf,
// excluding part cols
String readColIDs = Arrays.stream(selectedFields).filter(i -> i < numNonPartCol).mapToObj(String::valueOf).collect(Collectors.joining(","));
jobConf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, readColIDs);
} | 3.26 |
flink_AbstractOrcNoHiveVector_createFlinkVectorFromConstant_rdh | /**
* Create flink vector by hive vector from constant.
*/
public static ColumnVector createFlinkVectorFromConstant(LogicalType type, Object value, int batchSize) {
return createFlinkVector(createHiveVectorFromConstant(type, value, batchSize));
} | 3.26 |
flink_AbstractOrcNoHiveVector_createHiveVectorFromConstant_rdh | /**
* Create a orc vector from partition spec value. See hive {@code VectorizedRowBatchCtx#addPartitionColsToBatch}.
*/
private static ColumnVector createHiveVectorFromConstant(LogicalType type, Object value, int batchSize) {
switch (type.getTypeRoot()) {
case CHAR :
case VARCHAR :
case BINARY :
case VARBINARY :
return createBytesVector(batchSize, value);
case BOOLEAN :
return createLongVector(batchSize, ((Boolean) (value)) ? 1 : 0);
case TINYINT :
case SMALLINT :
case INTEGER :
case BIGINT :
return createLongVector(batchSize, value);
case DECIMAL :
DecimalType decimalType = ((DecimalType) (type));
return createDecimalVector(batchSize, decimalType.getPrecision(), decimalType.getScale(), value);
case FLOAT :
case DOUBLE :
return createDoubleVector(batchSize, value);
case DATE :
if (value instanceof LocalDate) {
value = Date.valueOf(((LocalDate) (value)));
}
return createLongVector(batchSize, toInternal(((Date) (value))));
case TIMESTAMP_WITHOUT_TIME_ZONE :
return createTimestampVector(batchSize, value);
default :
throw new UnsupportedOperationException("Unsupported type: " + type);
}
} | 3.26 |
flink_RankProcessStrategy_analyzeRankProcessStrategies_rdh | /**
* Gets {@link RankProcessStrategy} based on input, partitionKey and orderKey.
*/
static List<RankProcessStrategy> analyzeRankProcessStrategies(StreamPhysicalRel rank, ImmutableBitSet partitionKey, RelCollation orderKey) {
FlinkRelMetadataQuery mq = ((FlinkRelMetadataQuery) (rank.getCluster().getMetadataQuery()));
List<RelFieldCollation> fieldCollations = orderKey.getFieldCollations();
boolean isUpdateStream = !ChangelogPlanUtils.inputInsertOnly(rank);
RelNode input = rank.getInput(0);
if (isUpdateStream) {
Set<ImmutableBitSet> upsertKeys = mq.getUpsertKeysInKeyGroupRange(input, partitionKey.toArray());
// the upsert key should contain the partition key, otherwise we fall back to using retract rank
if (((upsertKeys == null) || upsertKeys.isEmpty()) || upsertKeys.stream().noneMatch(k -> k.contains(partitionKey))) {
return Collections.singletonList(RETRACT_STRATEGY);
} else {
FlinkRelMetadataQuery fmq = FlinkRelMetadataQuery.reuseOrCreate(mq);
RelModifiedMonotonicity monotonicity = fmq.getRelModifiedMonotonicity(input);
boolean isMonotonic = false;
if ((monotonicity != null) && (!fieldCollations.isEmpty())) {
isMonotonic = fieldCollations.stream().allMatch(collation ->
{
SqlMonotonicity fieldMonotonicity = monotonicity.fieldMonotonicities()[collation.getFieldIndex()];
RelFieldCollation.Direction direction = collation.direction;
if (((fieldMonotonicity == SqlMonotonicity.DECREASING) || (fieldMonotonicity == SqlMonotonicity.STRICTLY_DECREASING)) && (direction == RelFieldCollation.Direction.ASCENDING)) {
// sort field is ascending and its monotonicity
// is decreasing
return true;
} else if (((fieldMonotonicity == SqlMonotonicity.INCREASING) || (fieldMonotonicity == SqlMonotonicity.STRICTLY_INCREASING)) && (direction == RelFieldCollation.Direction.DESCENDING)) {
// sort field is descending and its monotonicity
// is increasing
return true;
} else {
// sort key is a grouping key of upstream agg, it is monotonic
return fieldMonotonicity == SqlMonotonicity.CONSTANT;
}
});
}
if (isMonotonic) {
// TODO: choose a set of primary key
return Arrays.asList(new UpdateFastStrategy(upsertKeys.iterator().next().toArray()), RETRACT_STRATEGY);
} else {
return Collections.singletonList(RETRACT_STRATEGY);
}
}
} else {
return Collections.singletonList(APPEND_FAST_STRATEGY);
}
} | 3.26 |
flink_HiveDeclarativeAggregateFunction_getTypeInference_rdh | /**
* This method is used to infer result type when generate {@code AggregateCall} of calcite.
*/
public TypeInference getTypeInference(DataTypeFactory factory) {
return TypeInference.newBuilder().outputTypeStrategy(new HiveAggregateFunctionOutputStrategy(this)).build();
} | 3.26 |
flink_KMeans_getCentroidDataSet_rdh | // *************************************************************************
// DATA SOURCE READING (POINTS AND CENTROIDS)
// *************************************************************************
private static DataSet<Centroid> getCentroidDataSet(ParameterTool params, ExecutionEnvironment env) {
DataSet<Centroid> centroids;
if (params.has("centroids")) {
centroids = env.readCsvFile(params.get("centroids")).fieldDelimiter(" ").pojoType(KMeans.Centroid.class, "id", "x", "y");
} else {
System.out.println("Executing K-Means example with default centroid data set.");
System.out.println("Use --centroids to specify file input.");
centroids = KMeansData.getDefaultCentroidDataSet(env);
}
return centroids;
} | 3.26 |
flink_KMeans_open_rdh | /**
* Reads the centroid values from a broadcast variable into a collection.
*/
@Override
public void open(OpenContext openContext) throws Exception {
this.centroids = getRuntimeContext().getBroadcastVariable("centroids");
} | 3.26 |
flink_StateMetadata_validateStateMetadata_rdh | /**
* Validate deserialized state metadata from json content of {@link org.apache.flink.table.api.CompiledPlan}.
*
* @param inputNumOfOperator
* the input number of the stateful operator that the exec node to
* translate to.
* @param stateMetadataList
* the deserialized state metadata list.
*/
private static void validateStateMetadata(int inputNumOfOperator, List<StateMetadata> stateMetadataList) {
// the state metadata list size should be equal to the input number of the operator
Preconditions.checkArgument(inputNumOfOperator == stateMetadataList.size(), String.format("The compiled plan contains inconsistent state metadata configuration.\n" + "Received %s state meta for a %sInputStreamOperator.", stateMetadataList.size(), inputNumOfOperator > 2 ? "Multiple" : inputNumOfOperator == 2 ? "Two" : "One"));
// the state index should not contain duplicates, and should start from 0 to inputNum - 1
List<Integer> normalizedIndexList = stateMetadataList.stream().map(StateMetadata::getStateIndex).sorted().distinct().collect(Collectors.toList());
Preconditions.checkArgument(((normalizedIndexList.size() == inputNumOfOperator) && (normalizedIndexList.get(0) == 0)) && (normalizedIndexList.get(inputNumOfOperator - 1) == (inputNumOfOperator - 1)), ("The compiled plan contains inconsistent state metadata configuration.\n" + "The state index should not contain duplicates and start from 0 (inclusive) ") + "and monotonically increase to the input size (exclusive) of the operator.");
} | 3.26 |
flink_PythonEnvUtils_preparePythonEnvironment_rdh | /**
* Prepares PythonEnvironment to start python process.
*
* @param config
* The Python configurations.
* @param entryPointScript
* The entry point script, optional.
* @param tmpDir
* The temporary directory which files will be copied to.
* @return PythonEnvironment the Python environment which will be executed in Python process.
*/
static PythonEnvironment preparePythonEnvironment(ReadableConfig config, String entryPointScript, String tmpDir) throws IOException {
PythonEnvironment env = new PythonEnvironment();
// 1. set the path of python interpreter.
String v1 = config.getOptional(PYTHON_CLIENT_EXECUTABLE).orElse(System.getenv(PYFLINK_CLIENT_EXECUTABLE));
if (v1 != null) {
env.pythonExec = v1;
}
// 2. setup temporary local directory for the user files
tmpDir = new File(tmpDir).getAbsolutePath();
Path tmpDirPath = new Path(tmpDir);
tmpDirPath.getFileSystem().mkdirs(tmpDirPath);
env.tempDirectory = tmpDir;
// 3. append the internal lib files to PYTHONPATH.
if (System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) != null) {
String pythonLibDir = (System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) + File.separator) + "python";
env.pythonPath = getLibFiles(pythonLibDir).stream().map(p -> p.toFile().getAbsolutePath()).collect(Collectors.joining(File.pathSeparator));
}
// 4. copy relevant python files to tmp dir and set them in PYTHONPATH.
if (config.getOptional(PYTHON_FILES).isPresent()) {
List<Path> pythonFiles = Arrays.stream(config.get(PYTHON_FILES).split(FILE_DELIMITER)).map(Path::new).collect(Collectors.toList());
addToPythonPath(env, pythonFiles);
}
// 5. set the archives directory as the working directory, then user could access the
// content of the archives via relative path
if (config.getOptional(PYTHON_ARCHIVES).isPresent() && (config.getOptional(PYTHON_CLIENT_EXECUTABLE).isPresent() || (!StringUtils.isNullOrWhitespaceOnly(System.getenv(PYFLINK_CLIENT_EXECUTABLE))))) {
env.archivesDirectory = String.join(File.separator, tmpDir, PYTHON_ARCHIVES_DIR);
// extract archives to archives directory
config.getOptional(PYTHON_ARCHIVES).ifPresent(pyArchives -> {
for (String archive : pyArchives.split(FILE_DELIMITER)) {
final Path archivePath;
final String targetDirName;
final String originalFileName;
if (archive.contains(PythonDependencyUtils.PARAM_DELIMITER)) {
String[] filePathAndTargetDir = archive.split(PythonDependencyUtils.PARAM_DELIMITER, 2);
archivePath = new Path(filePathAndTargetDir[0]);
targetDirName = filePathAndTargetDir[1];
originalFileName = archivePath.getName();
} else {
archivePath = new Path(archive);
originalFileName = archivePath.getName();
targetDirName = originalFileName;
}
Path localArchivePath = archivePath;
try {
if (archivePath.getFileSystem().isDistributedFS()) {
localArchivePath = new Path(env.tempDirectory, String.join(File.separator, UUID.randomUUID().toString(), originalFileName));
FileUtils.copy(archivePath, localArchivePath, false);
}
} catch (IOException e) {
String msg = String.format("Error occurred when copying %s to %s.", archivePath, localArchivePath);
throw new RuntimeException(msg, e);
}
try {
CompressionUtils.extractFile(localArchivePath.getPath(), String.join(File.separator, env.archivesDirectory, targetDirName), originalFileName);
} catch (IOException e) {
throw new <e>RuntimeException("Extract archives to archives directory failed.");
}
}
});
}
// 6. append configured python.pythonpath to the PYTHONPATH.
if (config.getOptional(PYTHON_PATH).isPresent()) {
env.pythonPath = String.join(File.pathSeparator, config.getOptional(PYTHON_PATH).get(), env.pythonPath);
}
if (entryPointScript != null) {
addToPythonPath(env, Collections.singletonList(new Path(entryPointScript)));
}
return env;
} | 3.26 |
flink_PythonEnvUtils_createSymbolicLink_rdh | /**
* Creates symbolLink in working directory for pyflink lib.
*
* @param libPath
* the pyflink lib file path.
* @param symbolicLinkPath
* the symbolic link to pyflink lib.
*/
private static void createSymbolicLink(Path libPath, Path symbolicLinkPath) throws IOException {
try {
Files.createSymbolicLink(symbolicLinkPath, libPath);
} catch (IOException e) {
LOG.warn("Create symbol link from {} to {} failed and copy instead.", symbolicLinkPath,
libPath, e);
Files.copy(libPath, symbolicLinkPath);
}} | 3.26 |
flink_PythonEnvUtils_startPythonProcess_rdh | /**
* Starts python process.
*
* @param pythonEnv
* the python Environment which will be in a process.
* @param commands
* the commands that python process will execute.
* @return the process represent the python process.
* @throws IOException
* Thrown if an error occurred when python process start.
*/
static Process startPythonProcess(PythonEnvironment pythonEnv, List<String> commands, boolean redirectToPipe) throws IOException {
ProcessBuilder pythonProcessBuilder = new ProcessBuilder();
Map<String, String> env = pythonProcessBuilder.environment();
if (pythonEnv.pythonPath != null) {
String defaultPythonPath = env.get("PYTHONPATH");
if (Strings.isNullOrEmpty(defaultPythonPath)) {
env.put("PYTHONPATH", pythonEnv.pythonPath);
} else {
env.put("PYTHONPATH", String.join(File.pathSeparator, pythonEnv.pythonPath, defaultPythonPath));
}
}
if (pythonEnv.archivesDirectory != null) {
pythonProcessBuilder.directory(new File(pythonEnv.archivesDirectory));
}
pythonEnv.systemEnv.forEach(env::put);
commands.add(0, pythonEnv.pythonExec);
pythonProcessBuilder.command(commands);
// redirect the stderr to stdout
pythonProcessBuilder.redirectErrorStream(true);
if (redirectToPipe) {
pythonProcessBuilder.redirectOutput(ProcessBuilder.Redirect.PIPE);
} else {
// set the child process the output same as the parent process.
pythonProcessBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
}
LOG.info("Starting Python process with environment variables: {{}}, command: {}", env.entrySet().stream().map(e -> (e.getKey() + "=") + e.getValue()).collect(Collectors.joining(", ")), String.join(" ", commands));
Process process = pythonProcessBuilder.start();
if (!process.isAlive()) {
throw new RuntimeException("Failed to start Python process. ");
}
return process;
} | 3.26 |
flink_PythonEnvUtils_resetCallbackClientExecutorService_rdh | /**
* Reset a daemon thread to the callback client thread pool so that the callback server can be
* terminated when gate way server is shutting down. We need to shut down the none-daemon thread
* firstly, then set a new thread created in a daemon thread to the ExecutorService.
*
* @param gatewayServer
* the gateway which creates the callback server.
*/
private static void resetCallbackClientExecutorService(GatewayServer gatewayServer) throws NoSuchFieldException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
CallbackClient callbackClient = ((CallbackClient) (gatewayServer.getCallbackClient()));
// The Java API of py4j does not provide approach to set "daemonize_connections" parameter.
// Use reflect to daemonize the connection thread.
Field executor = CallbackClient.class.getDeclaredField("executor");
executor.setAccessible(true);
((ScheduledExecutorService) (executor.get(callbackClient))).shutdown();
executor.set(callbackClient, Executors.newScheduledThreadPool(1, Thread::new));
Method setupCleaner = CallbackClient.class.getDeclaredMethod("setupCleaner");
setupCleaner.setAccessible(true);
setupCleaner.invoke(callbackClient);
} | 3.26 |
flink_PythonEnvUtils_resetCallbackClient_rdh | /**
* Reset the callback client of gatewayServer with the given callbackListeningAddress and
* callbackListeningPort after the callback server started.
*
* @param callbackServerListeningAddress
* the listening address of the callback server.
* @param callbackServerListeningPort
* the listening port of the callback server.
*/
public static void resetCallbackClient(GatewayServer gatewayServer, String callbackServerListeningAddress, int callbackServerListeningPort) throws UnknownHostException, InvocationTargetException, NoSuchMethodException, IllegalAccessException, NoSuchFieldException {
gatewayServer.resetCallbackClient(InetAddress.getByName(callbackServerListeningAddress), callbackServerListeningPort);
resetCallbackClientExecutorService(gatewayServer);
} | 3.26 |
flink_CheckpointStatsTracker_createSnapshot_rdh | /**
* Creates a new snapshot of the available stats.
*
* @return The latest statistics snapshot.
*/
public CheckpointStatsSnapshot createSnapshot() {
CheckpointStatsSnapshot snapshot = latestSnapshot;
// Only create a new snapshot if dirty and no update in progress,
// because we don't want to block the coordinator.
if (dirty && statsReadWriteLock.tryLock()) {
try {
// Create a new snapshot
snapshot = new CheckpointStatsSnapshot(counts.createSnapshot(), summary.createSnapshot(), history.createSnapshot(), f0);
latestSnapshot = snapshot;
dirty = false;
} finally {
statsReadWriteLock.unlock();
}
}
return snapshot;
} | 3.26 |
flink_CheckpointStatsTracker_reportRestoredCheckpoint_rdh | /**
* Callback when a checkpoint is restored.
*
* @param restored
* The restored checkpoint stats.
*/
void reportRestoredCheckpoint(RestoredCheckpointStats restored) {
checkNotNull(restored, "Restored checkpoint");
statsReadWriteLock.lock();
try {
counts.incrementRestoredCheckpoints();
f0 = restored;
dirty = true;
} finally {
statsReadWriteLock.unlock();
}
} | 3.26 |
flink_CheckpointStatsTracker_m0_rdh | /**
* Callback when a checkpoint failure without in progress checkpoint. For example, it should be
* callback when triggering checkpoint failure before creating PendingCheckpoint.
*/
public void m0() {
statsReadWriteLock.lock();
try {
counts.incrementFailedCheckpointsWithoutInProgress();
dirty = true;
} finally {
statsReadWriteLock.unlock();
}
} | 3.26 |
flink_CheckpointStatsTracker_reportPendingCheckpoint_rdh | // ------------------------------------------------------------------------
// Callbacks
// ------------------------------------------------------------------------
/**
* Creates a new pending checkpoint tracker.
*
* @param checkpointId
* ID of the checkpoint.
* @param triggerTimestamp
* Trigger timestamp of the checkpoint.
* @param props
* The checkpoint properties.
* @param vertexToDop
* mapping of {@link JobVertexID} to DOP
* @return Tracker for statistics gathering.
*/
PendingCheckpointStats reportPendingCheckpoint(long checkpointId, long triggerTimestamp, CheckpointProperties props, Map<JobVertexID, Integer> vertexToDop) {
PendingCheckpointStats pending = new PendingCheckpointStats(checkpointId, triggerTimestamp, props, vertexToDop);
statsReadWriteLock.lock();
try {
counts.incrementInProgressCheckpoints();
history.addInProgressCheckpoint(pending);
dirty = true;
} finally {
statsReadWriteLock.unlock();
}
return pending;
} | 3.26 |
flink_CheckpointStatsTracker_registerMetrics_rdh | /**
* Register the exposed metrics.
*
* @param metricGroup
* Metric group to use for the metrics.
*/
private void registerMetrics(MetricGroup metricGroup) {
metricGroup.gauge(NUMBER_OF_CHECKPOINTS_METRIC, new CheckpointsCounter());
metricGroup.gauge(NUMBER_OF_IN_PROGRESS_CHECKPOINTS_METRIC, new InProgressCheckpointsCounter());
metricGroup.gauge(NUMBER_OF_COMPLETED_CHECKPOINTS_METRIC, new CompletedCheckpointsCounter());
metricGroup.gauge(NUMBER_OF_FAILED_CHECKPOINTS_METRIC, new FailedCheckpointsCounter());
metricGroup.gauge(LATEST_RESTORED_CHECKPOINT_TIMESTAMP_METRIC, new LatestRestoredCheckpointTimestampGauge());
metricGroup.gauge(LATEST_COMPLETED_CHECKPOINT_SIZE_METRIC, new LatestCompletedCheckpointSizeGauge());
metricGroup.gauge(LATEST_COMPLETED_CHECKPOINT_FULL_SIZE_METRIC, new LatestCompletedCheckpointFullSizeGauge());
metricGroup.gauge(LATEST_COMPLETED_CHECKPOINT_DURATION_METRIC, new LatestCompletedCheckpointDurationGauge());
metricGroup.gauge(LATEST_COMPLETED_CHECKPOINT_PROCESSED_DATA_METRIC, new LatestCompletedCheckpointProcessedDataGauge());
metricGroup.gauge(LATEST_COMPLETED_CHECKPOINT_PERSISTED_DATA_METRIC, new LatestCompletedCheckpointPersistedDataGauge());
metricGroup.gauge(f1, new LatestCompletedCheckpointExternalPathGauge());
metricGroup.gauge(LATEST_COMPLETED_CHECKPOINT_ID_METRIC, new LatestCompletedCheckpointIdGauge());
} | 3.26 |
flink_CheckpointStatsTracker_reportCompletedCheckpoint_rdh | /**
* Callback when a checkpoint completes.
*
* @param completed
* The completed checkpoint stats.
*/
void reportCompletedCheckpoint(CompletedCheckpointStats completed) {
statsReadWriteLock.lock();
try {
latestCompletedCheckpoint = completed;
counts.incrementCompletedCheckpoints();
history.replacePendingCheckpointById(completed);
summary.updateSummary(completed);
dirty = true;
logCheckpointStatistics(completed);
} finally {
statsReadWriteLock.unlock();
}
} | 3.26 |
flink_CheckpointStatsTracker_reportFailedCheckpoint_rdh | /**
* Callback when a checkpoint fails.
*
* @param failed
* The failed checkpoint stats.
*/
void reportFailedCheckpoint(FailedCheckpointStats failed) {
statsReadWriteLock.lock();
try {
counts.incrementFailedCheckpoints();
history.replacePendingCheckpointById(failed);
dirty = true;
logCheckpointStatistics(failed);
} finally {
statsReadWriteLock.unlock();
}
} | 3.26 |
flink_UniqueConstraint_asSummaryString_rdh | /**
* Returns constraint's summary. All constraints summary will be formatted as
*
* <pre>
* CONSTRAINT [constraint-name] [constraint-type] ([constraint-definition])
*
* E.g CONSTRAINT pk PRIMARY KEY (f0, f1)
* </pre>
*/
@Override
public final String asSummaryString() {
final String typeString;
switch (m0()) {
case PRIMARY_KEY :
typeString = "PRIMARY KEY";
break;
case UNIQUE_KEY :
typeString = "UNIQUE";
break;
default :
throw new IllegalStateException("Unknown key type: "
+ m0());
}
return String.format("CONSTRAINT %s %s (%s)", getName(), typeString, String.join(", ", columns));
} | 3.26 |
flink_UniqueConstraint_getColumns_rdh | /**
* List of column names for which the primary key was defined.
*/
public List<String> getColumns() {
return columns;
} | 3.26 |
flink_UniqueConstraint_primaryKey_rdh | /**
* Creates a non enforced {@link ConstraintType#PRIMARY_KEY} constraint.
*/
public static UniqueConstraint primaryKey(String name, List<String> columns) {
return new UniqueConstraint(name, false, ConstraintType.PRIMARY_KEY, columns);
} | 3.26 |
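A small sketch combining the three `UniqueConstraint` methods shown above (the package import for `UniqueConstraint` is assumed):

```java
import java.util.Arrays;

public class UniqueConstraintSketch {
    public static void main(String[] args) {
        UniqueConstraint pk = UniqueConstraint.primaryKey("pk", Arrays.asList("f0", "f1"));
        System.out.println(pk.asSummaryString()); // CONSTRAINT pk PRIMARY KEY (f0, f1)
        System.out.println(pk.getColumns());      // [f0, f1]
    }
}
```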
flink_SharedResourceHolder_release_rdh | /**
* Releases an instance of the given resource.
*
* <p>The instance must have been obtained from {@link #get(Resource)}. Otherwise will throw
* IllegalArgumentException.
*
* <p>Caller must not release a reference more than once. It's advisory that you clear the
* reference to the instance with the null returned by this method.
*
* @param resource
* the singleton Resource object that identifies the released static resource
* @param instance
* the released static resource
* @return a null which the caller can use to clear the reference to that instance.
*/
public static <T> T release(final Resource<T> resource, final T instance) {
return holder.releaseInternal(resource, instance);
} | 3.26 |
flink_SharedResourceHolder_m0_rdh | /**
* Visible to unit tests.
*
* @see #get(Resource)
*/
@SuppressWarnings("unchecked")
synchronized <T> T m0(Resource<T> resource) {
Instance instance = instances.get(resource);
if (instance == null) {
instance = new Instance(resource.create());
instances.put(resource, instance);
}
if (instance.destroyTask != null) {
instance.destroyTask.cancel(false);
instance.destroyTask = null;
}
instance.refcount++;
return ((T) (instance.payload));
} | 3.26 |
flink_SharedResourceHolder_get_rdh | /**
* Try to get an existing instance of the given resource. If an instance does not exist, create
* a new one with the given factory.
*
* @param resource
* the singleton object that identifies the requested static resource
*/
public static <T> T get(Resource<T> resource) {
return holder.m0(resource);
} | 3.26 |
flink_SharedResourceHolder_releaseInternal_rdh | /**
* Visible to unit tests.
*/
synchronized <T> T releaseInternal(final Resource<T> resource, final T instance) {
final Instance cached = instances.get(resource);
if (cached == null) {
throw new IllegalArgumentException("No cached instance found for " + resource);
}
Preconditions.checkArgument(instance == cached.payload, "Releasing the wrong instance");
Preconditions.checkState(cached.refcount > 0, "Refcount has already reached zero");
cached.refcount--;
if (cached.refcount == 0) {
Preconditions.checkState(cached.destroyTask == null, "Destroy task already scheduled");
// Schedule a delayed task to destroy the resource.
if (destroyer == null) {
destroyer = destroyerFactory.createScheduledExecutor();
}
cached.destroyTask = destroyer.schedule(new LogExceptionRunnable(new Runnable() {
@Override
public void run() {
synchronized(SharedResourceHolder.this) {
// Refcount may have gone up since the task was
// scheduled. Re-check it.
if (cached.refcount == 0) {
try {
resource.close(instance);
} finally {
instances.remove(resource);
if (instances.isEmpty()) {
destroyer.shutdown();
destroyer = null;
}
}
}
}
}
}), DESTROY_DELAY_SECONDS, TimeUnit.SECONDS);
}
// Always returning null
return null;
} | 3.26 |
flink_TopNBuffer_containsKey_rdh | /**
* Returns {@code true} if the buffer contains a mapping for the specified key.
*
* @param key
* key whose presence in the buffer is to be tested
* @return {@code true} if the buffer contains a mapping for the specified key
*/
public boolean containsKey(RowData key) {
return f0.containsKey(key);
} | 3.26 |
flink_TopNBuffer_entrySet_rdh | /**
* Returns a {@link Set} view of the mappings contained in the buffer.
*/
public Set<Map.Entry<RowData, Collection<RowData>>> entrySet() {
return f0.entrySet();
} | 3.26 |
flink_TopNBuffer_getCurrentTopNum_rdh | /**
* Gets number of total records.
*
* @return the number of total records.
*/
public int getCurrentTopNum() {
return currentTopNum;
} | 3.26 |
flink_TopNBuffer_getElement_rdh | /**
* Gets record which rank is given value.
*
* @param rank
* rank value to search
* @return the record which rank is given value
*/
public RowData getElement(int rank) {
int curRank = 0;
for (Map.Entry<RowData, Collection<RowData>> entry : f0.entrySet()) {
Collection<RowData> collection = entry.getValue();
if ((curRank + collection.size()) >= rank) {
for (RowData elem : collection) {
curRank += 1;
if (curRank == rank) {
return elem;
}
}
} else {
curRank += collection.size();
}
}
return null;
} | 3.26 |
flink_TopNBuffer_lastEntry_rdh | /**
* Returns the last Entry in the buffer. Returns null if the TreeMap is empty.
*/
public Map.Entry<RowData, Collection<RowData>> lastEntry() {
return f0.lastEntry();
} | 3.26 |
flink_TopNBuffer_put_rdh | /**
* Appends a record into the buffer.
*
* @param sortKey
* sort key with which the specified value is to be associated
* @param value
* record which is to be appended
* @return the size of the collection under the sortKey.
*/
public int put(RowData sortKey, RowData value) {
currentTopNum += 1;
// update treeMap
Collection<RowData> collection = f0.get(sortKey);
if (collection == null) {
collection = valueSupplier.get();
f0.put(sortKey, collection);
}
collection.add(value);
return collection.size();
} | 3.26 |
flink_TopNBuffer_removeAll_rdh | /**
* Removes all record list from the buffer under the sortKey.
*
* @param sortKey
* key to remove
*/
public void removeAll(RowData sortKey) {
Collection<RowData> collection = f0.get(sortKey);
if (collection != null) {
currentTopNum -= collection.size();
f0.remove(sortKey);
}
} | 3.26 |
flink_TopNBuffer_removeLast_rdh | /**
* Removes the last record of the last Entry in the buffer.
*
* @return removed record
*/
public RowData removeLast() {
Map.Entry<RowData, Collection<RowData>> last = f0.lastEntry();
RowData lastElement = null;
if (last != null) {
Collection<RowData> collection = last.getValue();
if (collection != null) {
if (collection instanceof List) {
// optimization for List
List<RowData> list = ((List<RowData>) (collection));
if (!list.isEmpty()) {
                    lastElement = list.remove(list.size() - 1);
currentTopNum -= 1;
if (list.isEmpty()) {
f0.remove(last.getKey());
}
}
} else {
lastElement = m0(collection);
                if (lastElement != null) {
if (collection.remove(lastElement)) {
currentTopNum -= 1;
}
if (collection.size() == 0) {
f0.remove(last.getKey());
}
}
}
}
}
return lastElement;
} | 3.26 |
flink_TopNBuffer_getSortKeyComparator_rdh | /**
* Gets sort key comparator used by buffer.
*
* @return sort key comparator used by buffer
*/
public Comparator<RowData> getSortKeyComparator() {
return sortKeyComparator;
} | 3.26 |
flink_TopNBuffer_checkSortKeyInBufferRange_rdh | /**
* Checks whether the record should be put into the buffer.
*
* @param sortKey
* sortKey to test
* @param topNum
* buffer to add
* @return true if the record should be put into the buffer.
*/
public boolean checkSortKeyInBufferRange(RowData sortKey, long topNum) {
Comparator<RowData> comparator = getSortKeyComparator();
Map.Entry<RowData, Collection<RowData>> worstEntry = lastEntry();
if (worstEntry == null) {
// return true if the buffer is empty.
return true;
} else {
RowData worstKey = worstEntry.getKey();
int compare = comparator.compare(sortKey, worstKey);
        if (compare < 0) {
            return true;
        } else {
            return getCurrentTopNum() < topNum;
}
}
} | 3.26 |
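A sketch of the typical guard built from this check, assuming a buffer kept to at most topN records and a comparator that orders better sort keys first; buffer, sortKey, record, and topN are placeholders.

// Only buffer the record if it can still belong to the top N, then trim the worst record.
if (buffer.checkSortKeyInBufferRange(sortKey, topN)) {
    buffer.put(sortKey, record);
    if (buffer.getCurrentTopNum() > topN) {
        buffer.removeLast(); // evicts the last record under the worst sort key
    }
}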
flink_TopNBuffer_putAll_rdh | /**
 * Puts a record list into the buffer under the sortKey. Note: if the buffer already contains
 * the sortKey, putAll will overwrite the previous value.
*
* @param sortKey
* sort key with which the specified values are to be associated
* @param values
* record lists to be associated with the specified key
*/
public void putAll(RowData sortKey, Collection<RowData> values) {
Collection<RowData> oldValues = f0.get(sortKey);
if (oldValues != null) {
currentTopNum -= oldValues.size();
}
    f0.put(sortKey, values);
    currentTopNum += values.size();
} | 3.26 |
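A small sketch of the overwrite semantics noted above; the buffer and rows are placeholders. Passing a mutable collection matters because the buffer stores the collection by reference and may mutate it later (for example in removeLast).

buffer.put(sortKey, rowA);            // one record under sortKey
List<RowData> replacement = new ArrayList<>();
replacement.add(rowB);
replacement.add(rowC);
buffer.putAll(sortKey, replacement);  // rowA is dropped; getCurrentTopNum() now counts rowB and rowC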
flink_TopNBuffer_get_rdh | /**
* Gets the record list from the buffer under the sortKey.
*
* @param sortKey
* key to get
* @return the record list from the buffer under the sortKey
*/
public Collection<RowData> get(RowData sortKey) {
return f0.get(sortKey);
} | 3.26 |
flink_EventTimeTriggers_withEarlyFirings_rdh | /**
 * Creates a new {@code Trigger} like this one, except that it fires repeatedly whenever the
* given {@code Trigger} fires before the watermark has passed the end of the window.
*/
public AfterEndOfWindowNoLate<W> withEarlyFirings(Trigger<W> earlyFirings) {
checkNotNull(earlyFirings);
return new AfterEndOfWindowNoLate<>(earlyFirings);
} | 3.26 |
flink_EventTimeTriggers_afterEndOfWindow_rdh | /**
* Creates a trigger that fires when the watermark passes the end of the window.
*/
public static <W extends Window> AfterEndOfWindow<W> afterEndOfWindow() {
return new AfterEndOfWindow<>();
} | 3.26 |
flink_EventTimeTriggers_withLateFirings_rdh | /**
 * Creates a new {@code Trigger} like this one, except that it fires repeatedly whenever the
* given {@code Trigger} fires after the watermark has passed the end of the window.
*/
public Trigger<W> withLateFirings(Trigger<W> lateFirings) {
checkNotNull(lateFirings);
if (lateFirings instanceof ElementTriggers.EveryElement) {
// every-element late firing can be ignored
return this;
} else {
return new AfterEndOfWindowEarlyAndLate<>(earlyTrigger, lateFirings);
}
} | 3.26 |
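A sketch of composing the factory methods above into one trigger; the ElementTriggers.count/every companion factories and the TimeWindow type are assumptions used only for illustration.

// Fire when the watermark passes the end of the window, with assumed early firings every
// 100 elements; the every-element late firing is optimized away per withLateFirings above.
Trigger<TimeWindow> trigger =
        EventTimeTriggers.<TimeWindow>afterEndOfWindow()
                .withEarlyFirings(ElementTriggers.count(100))
                .withLateFirings(ElementTriggers.every());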
flink_Expander_expanded_rdh | /**
* Expands identifiers in a given SQL string, returning a {@link Expanded}.
*/
public Expanded expanded(String ori) {
final Map<SqlParserPos, SqlIdentifier> identifiers = new HashMap<>();
final Map<String, SqlIdentifier> funcNameToId = new HashMap<>();
    final SqlNode oriNode = planner.parser().parse(ori);
    // parse again because validation is stateful, that means the node tree was probably mutated.
final SqlNode validated = planner.validate(planner.parser().parse(ori));
validated.accept(new SqlBasicVisitor<Void>() {
@Override
public Void visit(SqlCall call) {
SqlOperator operator = call.getOperator();
                if (operator instanceof BridgingSqlFunction) {
                    final SqlIdentifier functionID = ((BridgingSqlFunction) operator).getSqlIdentifier();
                    if (!functionID.isSimple()) {
                        funcNameToId.put(Util.last(functionID.names), functionID);
                    }
}
return super.visit(call);
}
@Override
public Void visit(SqlIdentifier identifier) {
                // See SqlUtil#deriveAliasFromOrdinal: there is no good way to distinguish
                // system aliases (EXPR${number}) from user-defined ones, so we stop
                // expanding all of them.
if (!identifier.names.get(0).startsWith("EXPR$")) {
identifiers.putIfAbsent(identifier.getParserPosition(), identifier);
}
return null;
}
});
return new Expanded(oriNode, identifiers, funcNameToId);
} | 3.26 |
flink_Expander_substitute_rdh | /**
* Returns the SQL string with identifiers replaced according to the given unparse function.
*/
public String substitute(Function<SqlNode, String> fn) {
final SqlShuttle shuttle = new SqlShuttle() {
@Override
public SqlNode visit(SqlCall call) {
SqlOperator operator = call.getOperator();
if (operator instanceof SqlUnresolvedFunction) {
final SqlUnresolvedFunction unresolvedFunction = ((SqlUnresolvedFunction) (operator));
                    final SqlIdentifier functionID = unresolvedFunction.getSqlIdentifier();
                    if (functionID.isSimple() && funcNameToId.containsKey(functionID.getSimple())) {
SqlUnresolvedFunction newFunc = new SqlUnresolvedFunction(funcNameToId.get(functionID.getSimple()), unresolvedFunction.getReturnTypeInference(), unresolvedFunction.getOperandTypeInference(), unresolvedFunction.getOperandTypeChecker(), unresolvedFunction.getParamTypes(), unresolvedFunction.getFunctionType());
return newFunc.createCall(call.getFunctionQuantifier(), call.getParserPosition(), call.getOperandList().toArray(new SqlNode[0]));
}
}
return super.visit(call);
}
@Override
            public SqlNode visit(SqlIdentifier id) {
if (id.isStar()) {
return id;
}
final SqlIdentifier toReplace = identifiersMap.get(id.getParserPosition());
                if ((toReplace == null) || (id.names.size() >= toReplace.names.size())) {
                    return id;
                }
return toReplace;
}
};
final SqlNode substituted = this.oriNode.accept(shuttle);
        return fn.apply(substituted);
} | 3.26 |
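A hedged sketch of how expanded and substitute combine to produce SQL with fully-qualified identifiers; the planner instance, the example query, and the Calcite dialect used for unparsing are assumptions, and Expanded is taken to be the nested result type returned by expanded.

// `flinkPlanner` is an assumed FlinkPlannerImpl; the unparse function is illustrative.
Expander.Expanded expanded =
        Expander.create(flinkPlanner).expanded("SELECT a, my_udf(b) FROM t");
String qualifiedSql =
        expanded.substitute(node -> node.toSqlString(AnsiSqlDialect.DEFAULT).getSql());
// `qualifiedSql` now references `t` and `my_udf` by their full catalog paths.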
flink_Expander_create_rdh | /**
 * Creates an Expander.
*/
public static Expander create(FlinkPlannerImpl planner) {
return new Expander(planner);
} | 3.26 |
flink_CatalogBaseTable_getUnresolvedSchema_rdh | /**
* Returns the schema of the table or view.
*
* <p>The schema can reference objects from other catalogs and will be resolved and validated by
* the framework when accessing the table or view.
*
* @see ResolvedCatalogTable
* @see ResolvedCatalogView
*/
default Schema getUnresolvedSchema() {
final TableSchema oldSchema = getSchema();
if (oldSchema == null) {
throw new UnsupportedOperationException("A CatalogBaseTable must implement getUnresolvedSchema().");
}
return oldSchema.toSchema();
} | 3.26 |
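A sketch of the intended usage, assuming the CatalogTable.of factory and the Schema builder: new implementations supply an unresolved Schema directly and never go through the deprecated TableSchema path.

// Assumed factory and builder; the framework resolves this schema when the table is accessed.
Schema schema = Schema.newBuilder()
        .column("id", DataTypes.BIGINT())
        .column("name", DataTypes.STRING())
        .build();
CatalogTable table =
        CatalogTable.of(schema, "example table", Collections.emptyList(), Collections.emptyMap());
Schema unresolved = table.getUnresolvedSchema(); // returns the schema supplied above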
flink_AvailabilityProvider_resetUnavailable_rdh | /**
 * Resets the current state to unavailable if it is currently available.
*/
public void resetUnavailable() {
if (isAvailable()) {
availableFuture = new CompletableFuture<>();
}
} | 3.26 |
flink_AvailabilityProvider_resetAvailable_rdh | /**
* Resets the constant completed {@link #AVAILABLE} as the current state.
*/
public void resetAvailable() {
availableFuture = AVAILABLE;
} | 3.26 |
flink_AvailabilityProvider_isAvailable_rdh | /**
 * To avoid the volatile access in {@link CompletableFuture#isDone()} on a best-effort basis, we
 * first check the condition <code>future == AVAILABLE</code>, which can give a performance
 * benefit in hot loops.
 *
 * <p>It is always safe to use this method in performance-insensitive scenarios to get the
 * precise state.
*
* @return true if this instance is available for further processing.
*/
default boolean isAvailable() {
CompletableFuture<?> future = getAvailableFuture();
return (future == AVAILABLE) || future.isDone();
} | 3.26 |
flink_AvailabilityProvider_isApproximatelyAvailable_rdh | /**
* Checks whether this instance is available only via constant {@link #AVAILABLE} to avoid
* performance concern caused by volatile access in {@link CompletableFuture#isDone()}. So it is
* mainly used in the performance sensitive scenarios which do not always need the precise
* state.
*
* <p>This method is still safe to get the precise state if {@link #getAvailableFuture()} was
* touched via (.get(), .wait(), .isDone(), ...) before, which also has a "happen-before"
* relationship with this call.
*
* @return true if this instance is available for further processing.
 */
default boolean isApproximatelyAvailable() {
return getAvailableFuture() == AVAILABLE;
} | 3.26 |
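A sketch contrasting the two checks on an assumed provider inside a hot loop: the approximate check skips the volatile read, and the precise check plus the future itself are consulted only on the slow path. `running`, `provider`, and `processRecord` are placeholders.

while (running) {
    if (provider.isApproximatelyAvailable() || provider.isAvailable()) {
        processRecord();                      // hot path, no volatile read in the common case
    } else {
        provider.getAvailableFuture().join(); // block until the provider becomes available again
    }
}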
flink_AvailabilityProvider_getAvailableFuture_rdh | /**
*
* @return a future that is completed if the respective provider is available.
*/
@Override
public CompletableFuture<?> getAvailableFuture() {
return availableFuture;
} | 3.26 |
flink_AvailabilityProvider_getUnavailableToResetAvailable_rdh | /**
* Returns the previously not completed future and resets the constant completed {@link #AVAILABLE} as the current state.
 */
public CompletableFuture<?> getUnavailableToResetAvailable() {
CompletableFuture<?> toNotify = availableFuture;
availableFuture = AVAILABLE;
return toNotify;
} | 3.26 |
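A sketch of the usual notification pattern built on this method (the helper instance is assumed): flip the state back to available first, then complete the old future so that anyone woken up already observes the new state.

CompletableFuture<?> toNotify = helper.getUnavailableToResetAvailable();
toNotify.complete(null); // wake up callers that were waiting on the previously unavailable future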
flink_BinaryHashTable_m0_rdh | // ================================ internal method ===========================================
/**
* Determines the number of buffers to be used for asynchronous write behind. It is currently
* computed as the logarithm of the number of buffers to the base 4, rounded up, minus 2. The
* upper limit for the number of write behind buffers is however set to six.
*
* @param numBuffers
* The number of available buffers.
 * @return The number of write behind buffers to use.
*/
@VisibleForTesting
static int m0(int numBuffers) {
    int numIOBufs = (int) ((Math.log(numBuffers) / Math.log(4)) - 1.5);
    return numIOBufs > 6 ? 6 : numIOBufs;
} | 3.26 |
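A few worked values of the formula above as a sanity check; they follow directly from the expression in the method.

// floor(log4(numBuffers) - 1.5), capped at 6:
for (int n : new int[] {64, 1024, 100_000}) {
    int bufs = (int) ((Math.log(n) / Math.log(4)) - 1.5);
    System.out.println(n + " buffers -> " + Math.min(bufs, 6) + " write behind buffers");
}
// prints: 64 -> 1, 1024 -> 3, 100000 -> 6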