name | code_snippet | score |
---|---|---|
flink_GlobalConfiguration_loadYAMLResource_rdh | /**
* Loads a YAML-file of key-value pairs.
*
* <p>Colon and whitespace ": " separate key and value (one per line). The hash tag "#" starts a
* single-line comment.
*
* <p>Example:
*
* <pre>
* jobmanager.rpc.address: localhost # network address for communication with the job manager
* jobmanager.rpc.port : 6123 # network port to connect to for communication with the job manager
* taskmanager.rpc.port : 6122 # network port the task manager expects incoming IPC connections
* </pre>
*
 * <p>This does not span the whole YAML specification, but only the *syntax* of simple YAML
 * key-value pairs (see issue #113 on GitHub). If, at any point, there is a need to go beyond
 * the simple key-value pair syntax, compatibility will allow us to introduce a YAML parser
 * library.
*
* @param file
* the YAML file to read from
* @see <a href="http://www.yaml.org/spec/1.2/spec.html">YAML 1.2 specification</a>
*/
private static Configuration loadYAMLResource(File file) {
final Configuration config = new Configuration();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(file)))) {
String line;
int lineNo = 0;
        while ((line = reader.readLine()) != null) {
            lineNo++;
            // 1. check for comments
            String[] comments = line.split("#", 2);
            String conf = comments[0].trim();
            // 2. get key and value
            if (conf.length() > 0) {
                String[] kv = conf.split(": ", 2);
                // skip line with no valid key-value pair
                if (kv.length == 1) {
                    LOG.warn(
                            "Error while trying to split key and value in configuration file "
                                    + file + ":" + lineNo
                                    + ": Line is not a key-value pair (missing space after ':'?)");
                    continue;
                }
                String key = kv[0].trim();
                String value = kv[1].trim();
                // sanity check
                if (key.length() == 0 || value.length() == 0) {
                    LOG.warn(
                            "Error after splitting key and value in configuration file "
                                    + file + ":" + lineNo + ": Key or value was empty");
                    continue;
                }
                config.setString(key, value);
            }
        }
} catch (IOException e) {
throw new RuntimeException("Error parsing YAML configuration.", e);
}
return config;
} | 3.26 |
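As a quick illustration of the per-line parsing above, here is a minimal, self-contained sketch (not part of Flink) that strips the `#` comment and splits on the first `": "`; the sample line is taken from the javadoc example.

```java
public class YamlLineDemo {
    public static void main(String[] args) {
        String line = "jobmanager.rpc.port : 6123 # network port";
        String conf = line.split("#", 2)[0].trim();   // "jobmanager.rpc.port : 6123"
        String[] kv = conf.split(": ", 2);            // split on the first ": " only
        System.out.println(kv[0].trim() + " -> " + kv[1].trim()); // jobmanager.rpc.port -> 6123
    }
}
```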
flink_SharedObjectsExtension_create_rdh | /**
* Creates a new instance. Usually that should be done inside a JUnit test class as an
* instance-field annotated with {@link org.junit.Rule}.
*/
public static SharedObjectsExtension create() {
return new SharedObjectsExtension(LAST_ID.getAndIncrement());
} | 3.26 |
flink_SharedObjectsExtension_add_rdh | /**
* Adds a new object to this {@code SharedObjects}. Although not necessary, it is recommended to
* only access the object through the returned {@link SharedReference}.
*/
public <T> SharedReference<T> add(T object) {
        SharedReference<T> tag = new SharedObjectsExtension.DefaultTag<>(id, objects.size());
        objects.put(tag, object);
        return tag;
} | 3.26 |
flink_PrioritizedDeque_clear_rdh | /**
* Removes all priority and non-priority elements.
*/
public void clear() {
deque.clear();
numPriorityElements = 0;
} | 3.26 |
flink_PrioritizedDeque_poll_rdh | /**
* Polls the first priority element or non-priority element if the former does not exist.
*
* @return the first element or null.
*/
@Nullable
public T poll() {
final T polled = deque.poll();
if ((polled != null) && (numPriorityElements > 0)) {
numPriorityElements--;
}
return polled;
} | 3.26 |
flink_PrioritizedDeque_peekLast_rdh | /**
* Returns the last non-priority element or priority element if the former does not exist.
*
* @return the last element or null.
*/
@Nullable
public T peekLast() {
return deque.peekLast();
} | 3.26 |
flink_PrioritizedDeque_m0_rdh | /**
* Returns whether the given element is contained in this list. Test is performed by identity.
*/
public boolean m0(T element) {
if (deque.isEmpty()) {
return false;
}
final Iterator<T> iterator = deque.iterator();
while (iterator.hasNext()) {
if (iterator.next() == element) {
return true;
}
}
return false;
} | 3.26 |
flink_PrioritizedDeque_addPriorityElement_rdh | /**
* Adds a priority element to this deque, such that it will be polled after all existing
* priority elements but before any non-priority element.
*
* @param element
* the element to add
*/
public void addPriorityElement(T element) {
        // priority elements are rather rare and short-lived, so most of the time there are none
if (numPriorityElements == 0) {
deque.addFirst(element);
} else if (numPriorityElements == deque.size()) {
// no non-priority elements
deque.add(element);
} else {
            // remove all priority elements
            final ArrayDeque<T> priorPriority = new ArrayDeque<>(numPriorityElements);
            for (int index = 0; index < numPriorityElements; index++) {
                priorPriority.addFirst(deque.poll());
            }
deque.addFirst(element);
// read them before the newly added element
for (final T priorityEvent : priorPriority) {
deque.addFirst(priorityEvent);
}
}
numPriorityElements++;
} | 3.26 |
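A minimal, self-contained sketch (using a plain ArrayDeque rather than Flink's PrioritizedDeque) of the ordering that addPriorityElement maintains: priority elements sit at the head in FIFO order, followed by the non-priority elements.

```java
import java.util.ArrayDeque;

public class PriorityOrderDemo {
    public static void main(String[] args) {
        ArrayDeque<String> deque = new ArrayDeque<>();
        deque.add("a");          // non-priority: appended at the tail
        deque.add("b");
        deque.addFirst("P1");    // first priority element: simply prepended

        // second priority element: rotate the existing priority section to the front,
        // as the method above does, so the result is [P1, P2, a, b]
        int numPriorityElements = 1;
        ArrayDeque<String> priorPriority = new ArrayDeque<>(numPriorityElements);
        for (int i = 0; i < numPriorityElements; i++) {
            priorPriority.addFirst(deque.poll());
        }
        deque.addFirst("P2");
        for (String p : priorPriority) {
            deque.addFirst(p);
        }
        System.out.println(deque); // [P1, P2, a, b]
    }
}
```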
flink_PrioritizedDeque_size_rdh | /**
* Returns the number of priority and non-priority elements.
*/
public int size() {
        return deque.size();
} | 3.26 |
flink_PrioritizedDeque_getAndRemove_rdh | /**
* Find first element matching the {@link Predicate}, remove it from the {@link PrioritizedDeque} and return it.
*
* @return removed element
*/
public T getAndRemove(Predicate<T> preCondition) {
Iterator<T> iterator = deque.iterator();
for (int i = 0; iterator.hasNext(); i++) {
T next = iterator.next();
if (preCondition.test(next)) {
if (i < numPriorityElements) {
numPriorityElements--;
}
iterator.remove();
return next;
}
}
throw new NoSuchElementException();
} | 3.26 |
flink_PrioritizedDeque_peek_rdh | /**
* Returns the first priority element or non-priority element if the former does not exist.
*
* @return the first element or null.
*/
@Nullable
public T peek() {
return deque.peek();
} | 3.26 |
flink_PrioritizedDeque_prioritize_rdh | /**
* Prioritizes an already existing element. Note that this method assumes identity.
*
 * <p>{@implNote Since, in the general case, this method removes the element and reinserts it in
 * a priority position, some optimizations for special cases are used.}
*
* @param element
* the element to prioritize.
*/
public void prioritize(T element) {
final Iterator<T> iterator = deque.iterator();
// Already prioritized? Then, do not reorder elements.
for (int i = 0; (i < numPriorityElements) && iterator.hasNext(); i++) {
            if (iterator.next() == element) {
                return;
            }
}
// If the next non-priority element is the given element, we can simply include it in the
// priority section
if (iterator.hasNext() && (iterator.next() == element)) {
numPriorityElements++;
return;
}
// Remove the given element.
while (iterator.hasNext()) {
if (iterator.next() == element) {
iterator.remove();
break;
}
}
addPriorityElement(element);
} | 3.26 |
flink_PrioritizedDeque_isEmpty_rdh | /**
* Returns true if there are no elements.
*/
public boolean isEmpty() {
return deque.isEmpty();
} | 3.26 |
flink_PrioritizedDeque_getNumPriorityElements_rdh | /**
* Returns the current number of priority elements ([0; {@link #size()}]).
*/
public int getNumPriorityElements() {
        return numPriorityElements;
    } | 3.26 |
flink_PrioritizedDeque_asUnmodifiableCollection_rdh | /**
* Returns an unmodifiable collection view.
*/
public Collection<T> asUnmodifiableCollection() {
return Collections.unmodifiableCollection(deque);
} | 3.26 |
flink_PrioritizedDeque_iterator_rdh | /**
*
* @return read-only iterator
*/
public Iterator<T> iterator() {
return Collections.unmodifiableCollection(deque).iterator();
} | 3.26 |
flink_PrioritizedDeque_add_rdh | /**
* Convenience method for adding an element with optional priority and prior removal.
*
* @param element
* the element to add
* @param priority
* flag indicating if it's a priority or non-priority element
* @param prioritize
* flag that hints that the element is already in this deque, potentially as
* non-priority element.
*/
public void add(T element, boolean priority, boolean prioritize) {
if (!priority) {
add(element);
} else if (prioritize) {
prioritize(element);
} else {
addPriorityElement(element);
}
} | 3.26 |
flink_PrioritizedDeque_containsPriorityElement_rdh | /**
* Returns whether the given element is a known priority element. Test is performed by identity.
*/
public boolean containsPriorityElement(T element) {
if (numPriorityElements == 0) {
return false;
        }
        final Iterator<T> iterator = deque.iterator();
for (int i = 0; (i < numPriorityElements) && iterator.hasNext(); i++) {
if (iterator.next() == element) {
return true;
}
}
return false;
} | 3.26 |
flink_FlinkContainers_m1_rdh | /**
* Gets JobManager's port on the host machine.
*/
public int m1() {
return jobManager.getMappedPort(this.conf.get(RestOptions.PORT));
} | 3.26 |
flink_FlinkContainers_getJobManager_rdh | /**
* Gets JobManager container.
*/
public GenericContainer<?> getJobManager() {
return this.jobManager;
} | 3.26 |
flink_FlinkContainers_start_rdh | /**
* Starts all containers.
*/
public void start() throws Exception {
if (haService != null) {
LOG.debug("Starting HA service container");
this.haService.start();
}
LOG.debug("Starting JobManager container");
this.jobManager.start();
waitUntilJobManagerRESTReachable(jobManager);
LOG.debug("Starting TaskManager containers");
this.taskManagers.parallelStream().forEach(GenericContainer::start);
LOG.debug("Creating REST cluster client");
this.restClusterClient = createClusterClient();
waitUntilAllTaskManagerConnected();
isStarted = true;
} | 3.26 |
flink_FlinkContainers_withFlinkContainersSettings_rdh | /**
* Allows to optionally provide Flink containers settings. {@link FlinkContainersSettings}
* based on defaults will be used otherwise.
*
* @param flinkContainersSettings
* The Flink containers settings.
* @return A reference to this Builder.
*/
public Builder withFlinkContainersSettings(FlinkContainersSettings flinkContainersSettings) {
this.flinkContainersSettings = flinkContainersSettings;
return this;
} | 3.26 |
flink_FlinkContainers_beforeAll_rdh | // ------------------------ JUnit 5 lifecycle management ------------------------
@Override
public void beforeAll(ExtensionContext context) throws Exception {
this.start();
} | 3.26 |
flink_FlinkContainers_createClusterClient_rdh | // ----------------------------- Helper functions --------------------------------
private RestClusterClient<StandaloneClusterId> createClusterClient() throws Exception {
        checkState(jobManager.isRunning(), "JobManager should be running for creating a REST client");
        // Close potentially existing REST cluster client
if (restClusterClient != null) {
restClusterClient.close();
}
        final Configuration clientConfiguration = new Configuration();
        clientConfiguration.set(RestOptions.ADDRESS, getJobManagerHost());
clientConfiguration.set(RestOptions.PORT, jobManager.getMappedPort(conf.get(RestOptions.PORT)));
return new RestClusterClient<>(clientConfiguration, StandaloneClusterId.getInstance());
} | 3.26 |
flink_FlinkContainers_m0_rdh | /**
* Stops all containers.
*/
public void m0() {
isStarted = false;
if (restClusterClient != null) {
restClusterClient.close();
}
this.taskManagers.forEach(GenericContainer::stop);
deleteJobManagerTemporaryFiles();
        this.jobManager.stop();
        if (this.haService != null) {
this.haService.stop();
}
} | 3.26 |
flink_FlinkContainers_submitSQLJob_rdh | /**
* Submits an SQL job to the running cluster.
*
* <p><b>NOTE:</b> You should not use {@code '\t'}.
*/
public void submitSQLJob(SQLJobSubmission job) throws IOException, InterruptedException {
checkState(isStarted(), "SQL job submission is only applicable for a running cluster");
// Create SQL script and copy it to JobManager
final List<String> commands = new ArrayList<>();
Path script = Files.createTempDirectory("sql-script").resolve("script");
Files.write(script, job.getSqlLines());
jobManager.copyFileToContainer(MountableFile.forHostPath(script),
"/tmp/script.sql");
// Construct SQL client command
commands.add("cat /tmp/script.sql | ");
commands.add("bin/sql-client.sh");
for (String jar : job.getJars()) {
commands.add("--jar");
Path path = Paths.get(jar);
String containerPath = "/tmp/" + path.getFileName();
jobManager.copyFileToContainer(MountableFile.forHostPath(path), containerPath);
commands.add(containerPath);
}
// Execute command in JobManager
Container.ExecResult execResult = jobManager.execInContainer("bash", "-c",
String.join(" ", commands));
LOG.info(execResult.getStdout());
LOG.error(execResult.getStderr());
if (execResult.getExitCode() != 0) {
throw new AssertionError("Failed when submitting the SQL job.");
}
} | 3.26 |
flink_FlinkContainers_getRestClusterClient_rdh | /**
* Gets REST client connected to JobManager.
*/
@Nullable
    public RestClusterClient<StandaloneClusterId> getRestClusterClient() {
return this.restClusterClient;
} | 3.26 |
flink_FlinkContainers_withTestcontainersSettings_rdh | /**
* Allows to optionally provide Testcontainers settings. {@link TestcontainersSettings}
* based on defaults will be used otherwise.
*
* @param testcontainersSettings
* The Testcontainers settings.
* @return A reference to this Builder.
*/
public Builder withTestcontainersSettings(TestcontainersSettings testcontainersSettings) {
this.testcontainersSettings = testcontainersSettings;
        return this;
    }
/**
* Returns {@code FlinkContainers} built from the provided settings.
*
* @return {@code FlinkContainers} built with parameters of this {@code FlinkContainers.Builder} | 3.26 |
flink_FlinkContainers_restartTaskManager_rdh | /**
* Restarts all TaskManager containers.
*/
    public void restartTaskManager(RunnableWithException afterFailAction) throws Exception {
taskManagers.forEach(GenericContainer::stop);
afterFailAction.run();
taskManagers.forEach(GenericContainer::start);
} | 3.26 |
flink_FlinkContainers_submitJob_rdh | /**
* Submits the given job to the cluster.
*
* @param job
* job to submit
*/
public JobID submitJob(JobSubmission job) throws IOException, InterruptedException {
        final List<String> commands = new ArrayList<>();
        commands.add("bin/flink");
commands.add("run");
if (job.isDetached()) {
commands.add("-d");
}
if (job.getParallelism() > 0) {
commands.add("-p");
commands.add(String.valueOf(job.getParallelism()));
}
job.getMainClass().ifPresent(mainClass -> {
commands.add("--class");
commands.add(mainClass);
});
final Path jobJar = job.getJar();
final String containerPath = "/tmp/" + jobJar.getFileName();
commands.add(containerPath);
jobManager.copyFileToContainer(MountableFile.forHostPath(jobJar.toAbsolutePath()), containerPath);
commands.addAll(job.getArguments());
LOG.info("Running {}.",
commands.stream().collect(Collectors.joining(" ")));
// Execute command in JobManager
Container.ExecResult execResult = jobManager.execInContainer("bash", "-c", String.join(" ", commands));
final Pattern pattern = (job.isDetached()) ?
Pattern.compile("Job has been submitted with JobID (.*)") : Pattern.compile("Job with JobID (.*) has finished.");
final String stdout = execResult.getStdout();
        LOG.info(stdout);
        LOG.error(execResult.getStderr());
final Matcher matcher = pattern.matcher(stdout);
checkState(matcher.find(), "Cannot extract JobID from stdout.");
return JobID.fromHexString(matcher.group(1));
} | 3.26 |
flink_FlinkContainers_getJobManagerHost_rdh | /**
* Gets JobManager's hostname on the host machine.
*/
    public String getJobManagerHost() {
        return jobManager.getHost();
} | 3.26 |
flink_FlinkContainers_restartJobManager_rdh | /**
* Restarts JobManager container.
*
* <p>Note that the REST port will be changed because the new JM container will be mapped to
* another random port. Please make sure to get the REST cluster client again after this method
* is invoked.
*/
public void restartJobManager(RunnableWithException afterFailAction) throws Exception {
if (this.haService == null) {
LOG.warn("Restarting JobManager without HA service. This might drop all your running jobs");
}
jobManager.stop();
afterFailAction.run();
jobManager.start();
// Recreate client because JobManager REST port might have been changed in new container
waitUntilJobManagerRESTReachable(jobManager);
this.restClusterClient = createClusterClient();
waitUntilAllTaskManagerConnected();
} | 3.26 |
flink_FlinkContainers_getTaskManagers_rdh | /**
* Gets TaskManager containers.
*/
public List<GenericContainer<?>> getTaskManagers() {
return this.taskManagers;
} | 3.26 |
flink_FlinkContainers_isStarted_rdh | /**
* Gets the running state of the cluster.
     */
    public boolean isStarted() {
return isStarted;
} | 3.26 |
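A hedged usage sketch for a JUnit test, based only on the builder methods shown above; the FlinkContainers.builder() and build() entry points are assumptions drawn from the (truncated) builder javadoc, not verified against a specific Flink version.

```java
// Assumed entry point FlinkContainers.builder(); without explicit settings,
// defaults are assumed to be applied by the builder.
FlinkContainers flink = FlinkContainers.builder().build();
flink.start();                           // starts JobManager and TaskManagers, waits for REST
String host = flink.getJobManagerHost(); // host-side address of the JobManager REST endpoint
// ... submit jobs via submitJob(...) or getRestClusterClient(), then stop the cluster
```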
flink_AsynchronousFileIOChannel_checkErroneous_rdh | /**
* Checks the exception state of this channel. The channel is erroneous, if one of its requests
* could not be processed correctly.
*
* @throws IOException
* Thrown, if the channel is erroneous. The thrown exception contains the
* original exception that defined the erroneous state as its cause.
*/
public final void checkErroneous() throws IOException {
        if (this.exception != null) {
            throw this.exception;
        }
    } | 3.26 |
flink_AsynchronousFileIOChannel_m0_rdh | /**
* Handles a processed <tt>Buffer</tt>. This method is invoked by the asynchronous IO worker
* threads upon completion of the IO request with the provided buffer and/or an exception that
* occurred while processing the request for that buffer.
*
* @param buffer
* The buffer to be processed.
* @param ex
* The exception that occurred in the I/O threads when processing the buffer's
* request.
*/
protected final void m0(T buffer, IOException ex) {
if (buffer == null) {
return;
}
// even if the callbacks throw an error, we need to maintain our bookkeeping
try {
if ((ex != null) && (this.exception == null)) {
this.exception = ex;
this.resultHandler.requestFailed(buffer, ex);
} else {
this.resultHandler.requestSuccessful(buffer);
}
} finally {
NotificationListener listener = null;
// Decrement the number of outstanding requests. If we are currently closing, notify the
// waiters. If there is a listener, notify her as well.
            synchronized (this.closeLock) {
                if (this.requestsNotReturned.decrementAndGet() == 0) {
                    if (this.closed) {
                        this.closeLock.notifyAll();
                    }
                    synchronized (listenerLock) {
                        listener = allRequestsProcessedListener;
                        allRequestsProcessedListener = null;
                    }
                }
            }
            if (listener != null) {
                listener.onNotification();
            }
        }
} | 3.26 |
flink_AsynchronousFileIOChannel_closeAndDelete_rdh | /**
* This method waits for all pending asynchronous requests to return. When the last request has
* returned, the channel is closed and deleted.
*
 * <p>Even if an exception interrupts the closing, such that not all requests are handled, the
* underlying <tt>FileChannel</tt> is closed and deleted.
*
* @throws IOException
* Thrown, if an I/O exception occurred while waiting for the buffers, or if
* the closing was interrupted.
     */
    @Override
public void closeAndDelete() throws IOException {
try {
close();
} finally {
deleteChannel();
}
} | 3.26 |
flink_AsynchronousFileIOChannel_registerAllRequestsProcessedListener_rdh | /**
* Registers a listener to be notified when all outstanding requests have been processed.
*
* <p>New requests can arrive right after the listener got notified. Therefore, it is not safe
* to assume that the number of outstanding requests is still zero after a notification unless
* there was a close right before the listener got called.
*
* <p>Returns <code>true</code>, if the registration was successful. A registration can fail, if
* there are no outstanding requests when trying to register a listener.
*/
    protected boolean registerAllRequestsProcessedListener(NotificationListener listener) throws IOException {
        checkNotNull(listener);
synchronized(listenerLock) {
if (allRequestsProcessedListener == null) {
// There was a race with the processing of the last outstanding request
                if (requestsNotReturned.get() == 0) {
                    return false;
                }
                allRequestsProcessedListener = listener;
                return true;
return true;
}
}
throw new IllegalStateException("Already subscribed.");
} | 3.26 |
flink_AsynchronousFileIOChannel_close_rdh | /**
* Closes the channel and waits until all pending asynchronous requests are processed. The
* underlying <code>FileChannel</code> is closed even if an exception interrupts the closing.
*
* <p><strong>Important:</strong> the {@link #isClosed()} method returns <code>true</code>
* immediately after this method has been called even when there are outstanding requests.
*
* @throws IOException
* Thrown, if an I/O exception occurred while waiting for the buffers, or if
* the closing was interrupted.
*/
@Override
public void close() throws IOException {
// atomically set the close flag
synchronized(this.closeLock) {
if (this.closed) {
return;
}
this.closed = true;
try {
// wait until as many buffers have been returned as were written
// only then is everything guaranteed to be consistent.
while (this.requestsNotReturned.get() > 0) {
try {
// we add a timeout here, because it is not guaranteed that the
// decrementing during buffer return and the check here are deadlock free.
// the deadlock situation is however unlikely and caught by the timeout
this.closeLock.wait(1000);
checkErroneous();
                    } catch (InterruptedException iex) {
throw new IOException("Closing of asynchronous file channel was interrupted.");
}
}
// Additional check because we might have skipped the while loop
checkErroneous();
} finally {
// close the file
if (this.fileChannel.isOpen()) {
this.fileChannel.close();
}
}
}
} | 3.26 |
flink_AsynchronousFileIOChannel_isClosed_rdh | // --------------------------------------------------------------------------------------------
@Override
    public boolean isClosed() {
        return this.closed;
} | 3.26 |
flink_SinkTransformationTranslator_addFailOverRegion_rdh | /**
* Adds a batch exchange that materializes the output first. This is a no-op in STREAMING.
     */
    private <I> DataStream<I> addFailOverRegion(DataStream<I> input) {
return new DataStream<>(executionEnvironment, new PartitionTransformation<>(input.getTransformation(), new ForwardPartitioner<>(),
StreamExchangeMode.BATCH));
} | 3.26 |
flink_SinkTransformationTranslator_adjustTransformations_rdh | /**
* Since user may set specific parallelism on sub topologies, we have to pay attention to
* the priority of parallelism at different levels, i.e. sub topologies customized
* parallelism > sinkTransformation customized parallelism > environment customized
* parallelism. In order to satisfy this rule and keep these customized parallelism values,
* the environment parallelism will be set to be {@link ExecutionConfig#PARALLELISM_DEFAULT}
* before adjusting transformations. SubTransformations, constructed after that, will have
* either the default value or customized value. In this way, any customized value will be
* discriminated from the default value and, for any subTransformation with the default
* parallelism value, we will then be able to let it inherit the parallelism value from the
* previous sinkTransformation. After the adjustment of transformations is closed, the
* environment parallelism will be restored back to its original value to keep the
* customized parallelism value at environment level.
*/
private <I, R> R adjustTransformations(DataStream<I> inputStream, Function<DataStream<I>, R> action, boolean isExpandedTopology, boolean supportsConcurrentExecutionAttempts) {
// Reset the environment parallelism temporarily before adjusting transformations,
// we can therefore be aware of any customized parallelism of the sub topology
// set by users during the adjustment.
executionEnvironment.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT);
int numTransformsBefore = executionEnvironment.getTransformations().size();
R result = action.apply(inputStream);
List<Transformation<?>> transformations = executionEnvironment.getTransformations();
List<Transformation<?>> expandedTransformations = transformations.subList(numTransformsBefore, transformations.size());
final CustomSinkOperatorUidHashes operatorsUidHashes = transformation.getSinkOperatorsUidHashes();
for (Transformation<?> subTransformation : expandedTransformations) {
String subUid = subTransformation.getUid();
if ((isExpandedTopology && (subUid != null)) && (!subUid.isEmpty())) {
                checkState(
                        transformation.getUid() != null && !transformation.getUid().isEmpty(),
                        "Sink " + transformation.getName()
                                + " requires to set a uid since its customized topology"
                                + " has set uid for some operators.");
}
// Set the operator uid hashes to support stateful upgrades without prior uids
setOperatorUidHashIfPossible(subTransformation, WRITER_NAME, operatorsUidHashes.getWriterUidHash());
setOperatorUidHashIfPossible(subTransformation, COMMITTER_NAME, operatorsUidHashes.getCommitterUidHash());
setOperatorUidHashIfPossible(subTransformation, StandardSinkTopologies.GLOBAL_COMMITTER_TRANSFORMATION_NAME, operatorsUidHashes.getGlobalCommitterUidHash());
concatUid(subTransformation, Transformation::getUid, Transformation::setUid, subTransformation.getName());
concatProperty(subTransformation, Transformation::getCoLocationGroupKey, Transformation::setCoLocationGroupKey);
concatProperty(subTransformation, Transformation::getName, Transformation::setName);
concatProperty(subTransformation, Transformation::getDescription, Transformation::setDescription);
// handle coLocationGroupKey.
String coLocationGroupKey = transformation.getCoLocationGroupKey();
if ((coLocationGroupKey != null) && (subTransformation.getCoLocationGroupKey() == null)) {
subTransformation.setCoLocationGroupKey(coLocationGroupKey);
}
Optional<SlotSharingGroup> ssg = transformation.getSlotSharingGroup();
            if (ssg.isPresent() && !subTransformation.getSlotSharingGroup().isPresent()) {
subTransformation.setSlotSharingGroup(ssg.get());
}
// remember that the environment parallelism has been set to be default
// at the beginning. SubTransformations, whose parallelism has been
// customized, will skip this part. The customized parallelism value set by user
// will therefore be kept.
if (subTransformation.getParallelism() == ExecutionConfig.PARALLELISM_DEFAULT) {
// In this case, the subTransformation does not contain any customized
// parallelism value and will therefore inherit the parallelism value
// from the sinkTransformation.
subTransformation.setParallelism(transformation.getParallelism());
}
if ((subTransformation.getMaxParallelism() < 0) && (transformation.getMaxParallelism() > 0)) {
subTransformation.setMaxParallelism(transformation.getMaxParallelism());
}
if (subTransformation instanceof PhysicalTransformation) {
PhysicalTransformation<?> v19 = ((PhysicalTransformation<?>) (subTransformation));
if (transformation.getChainingStrategy() != null) {
v19.setChainingStrategy(transformation.getChainingStrategy());
}
// overrides the supportsConcurrentExecutionAttempts of transformation because
// it's not allowed to specify fine-grained concurrent execution attempts yet
v19.setSupportsConcurrentExecutionAttempts(supportsConcurrentExecutionAttempts);
}
}
// Restore the previous parallelism of the environment before adjusting transformations
if (environmentParallelism.isPresent()) {
executionEnvironment.getConfig().setParallelism(environmentParallelism.get());
} else {
executionEnvironment.getConfig().resetParallelism();
}
return result;
} | 3.26 |
flink_KubernetesCheckpointStoreUtil_checkpointIDToName_rdh | /**
* Convert a checkpoint id into a ConfigMap key.
*
* @param checkpointId
* to convert to the key
* @return key created from the given checkpoint id
*/
@Override
public String checkpointIDToName(long checkpointId) {
return CHECKPOINT_ID_KEY_PREFIX + String.format("%019d", checkpointId);
} | 3.26 |
flink_KubernetesCheckpointStoreUtil_nameToCheckpointID_rdh | /**
* Converts a key in ConfigMap to the checkpoint id.
*
* @param key
* in ConfigMap
* @return Checkpoint id parsed from the key
*/
@Override
public long nameToCheckpointID(String key) {
try {
return Long.parseLong(key.substring(CHECKPOINT_ID_KEY_PREFIX.length()));
} catch (NumberFormatException e) {
LOG.warn("Could not parse checkpoint id from {}. This indicates that the " + "checkpoint id to path conversion has changed.", key);
return INVALID_CHECKPOINT_ID;
}
} | 3.26 |
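A self-contained sketch of the key format the two methods above describe; the concrete value of CHECKPOINT_ID_KEY_PREFIX is assumed here ("checkpointID-") purely for illustration, the real constant lives in Flink.

```java
public class CheckpointKeyDemo {
    // assumed prefix, for demonstration only
    private static final String CHECKPOINT_ID_KEY_PREFIX = "checkpointID-";

    static String checkpointIDToName(long checkpointId) {
        return CHECKPOINT_ID_KEY_PREFIX + String.format("%019d", checkpointId);
    }

    static long nameToCheckpointID(String key) {
        return Long.parseLong(key.substring(CHECKPOINT_ID_KEY_PREFIX.length()));
    }

    public static void main(String[] args) {
        String key = checkpointIDToName(42L);   // checkpointID-0000000000000000042
        System.out.println(key + " -> " + nameToCheckpointID(key)); // round-trips back to 42
    }
}
```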
flink_SolutionSetUpdateBarrierBroker_instance_rdh | /**
*
* @return singleton instance
*/
public static Broker<SolutionSetUpdateBarrier> instance() {
return INSTANCE;
} | 3.26 |
flink_WebMonitorEndpoint_grantLeadership_rdh | // -------------------------------------------------------------------------
// LeaderContender
// -------------------------------------------------------------------------
@Override
public void grantLeadership(final UUID leaderSessionID) {
log.info("{} was granted leadership with leaderSessionID={}", getRestBaseUrl(), leaderSessionID);
leaderElection.confirmLeadership(leaderSessionID, getRestBaseUrl());
} | 3.26 |
flink_HiveParallelismInference_infer_rdh | /**
 * Infers the parallelism from the number of files and the number of splits. If {@link HiveOptions#TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM} is not set, this method does nothing.
*/
HiveParallelismInference infer(SupplierWithException<Integer, IOException> numFiles, SupplierWithException<Integer, IOException> numSplits) {
if (!f0) {
return this;
}
try {
// `createInputSplits` is costly,
// so we try to avoid calling it by first checking the number of files
// which is the lower bound of the number of splits
int lowerBound = logRunningTime("getNumFiles", numFiles);
if (lowerBound >= inferMaxParallelism) {
parallelism = inferMaxParallelism;
return this;
}
int splitNum = logRunningTime("createInputSplits", numSplits);
parallelism = Math.min(splitNum, inferMaxParallelism);
} catch (IOException e) {
            throw new FlinkHiveException(e);
        }
return this;
} | 3.26 |
flink_HiveParallelismInference_limit_rdh | /**
* Apply limit to calculate the parallelism. Here limit is the limit in query <code>
* SELECT * FROM xxx LIMIT [limit]</code>.
*/
    int limit(Long limit) {
if (!f0) {
return parallelism;
}
if (limit != null) {
parallelism = Math.min(parallelism, ((int) (limit / 1000)));
}
// make sure that parallelism is at least 1
return Math.max(1, parallelism);
} | 3.26 |
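A small worked example of the rule in limit(...) above: the query limit is divided by 1000, capped by the previously inferred parallelism, and the result is clamped to at least 1.

```java
public class HiveLimitParallelismDemo {
    public static void main(String[] args) {
        int inferredParallelism = 8;     // e.g. min(splitNum, inferMaxParallelism)
        long limit = 1500L;              // SELECT * FROM t LIMIT 1500
        int capped = Math.min(inferredParallelism, (int) (limit / 1000)); // min(8, 1) = 1
        System.out.println(Math.max(1, capped));                          // clamped to >= 1 -> 1
    }
}
```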
flink_JMXReporter_m0_rdh | // ------------------------------------------------------------------------
// adding / removing metrics
// ------------------------------------------------------------------------
@Override
public void m0(Metric metric, String metricName, MetricGroup group) {
final String domain = generateJmxDomain(metricName, group);
final Hashtable<String, String> table = generateJmxTable(group.getAllVariables());
        AbstractBean jmxMetric;
        ObjectName v3;
        try {
            v3 = new ObjectName(domain, table);
} catch (MalformedObjectNameException e) {
/* There is an implementation error on our side if this occurs. Either the domain was
modified and no longer conforms to the JMX domain rules or the table wasn't properly
generated.
*/
LOG.debug("Implementation error. The domain or table does not conform to JMX rules.", e);
return;
}
        switch (metric.getMetricType()) {
            case GAUGE:
                jmxMetric = new JmxGauge((Gauge<?>) metric);
                break;
            case COUNTER:
                jmxMetric = new JmxCounter((Counter) metric);
                break;
            case HISTOGRAM:
                jmxMetric = new JmxHistogram((Histogram) metric);
                break;
            case METER:
                jmxMetric = new JmxMeter((Meter) metric);
                break;
            default:
                LOG.error(
                        "Cannot add unknown metric type: {}. This indicates that the metric type "
                                + "is not supported by this reporter.",
                        metric.getClass().getName());
                return;
        }
try {
synchronized(this) {
mBeanServer.registerMBean(jmxMetric, v3);
registeredMetrics.put(metric, v3);
}
} catch (NotCompliantMBeanException e) {
// implementation error on our side
LOG.debug("Metric did not comply with JMX MBean rules.", e);
} catch (InstanceAlreadyExistsException e) {
LOG.warn(("A metric with the name " + v3) + " was already registered.", e);
} catch (Throwable t) {
LOG.warn("Failed to register metric", t);
}
} | 3.26 |
flink_JMXReporter_open_rdh | // ------------------------------------------------------------------------
// life cycle
// ------------------------------------------------------------------------
@Override
public void open(MetricConfig config) {
} | 3.26 |
flink_JMXReporter_replaceInvalidChars_rdh | /**
* Lightweight method to replace unsupported characters. If the string does not contain any
* unsupported characters, this method creates no new string (and in fact no new objects at
* all).
*
* <p>Replacements:
*
* <ul>
* <li>{@code "} is removed
* <li>{@code space} is replaced by {@code _} (underscore)
* <li>{@code , = ; : ? ' *} are replaced by {@code -} (hyphen)
* </ul>
*/
static String replaceInvalidChars(String str) {
char[] chars = null;
final int v8 = str.length();
int pos = 0;
for (int i = 0; i < v8; i++) {
final char c = str.charAt(i);
            switch (c) {
                case '>':
                case '<':
                case '"':
                    // remove character by not moving cursor
                    if (chars == null) {
                        chars = str.toCharArray();
                    }
                    break;
                case ' ':
                    if (chars == null) {
                        chars = str.toCharArray();
                    }
                    chars[pos++] = '_';
                    break;
                case ',':
                case '=':
                case ';':
                case ':':
                case '?':
                case '\'':
                case '*':
                    if (chars == null) {
                        chars = str.toCharArray();
                    }
                    chars[pos++] = '-';
                    break;
                default:
                    if (chars != null) {
                        chars[pos] = c;
                    }
                    pos++;
            }
        }
return chars == null ? str : new String(chars, 0, pos);
} | 3.26 |
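A minimal, self-contained illustration (not Flink's API) of the replacement rules documented above, applied to a sample scope string using plain string operations.

```java
public class ReplaceInvalidCharsDemo {
    public static void main(String[] args) {
        String scope = "taskmanager \"host:1\" metric,name";
        // quotes are removed, spaces become '_', and , = ; : ? ' * become '-'
        String cleaned = scope.replace("\"", "")
                .replace(' ', '_')
                .replaceAll("[,=;:?'*]", "-");
        System.out.println(cleaned); // taskmanager_host-1_metric-name
    }
}
```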
flink_JMXReporter_generateJmxTable_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
    static Hashtable<String, String> generateJmxTable(Map<String, String> variables) {
        Hashtable<String, String> ht = new Hashtable<>(variables.size());
        for (Map.Entry<String, String> v6 : variables.entrySet()) {
ht.put(replaceInvalidChars(v6.getKey()), replaceInvalidChars(v6.getValue()));
}
return ht;
} | 3.26 |
flink_WriterProperties_getPendingFileRecoverableSerializer_rdh | /**
*
* @return the serializer for the {@link InProgressFileWriter.PendingFileRecoverable}.
*/
public SimpleVersionedSerializer<InProgressFileWriter.PendingFileRecoverable> getPendingFileRecoverableSerializer() {
return pendingFileRecoverableSerializer;
} | 3.26 |
flink_WriterProperties_getInProgressFileRecoverableSerializer_rdh | /**
*
* @return the serializer for the {@link InProgressFileWriter.InProgressFileRecoverable}.
*/
public SimpleVersionedSerializer<InProgressFileWriter.InProgressFileRecoverable> getInProgressFileRecoverableSerializer() {
return inProgressFileRecoverableSerializer;
} | 3.26 |
flink_RestClusterClient_requestJobStatus_rdh | // -------------------------------------------------------------------------
// RestClient Helper
// -------------------------------------------------------------------------
private CompletableFuture<JobStatus> requestJobStatus(JobID jobId) {
final JobStatusInfoHeaders jobStatusInfoHeaders = JobStatusInfoHeaders.getInstance();
final JobMessageParameters params = new JobMessageParameters();
params.jobPathParameter.resolve(jobId);
        return sendRequest(jobStatusInfoHeaders, params)
                .thenApply(JobStatusInfo::getJobStatus)
                .thenApply(jobStatus -> {
                    if (jobStatus == JobStatus.SUSPENDED) {
                        throw new JobStateUnknownException(
                                String.format("Job %s is in state SUSPENDED", jobId));
                    }
                    return jobStatus;
                });
} | 3.26 |
flink_RestClusterClient_getClusterOverview_rdh | /**
* Get an overview of the Flink cluster.
*
* @return Future with the {@link ClusterOverviewWithVersion cluster overview}.
*/
    public CompletableFuture<ClusterOverviewWithVersion> getClusterOverview() {
return sendRequest(ClusterOverviewHeaders.getInstance(), EmptyMessageParameters.getInstance(), EmptyRequestBody.getInstance());
} | 3.26 |
flink_RestClusterClient_updateJobResourceRequirements_rdh | /**
* Update {@link JobResourceRequirements} of a given job.
*
* @param jobId
* jobId specifies the job for which to change the resource requirements
* @param jobResourceRequirements
* new resource requirements for the provided job
* @return Future which is completed upon successful operation.
*/
public CompletableFuture<Acknowledge> updateJobResourceRequirements(JobID jobId, JobResourceRequirements jobResourceRequirements) {
final JobMessageParameters params = new JobMessageParameters();
params.jobPathParameter.resolve(jobId);
return sendRequest(JobResourcesRequirementsUpdateHeaders.INSTANCE, params, new JobResourceRequirementsBody(jobResourceRequirements)).thenApply(ignored -> Acknowledge.get());
} | 3.26 |
flink_RestClusterClient_m0_rdh | /**
* Requests the job details.
*
* @param jobId
* The job id
* @return Job details
*/
public CompletableFuture<JobDetailsInfo> m0(JobID jobId) {
final JobDetailsHeaders detailsHeaders = JobDetailsHeaders.getInstance();
final JobMessageParameters params = new JobMessageParameters();
params.jobPathParameter.resolve(jobId);
return sendRequest(detailsHeaders, params);
} | 3.26 |
flink_RestClusterClient_pollResourceAsync_rdh | /**
* Creates a {@code CompletableFuture} that polls a {@code AsynchronouslyCreatedResource} until
* its {@link AsynchronouslyCreatedResource#queueStatus() QueueStatus} becomes {@link QueueStatus.Id#COMPLETED COMPLETED}. The future completes with the result of {@link AsynchronouslyCreatedResource#resource()}.
*
* @param resourceFutureSupplier
* The operation which polls for the {@code AsynchronouslyCreatedResource}.
* @param <R>
* The type of the resource.
* @param <A>
* The type of the {@code AsynchronouslyCreatedResource}.
* @return A {@code CompletableFuture} delivering the resource.
*/
private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync(final Supplier<CompletableFuture<A>> resourceFutureSupplier) {
        return pollResourceAsync(resourceFutureSupplier, new CompletableFuture<>(), 0);
    } | 3.26 |
flink_RestClusterClient_getWebInterfaceURL_rdh | // ======================================
@Override
public String getWebInterfaceURL() {
try {
return getWebMonitorBaseUrl().get().toString();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.checkInterrupted(e);
LOG.warn("Could not retrieve the web interface URL for the cluster.", e);
return "Unknown address.";
}
} | 3.26 |
flink_Lockable_release_rdh | /**
* Releases lock on this object. If no more locks are acquired on it, this method will return
* true.
*
* @return true if no more locks are acquired
*/
boolean release() {
        if (refCounter <= 0) {
return true;
}
refCounter -= 1;
return refCounter == 0;
} | 3.26 |
flink_ArrowReader_getColumnVectors_rdh | /**
* Gets the column vectors.
*/
public ColumnVector[] getColumnVectors() {
return columnVectors;
} | 3.26 |
flink_ArrowReader_read_rdh | /**
* Read the specified row from underlying Arrow format data.
*/
public RowData read(int rowId) {
f0.setRowId(rowId);
return f0;
} | 3.26 |
flink_ExecNodeMetadataUtil_latestAnnotation_rdh | /**
* Returns the {@link ExecNodeMetadata} annotation of the class with the highest (most recent)
* {@link ExecNodeMetadata#version()}.
*/
public static <T extends ExecNode<?>> ExecNodeMetadata latestAnnotation(Class<T> execNodeClass) {
List<ExecNodeMetadata> sortedAnnotations = extractMetadataFromAnnotation(execNodeClass);
if (sortedAnnotations.isEmpty()) {
return null;
}
sortedAnnotations.sort(Comparator.comparingInt(ExecNodeMetadata::version));
return sortedAnnotations.get(sortedAnnotations.size() - 1);
} | 3.26 |
flink_RetractableTopNFunction_processStateStaled_rdh | // ------------- ROW_NUMBER-------------------------------
    private void processStateStaled(Iterator<Map.Entry<RowData, Long>> sortedMapIterator) throws RuntimeException {
        // Sync with dataState first
        sortedMapIterator.remove();
stateStaledErrorHandle();
} | 3.26 |
flink_RetractableTopNFunction_retractRecordWithoutRowNumber_rdh | /**
* Retract the input record and emit updated records. This works for outputting without
* row_number.
*
* @return true if the input record has been removed from {@link #dataState}.
*/
private boolean retractRecordWithoutRowNumber(SortedMap<RowData, Long> sortedMap, RowData sortKey, RowData inputRow, Collector<RowData> out) throws Exception {
Iterator<Map.Entry<RowData, Long>> iterator = sortedMap.entrySet().iterator();
long nextRank = 1L;// the next rank number, should be in the rank range
boolean findsSortKey = false;
while (iterator.hasNext() && isInRankEnd(nextRank)) {
Map.Entry<RowData, Long> v46 = iterator.next();
RowData key = v46.getKey();
            if (!findsSortKey && key.equals(sortKey)) {
List<RowData> inputs = dataState.get(key);
if (inputs == null) {
processStateStaled(iterator);
} else {
Iterator<RowData> inputIter = inputs.iterator();
while (inputIter.hasNext() && isInRankEnd(nextRank)) {
RowData prevRow = inputIter.next();
if ((!findsSortKey) && equaliser.equals(prevRow, inputRow)) {
collectDelete(out, prevRow, nextRank);
                            nextRank -= 1;
findsSortKey = true;
inputIter.remove();
                        } else if (findsSortKey) {
                            if (nextRank == rankEnd) {
collectInsert(out, prevRow, nextRank);
}
}
                        nextRank += 1;
}
if (inputs.isEmpty()) {
dataState.remove(key);
} else {
dataState.put(key, inputs);
                    }
                }
} else if (findsSortKey) {
long count = v46.getValue();
// gets the rank of last record with same sortKey
long v52 = (nextRank + count) - 1;
if (v52 < rankEnd) {
                    nextRank = v52 + 1;
} else {
// sends the record if there is a record recently upgrades to Top-N
int index = Long.valueOf(rankEnd - nextRank).intValue();
                    List<RowData> v54 = dataState.get(key);
                    if (v54 == null) {
processStateStaled(iterator);
                    } else {
                        RowData toAdd = v54.get(index);
collectInsert(out, toAdd);
break;
}
}
} else {
nextRank += v46.getValue();
}
}
return findsSortKey;
} | 3.26 |
flink_RetractableTopNFunction_retractRecordWithRowNumber_rdh | /**
* Retract the input record and emit updated records. This works for outputting with row_number.
*
* @return true if the input record has been removed from {@link #dataState}.
*/
private boolean retractRecordWithRowNumber(SortedMap<RowData, Long> sortedMap, RowData sortKey, RowData inputRow, Collector<RowData> out) throws Exception {
Iterator<Map.Entry<RowData, Long>> iterator = sortedMap.entrySet().iterator();
long currentRank = 0L;
RowData prevRow = null;
boolean findsSortKey = false;
while (iterator.hasNext() && isInRankEnd(currentRank)) {
Map.Entry<RowData, Long> entry = iterator.next();
RowData key = entry.getKey();
            if (!findsSortKey && key.equals(sortKey)) {
List<RowData> inputs = dataState.get(key);
if (inputs == null) {
processStateStaled(iterator);
} else {
Iterator<RowData> inputIter = inputs.iterator();
while (inputIter.hasNext() && isInRankEnd(currentRank)) {
RowData currentRow = inputIter.next();
if ((!findsSortKey) && equaliser.equals(currentRow, inputRow)) {
prevRow = currentRow;
findsSortKey = true;
inputIter.remove();
} else if (findsSortKey) {
collectUpdateBefore(out, prevRow, currentRank);
collectUpdateAfter(out, currentRow, currentRank);
prevRow = currentRow;
}
currentRank += 1;
}
if (inputs.isEmpty()) {
dataState.remove(key);
} else {
dataState.put(key, inputs);
}
}
} else if (findsSortKey) {
List<RowData> inputs = dataState.get(key);
if (inputs == null) {
processStateStaled(iterator);
} else {
int i = 0;
                    while (i < inputs.size() && isInRankEnd(currentRank)) {
                        RowData v42 = inputs.get(i);
collectUpdateBefore(out, prevRow, currentRank);
collectUpdateAfter(out, v42, currentRank);
prevRow = v42;
currentRank += 1;
i++;
}
}
} else {
currentRank += entry.getValue();
}
}
        if (isInRankEnd(currentRank)) {
if ((!findsSortKey) && (null == prevRow)) {
stateStaledErrorHandle();
} else {
// there is no enough elements in Top-N, emit DELETE message for the retract record.
collectDelete(out, prevRow, currentRank);
}
}
        return findsSortKey;
    } | 3.26 |
flink_RetractableTopNFunction_stateStaledErrorHandle_rdh | /**
 * Handles a stale-state error according to the configured lenient option. If the option is
 * true, only a warning is logged; otherwise a {@link RuntimeException} is thrown.
*/
private void stateStaledErrorHandle() {
        // Skip the data if its state has been cleared because of state TTL.
if (lenient) {
LOG.warn(STATE_CLEARED_WARN_MSG);
} else {
throw new RuntimeException(STATE_CLEARED_WARN_MSG);
}
} | 3.26 |
flink_IntParser_parseField_rdh | /**
* Static utility to parse a field of type int from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes
* The bytes containing the text data that should be parsed.
* @param startPos
* The offset to start the parsing.
* @param length
* The length of the byte sequence (counting from the offset).
* @return The parsed value.
* @throws NumberFormatException
* Thrown when the value cannot be parsed because the text
* represents not a correct number.
*/
public static final int parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, ((char) (0xffff)));
} | 3.26 |
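A short usage sketch of the parsing utility above; the import below assumes the surrounding class is Flink's IntParser in org.apache.flink.types.parser, and the example relies on the byte range containing only digits.

```java
import org.apache.flink.types.parser.IntParser; // assumed package of the class above

public class IntParserDemo {
    public static void main(String[] args) {
        byte[] bytes = "id=4711;x".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
        // parse 4 bytes starting at offset 3, i.e. the text "4711"
        int value = IntParser.parseField(bytes, 3, 4);
        System.out.println(value); // 4711
    }
}
```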
flink_DefaultLeaderElectionService_deregisterDriver_rdh | /**
* Returns the driver as an {@link AutoCloseable} for the sake of closing the driver outside of
* the lock.
*/
@GuardedBy("lock")
private AutoCloseable deregisterDriver() {
Preconditions.checkState(leaderContenderRegistry.isEmpty(), "No contender should be registered when deregistering the driver.");
Preconditions.checkState(leaderElectionDriver != null, "There should be a driver instantiated that's ready to be closed.");
issuedLeaderSessionID = null;
final AutoCloseable v2 = leaderElectionDriver;
leaderElectionDriver = null;
return v2;
} | 3.26 |
flink_DefaultLeaderElectionService_getLeaderSessionID_rdh | /**
* Returns the current leader session ID for the given {@code componentId} or {@code null}, if
* the session wasn't confirmed.
*/
@VisibleForTesting
@Nullable
    public UUID getLeaderSessionID(String componentId) {
synchronized(lock) {
return leaderContenderRegistry.containsKey(componentId) ? confirmedLeaderInformation.forComponentIdOrEmpty(componentId).getLeaderSessionID() : null;
}
} | 3.26 |
flink_RawValueData_fromBytes_rdh | /**
* Creates an instance of {@link RawValueData} from the given byte array.
*/
static <T> RawValueData<T> fromBytes(byte[] bytes) {
return BinaryRawValueData.fromBytes(bytes);
} | 3.26 |
flink_RawValueData_fromObject_rdh | // ------------------------------------------------------------------------------------------
// Constructor Utilities
// ------------------------------------------------------------------------------------------
/**
* Creates an instance of {@link RawValueData} from a Java object.
*/
static <T> RawValueData<T> fromObject(T javaObject) {
return BinaryRawValueData.fromObject(javaObject);
} | 3.26 |
flink_NFAStateSerializer_readObject_rdh | /* Backwards compatible deserializing of NFAStateSerializer. */
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
// the nested serializer will be null if this was read from a savepoint taken with versions
// lower than Flink 1.7; in this case, we explicitly create instance for the nested
// serializer.
if (((versionSerializer == null) || (nodeIdSerializer == null)) || (eventIdSerializer == null)) {
this.versionSerializer = DeweyNumberSerializer.INSTANCE;
this.eventIdSerializer = EventIdSerializer.INSTANCE;
this.nodeIdSerializer = new NodeId.NodeIdSerializer();
}
} | 3.26 |
flink_NFAStateSerializer_getVersionSerializer_rdh | /* Getters for internal serializers to use in NFAStateSerializerSnapshot. */
TypeSerializer<DeweyNumber> getVersionSerializer() {
return versionSerializer;
} | 3.26 |
flink_NFAStateSerializer_m1_rdh | /* De/serialization methods */
private void m1(Queue<ComputationState> states, DataOutputView target) throws IOException {
target.writeInt(states.size());
for (ComputationState computationState : states) {
serializeSingleComputationState(computationState, target);
}
} | 3.26 |
flink_AbstractWritableVector_hasDictionary_rdh | /**
* Returns true if this column has a dictionary.
*/
@Override
public boolean hasDictionary() {
return this.dictionary != null;
} | 3.26 |
flink_AbstractWritableVector_setDictionary_rdh | /**
* Update the dictionary.
*/
@Override
public void setDictionary(Dictionary dictionary) {
this.dictionary = dictionary;
} | 3.26 |
flink_CalciteParser_parseIdentifier_rdh | /**
* Parses a SQL string as an identifier into a {@link SqlIdentifier}.
*
* @param identifier
* a sql string to parse as an identifier
* @return a parsed sql node
* @throws SqlParserException
* if an exception is thrown when parsing the identifier
*/
public SqlIdentifier parseIdentifier(String identifier) throws SqlParserException {
try {
SqlAbstractParserImpl flinkParser = createFlinkParser(identifier);
if (flinkParser instanceof FlinkSqlParserImpl) {
return ((FlinkSqlParserImpl) (flinkParser)).TableApiIdentifier();
} else {
throw new IllegalArgumentException("Unrecognized sql parser type " + flinkParser.getClass().getName());
}
} catch (Exception e) {
            throw new SqlParserException(String.format("Invalid SQL identifier %s.", identifier), e);
}
} | 3.26 |
flink_CalciteParser_parseSqlList_rdh | /**
* Parses a SQL string into a {@link SqlNodeList}. The {@link SqlNodeList} is not yet validated.
*
* @param sql
* a sql string to parse
* @return a parsed sql node list
* @throws SqlParserException
* if an exception is thrown when parsing the statement
* @throws SqlParserEOFException
* if the statement is incomplete
*/
public SqlNodeList parseSqlList(String sql) {
try {
SqlParser parser = SqlParser.create(sql, config);
            return parser.parseStmtList();
        } catch (SqlParseException e) {
            if (e.getMessage().contains("Encountered \"<EOF>\"")) {
                throw new SqlParserEOFException(e.getMessage(), e);
            }
throw new SqlParserException("SQL parse failed. " + e.getMessage(), e);
}
} | 3.26 |
flink_CalciteParser_parseExpression_rdh | /**
* Parses a SQL expression into a {@link SqlNode}. The {@link SqlNode} is not yet validated.
*
* @param sqlExpression
* a SQL expression string to parse
* @return a parsed SQL node
* @throws SqlParserException
* if an exception is thrown when parsing the statement
*/
public SqlNode parseExpression(String sqlExpression) throws SqlParserException {
try {
final SqlParser parser = SqlParser.create(sqlExpression, config);
return parser.parseExpression();
} catch (SqlParseException e) {
throw new SqlParserException("SQL parse failed. " + e.getMessage(), e);
}
} | 3.26 |
flink_CalciteParser_createFlinkParser_rdh | /**
* Equivalent to {@link SqlParser#create(Reader, SqlParser.Config)}. The only difference is we
* do not wrap the {@link FlinkSqlParserImpl} with {@link SqlParser}.
*
* <p>It is so that we can access specific parsing methods not accessible through the {@code SqlParser}.
*/
private SqlAbstractParserImpl createFlinkParser(String expr) {
SourceStringReader reader = new SourceStringReader(expr);
SqlAbstractParserImpl parser = config.parserFactory().getParser(reader);
parser.setTabSize(1);
parser.setQuotedCasing(config.quotedCasing());
parser.setUnquotedCasing(config.unquotedCasing());
parser.setIdentifierMaxLength(config.identifierMaxLength());
parser.setConformance(config.conformance());
        switch (config.quoting()) {
            case DOUBLE_QUOTE:
                parser.switchTo(LexicalState.DQID);
                break;
            case BACK_TICK:
                parser.switchTo(LexicalState.BTID);
                break;
            case BRACKET:
                parser.switchTo(LexicalState.DEFAULT);
                break;
        }
return parser;
} | 3.26 |
flink_CalciteParser_parse_rdh | /**
* Parses a SQL statement into a {@link SqlNode}. The {@link SqlNode} is not yet validated.
*
* @param sql
* a sql string to parse
* @return a parsed sql node
* @throws SqlParserException
* if an exception is thrown when parsing the statement
* @throws SqlParserEOFException
* if the statement is incomplete
*/
public SqlNode parse(String sql) {
try {
SqlParser parser = SqlParser.create(sql, config);
return parser.parseStmt();
        } catch (SqlParseException e) {
if (e.getMessage().contains("Encountered \"<EOF>\"")) {
throw new SqlParserEOFException(e.getMessage(), e);
}
throw new SqlParserException("SQL parse failed. " + e.getMessage(), e);
}
} | 3.26 |
flink_KvStateRegistry_getKvStateRegistryListener_rdh | // ------------------------------------------------------------------------
// Internal methods
// ------------------------------------------------------------------------
private KvStateRegistryListener getKvStateRegistryListener(JobID jobId) {
// first check whether we are running the legacy code which registers
// a single listener under HighAvailabilityServices.DEFAULT_JOB_ID
KvStateRegistryListener listener = f1.get(HighAvailabilityServices.DEFAULT_JOB_ID);
if (listener == null) {
listener = f1.get(jobId);
}
return listener;
} | 3.26 |
flink_KvStateRegistry_m0_rdh | /**
* Unregisters the listener with the registry.
*
* @param jobId
* for which to unregister the {@link KvStateRegistryListener}
 */
    public void m0(JobID jobId) {
f1.remove(jobId);
} | 3.26 |
flink_KvStateRegistry_registerListener_rdh | /**
* Registers a listener with the registry.
*
* @param jobId
* identifying the job for which to register a {@link KvStateRegistryListener}
* @param listener
* The registry listener.
* @throws IllegalStateException
* If there is a registered listener
*/
public void registerListener(JobID jobId,
KvStateRegistryListener listener) {
final KvStateRegistryListener previousValue = f1.putIfAbsent(jobId, listener);
if (previousValue != null) {
            throw new IllegalStateException("Listener already registered under " + jobId + '.');
}
} | 3.26 |
flink_KvStateRegistry_unregisterKvState_rdh | /**
* Unregisters the KvState instance identified by the given KvStateID.
*
* @param jobId
* JobId the KvState instance belongs to
* @param kvStateId
* KvStateID to identify the KvState instance
* @param keyGroupRange
* Key group range the KvState instance belongs to
*/
public void unregisterKvState(JobID jobId, JobVertexID jobVertexId, KeyGroupRange keyGroupRange, String registrationName, KvStateID kvStateId) {
KvStateEntry<?, ?, ?> entry = f0.remove(kvStateId);
if (entry != null) {
entry.clear();
final KvStateRegistryListener listener = getKvStateRegistryListener(jobId);
if (listener != null) {
                listener.notifyKvStateUnregistered(jobId, jobVertexId, keyGroupRange, registrationName);
            }
}
}
/**
* Returns the {@link KvStateEntry} containing the requested instance as identified by the given
* KvStateID, along with its {@link KvStateInfo} or <code>null</code> if none is registered.
*
* @param kvStateId
* KvStateID to identify the KvState instance
* @return The {@link KvStateEntry} | 3.26 |
flink_KvStateRegistry_registerKvState_rdh | /**
* Registers the KvState instance and returns the assigned ID.
*
* @param jobId
* JobId the KvState instance belongs to
* @param jobVertexId
* JobVertexID the KvState instance belongs to
* @param keyGroupRange
* Key group range the KvState instance belongs to
* @param registrationName
* Name under which the KvState is registered
* @param kvState
* KvState instance to be registered
* @return Assigned KvStateID
*/
    public KvStateID registerKvState(JobID jobId, JobVertexID jobVertexId, KeyGroupRange keyGroupRange, String registrationName, InternalKvState<?, ?, ?> kvState, ClassLoader userClassLoader) {
        KvStateID kvStateId = new KvStateID();
        if (f0.putIfAbsent(kvStateId, new KvStateEntry<>(kvState, userClassLoader)) == null) {
final KvStateRegistryListener listener = getKvStateRegistryListener(jobId);
if (listener != null) {
listener.notifyKvStateRegistered(jobId, jobVertexId, keyGroupRange, registrationName, kvStateId);
}
            return kvStateId;
        } else {
throw new IllegalStateException(((("State \"" + registrationName) + " \"(id=") + kvStateId) + ") appears registered although it should not.");
}
} | 3.26 |
flink_KvStateRegistry_createTaskRegistry_rdh | // ------------------------------------------------------------------------
/**
* Creates a {@link TaskKvStateRegistry} facade for the {@link Task} identified by the given
* JobID and JobVertexID instance.
*
* @param jobId
* JobID of the task
* @param jobVertexId
* JobVertexID of the task
* @return A {@link TaskKvStateRegistry} facade for the task
*/
public TaskKvStateRegistry createTaskRegistry(JobID jobId, JobVertexID jobVertexId) {
        return new TaskKvStateRegistry(this, jobId, jobVertexId);
    } | 3.26 |
flink_ResourceGuard_getLeaseCount_rdh | /**
* Returns the current count of open leases.
*/
public int getLeaseCount() {
return leaseCount;
} | 3.26 |
flink_ResourceGuard_m0_rdh | /**
 * Closes the resource guard. This method will block until all calls to {@link #acquireResource()} have seen their matching call to {@link #releaseResource()}.
*/
@Override
public void m0() {
closeUninterruptibly();
} | 3.26 |
flink_ResourceGuard_releaseResource_rdh | /**
* Releases access for one client of the guarded resource. This method must only be called after
* a matching call to {@link #acquireResource()}.
*/
private void releaseResource() {
synchronized(lock) {
--leaseCount;
if (closed && (leaseCount == 0)) {
lock.notifyAll();
}
}
} | 3.26 |
flink_ResourceGuard_isClosed_rdh | /**
* Returns true if the resource guard is closed, i.e. after {@link #close()} was called.
*/
public boolean isClosed() {
return closed;
} | 3.26 |
flink_ResourceGuard_acquireResource_rdh | /**
 * Acquires access for one new client of the guarded resource.
 *
 * @throws IOException
 * 		when the resource guard is already closed.
 */
    public Lease acquireResource() throws IOException {
synchronized(lock) {
if (closed) {
throw new IOException("Resource guard was already closed.");
}
++leaseCount;
}
return new Lease();
} | 3.26 |
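A hedged usage sketch of the acquire/release protocol described above; it assumes the import shown below and that Lease is AutoCloseable so try-with-resources performs the release, which is how the guard is typically used.

```java
import org.apache.flink.util.ResourceGuard; // assumed package

public class ResourceGuardDemo {
    public static void main(String[] args) throws Exception {
        ResourceGuard guard = new ResourceGuard();
        try (ResourceGuard.Lease lease = guard.acquireResource()) {
            // the guarded resource is safe to use while the lease is held
        }
        guard.closeUninterruptibly(); // returns once the lease count has dropped to zero
    }
}
```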
flink_ResourceGuard_closeUninterruptibly_rdh | /**
* If the current thread is {@linkplain Thread#interrupt interrupted} while waiting for the
* close method to complete, then it will continue to wait. When the thread does return from
* this method its interrupt status will be set.
*/
@SuppressWarnings("WeakerAccess")
public void closeUninterruptibly() {
boolean interrupted = false;
synchronized(lock) {
closed = true;
while (leaseCount > 0) {
try {
lock.wait();
} catch (InterruptedException e) {
interrupted = true;
}
}
}
if (interrupted) {
Thread.currentThread().interrupt();
}
} | 3.26 |
flink_ArrowUtils_createRowDataArrowWriter_rdh | /**
* Creates an {@link ArrowWriter} for the specified {@link VectorSchemaRoot}.
*/
public static ArrowWriter<RowData> createRowDataArrowWriter(VectorSchemaRoot root, RowType rowType) {
ArrowFieldWriter<RowData>[] fieldWriters = new ArrowFieldWriter[root.getFieldVectors().size()];
List<FieldVector> vectors = root.getFieldVectors();
for (int i = 0; i < vectors.size(); i++) {
FieldVector vector = vectors.get(i);
vector.allocateNew();
fieldWriters[i] = createArrowFieldWriterForRow(vector, rowType.getTypeAt(i));
}
return new ArrowWriter<>(root, fieldWriters);
} | 3.26 |
flink_ArrowUtils_toArrowSchema_rdh | /**
* Returns the Arrow schema of the specified type.
*/
public static Schema toArrowSchema(RowType rowType) {
Collection<Field> fields = rowType.getFields().stream().map(f -> ArrowUtils.toArrowField(f.getName(), f.getType())).collect(Collectors.toCollection(ArrayList::new));
return new Schema(fields);
} | 3.26 |
flink_ArrowUtils_collectAsPandasDataFrame_rdh | /**
* Convert Flink table to Pandas DataFrame.
*/
public static CustomIterator<byte[]> collectAsPandasDataFrame(Table table, int maxArrowBatchSize) throws Exception {
checkArrowUsable();
BufferAllocator allocator = getRootAllocator().newChildAllocator("collectAsPandasDataFrame", 0, Long.MAX_VALUE);
RowType rowType = ((RowType) (table.getResolvedSchema().toSourceRowDataType().getLogicalType()));
DataType defaultRowDataType = TypeConversions.fromLogicalToDataType(rowType);
VectorSchemaRoot root = VectorSchemaRoot.create(ArrowUtils.toArrowSchema(rowType), allocator);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ArrowStreamWriter arrowStreamWriter = new ArrowStreamWriter(root, null, baos);
arrowStreamWriter.start();
Iterator<Row> v57 = table.execute().collect();
Iterator<Row> appendOnlyResults;
if (isAppendOnlyTable(table)) {
appendOnlyResults = v57;
} else {
            appendOnlyResults = filterOutRetractRows(v57);
        }
ArrowWriter arrowWriter = createRowDataArrowWriter(root, rowType);
Iterator convertedResults = new Iterator<RowData>() {
            @Override
            public boolean hasNext() {
return appendOnlyResults.hasNext();
}
@Override
public RowData next() {
DataFormatConverters.DataFormatConverter v61 = DataFormatConverters.getConverterForDataType(defaultRowDataType);
return ((RowData) (v61.toInternal(appendOnlyResults.next())));
}
};
return new CustomIterator<byte[]>() {
@Override
            public boolean hasNext() {
return convertedResults.hasNext();
}
@Override
public byte[] next() {
try {
int i = 0;
while (convertedResults.hasNext() && (i < maxArrowBatchSize)) {
i++;
                        arrowWriter.write(convertedResults.next());
                    }
arrowWriter.finish();
arrowStreamWriter.writeBatch();
return baos.toByteArray();
} catch (Throwable t) {
String msg = "Failed to serialize the data of the table";
LOG.error(msg, t);
                    throw new RuntimeException(msg, t);
} finally {
arrowWriter.reset();
baos.reset();
if (!hasNext()) {
root.close();
allocator.close();
}
}
            }
        };
} | 3.26 |
flink_ArrowUtils_readFully_rdh | /**
* Fills a buffer with data read from the channel.
*/
private static void readFully(ReadableByteChannel channel, ByteBuffer dst) throws IOException {
int expected = dst.remaining();
while (dst.hasRemaining()) {
            if (channel.read(dst) < 0) {
throw new EOFException(String.format("Not enough bytes in channel (expected %d).", expected));
}
}
} | 3.26 |
flink_ArrowUtils_createArrowReader_rdh | /**
* Creates an {@link ArrowReader} for the specified {@link VectorSchemaRoot}.
*/
    public static ArrowReader createArrowReader(VectorSchemaRoot root, RowType rowType) {
List<ColumnVector> columnVectors = new ArrayList<>();
List<FieldVector> fieldVectors = root.getFieldVectors();
        for (int i = 0; i < fieldVectors.size(); i++) {
columnVectors.add(createColumnVector(fieldVectors.get(i), rowType.getTypeAt(i)));
}
return new ArrowReader(columnVectors.toArray(new ColumnVector[0]));
} | 3.26 |