name | code_snippet | score
---|---|---|
flink_Tuple23_equals_rdh
|
/**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple23)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple23 tuple = ((Tuple23) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) {
return false;
}
if (f20 != null ? !f20.equals(tuple.f20) : tuple.f20 != null) {
return false;
}
if (f21 != null ? !f21.equals(tuple.f21) : tuple.f21 != null) {
return false;
}
if (f22 != null ? !f22.equals(tuple.f22) : tuple.f22 != null) {
return false;
}
return true;
}
| 3.26 |
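The null-safe ternary repeated for every field above is equivalent to negating `java.util.Objects.equals`; a minimal sketch of that equivalence (the method name is illustrative):

import java.util.Objects;

// "f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null" is true exactly when
// the two values differ under the null-safe Objects.equals comparison.
static boolean fieldDiffers(Object a, Object b) {
    return !Objects.equals(a, b);
}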
flink_Tuple23_toString_rdh
|
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22), where the individual
* fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "(" + StringUtils.arrayAwareToString(this.f0)
+ "," + StringUtils.arrayAwareToString(this.f1)
+ "," + StringUtils.arrayAwareToString(this.f2)
+ "," + StringUtils.arrayAwareToString(this.f3)
+ "," + StringUtils.arrayAwareToString(this.f4)
+ "," + StringUtils.arrayAwareToString(this.f5)
+ "," + StringUtils.arrayAwareToString(this.f6)
+ "," + StringUtils.arrayAwareToString(this.f7)
+ "," + StringUtils.arrayAwareToString(this.f8)
+ "," + StringUtils.arrayAwareToString(this.f9)
+ "," + StringUtils.arrayAwareToString(this.f10)
+ "," + StringUtils.arrayAwareToString(this.f11)
+ "," + StringUtils.arrayAwareToString(this.f12)
+ "," + StringUtils.arrayAwareToString(this.f13)
+ "," + StringUtils.arrayAwareToString(this.f14)
+ "," + StringUtils.arrayAwareToString(this.f15)
+ "," + StringUtils.arrayAwareToString(this.f16)
+ "," + StringUtils.arrayAwareToString(this.f17)
+ "," + StringUtils.arrayAwareToString(this.f18)
+ "," + StringUtils.arrayAwareToString(this.f19)
+ "," + StringUtils.arrayAwareToString(this.f20)
+ "," + StringUtils.arrayAwareToString(this.f21)
+ "," + StringUtils.arrayAwareToString(this.f22)
+ ")";
}
| 3.26 |
flink_Tuple23_of_rdh
|
/**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20, T21 f21, T22 f22) {
return new Tuple23<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22);
}
| 3.26 |
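A hedged usage sketch of the `of` factory, using the small-arity `Tuple3` that the Javadoc itself references (the values are illustrative):

import org.apache.flink.api.java.tuple.Tuple3;

// Generic arguments are inferred, so no explicit type parameters are needed.
Tuple3<Integer, Double, String> t = Tuple3.of(1, 2.0, "flink");
Integer first = t.f0;                               // positional field access
Tuple3<Integer, Double, String> shallow = t.copy(); // shallow copy with the same field references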
flink_Tuple23_setFields_rdh
|
/**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
* @param f13
* The value for field 13
* @param f14
* The value for field 14
* @param f15
* The value for field 15
* @param f16
* The value for field 16
* @param f17
* The value for field 17
* @param f18
* The value for field 18
* @param f19
* The value for field 19
* @param f20
* The value for field 20
* @param f21
* The value for field 21
* @param f22
* The value for field 22
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20, T21 f21, T22 f22) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
this.f18 = f18;
this.f19 = f19;
this.f20 = f20;
this.f21 = f21;
this.f22 = f22;
}
| 3.26 |
flink_Tuple23_copy_rdh
|
/**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple23<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> copy() {
return new Tuple23<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16, this.f17, this.f18, this.f19, this.f20, this.f21, this.f22);
}
| 3.26 |
flink_SlideWithSizeAndSlide_on_rdh
|
/**
* Specifies the time attribute on which rows are grouped.
*
* <p>For streaming tables you can specify grouping by an event-time or processing-time
* attribute.
*
* <p>For batch tables you can specify grouping on a timestamp or long attribute.
*
* @param timeField
* time attribute for streaming and batch tables
* @return a sliding window on event-time
*/
public SlideWithSizeAndSlideOnTime on(Expression timeField) {
return new SlideWithSizeAndSlideOnTime(timeField, size, slide);
}
| 3.26 |
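A hedged Table API sketch of how this builder step is typically chained; the table `orders` and its `rowtime`/`user`/`amount` columns are assumptions:

import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.lit;
import org.apache.flink.table.api.Slide;
import org.apache.flink.table.api.Table;

// 10-minute window sliding every 5 minutes, grouped on the event-time attribute "rowtime".
Table result = orders
        .window(Slide.over(lit(10).minutes()).every(lit(5).minutes()).on($("rowtime")).as("w"))
        .groupBy($("w"), $("user"))
        .select($("user"), $("amount").sum());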
flink_RestServerEndpoint_createUploadDir_rdh
|
/**
* Creates the upload dir if needed.
*/
static void createUploadDir(final Path uploadDir, final Logger log, final boolean initialCreation) throws IOException {
if (!Files.exists(uploadDir)) {
if (initialCreation) {
log.info("Upload directory {} does not exist. ", uploadDir);
} else {
log.warn("Upload directory {} has been deleted externally. " + "Previously uploaded files are no longer available.", uploadDir);
}
checkAndCreateUploadDir(uploadDir, log);
}
}
| 3.26 |
flink_RestServerEndpoint_shutDownInternal_rdh
|
/**
* Stops this REST server endpoint.
*
* @return Future which is completed once the shut down has been finished.
*/
protected CompletableFuture<Void> shutDownInternal() {
synchronized(lock) {
CompletableFuture<?> channelFuture = new CompletableFuture<>();
if (serverChannel != null) {
serverChannel.close().addListener(finished -> {
if (finished.isSuccess()) {
channelFuture.complete(null);
} else {
channelFuture.completeExceptionally(finished.cause());
}
});
serverChannel = null;
}
final CompletableFuture<Void> channelTerminationFuture = new CompletableFuture<>();
channelFuture.thenRun(() -> {
CompletableFuture<?> groupFuture = new CompletableFuture<>();
CompletableFuture<?> childGroupFuture = new CompletableFuture<>();
final Time gracePeriod = Time.seconds(10L);
if (bootstrap != null) {
final ServerBootstrapConfig config = bootstrap.config();
final EventLoopGroup group = config.group();
if (group != null) {
group.shutdownGracefully(0L, gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS).addListener(finished -> {
if (finished.isSuccess()) {
groupFuture.complete(null);
} else {
groupFuture.completeExceptionally(finished.cause());
}
});
} else {
groupFuture.complete(null);
}
final EventLoopGroup v26 = config.childGroup();
if (v26 != null) {
v26.shutdownGracefully(0L, gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS).addListener(finished -> {
if (finished.isSuccess()) {
childGroupFuture.complete(null);
} else {
childGroupFuture.completeExceptionally(finished.cause());
}
});
} else {
childGroupFuture.complete(null);
}
bootstrap = null;
} else {
// complete the group futures since there is nothing to stop
groupFuture.complete(null);
childGroupFuture.complete(null);
}
CompletableFuture<Void> combinedFuture = FutureUtils.completeAll(Arrays.asList(groupFuture, childGroupFuture));
combinedFuture.whenComplete((Void ignored,Throwable throwable) -> {
if (throwable != null) {
channelTerminationFuture.completeExceptionally(throwable);
} else {
channelTerminationFuture.complete(null);
}});
});
return channelTerminationFuture;
}
}
| 3.26 |
flink_RestServerEndpoint_start_rdh
|
/**
* Starts this REST server endpoint.
*
* @throws Exception
* if we cannot start the RestServerEndpoint
*/
public final void start() throws Exception {
synchronized(lock) {
Preconditions.checkState(state == State.CREATED, "The RestServerEndpoint cannot be restarted.");
log.info("Starting rest endpoint.");
final Router router = new Router();
final CompletableFuture<String> restAddressFuture = new CompletableFuture<>();
handlers = initializeHandlers(restAddressFuture);
/* sort the handlers such that they are ordered the following:
/jobs
/jobs/overview
/jobs/:jobid
/jobs/:jobid/config
/:*
*/
Collections.sort(handlers, RestHandlerUrlComparator.INSTANCE);
checkAllEndpointsAndHandlersAreUnique(handlers);
handlers.forEach(handler -> registerHandler(router, handler, log));
ChannelInitializer<SocketChannel> initializer = new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws ConfigurationException {
RouterHandler handler = new RouterHandler(router, responseHeaders);
// SSL should be the first handler in the pipeline
if (isHttpsEnabled()) {
ch.pipeline().addLast("ssl", new RedirectingSslHandler(restAddress, restAddressFuture, sslHandlerFactory));
}
ch.pipeline().addLast(new HttpServerCodec()).addLast(new FileUploadHandler(uploadDir)).addLast(new FlinkHttpObjectAggregator(maxContentLength, responseHeaders));
for (InboundChannelHandlerFactory factory : inboundChannelHandlerFactories) {
Optional<ChannelHandler> channelHandler = factory.createHandler(f0, responseHeaders);
if (channelHandler.isPresent()) {
ch.pipeline().addLast(channelHandler.get());
}
}
ch.pipeline().addLast(new ChunkedWriteHandler()).addLast(handler.getName(), handler).addLast(new PipelineErrorHandler(log, responseHeaders));
}
};
NioEventLoopGroup bossGroup = new NioEventLoopGroup(1, new ExecutorThreadFactory("flink-rest-server-netty-boss"));
NioEventLoopGroup workerGroup = new NioEventLoopGroup(0, new ExecutorThreadFactory("flink-rest-server-netty-worker"));
bootstrap = new ServerBootstrap();
bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).childHandler(initializer);
Iterator<Integer> portsIterator;
try {
portsIterator = NetUtils.getPortRangeFromString(restBindPortRange);
} catch (IllegalConfigurationException e) {
throw e;
} catch (Exception e) {
throw new IllegalArgumentException("Invalid port range definition: " + restBindPortRange);
}
int chosenPort = 0;
while (portsIterator.hasNext()) {
try {
chosenPort = portsIterator.next();
final ChannelFuture v14;
if (restBindAddress == null) {
v14 = bootstrap.bind(chosenPort);
} else {
v14 = bootstrap.bind(restBindAddress, chosenPort);
}
serverChannel = v14.syncUninterruptibly().channel();
break;
} catch (final Exception e) {
// syncUninterruptibly() throws checked exceptions via Unsafe
// continue if the exception is due to the port being in use, fail early
// otherwise
if (!(e instanceof BindException)) {
throw e;
}
}
}
if (serverChannel == null) {
throw new BindException("Could not start rest endpoint on any port in port range " + restBindPortRange);
}
log.debug("Binding rest endpoint to {}:{}.", restBindAddress, chosenPort);
final InetSocketAddress bindAddress = ((InetSocketAddress) (serverChannel.localAddress()));
final String advertisedAddress;
if (bindAddress.getAddress().isAnyLocalAddress()) {
advertisedAddress = this.restAddress;
} else {
advertisedAddress = bindAddress.getAddress().getHostAddress();
}
port = bindAddress.getPort();
log.info("Rest endpoint listening at {}:{}", advertisedAddress, port);
restBaseUrl = new URL(determineProtocol(), advertisedAddress, port, "").toString();
restAddressFuture.complete(restBaseUrl);
state = State.RUNNING;
startInternal();
}
}
| 3.26 |
flink_RestServerEndpoint_getServerAddress_rdh
|
/**
* Returns the address on which this endpoint is accepting requests.
*
* @return address on which this endpoint is accepting requests or null if none
*/
@Nullable
public InetSocketAddress getServerAddress() {
synchronized(lock) {
assertRestServerHasBeenStarted();
Channel server = this.serverChannel;
if (server != null) {
try {
return ((InetSocketAddress) (server.localAddress()));
} catch (Exception e) {
log.error("Cannot access local server address", e);
}
}
return null;
}
}
| 3.26 |
flink_RestServerEndpoint_checkAndCreateUploadDir_rdh
|
/**
* Checks whether the given directory exists and is writable. If it doesn't exist, this method
* will attempt to create it.
*
* @param uploadDir
* directory to check
* @param log
* logger used for logging output
* @throws IOException
* if the directory does not exist and cannot be created, or if the
* directory isn't writable
*/
private static synchronized void checkAndCreateUploadDir(final Path uploadDir, final Logger log) throws IOException {
if (Files.exists(uploadDir) && Files.isWritable(uploadDir)) {
log.info("Using directory {} for file uploads.", uploadDir);
} else if (Files.isWritable(Files.createDirectories(uploadDir))) {
log.info("Created directory {} for file uploads.", uploadDir);
} else {
log.warn("Upload directory {} cannot be created or is not writable.", uploadDir);
throw new IOException(String.format("Upload directory %s cannot be created or is not writable.", uploadDir));
}
}
| 3.26 |
flink_GroupReduceDriver_setup_rdh
|
// ------------------------------------------------------------------------
@Override
public void setup(TaskContext<GroupReduceFunction<IT, OT>, OT> context) {
this.taskContext = context;
this.running = true;
}
| 3.26 |
flink_ArrowSerializer_createArrowWriter_rdh
|
/**
* Creates an {@link ArrowWriter}.
*/
public ArrowWriter<RowData> createArrowWriter() {
return ArrowUtils.createRowDataArrowWriter(rootWriter, inputType);
}
| 3.26 |
flink_ArrowSerializer_finishCurrentBatch_rdh
|
/**
* Forces to finish the processing of the current batch of elements. It will serialize the batch
* of elements into one arrow batch.
*/
public void finishCurrentBatch() throws Exception {
arrowWriter.finish();
arrowStreamWriter.writeBatch();
arrowWriter.reset();
}
| 3.26 |
flink_IntervalJoinOperator_sideOutput_rdh
|
/**
* Write skipped late arriving element to SideOutput.
*/
protected <T> void sideOutput(T value, long timestamp, boolean isLeft) {
if (isLeft) {
if (leftLateDataOutputTag != null) {
output.collect(leftLateDataOutputTag, new StreamRecord<>(((T1) (value)), timestamp));
}
} else if (rightLateDataOutputTag != null) {
output.collect(rightLateDataOutputTag, new StreamRecord<>(((T2) (value)), timestamp));
}
}
| 3.26 |
flink_IntervalJoinOperator_processElement1_rdh
|
/**
* Process a {@link StreamRecord} from the left stream. Whenever an {@link StreamRecord} arrives
* at the left stream, it will get added to the left buffer. Possible join candidates for that
* element will be looked up from the right buffer and if the pair lies within the user defined
* boundaries, it gets passed to the {@link ProcessJoinFunction}.
*
* @param record
* An incoming record to be joined
* @throws Exception
* Can throw an Exception during state access
*/
@Override
public void processElement1(StreamRecord<T1> record) throws Exception {
processElement(record, leftBuffer, rightBuffer, f0, upperBound, true);
}
/**
* Process a {@link StreamRecord} from the right stream. Whenever a {@link StreamRecord} arrives
* at the right stream, it will get added to the right buffer. Possible join candidates for that
* element will be looked up from the left buffer and if the pair lies within the user defined
* boundaries, it gets passed to the {@link ProcessJoinFunction}
| 3.26 |
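A hedged DataStream API sketch of the interval join that feeds `processElement1`/`processElement2`; the `Order`/`Payment` types and the two streams are assumptions:

import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

// Join left elements with right elements whose timestamps lie in [left.ts - 5s, left.ts + 5s].
orders.keyBy(o -> o.userId)
        .intervalJoin(payments.keyBy(p -> p.userId))
        .between(Time.seconds(-5), Time.seconds(5))
        .process(new ProcessJoinFunction<Order, Payment, String>() {
            @Override
            public void processElement(Order left, Payment right, Context ctx, Collector<String> out) {
                out.collect(left.id + " matched " + right.id);
            }
        });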
flink_DefaultFailureEnricherContext_forGlobalFailure_rdh
|
/**
* Factory method returning a Global failure Context for the given params.
*/
public static Context forGlobalFailure(JobID jobID, String jobName, MetricGroup metricGroup, Executor ioExecutor, ClassLoader classLoader) {
return new DefaultFailureEnricherContext(jobID, jobName, metricGroup, FailureType.GLOBAL, ioExecutor, classLoader);
}
| 3.26 |
flink_DefaultFailureEnricherContext_forTaskManagerFailure_rdh
|
/**
* Factory method returning a TaskManager failure Context for the given params.
*/
public static Context forTaskManagerFailure(JobID jobID, String jobName, MetricGroup metricGroup, Executor ioExecutor, ClassLoader classLoader) {
return new DefaultFailureEnricherContext(jobID, jobName, metricGroup, FailureType.TASK_MANAGER, ioExecutor, classLoader);
}
| 3.26 |
flink_VoidNamespaceSerializer_snapshotConfiguration_rdh
|
// -----------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<VoidNamespace> snapshotConfiguration() {
return new VoidNamespaceSerializerSnapshot();
}
| 3.26 |
flink_AbstractInvokable_getEnvironment_rdh
|
// ------------------------------------------------------------------------
// Access to Environment and Configuration
// ------------------------------------------------------------------------
/**
* Returns the environment of this task.
*
* @return The environment of this task.
*/
public final Environment getEnvironment() {
return this.environment;
}
| 3.26 |
flink_AbstractInvokable_triggerCheckpointAsync_rdh
|
// ------------------------------------------------------------------------
// Checkpointing Methods
// ------------------------------------------------------------------------
@Override
public CompletableFuture<Boolean> triggerCheckpointAsync(CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions) {
throw new UnsupportedOperationException(String.format("triggerCheckpointAsync not supported by %s", this.getClass().getName()));
}
| 3.26 |
flink_AbstractInvokable_getIndexInSubtaskGroup_rdh
|
/**
* Returns the index of this subtask in the subtask group.
*
* @return the index of this subtask in the subtask group
*/
public int getIndexInSubtaskGroup() {
return this.environment.getTaskInfo().getIndexOfThisSubtask();
}
| 3.26 |
flink_AbstractInvokable_getJobConfiguration_rdh
|
/**
* Returns the job configuration object which was attached to the original {@link org.apache.flink.runtime.jobgraph.JobGraph}.
*
* @return the job configuration object which was attached to the original {@link org.apache.flink.runtime.jobgraph.JobGraph}
*/
public Configuration getJobConfiguration() {
return this.environment.getJobConfiguration();
}
| 3.26 |
flink_AbstractInvokable_getTaskConfiguration_rdh
|
/**
* Returns the task configuration object which was attached to the original {@link org.apache.flink.runtime.jobgraph.JobVertex}.
*
* @return the task configuration object which was attached to the original {@link org.apache.flink.runtime.jobgraph.JobVertex}
*/
public final Configuration getTaskConfiguration() {
return this.environment.getTaskConfiguration();
}
| 3.26 |
flink_InputGateSpecUtils_getExclusiveBuffersPerChannel_rdh
|
/**
* Since at least one floating buffer is required, the number of required buffers is reduced by
* 1, and then the average number of buffers per channel is calculated. The minimum of the two
* values is returned to ensure that the number of required buffers per gate does not exceed the
* given requiredBuffersPerGate.
*/
private static int getExclusiveBuffersPerChannel(int configuredNetworkBuffersPerChannel, int numInputChannels, int requiredBuffersPerGate) {
checkArgument(numInputChannels > 0, "Must be positive.");
checkArgument(requiredBuffersPerGate >= 1, "Require at least 1 buffer per gate.");
return Math.min(configuredNetworkBuffersPerChannel, (requiredBuffersPerGate - 1) / numInputChannels);
}
| 3.26 |
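A small worked example of the formula above, with illustrative values:

int configuredNetworkBuffersPerChannel = 2;
int numInputChannels = 4;
int requiredBuffersPerGate = 9;
// One buffer stays floating, the rest is averaged over the channels:
// (9 - 1) / 4 = 2, so min(2, 2) == 2 exclusive buffers per channel.
int exclusive = Math.min(configuredNetworkBuffersPerChannel, (requiredBuffersPerGate - 1) / numInputChannels);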
flink_SqlGatewayEndpointFactoryUtils_createEndpointFactoryHelper_rdh
|
/**
* Creates a utility that helps to validate options for a {@link SqlGatewayEndpointFactory}.
*
* <p>Note: This utility checks for left-over options in the final step.
*/
public static EndpointFactoryHelper createEndpointFactoryHelper(SqlGatewayEndpointFactory endpointFactory, SqlGatewayEndpointFactory.Context context) {
return new EndpointFactoryHelper(endpointFactory, context.getEndpointOptions());
}
// ----------------------------------------------------------------------------------------
/**
* Helper utility for validating all options for a {@link SqlGatewayEndpointFactory}
| 3.26 |
flink_BlobUtils_createBlobStoreFromConfig_rdh
|
/**
* Creates a BlobStore based on the parameters set in the configuration.
*
* @param config
* configuration to use
* @return a (distributed) blob store for high availability
* @throws IOException
* thrown if the (distributed) file storage cannot be created
*/
public static BlobStoreService createBlobStoreFromConfig(Configuration config) throws IOException {
if (HighAvailabilityMode.isHighAvailabilityModeActivated(config)) {
return createFileSystemBlobStore(config);
} else {
return new VoidBlobStore();
}
}
| 3.26 |
flink_BlobUtils_moveTempFileToStore_rdh
|
/**
* Moves the temporary <tt>incomingFile</tt> to its permanent location where it is available for
* use (not thread-safe!).
*
* @param incomingFile
* temporary file created during transfer
* @param jobId
* ID of the job this blob belongs to or <tt>null</tt> if job-unrelated
* @param blobKey
* BLOB key identifying the file
* @param storageFile
* (local) file where the blob is/should be stored
* @param log
* logger for debug information
* @param blobStore
* HA store (or <tt>null</tt> if unavailable)
* @throws IOException
* thrown if an I/O error occurs while moving the file or uploading it to
* the HA store
*/
static void moveTempFileToStore(File incomingFile, @Nullable JobID jobId, BlobKey blobKey, File storageFile, Logger log, @Nullable BlobStore blobStore) throws IOException {
internalMoveTempFileToStore(incomingFile, jobId, blobKey, storageFile, log, blobStore, (source, target) -> Files.move(source.toPath(), target.toPath()));
}
| 3.26 |
flink_BlobUtils_getStorageLocationPath_rdh
|
/**
* Returns the path for the given blob key.
*
* <p>The returned path can be used with the (local or HA) BLOB store file system back-end for
* recovery purposes and follows the same scheme as {@link #getStorageLocation(File, JobID,
* BlobKey)}.
*
* @param storageDir
* storage directory used by the BLOB service
* @param key
* the key identifying the BLOB
* @param jobId
* ID of the job for the incoming files
* @return the path to the given BLOB
*/
static String getStorageLocationPath(String storageDir, @Nullable JobID jobId, BlobKey key) {
if (jobId == null) {
// format: $base/no_job/blob_$key
return String.format("%s/%s/%s%s", storageDir, NO_JOB_DIR_PREFIX, BLOB_FILE_PREFIX, key.toString());
} else {
// format: $base/job_$jobId/blob_$key
return String.format("%s/%s%s/%s%s", storageDir, JOB_DIR_PREFIX, jobId.toString(), BLOB_FILE_PREFIX, key.toString());
}
}
| 3.26 |
flink_BlobUtils_writeLength_rdh
|
/**
* Auxiliary method to write the length of an upcoming data chunk to an output stream.
*
* @param length
* the length of the upcoming data chunk in bytes
* @param outputStream
* the output stream to write the length to
* @throws IOException
* thrown if an I/O error occurs while writing to the output stream
*/
static void writeLength(int length, OutputStream outputStream) throws IOException {
byte[] buf = new byte[4];
buf[0] = ((byte) (length & 0xff));
buf[1] = ((byte) ((length >> 8) & 0xff));
buf[2] = ((byte) ((length >> 16) & 0xff));
buf[3] = ((byte) ((length >> 24) & 0xff));
outputStream.write(buf, 0, 4);
}
| 3.26 |
flink_BlobUtils_readFully_rdh
|
/**
* Auxiliary method to read a particular number of bytes from an input stream. This method
* blocks until the requested number of bytes have been read from the stream. If the stream
* cannot offer enough data, an {@link EOFException} is thrown.
*
* @param inputStream
* The input stream to read the data from.
* @param buf
* The buffer to store the read data.
* @param off
* The offset inside the buffer.
* @param len
* The number of bytes to read from the stream.
* @param type
* The name of the type, to throw a good error message in case of not enough data.
* @throws IOException
* Thrown if I/O error occurs while reading from the stream or the stream
* cannot offer enough data.
*/
static void readFully(InputStream inputStream, byte[] buf, int off, int len, String type) throws IOException {
int bytesRead = 0;
while (bytesRead < len) {
final int read = inputStream.read(buf, off + bytesRead, len - bytesRead);
if (read < 0) {
throw new EOFException("Received an incomplete " + type);
}
bytesRead += read;
}
}
| 3.26 |
flink_BlobUtils_createBlobServer_rdh
|
/**
* Creates the {@link BlobServer} from the given configuration, fallback storage directory and
* blob store.
*
* @param configuration
* for the BlobServer
* @param fallbackStorageDirectory
* fallback storage directory that is used if no other directory
* has been explicitly configured
* @param blobStore
* blob store to use for this blob server
* @return new blob server instance
* @throws IOException
* if we could not create the blob storage directory
*/
public static BlobServer createBlobServer(Configuration configuration, Reference<File> fallbackStorageDirectory, BlobStore blobStore) throws IOException {
final Reference<File> storageDirectory = createBlobStorageDirectory(configuration, fallbackStorageDirectory);
return new BlobServer(configuration, storageDirectory, blobStore);
}
| 3.26 |
flink_BlobUtils_readExceptionFromStream_rdh
|
/**
* Reads exception from given {@link InputStream}.
*
* @param in
* the input stream to read from
* @return exception that was read
* @throws IOException
* thrown if an I/O error occurs while reading from the input stream
*/
static Throwable readExceptionFromStream(InputStream in) throws IOException {
int len = readLength(in);
byte[] bytes = new byte[len];
readFully(in, bytes, 0, len, "Error message");
try {
return ((Throwable) (InstantiationUtil.deserializeObject(bytes, ClassLoader.getSystemClassLoader())));
} catch (ClassNotFoundException e) {
// should never occur
throw new IOException("Could not transfer error message", e);
}
}
| 3.26 |
flink_BlobUtils_createBlobCacheService_rdh
|
/**
* Creates the {@link BlobCacheService} from the given configuration, fallback storage
* directory, blob view and blob server address.
*
* @param configuration
* for the BlobCacheService
* @param fallbackStorageDirectory
* fallback storage directory
* @param blobView
* blob view
* @param serverAddress
* blob server address
* @return new blob cache service instance
* @throws IOException
* if we could not create the blob storage directory
*/
public static BlobCacheService createBlobCacheService(Configuration configuration, Reference<File> fallbackStorageDirectory, BlobView blobView, @Nullable InetSocketAddress serverAddress) throws IOException {
final Reference<File> storageDirectory = createBlobStorageDirectory(configuration, fallbackStorageDirectory);
return new BlobCacheService(configuration, storageDirectory, blobView, serverAddress);
}
| 3.26 |
flink_BlobUtils_createMessageDigest_rdh
|
/**
* Creates a new instance of the message digest to use for the BLOB key computation.
*
* @return a new instance of the message digest to use for the BLOB key computation
*/
static MessageDigest createMessageDigest() {
try {
return MessageDigest.getInstance(HASHING_ALGORITHM);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException("Cannot instantiate the message digest algorithm " + HASHING_ALGORITHM, e);
}
}
| 3.26 |
flink_BlobUtils_readLength_rdh
|
/**
* Auxiliary method to read the length of an upcoming data chunk from an input stream.
*
* @param inputStream
* the input stream to read the length from
* @return the length of the upcoming data chunk in bytes
* @throws IOException
* thrown if an I/O error occurs while reading from the input stream
*/
static int readLength(InputStream inputStream) throws IOException {
byte[] buf = new byte[4];
int bytesRead = 0;
while (bytesRead < 4) {
final int read = inputStream.read(buf, bytesRead, 4 - bytesRead);
if (read < 0) {
throw new EOFException("Read an incomplete length");
}
bytesRead += read;
}
bytesRead = buf[0] & 0xff;
bytesRead |= (buf[1] & 0xff) << 8;
bytesRead |= (buf[2] & 0xff) << 16;
bytesRead |= (buf[3] & 0xff) << 24;
return bytesRead;
}
| 3.26 |
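A standalone round-trip sketch of the 4-byte little-endian length framing implemented by `writeLength` and `readLength` above (class name is illustrative):

import java.io.ByteArrayInputStream;
import java.io.IOException;

public class LengthFramingSketch {
    public static void main(String[] args) throws IOException {
        int length = 0x12345678;
        // encode: least significant byte first, as in writeLength
        byte[] buf = {
            (byte) (length & 0xff),
            (byte) ((length >> 8) & 0xff),
            (byte) ((length >> 16) & 0xff),
            (byte) ((length >> 24) & 0xff)
        };
        // decode: reassemble the four bytes, as in readLength
        ByteArrayInputStream in = new ByteArrayInputStream(buf);
        int decoded = in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24);
        System.out.println(decoded == length); // true
    }
}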
flink_BlobUtils_getIncomingDirectory_rdh
|
/**
* Returns the BLOB service's directory for incoming (job-unrelated) files. The directory is
* created if it does not exist yet.
*
* @param storageDir
* storage directory used by the BLOB service
* @return the BLOB service's directory for incoming files
* @throws IOException
* if creating the directory fails
*/
static File getIncomingDirectory(File storageDir) throws IOException {
final File incomingDir = new File(storageDir, "incoming");
Files.createDirectories(incomingDir.toPath());
return incomingDir;
}
| 3.26 |
flink_TimeWindowUtil_getShiftTimeZone_rdh
|
/**
* Gets the shifted timezone of the window if the time attribute type is TIMESTAMP_LTZ; always
* returns the UTC timezone if the time attribute type is TIMESTAMP, which means no shifting.
*/
public static ZoneId getShiftTimeZone(LogicalType timeAttributeType, ZoneId zoneFromConfig) {
boolean needShiftTimeZone = timeAttributeType instanceof LocalZonedTimestampType;
return needShiftTimeZone ? zoneFromConfig : UTC_ZONE_ID;
}
| 3.26 |
flink_TimeWindowUtil_getNextTriggerWatermark_rdh
|
/**
* Method to get the next watermark to trigger window.
*/
public static long getNextTriggerWatermark(long currentWatermark, long interval, ZoneId shiftTimezone, boolean useDayLightSaving) {
if (currentWatermark == Long.MAX_VALUE) {
return currentWatermark;
}
long triggerWatermark;
// consider the DST timezone
if (useDayLightSaving) {
long utcWindowStart = getWindowStartWithOffset(toUtcTimestampMills(currentWatermark, shiftTimezone), 0L, interval);
triggerWatermark = toEpochMillsForTimer((utcWindowStart + interval) - 1, shiftTimezone);
} else {
long start = getWindowStartWithOffset(currentWatermark, 0L, interval);
triggerWatermark = (start + interval) - 1;
}
if (triggerWatermark > currentWatermark) {
return triggerWatermark;
} else {
return triggerWatermark + interval;
}
}
| 3.26 |
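A worked example of the non-DST branch, assuming UTC and offset 0 (for non-negative timestamps the window start reduces to a modulo):

long currentWatermark = 12_345L;
long interval = 5_000L;
long start = currentWatermark - (currentWatermark % interval); // 10_000
long triggerWatermark = start + interval - 1;                  // 14_999
long next = triggerWatermark > currentWatermark ? triggerWatermark : triggerWatermark + interval; // 14_999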
flink_TimeWindowUtil_toUtcTimestampMills_rdh
|
/**
* Converts epoch mills to timestamp mills which can describe a local date time.
*
* <p>For example: The timestamp string of epoch mills 5 in GMT+08:00 is 1970-01-01 08:00:05,
* the timestamp mills is 8 * 60 * 60 * 1000 + 5.
*
* @param epochMills
* the epoch mills.
* @param shiftTimeZone
* the timezone that the given timestamp mills has been shifted.
* @return the mills which can describe the local timestamp string in given timezone.
*/
public static long toUtcTimestampMills(long epochMills, ZoneId shiftTimeZone) {
// Long.MAX_VALUE is a flag of max watermark, directly return it
if (UTC_ZONE_ID.equals(shiftTimeZone) || (Long.MAX_VALUE == epochMills)) {
return epochMills;
}
LocalDateTime localDateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(epochMills), shiftTimeZone);
return localDateTime.atZone(UTC_ZONE_ID).toInstant().toEpochMilli();
}
| 3.26 |
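Recomputing the Javadoc example with plain `java.time` calls (the same conversion the method performs when the zone is not UTC):

import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;

// Epoch milli 5 in GMT+08:00 is the local time 1970-01-01T08:00:00.005;
// re-anchoring that local time in UTC yields 8 * 60 * 60 * 1000 + 5 = 28_800_005.
ZoneId shiftTimeZone = ZoneId.of("GMT+08:00");
LocalDateTime local = LocalDateTime.ofInstant(Instant.ofEpochMilli(5L), shiftTimeZone);
long utcTimestampMills = local.atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); // 28_800_005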
flink_TimeWindowUtil_toEpochMillsForTimer_rdh
|
/**
* Get a timer time according to the timestamp mills and the given shift timezone.
*
* @param utcTimestampMills
* the timestamp mills.
* @param shiftTimeZone
* the timezone that the given timestamp mills has been shifted.
* @return the epoch mills.
*/
public static long toEpochMillsForTimer(long utcTimestampMills, ZoneId shiftTimeZone) {
// Long.MAX_VALUE is a flag of max watermark, directly return it
if (UTC_ZONE_ID.equals(shiftTimeZone) || (Long.MAX_VALUE == utcTimestampMills)) {
return utcTimestampMills;
}
if (TimeZone.getTimeZone(shiftTimeZone).useDaylightTime()) {
/* return the first skipped epoch mills as timer time if the time is entering DST.
eg. Los_Angeles has no timestamp 2021-03-14 02:00:00 when entering DST.
<pre>
2021-03-14 00:00:00 -> epoch1 = 1615708800000L;
2021-03-14 01:00:00 -> epoch2 = 1615712400000L;
2021-03-14 03:00:00 -> epoch3 = 1615716000000L; skip one hour (2021-03-14 02:00:00)
2021-03-14 04:00:00 -> epoch4 = 1615719600000L;
we should use the epoch3 to register timer for window that end with
[2021-03-14 02:00:00, 2021-03-14 03:00:00] to ensure the window can be fired
immediately once the window passed.
<pre>
2021-03-14 00:00:00 -> epoch0 = 1615708800000L;
2021-03-14 01:00:00 -> epoch1 = 1615712400000L;
2021-03-14 02:00:00 -> epoch3 = 1615716000000L; register 1615716000000L(epoch3)
2021-03-14 02:59:59 -> epoch3 = 1615719599000L; register 1615716000000L(epoch3)
2021-03-14 03:00:00 -> epoch3 = 1615716000000L;
*/
/* return the larger epoch mills as timer time if the time is leaving the DST.
eg. Los_Angeles has two timestamp 2021-11-07 01:00:00 when leaving DST.
<pre>
2021-11-07 00:00:00 -> epoch0 = 1636268400000L; 2021-11-07 00:00:00
2021-11-07 01:00:00 -> epoch1 = 1636272000000L; the first local timestamp 2021-11-07 01:00:00
2021-11-07 01:00:00 -> epoch2 = 1636275600000L; back to local timestamp 2021-11-07 01:00:00
2021-11-07 02:00:00 -> epoch3 = 1636279200000L; 2021-11-07 02:00:00
we should use the epoch1 + 1 hour to register timer to ensure the two hours' data can
be fired properly.
<pre>
2021-11-07 00:00:00 -> epoch0 = 1636268400000L;
2021-11-07 01:00:00 -> epoch1 = 1636272000000L; register 1636275600000L(epoch2)
2021-11-07 02:00:00 -> epoch3 = 1636279200000L;
*/
LocalDateTime utcTimestamp = LocalDateTime.ofInstant(Instant.ofEpochMilli(utcTimestampMills), UTC_ZONE_ID);
long t1 = utcTimestamp.atZone(shiftTimeZone).toInstant().toEpochMilli();
long v3 = utcTimestamp.plusSeconds(SECONDS_PER_HOUR).atZone(shiftTimeZone).toInstant().toEpochMilli();
boolean hasNoEpoch = t1 == v3;
boolean hasTwoEpochs = (v3 - t1) > MILLS_PER_HOUR;
if (hasNoEpoch) {
return t1 - (t1 % MILLS_PER_HOUR);
} else if (hasTwoEpochs) {
return t1 + MILLS_PER_HOUR;
} else {
return t1;
}
}
LocalDateTime v6 = LocalDateTime.ofInstant(Instant.ofEpochMilli(utcTimestampMills), UTC_ZONE_ID);
return v6.atZone(shiftTimeZone).toInstant().toEpochMilli();
}
| 3.26 |
flink_TimeWindowUtil_isWindowFired_rdh
|
/**
* Returns whether the window should be fired on the current progress.
*
* @param windowEnd
* the end of the time window.
* @param currentProgress
* current progress of the window operator, it is processing time under
* proctime, it is watermark value under rowtime.
* @param shiftTimeZone
* the shifted timezone of the time window.
*/
public static boolean isWindowFired(long windowEnd, long currentProgress, ZoneId shiftTimeZone) {
// Long.MAX_VALUE is a flag of min window end, directly return false
if (windowEnd == Long.MAX_VALUE) {
return false;
}
long windowTriggerTime = toEpochMillsForTimer(windowEnd - 1, shiftTimeZone);
return currentProgress >= windowTriggerTime;
}
| 3.26 |
flink_TimeWindowUtil_toEpochMills_rdh
|
/**
* Converts timestamp mills with the given timezone to epoch mills.
*
* @param utcTimestampMills
* the timestamp mills.
* @param shiftTimeZone
* the timezone that the given timestamp mills has been shifted.
* @return the epoch mills.
*/
public static long toEpochMills(long utcTimestampMills, ZoneId shiftTimeZone) {
// Long.MAX_VALUE is a flag of max watermark, directly return it
if (UTC_ZONE_ID.equals(shiftTimeZone) || (Long.MAX_VALUE == utcTimestampMills)) {
return utcTimestampMills;
}
LocalDateTime utcTimestamp = LocalDateTime.ofInstant(Instant.ofEpochMilli(utcTimestampMills), UTC_ZONE_ID);
return utcTimestamp.atZone(shiftTimeZone).toInstant().toEpochMilli();
}
| 3.26 |
flink_WatermarkStrategy_withWatermarkAlignment_rdh
|
/**
* Creates a new {@link WatermarkStrategy} that configures the maximum watermark drift from
* other sources/tasks/partitions in the same watermark group. The group may contain completely
* independent sources (e.g. File and Kafka).
*
* <p>Once configured Flink will "pause" consuming from a source/task/partition that is ahead of
* the emitted watermark in the group by more than the maxAllowedWatermarkDrift.
*
* @param watermarkGroup
* A group of sources to align watermarks
* @param maxAllowedWatermarkDrift
* Maximal drift, before we pause consuming from the
* source/task/partition
* @param updateInterval
* How often tasks should notify coordinator about the current watermark
* and how often the coordinator should announce the maximal aligned watermark.
*/
@PublicEvolving
default WatermarkStrategy<T> withWatermarkAlignment(String watermarkGroup, Duration maxAllowedWatermarkDrift, Duration updateInterval) {
return new WatermarksWithWatermarkAlignment<T>(this, watermarkGroup, maxAllowedWatermarkDrift, updateInterval);
}
| 3.26 |
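A hedged usage sketch of watermark alignment; the `OrderEvent` type and the group name are assumptions:

import java.time.Duration;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;

// Pause any source/task/partition that runs more than 20 seconds ahead of the
// group's watermark; the aligned watermark is announced every second.
WatermarkStrategy<OrderEvent> strategy = WatermarkStrategy
        .<OrderEvent>forBoundedOutOfOrderness(Duration.ofSeconds(5))
        .withWatermarkAlignment("orders-group", Duration.ofSeconds(20), Duration.ofSeconds(1));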
flink_WatermarkStrategy_createTimestampAssigner_rdh
|
/**
* Instantiates a {@link TimestampAssigner} for assigning timestamps according to this strategy.
*/
@Override
default TimestampAssigner<T> createTimestampAssigner(TimestampAssignerSupplier.Context context) {
// By default, this is {@link RecordTimestampAssigner},
// for cases where records come out of a source with valid timestamps, for example from
// Kafka.
return new RecordTimestampAssigner<>();
}
| 3.26 |
flink_WatermarkStrategy_forGenerator_rdh
|
/**
* Creates a watermark strategy based on an existing {@link WatermarkGeneratorSupplier}.
*/
static <T> WatermarkStrategy<T> forGenerator(WatermarkGeneratorSupplier<T> generatorSupplier) {
return generatorSupplier::createWatermarkGenerator;
}
| 3.26 |
flink_WatermarkStrategy_forBoundedOutOfOrderness_rdh
|
/**
* Creates a watermark strategy for situations where records are out of order, but you can place
* an upper bound on how far the events are out of order. An out-of-order bound B means that
* once an event with timestamp T was encountered, no events older than {@code T - B} will
* follow any more.
*
* <p>The watermarks are generated periodically. The delay introduced by this watermark strategy
* is the periodic interval length, plus the out of orderness bound.
*
* @see BoundedOutOfOrdernessWatermarks
*/
static <T> WatermarkStrategy<T> forBoundedOutOfOrderness(Duration maxOutOfOrderness) {
return ctx -> new BoundedOutOfOrdernessWatermarks<>(maxOutOfOrderness);
}
| 3.26 |
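A hedged end-to-end sketch of the bounded-out-of-orderness strategy combined with the builder methods shown in the neighbouring snippets; `readings` and `SensorReading` are assumptions:

import java.time.Duration;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.DataStream;

// Tolerate events up to 10 seconds out of order, read the event time from a field,
// and mark partitions idle after one minute without data.
DataStream<SensorReading> withTimestamps = readings.assignTimestampsAndWatermarks(
        WatermarkStrategy.<SensorReading>forBoundedOutOfOrderness(Duration.ofSeconds(10))
                .withTimestampAssigner((event, previousTimestamp) -> event.timestampMillis)
                .withIdleness(Duration.ofMinutes(1)));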
flink_WatermarkStrategy_withTimestampAssigner_rdh
|
// ------------------------------------------------------------------------
// Builder methods for enriching a base WatermarkStrategy
// ------------------------------------------------------------------------
/**
* Creates a new {@code WatermarkStrategy} that wraps this strategy but instead uses the given
* {@link TimestampAssigner} (via a {@link TimestampAssignerSupplier}).
*
* <p>You can use this when a {@link TimestampAssigner} needs additional context, for example
* access to the metrics system.
*
* <pre>
* {@code WatermarkStrategy<Object> wmStrategy = WatermarkStrategy
* .forMonotonousTimestamps()
* .withTimestampAssigner((ctx) -> new MetricsReportingAssigner(ctx));}</pre>
*/
default WatermarkStrategy<T> withTimestampAssigner(TimestampAssignerSupplier<T> timestampAssigner) {
checkNotNull(timestampAssigner, "timestampAssigner");
return new WatermarkStrategyWithTimestampAssigner<>(this, timestampAssigner);
}
/**
* Creates a new {@code WatermarkStrategy} that wraps this strategy but instead uses the given
* {@link SerializableTimestampAssigner}.
*
* <p>You can use this in case you want to specify a {@link TimestampAssigner} via a lambda
* function.
*
* <pre>
* {@code WatermarkStrategy<CustomObject> wmStrategy = WatermarkStrategy
* .<CustomObject>forMonotonousTimestamps()
* .withTimestampAssigner((event, timestamp) -> event.getTimestamp());}
| 3.26 |
flink_WatermarkStrategy_forMonotonousTimestamps_rdh
|
// ------------------------------------------------------------------------
// Convenience methods for common watermark strategies
// ------------------------------------------------------------------------
/**
* Creates a watermark strategy for situations with monotonously ascending timestamps.
*
* <p>The watermarks are generated periodically and tightly follow the latest timestamp in the
* data. The delay introduced by this strategy is mainly the periodic interval in which the
* watermarks are generated.
*
* @see AscendingTimestampsWatermarks
*/
static <T> WatermarkStrategy<T> forMonotonousTimestamps() {
return ctx -> new AscendingTimestampsWatermarks<>();
}
| 3.26 |
flink_WatermarkStrategy_withIdleness_rdh
|
/**
* Creates a new enriched {@link WatermarkStrategy} that also does idleness detection in the
* created {@link WatermarkGenerator}.
*
* <p>Add an idle timeout to the watermark strategy. If no records flow in a partition of a
* stream for that amount of time, then that partition is considered "idle" and will not hold
* back the progress of watermarks in downstream operators.
*
* <p>Idleness can be important if some partitions have little data and might not have events
* during some periods. Without idleness, these streams can stall the overall event time
* progress of the application.
*/
default WatermarkStrategy<T> withIdleness(Duration idleTimeout) {
checkNotNull(idleTimeout, "idleTimeout");
checkArgument(!(idleTimeout.isZero() || idleTimeout.isNegative()), "idleTimeout must be greater than zero");
return new WatermarkStrategyWithIdleness<>(this, idleTimeout);
}
/**
* Creates a new {@link WatermarkStrategy}
| 3.26 |
flink_WatermarkStrategy_noWatermarks_rdh
|
/**
* Creates a watermark strategy that generates no watermarks at all. This may be useful in
* scenarios that do pure processing-time based stream processing.
*/
static <T> WatermarkStrategy<T> noWatermarks() {
return ctx -> new NoWatermarksGenerator<>();
}
| 3.26 |
flink_DataStructureConverters_putConverter_rdh
|
// --------------------------------------------------------------------------------------------
// Helper methods
// --------------------------------------------------------------------------------------------
private static <E> void putConverter(LogicalTypeRoot root, Class<E> conversionClass, DataStructureConverterFactory factory) {
converters.put(new ConverterIdentifier<>(root, conversionClass), factory);
}
| 3.26 |
flink_DataStructureConverters_getConverter_rdh
|
/**
* Returns a converter for the given {@link DataType}.
*/
@SuppressWarnings("unchecked")
public static DataStructureConverter<Object, Object> getConverter(DataType dataType) {
// cast to Object for ease of use
return ((DataStructureConverter<Object, Object>) (getConverterInternal(dataType)));
}
| 3.26 |
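A hedged sketch of using the returned converter to move between the external `String` and the internal `StringData` representation of a `STRING()` type (method names are those I believe the converter interface exposes):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.data.conversion.DataStructureConverter;
import org.apache.flink.table.data.conversion.DataStructureConverters;

DataStructureConverter<Object, Object> converter =
        DataStructureConverters.getConverter(DataTypes.STRING());
converter.open(Thread.currentThread().getContextClassLoader()); // converters must be opened before use
Object internal = converter.toInternalOrNull("hello");          // internal StringData
Object external = converter.toExternalOrNull(internal);         // back to "hello"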
flink_ScriptProcessBuilder_getAbsolutePath_rdh
|
/**
* Returns the full path name of this file if it is listed in the path.
*/
public File getAbsolutePath(String filename) {
if (((pathenv == null) || (pathSep == null)) || (fileSep == null)) {
return null;
}
int val;
String classvalue = pathenv + pathSep;
while (((val = classvalue.indexOf(pathSep)) >= 0) && (classvalue.length() > 0)) {
//
// Extract each entry from the pathenv
//
String entry = classvalue.substring(0, val).trim();
File f = new File(entry);
try {
if (f.isDirectory()) {
//
// this entry in the pathenv is a directory.
// see if the required file is in this directory
//
f = new File((entry + fileSep) + filename);
}
//
// see if the filename matches and we can read it
//
if (f.isFile() && f.canRead()) {
return f;
}
} catch (Exception ignored) {
}
classvalue = classvalue.substring(val + 1).trim();
}
return null;
}
| 3.26 |
flink_ScriptProcessBuilder_addWrapper_rdh
|
/**
* Wrap the script in a wrapper that allows admins to control.
*/
private String[] addWrapper(String[] inArgs) {
String wrapper = HiveConf.getVar(jobConf, ConfVars.SCRIPTWRAPPER);
if (wrapper == null) {
return inArgs;
}
String[] v28 = splitArgs(wrapper);
int totallength = v28.length + inArgs.length;
String[] finalArgv = new String[totallength];
System.arraycopy(v28, 0, finalArgv, 0, v28.length);
System.arraycopy(inArgs, 0, finalArgv, v28.length, inArgs.length);
return finalArgv;
}
| 3.26 |
flink_ScriptProcessBuilder_addJobConfToEnvironment_rdh
|
/**
* addJobConfToEnvironment is mostly shamelessly copied from hadoop streaming. Added additional
* check on environment variable length
*/
void addJobConfToEnvironment(Configuration conf, Map<String, String> env) {
for (Map.Entry<String, String> en : conf) {
String v32 = en.getKey();
if (!blackListed(conf, v32)) {
// String value = (String)en.getValue(); // does not apply variable
// expansion
String v33 = conf.get(v32);// does variable expansion
v32 = safeEnvVarName(v32);
boolean truncate = conf.getBoolean(ConfVars.HIVESCRIPTTRUNCATEENV.toString(), false);
v33 = safeEnvVarValue(v33, v32, truncate);
env.put(v32, v33);
}
}
}
| 3.26 |
flink_ScriptProcessBuilder_prependPathComponent_rdh
|
/**
* Prepends the specified component to the path list.
*/
public void prependPathComponent(String str) {
pathenv = (str + pathSep) + pathenv;
}
| 3.26 |
flink_ScriptProcessBuilder_blackListed_rdh
|
/**
* Checks whether a given configuration name is blacklisted and should not be converted to an
* environment variable.
*/
private boolean blackListed(Configuration conf, String name) {
if (blackListedConfEntries == null) {
blackListedConfEntries = new HashSet<>();
if (conf != null) {
String bl = conf.get(ConfVars.HIVESCRIPT_ENV_BLACKLIST.toString(), ConfVars.HIVESCRIPT_ENV_BLACKLIST.getDefaultValue());
if ((bl != null) && (!bl.isEmpty())) {
String[] bls = bl.split(",");
Collections.addAll(blackListedConfEntries, bls);
}
}
}
return blackListedConfEntries.contains(name);
}
| 3.26 |
flink_ScriptProcessBuilder_splitArgs_rdh
|
// Code below shameless borrowed from Hadoop Streaming
private String[] splitArgs(String args) {
final int outSide = 1;
final int singLeq = 2;
final int doubleLeq = 3;
List<String> argList = new ArrayList<>();
char[] ch = args.toCharArray();
int clen = ch.length;
int state = outSide;
int argstart = 0;
for (int c = 0; c <= clen; c++) {
boolean last = c == clen;
int lastState = state;
boolean endToken = false;
if (!last) {
if (ch[c] == '\'') {
if (state == outSide) {
state = singLeq;
} else if (state == singLeq) {
state = outSide;
}
endToken = state != lastState;
} else if (ch[c] == '"') {
if (state == outSide) {
state = doubleLeq;
} else if (state == doubleLeq) {
state = outSide;
}
endToken = state != lastState;
} else if (ch[c] == ' ') {
if (state == outSide) {
endToken = true;
}
}
}
}
if (last || endToken) {
if (c != argstart) {
String a;
a = args.substring(argstart, c);
argList.add(a);
}
argstart = c + 1;
}
}
return argList.toArray(new String[0]);
}
| 3.26 |
flink_ResourceID_generate_rdh
|
/**
* Generate a random resource id.
*
* @return A random resource id.
*/
public static ResourceID generate() {
return new ResourceID(new AbstractID().toString());
}
| 3.26 |
flink_ResourceID_getResourceIdString_rdh
|
/**
* Gets the Resource Id as string.
*
* @return Stringified version of the ResourceID
*/
public final String getResourceIdString() {
return f0;
}
| 3.26 |
flink_FailureHandlingResult_getVerticesToRestart_rdh
|
/**
* Returns the tasks to restart.
*
* @return the tasks to restart
*/
public Set<ExecutionVertexID> getVerticesToRestart() {
if (canRestart()) {
return verticesToRestart;
} else {
throw new IllegalStateException("Cannot get vertices to restart when the restarting is suppressed.");
}
}
| 3.26 |
flink_FailureHandlingResult_isGlobalFailure_rdh
|
/**
* Checks if this failure was a global failure, i.e., coming from a "safety net" failover that
* involved all tasks and should reset also components like the coordinators.
*/
public boolean isGlobalFailure() {
return globalFailure;
}
| 3.26 |
flink_FailureHandlingResult_getRestartDelayMS_rdh
|
/**
* Returns the delay before the restarting.
*
* @return the delay before the restarting
*/
public long getRestartDelayMS() {
if (canRestart()) {
return restartDelayMS;
} else {
throw new IllegalStateException("Cannot get restart delay when the restarting is suppressed.");
}
}
/**
* Returns an {@code Optional} with the {@link Execution} causing this failure or an empty
* {@code Optional} if it's a global failure.
*
* @return The {@code Optional} with the failed {@code Execution} or an empty {@code Optional}
| 3.26 |
flink_FailureHandlingResult_unrecoverable_rdh
|
/**
* Creates a result that the failure is not recoverable and no restarting should be conducted.
*
* <p>The result can be flagged to be from a global failure triggered by the scheduler, rather
* than from the failure of an individual task.
*
* @param failedExecution
* the {@link Execution} that the failure is originating from. Passing
* {@code null} as a value indicates that the failure was issued by Flink itself.
* @param error
* reason why the failure is not recoverable
* @param timestamp
* The time of the failure.
* @param failureLabels
* Map of labels characterizing the failure produced by the
* FailureEnrichers.
* @return result indicating the failure is not recoverable
*/
public static FailureHandlingResult unrecoverable(@Nullable Execution failedExecution, @Nonnull Throwable error, long timestamp, CompletableFuture<Map<String, String>> failureLabels, boolean globalFailure) {
return new FailureHandlingResult(failedExecution, error, timestamp, failureLabels, globalFailure);
}
| 3.26 |
flink_FailureHandlingResult_getError_rdh
|
/**
* Returns reason why the restarting cannot be conducted.
*
* @return reason why the restarting cannot be conducted
*/
@Nullable
public Throwable getError() {
return error;
}
| 3.26 |
flink_FailureHandlingResult_restartable_rdh
|
/**
* Creates a result of a set of tasks to restart to recover from the failure.
*
* <p>The result can be flagged to be from a global failure triggered by the scheduler, rather
* than from the failure of an individual task.
*
* @param failedExecution
* the {@link Execution} that the failure is originating from. Passing
* {@code null} as a value indicates that the failure was issued by Flink itself.
* @param cause
* The reason of the failure.
* @param timestamp
* The time of the failure.
* @param failureLabels
* Map of labels characterizing the failure produced by the
* FailureEnrichers.
* @param verticesToRestart
* containing task vertices to restart to recover from the failure.
* {@code null} indicates that the failure is not restartable.
* @param restartDelayMS
* indicate a delay before conducting the restart
* @return result of a set of tasks to restart to recover from the failure
*/
public static FailureHandlingResult restartable(@Nullable Execution failedExecution, @Nullable Throwable cause, long timestamp, CompletableFuture<Map<String, String>> failureLabels, @Nullable Set<ExecutionVertexID> verticesToRestart, long restartDelayMS, boolean globalFailure) {
return new FailureHandlingResult(failedExecution, cause, timestamp, failureLabels, verticesToRestart, restartDelayMS, globalFailure);
}
| 3.26 |
flink_IteratorSourceEnumerator_start_rdh
|
// ------------------------------------------------------------------------
@Override
public void start() {}
| 3.26 |
flink_ScalaProductFieldAccessorFactory_load_rdh
|
/**
* Loads the implementation, if it is accessible.
*
* @param log
* Logger to be used in case the loading fails
* @return Loaded implementation, if it is accessible.
*/
static ScalaProductFieldAccessorFactory load(Logger log) {
try {
final Object factory = Class.forName("org.apache.flink.streaming.util.typeutils.DefaultScalaProductFieldAccessorFactory").getDeclaredConstructor().newInstance();
return ((ScalaProductFieldAccessorFactory) (factory));
} catch (Exception e) {
log.debug("Unable to load Scala API extension.", e);
return null;
}
}
| 3.26 |
flink_MemoryTierSubpartitionProducerAgent_addFinishedBuffer_rdh
|
// ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void addFinishedBuffer(NettyPayload nettyPayload) {
finishedBufferIndex++;
checkNotNull(nettyConnectionWriter).writeNettyPayload(nettyPayload);
if ((checkNotNull(nettyConnectionWriter).numQueuedPayloads() <= 1) || (checkNotNull(nettyConnectionWriter).numQueuedBufferPayloads() <= 1)) {
checkNotNull(nettyConnectionWriter).notifyAvailable();
}
}
| 3.26 |
flink_MemoryTierSubpartitionProducerAgent_connectionEstablished_rdh
|
// ------------------------------------------------------------------------
// Called by MemoryTierProducerAgent
// ------------------------------------------------------------------------
void connectionEstablished(NettyConnectionWriter nettyConnectionWriter) {
this.nettyConnectionWriter = nettyConnectionWriter;
}
| 3.26 |
flink_SqlReplaceTableAs_getFullConstraints_rdh
|
/**
* Returns the column constraints plus the table constraints.
*/
public List<SqlTableConstraint> getFullConstraints() {
return SqlConstraintValidator.getFullConstraints(tableConstraints, columnList);
}
| 3.26 |
flink_CEP_pattern_rdh
|
/**
* Creates a {@link PatternStream} from an input data stream and a pattern.
*
* @param input
* DataStream containing the input events
* @param pattern
* Pattern specification which shall be detected
* @param comparator
* Comparator to sort events with equal timestamps
* @param <T>
* Type of the input events
* @return Resulting pattern stream
*/
public static <T> PatternStream<T> pattern(DataStream<T> input, Pattern<T, ?> pattern, EventComparator<T> comparator) {
final PatternStream<T> stream = new PatternStream<>(input, pattern);
return stream.withComparator(comparator);
}
| 3.26 |
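A hedged CEP usage sketch; the `Event` type, its `getType()` accessor, and the `input` stream are assumptions:

import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

// Detect a "start" event directly followed by an "end" event.
Pattern<Event, ?> pattern = Pattern.<Event>begin("start")
        .where(new SimpleCondition<Event>() {
            @Override
            public boolean filter(Event e) {
                return "start".equals(e.getType());
            }
        })
        .next("end")
        .where(new SimpleCondition<Event>() {
            @Override
            public boolean filter(Event e) {
                return "end".equals(e.getType());
            }
        });

PatternStream<Event> patternStream = CEP.pattern(input, pattern);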
flink_FloatHashSet_add_rdh
|
/**
* See {@link Float#equals(Object)}.
*/
public boolean add(final float k) {
int intKey = Float.floatToIntBits(k);
if (intKey == 0) {
if (this.containsZero) {
return false;
}
this.containsZero = true;
} else {
float[] key = this.key;
int pos;
int curr;
if ((curr = Float.floatToIntBits(key[pos = MurmurHashUtil.fmix(intKey) & this.mask])) != 0) {
if (curr == intKey) {
return false;
}
while ((curr = Float.floatToIntBits(key[pos = (pos + 1) & this.mask])) != 0) {
if (curr == intKey) {
return false;
}
}
}
key[pos] = k;
}
if ((this.size++) >= this.maxFill) {
this.rehash(OptimizableHashSet.arraySize(this.size + 1, this.f));
}
return true;
}
| 3.26 |
flink_FloatHashSet_contains_rdh
|
/**
* See {@link Float#equals(Object)}.
*/
public boolean contains(final float k) {
int intKey = Float.floatToIntBits(k);
if (intKey == 0) {
return this.containsZero;
} else {
float[] key = this.key;
int curr;
int pos;
if ((curr = Float.floatToIntBits(key[pos = MurmurHashUtil.fmix(intKey) & this.mask])) == 0) {
return false;
} else if (intKey == curr) {
return true;
} else {
while ((curr = Float.floatToIntBits(key[pos = (pos + 1) & this.mask])) != 0) {
if (intKey == curr) {
return true;
}
}
return false;
}
}
}
| 3.26 |
flink_ChangelogTruncateHelper_materialized_rdh
|
/**
* Handle changelog materialization, potentially {@link #truncate() truncating} the changelog.
*
* @param upTo
* exclusive
*/
public void materialized(SequenceNumber upTo) {
materializedUpTo = upTo;
truncate();
}
| 3.26 |
flink_ChangelogTruncateHelper_checkpoint_rdh
|
/**
* Set the highest {@link SequenceNumber} of changelog used by the given checkpoint.
*
* @param lastUploadedTo
* exclusive
*/
public void checkpoint(long checkpointId, SequenceNumber lastUploadedTo) {
checkpointedUpTo.put(checkpointId, lastUploadedTo);
}
| 3.26 |
flink_ChangelogTruncateHelper_checkpointSubsumed_rdh
|
/**
* Handle checkpoint subsumption, potentially {@link #truncate() truncating} the changelog.
*/
public void checkpointSubsumed(long checkpointId) {
SequenceNumber sqn = checkpointedUpTo.get(checkpointId);
LOG.debug("checkpoint {} subsumed, max sqn: {}", checkpointId, sqn);
if (sqn != null) {
subsumedUpTo = sqn;
checkpointedUpTo.headMap(checkpointId, true).clear();
truncate();
}
}
| 3.26 |
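Taken together, checkpoint() records the highest changelog sequence number used by each checkpoint, materialized() advances the materialization watermark, and checkpointSubsumed() advances the subsumption watermark; only changelog below both watermarks can be discarded. A simplified, hypothetical mirror of this bookkeeping, using plain long sequence numbers instead of Flink's SequenceNumber type:

import java.util.TreeMap;

// Hypothetical, simplified mirror of the truncation bookkeeping in ChangelogTruncateHelper.
class TruncateWatermarks {
    private final TreeMap<Long, Long> checkpointedUpTo = new TreeMap<>();
    private long materializedUpTo = -1;
    private long subsumedUpTo = -1;

    void checkpoint(long checkpointId, long lastUploadedTo) {
        checkpointedUpTo.put(checkpointId, lastUploadedTo);
    }

    void materialized(long upTo) {
        materializedUpTo = upTo;
        truncate();
    }

    void checkpointSubsumed(long checkpointId) {
        Long sqn = checkpointedUpTo.get(checkpointId);
        if (sqn != null) {
            subsumedUpTo = sqn;
            checkpointedUpTo.headMap(checkpointId, true).clear();
            truncate();
        }
    }

    private void truncate() {
        // Only truncate once both watermarks are known, and only below the lower of the two.
        if (materializedUpTo >= 0 && subsumedUpTo >= 0) {
            long truncateUpTo = Math.min(materializedUpTo, subsumedUpTo);
            System.out.println("could truncate up to (exclusive): " + truncateUpTo);
        }
    }
}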
flink_CsvReader_fieldDelimiter_rdh
|
/**
* Configures the delimiter that separates the fields within a row. The comma character ({@code ','}) is used by default.
*
* @param delimiter
* The delimiter that separates the fields in one row.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader fieldDelimiter(String delimiter) {
this.fieldDelimiter = delimiter;
return this;
}
| 3.26 |
flink_CsvReader_ignoreInvalidLines_rdh
|
/**
 * Sets the CSV reader to ignore any invalid lines. This is useful for files that contain an
 * empty line at the end, multiple header lines, or comments, which would otherwise cause an
 * exception.
*
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader ignoreInvalidLines() {
ignoreInvalidLines = true;
return this;
}
| 3.26 |
flink_CsvReader_ignoreFirstLine_rdh
|
/**
* Sets the CSV reader to ignore the first line. This is useful for files that contain a header
* line.
*
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader ignoreFirstLine() {
skipFirstLineAsHeader = true;
return this;
}
| 3.26 |
flink_CsvReader_ignoreComments_rdh
|
/**
* Configures the string that starts comments. By default comments will be treated as invalid
* lines. This function only recognizes comments which start at the beginning of the line!
*
* @param commentPrefix
* The string that starts the comments.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader ignoreComments(String commentPrefix) {
if ((commentPrefix == null) || (commentPrefix.length() == 0)) {
throw new IllegalArgumentException("The comment prefix must not be null or an empty string");
}
this.commentPrefix = commentPrefix;
return this;
}
| 3.26 |
flink_CsvReader_lineDelimiter_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Configures the delimiter that separates the lines/rows. The linebreak character ({@code '\n'}) is used by default.
*
* @param delimiter
* The delimiter that separates the rows.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader lineDelimiter(String delimiter) {
if ((delimiter == null) || (delimiter.length() == 0)) {
throw new IllegalArgumentException("The delimiter must not be null or an empty string");
}
this.lineDelimiter = delimiter;
return this;
}
| 3.26 |
flink_CsvReader_configureInputFormat_rdh
|
// --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
private void configureInputFormat(CsvInputFormat<?> format) {
format.setCharset(this.f0);
format.setDelimiter(this.lineDelimiter);
format.setFieldDelimiter(this.fieldDelimiter);
format.setCommentPrefix(this.commentPrefix);
format.setSkipFirstLineAsHeader(skipFirstLineAsHeader);
format.setLenient(ignoreInvalidLines);
if (this.parseQuotedStrings) {
format.enableQuotedStringParsing(this.quoteCharacter);
}
}
| 3.26 |
flink_CsvReader_setCharset_rdh
|
/**
* Sets the charset of the reader.
*
* @param charset
* The character set to set.
*/
@PublicEvolving
public void setCharset(String charset) {
this.f0 = Preconditions.checkNotNull(charset);
}
| 3.26 |
flink_CsvReader_m1_rdh
|
/**
 * Specifies the types for the CSV fields. This method parses the CSV data to an 18-tuple which
* has fields of the specified types. This method is overloaded for each possible length of the
* tuples to support type safe creation of data sets through CSV parsing.
*
* @param type0
* The type of CSV field 0 and the type of field 0 in the returned tuple type.
* @param type1
* The type of CSV field 1 and the type of field 1 in the returned tuple type.
* @param type2
* The type of CSV field 2 and the type of field 2 in the returned tuple type.
* @param type3
* The type of CSV field 3 and the type of field 3 in the returned tuple type.
* @param type4
* The type of CSV field 4 and the type of field 4 in the returned tuple type.
* @param type5
* The type of CSV field 5 and the type of field 5 in the returned tuple type.
* @param type6
* The type of CSV field 6 and the type of field 6 in the returned tuple type.
* @param type7
* The type of CSV field 7 and the type of field 7 in the returned tuple type.
* @param type8
* The type of CSV field 8 and the type of field 8 in the returned tuple type.
* @param type9
* The type of CSV field 9 and the type of field 9 in the returned tuple type.
* @param type10
* The type of CSV field 10 and the type of field 10 in the returned tuple type.
* @param type11
* The type of CSV field 11 and the type of field 11 in the returned tuple type.
* @param type12
* The type of CSV field 12 and the type of field 12 in the returned tuple type.
* @param type13
* The type of CSV field 13 and the type of field 13 in the returned tuple type.
* @param type14
* The type of CSV field 14 and the type of field 14 in the returned tuple type.
* @param type15
* The type of CSV field 15 and the type of field 15 in the returned tuple type.
* @param type16
* The type of CSV field 16 and the type of field 16 in the returned tuple type.
* @param type17
* The type of CSV field 17 and the type of field 17 in the returned tuple type.
* @return The {@link org.apache.flink.api.java.DataSet} representing the parsed CSV data.
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> DataSource<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> m1(
        Class<T0> type0, Class<T1> type1, Class<T2> type2, Class<T3> type3, Class<T4> type4, Class<T5> type5,
        Class<T6> type6, Class<T7> type7, Class<T8> type8, Class<T9> type9, Class<T10> type10, Class<T11> type11,
        Class<T12> type12, Class<T13> type13, Class<T14> type14, Class<T15> type15, Class<T16> type16, Class<T17> type17) {
    TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> types =
            TupleTypeInfo.getBasicAndBasicValueTupleTypeInfo(
                    type0, type1, type2, type3, type4, type5, type6, type7, type8,
                    type9, type10, type11, type12, type13, type14, type15, type16, type17);
    CsvInputFormat<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> inputFormat =
            new TupleCsvInputFormat<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(path, types, this.includedMask);
    configureInputFormat(inputFormat);
    return new DataSource<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(
            executionContext, inputFormat, types, Utils.getCallLocationName());
}
| 3.26 |
flink_CsvReader_m0_rdh
|
/**
* Specifies the types for the CSV fields. This method parses the CSV data to a 6-tuple which
* has fields of the specified types. This method is overloaded for each possible length of the
* tuples to support type safe creation of data sets through CSV parsing.
*
* @param type0
* The type of CSV field 0 and the type of field 0 in the returned tuple type.
* @param type1
* The type of CSV field 1 and the type of field 1 in the returned tuple type.
* @param type2
* The type of CSV field 2 and the type of field 2 in the returned tuple type.
* @param type3
* The type of CSV field 3 and the type of field 3 in the returned tuple type.
* @param type4
* The type of CSV field 4 and the type of field 4 in the returned tuple type.
* @param type5
* The type of CSV field 5 and the type of field 5 in the returned tuple type.
* @return The {@link org.apache.flink.api.java.DataSet} representing the parsed CSV data.
*/
public <T0, T1, T2, T3, T4, T5> DataSource<Tuple6<T0, T1, T2, T3, T4, T5>> m0(Class<T0> type0, Class<T1> type1, Class<T2> type2, Class<T3> type3, Class<T4> type4, Class<T5> type5) {
TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> types = TupleTypeInfo.getBasicAndBasicValueTupleTypeInfo(type0, type1, type2, type3, type4, type5);
CsvInputFormat<Tuple6<T0, T1, T2, T3, T4, T5>> inputFormat = new TupleCsvInputFormat<Tuple6<T0, T1, T2, T3, T4, T5>>(path, types, this.includedMask);
configureInputFormat(inputFormat);
return new DataSource<Tuple6<T0, T1, T2, T3, T4, T5>>(executionContext, inputFormat, types, Utils.getCallLocationName());
}
| 3.26 |
flink_CsvReader_getCharset_rdh
|
/**
* Gets the character set for the reader. Default is UTF-8.
*
* @return The charset for the reader.
*/
@PublicEvolving
public String getCharset() {
return this.f0;
}
| 3.26 |
flink_CsvReader_types_rdh
|
/**
* Specifies the types for the CSV fields. This method parses the CSV data to a 25-tuple which
* has fields of the specified types. This method is overloaded for each possible length of the
* tuples to support type safe creation of data sets through CSV parsing.
*
* @param type0
* The type of CSV field 0 and the type of field 0 in the returned tuple type.
* @param type1
* The type of CSV field 1 and the type of field 1 in the returned tuple type.
* @param type2
* The type of CSV field 2 and the type of field 2 in the returned tuple type.
* @param type3
* The type of CSV field 3 and the type of field 3 in the returned tuple type.
* @param type4
* The type of CSV field 4 and the type of field 4 in the returned tuple type.
* @param type5
* The type of CSV field 5 and the type of field 5 in the returned tuple type.
* @param type6
* The type of CSV field 6 and the type of field 6 in the returned tuple type.
* @param type7
* The type of CSV field 7 and the type of field 7 in the returned tuple type.
* @param type8
* The type of CSV field 8 and the type of field 8 in the returned tuple type.
* @param type9
* The type of CSV field 9 and the type of field 9 in the returned tuple type.
* @param type10
* The type of CSV field 10 and the type of field 10 in the returned tuple type.
* @param type11
* The type of CSV field 11 and the type of field 11 in the returned tuple type.
* @param type12
* The type of CSV field 12 and the type of field 12 in the returned tuple type.
* @param type13
* The type of CSV field 13 and the type of field 13 in the returned tuple type.
* @param type14
* The type of CSV field 14 and the type of field 14 in the returned tuple type.
* @param type15
* The type of CSV field 15 and the type of field 15 in the returned tuple type.
* @param type16
* The type of CSV field 16 and the type of field 16 in the returned tuple type.
* @param type17
* The type of CSV field 17 and the type of field 17 in the returned tuple type.
* @param type18
* The type of CSV field 18 and the type of field 18 in the returned tuple type.
* @param type19
* The type of CSV field 19 and the type of field 19 in the returned tuple type.
* @param type20
* The type of CSV field 20 and the type of field 20 in the returned tuple type.
* @param type21
* The type of CSV field 21 and the type of field 21 in the returned tuple type.
* @param type22
* The type of CSV field 22 and the type of field 22 in the returned tuple type.
* @param type23
* The type of CSV field 23 and the type of field 23 in the returned tuple type.
* @param type24
* The type of CSV field 24 and the type of field 24 in the returned tuple type.
* @return The {@link org.apache.flink.api.java.DataSet} representing the parsed CSV data.
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> DataSource<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> types(
        Class<T0> type0, Class<T1> type1, Class<T2> type2, Class<T3> type3, Class<T4> type4,
        Class<T5> type5, Class<T6> type6, Class<T7> type7, Class<T8> type8, Class<T9> type9,
        Class<T10> type10, Class<T11> type11, Class<T12> type12, Class<T13> type13, Class<T14> type14,
        Class<T15> type15, Class<T16> type16, Class<T17> type17, Class<T18> type18, Class<T19> type19,
        Class<T20> type20, Class<T21> type21, Class<T22> type22, Class<T23> type23, Class<T24> type24) {
    TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> types =
            TupleTypeInfo.getBasicAndBasicValueTupleTypeInfo(
                    type0, type1, type2, type3, type4, type5, type6, type7, type8, type9,
                    type10, type11, type12, type13, type14, type15, type16, type17, type18, type19,
                    type20, type21, type22, type23, type24);
    CsvInputFormat<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> inputFormat =
            new TupleCsvInputFormat<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(path, types, this.includedMask);
    configureInputFormat(inputFormat);
    return new DataSource<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(
            executionContext, inputFormat, types, Utils.getCallLocationName());
}
| 3.26 |
flink_CsvReader_tupleType_rdh
|
/**
* Configures the reader to read the CSV data and parse it to the given type. The type must be a
* subclass of {@link Tuple}. The type information for the fields is obtained from the type
* class. The type consequently needs to specify all generic field types of the tuple.
*
* @param targetType
* The class of the target type, needs to be a subclass of Tuple.
* @return The DataSet representing the parsed CSV data.
*/
public <T extends Tuple> DataSource<T> tupleType(Class<T> targetType) {
Preconditions.checkNotNull(targetType, "The target type class must not be null.");
if (!Tuple.class.isAssignableFrom(targetType)) {
throw new IllegalArgumentException("The target type must be a subclass of " + Tuple.class.getName());
}
@SuppressWarnings("unchecked")
TupleTypeInfo<T> typeInfo = ((TupleTypeInfo<T>) (TypeExtractor.createTypeInfo(targetType)));
CsvInputFormat<T> inputFormat = new TupleCsvInputFormat<T>(path, this.lineDelimiter, this.fieldDelimiter, typeInfo, this.includedMask);
Class<?>[] classes = new Class<?>[typeInfo.getArity()];
for (int i = 0; i < typeInfo.getArity(); i++) {
classes[i] = typeInfo.getTypeAt(i).getTypeClass();
}
configureInputFormat(inputFormat);
return new DataSource<T>(executionContext, inputFormat, typeInfo, Utils.getCallLocationName());
}
| 3.26 |
flink_CsvReader_includeFields_rdh
|
/**
* Configures which fields of the CSV file should be included and which should be skipped. The
* bits in the value (read from least significant to most significant) define whether the field
 * at the corresponding position in the CSV schema should be included. The parser will look at
 * the first {@code n} fields, where {@code n} is the position of the most significant non-zero
 * bit. The parser will skip over all fields where the corresponding bit is zero, and include
 * the fields where the corresponding bit is one.
*
* <p>Examples:
*
* <ul>
* <li>A mask of {@code 0x7} would include the first three fields.
 * <li>A mask of {@code 0x26} (binary {@code 100110}) would skip the first field, include
 *     fields two and three, skip fields four and five, and include field six.
* </ul>
*
* @param mask
* The bit mask defining which fields to include and which to skip.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader includeFields(long mask) {
if (mask == 0) {
throw new IllegalArgumentException("The description of fields to parse excluded all fields. At least one fields must be included.");
}
ArrayList<Boolean> fields = new ArrayList<Boolean>();
while (mask != 0) {
fields.add((mask & 0x1L) != 0);
mask >>>= 1;
}
boolean[] fieldsArray = new boolean[fields.size()];
for (int i = 0; i < fieldsArray.length; i++) {
fieldsArray[i] = fields.get(i);
}
return includeFields(fieldsArray);
}
| 3.26 |
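The loop above reads the mask from the least significant bit upwards and turns it into one boolean include-flag per CSV field. A standalone sketch of that conversion, reusing the example mask 0x26 from the javadoc (the class name is hypothetical):

import java.util.ArrayList;

// Shows how a bit mask becomes per-field include flags, least significant bit first.
public class MaskDemo {
    public static void main(String[] args) {
        long mask = 0x26; // binary 100110
        ArrayList<Boolean> fields = new ArrayList<>();
        while (mask != 0) {
            fields.add((mask & 0x1L) != 0);
            mask >>>= 1;
        }
        // Field 0 skipped, fields 1 and 2 included, fields 3 and 4 skipped, field 5 included.
        System.out.println(fields); // [false, true, true, false, false, true]
    }
}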
flink_CsvReader_pojoType_rdh
|
/**
 * Configures the reader to read the CSV data and parse it to the given type. All fields of the
 * type must be public or have setters. The type information for the fields is obtained from the
 * type class.
*
* @param pojoType
* The class of the target POJO.
* @param pojoFields
* The fields of the POJO which are mapped to CSV fields.
* @return The DataSet representing the parsed CSV data.
*/
public <T> DataSource<T> pojoType(Class<T> pojoType, String... pojoFields) {
Preconditions.checkNotNull(pojoType, "The POJO type class must not be null.");
Preconditions.checkArgument((pojoFields != null) && (pojoFields.length > 0), "POJO fields must be specified (not null) if output type is a POJO.");
final TypeInformation<T> ti = TypeExtractor.createTypeInfo(pojoType);
if (!(ti instanceof PojoTypeInfo)) {
    throw new IllegalArgumentException("The specified class is not a POJO. The type class must meet the POJO requirements. Found: " + ti);
}
final PojoTypeInfo<T> pti = ((PojoTypeInfo<T>) (ti));
CsvInputFormat<T> inputFormat = new PojoCsvInputFormat<T>(path, this.lineDelimiter, this.fieldDelimiter, pti, pojoFields, this.includedMask);
configureInputFormat(inputFormat);
return new DataSource<T>(executionContext, inputFormat, pti, Utils.getCallLocationName());
}
| 3.26 |
flink_CsvReader_parseQuotedStrings_rdh
|
/**
* Enables quoted String parsing. Field delimiters in quoted Strings are ignored. A String is
* parsed as quoted if it starts and ends with a quoting character and as unquoted otherwise.
 * Leading or trailing whitespace is not allowed.
*
* @param quoteCharacter
* The character which is used as quoting character.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader parseQuotedStrings(char quoteCharacter) {
this.parseQuotedStrings = true;
this.quoteCharacter = quoteCharacter;
return this;
}
| 3.26 |
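The configuration methods above are designed for fluent chaining. A usage sketch against the (legacy) DataSet API, with a hypothetical file path and column layout:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple3;

public class CsvReaderSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Hypothetical file: "id;name;score" rows with a header line and '#' comments.
        DataSet<Tuple3<Integer, String, Double>> rows =
                env.readCsvFile("file:///tmp/input.csv")
                        .fieldDelimiter(";")
                        .ignoreFirstLine()
                        .ignoreComments("#")
                        .parseQuotedStrings('"')
                        .types(Integer.class, String.class, Double.class);

        rows.print();
    }
}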
flink_HiveParserContext_setTokenRewriteStream_rdh
|
/**
* Set the token rewrite stream being used to parse the current top-level SQL statement. Note
* that this should <b>not</b> be used for other parsing activities; for example, when we
* encounter a reference to a view, we switch to a new stream for parsing the stored view
* definition from the catalog, but we don't clobber the top-level stream in the context.
*
* @param tokenRewriteStream
* the stream being used
*/
public void setTokenRewriteStream(TokenRewriteStream tokenRewriteStream) {
assert this.tokenRewriteStream == null;
this.tokenRewriteStream = tokenRewriteStream;
}
| 3.26 |
flink_HiveParserContext_getDestNamePrefix_rdh
|
/**
* The suffix is always relative to a given HiveParserASTNode.
*/
public DestClausePrefix getDestNamePrefix(HiveParserASTNode curNode) {
assert curNode != null : "must supply curNode";
assert (curNode.getType() == HiveASTParser.TOK_INSERT_INTO) || (curNode.getType() == HiveASTParser.TOK_DESTINATION);
return DestClausePrefix.INSERT;
}
| 3.26 |
flink_TpchResultComparator_round_rdh
|
/**
* Rounding function defined in TPC-H standard specification v2.18.0 chapter 10.
*/
private static double round(double x, int m) {
if (x < 0) {
throw new IllegalArgumentException("x must be non-negative");
}
double y = x + (5 * Math.pow(10, (-m) - 1));
double z = y * Math.pow(10, m);
double q = Math.floor(z);
return q / Math.pow(10, m);
}
| 3.26 |
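As a worked check of the formula: rounding 3.14159 to two decimal places gives y = 3.14159 + 0.005 = 3.14659, z = 314.659, floor(z) = 314, so the result is 3.14; the added 5 * 10^(-m-1) term makes exact halves round up, e.g. round(7.5, 0) = 8. A standalone copy of the function (hypothetical wrapper class, same body as above) to try this:

// TPC-H rounding, copied verbatim into a small demo class.
public class RoundDemo {
    static double round(double x, int m) {
        if (x < 0) {
            throw new IllegalArgumentException("x must be non-negative");
        }
        double y = x + 5 * Math.pow(10, -m - 1);
        double z = y * Math.pow(10, m);
        double q = Math.floor(z);
        return q / Math.pow(10, m);
    }

    public static void main(String[] args) {
        System.out.println(round(3.14159, 2)); // 3.14
        System.out.println(round(7.5, 0));     // 8.0 (halves round up)
    }
}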
flink_BinaryRowData_anyNull_rdh
|
/**
* The bit is 1 when the field is null. Default is 0.
*/
@Override
public boolean anyNull() {
// Skip the header.
if ((segments[0].getLong(0) & FIRST_BYTE_ZERO) != 0) {
return true;
}
for (int i = 8; i < f1; i += 8) {
if (segments[0].getLong(i) != 0) {
return true;
}
}
return false;
}
| 3.26 |
flink_BinaryRowData_isInFixedLengthPart_rdh
|
/**
* If it is a fixed-length field, we can call this BinaryRowData's setXX method for in-place
* updates. If it is variable-length field, can't use this method, because the underlying data
* is stored continuously.
*/
public static boolean isInFixedLengthPart(LogicalType type) {
switch (type.getTypeRoot()) {
case BOOLEAN :
case TINYINT :
case SMALLINT :
case INTEGER :
case DATE :
case TIME_WITHOUT_TIME_ZONE :
case INTERVAL_YEAR_MONTH :
case BIGINT :
case INTERVAL_DAY_TIME :
case FLOAT :
case DOUBLE :
return true;
case DECIMAL :
return DecimalData.isCompact(((DecimalType) (type)).getPrecision());
case TIMESTAMP_WITHOUT_TIME_ZONE :
return TimestampData.isCompact(((TimestampType) (type)).getPrecision());
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return TimestampData.isCompact(((LocalZonedTimestampType) (type)).getPrecision());
default :
return false;
}
}
| 3.26 |
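For illustration, a hedged sketch of how a few common logical types are classified; the DECIMAL and TIMESTAMP results assume the usual compactness thresholds (precision up to 18 for DecimalData, up to 3 for TimestampData):

import org.apache.flink.table.data.binary.BinaryRowData;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.logical.VarCharType;

// Expected output assumes the compact-precision thresholds noted above.
public class FixedLengthPartDemo {
    public static void main(String[] args) {
        System.out.println(BinaryRowData.isInFixedLengthPart(new IntType()));          // true
        System.out.println(BinaryRowData.isInFixedLengthPart(new VarCharType(10)));    // false
        System.out.println(BinaryRowData.isInFixedLengthPart(new DecimalType(10, 2))); // true
        System.out.println(BinaryRowData.isInFixedLengthPart(new DecimalType(38, 2))); // false
        System.out.println(BinaryRowData.isInFixedLengthPart(new TimestampType(3)));   // true
        System.out.println(BinaryRowData.isInFixedLengthPart(new TimestampType(9)));   // false
    }
}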
flink_SharedBuffer_getEvent_rdh
|
/**
 * It always returns the event, either from the state backend or from the cache.
*
* @param eventId
* id of the event
* @return event
*/
Lockable<V> getEvent(EventId eventId) {
try {
Lockable<V> lockableFromCache = eventsBufferCache.getIfPresent(eventId);
if (Objects.nonNull(lockableFromCache)) {
return lockableFromCache;
} else {
Lockable<V> lockableFromState = eventsBuffer.get(eventId);
if (Objects.nonNull(lockableFromState)) {
eventsBufferCache.put(eventId, lockableFromState);
}
return lockableFromState;
}
} catch (Exception ex) {
throw new WrappingRuntimeException(ex);
}
}
| 3.26 |
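The lookup above is a read-through cache: the cache is consulted first, and on a miss the value is fetched from state and put back into the cache so repeated reads of the same event stay cheap. A simplified, hypothetical mirror using plain maps in place of the cache and the keyed state:

import java.util.HashMap;
import java.util.Map;

// Hypothetical mirror of the read-through lookup pattern used by SharedBuffer.getEvent.
class ReadThroughLookup<K, V> {
    private final Map<K, V> cache = new HashMap<>();
    private final Map<K, V> state = new HashMap<>();

    V get(K key) {
        V cached = cache.get(key);
        if (cached != null) {
            return cached;
        }
        V fromState = state.get(key);
        if (fromState != null) {
            // Populate the cache so repeated lookups skip the slower state access.
            cache.put(key, fromState);
        }
        return fromState;
    }
}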
flink_SharedBuffer_flushCache_rdh
|
/**
* Flush the event and node from cache to state.
*
* @throws Exception
* Thrown if the system cannot access the state.
*/
void flushCache() throws Exception {
if (!entryCache.asMap().isEmpty()) {
entries.putAll(entryCache.asMap());
entryCache.invalidateAll();
}
if (!eventsBufferCache.asMap().isEmpty()) {
eventsBuffer.putAll(eventsBufferCache.asMap());
eventsBufferCache.invalidateAll();
}
}
| 3.26 |