name | code_snippet | score
---|---|---
flink_LeaderElectionUtils_convertToString_rdh | /**
* Converts the passed {@link LeaderInformation} into a human-readable representation that can
* be used in log messages.
*/
public static String convertToString(LeaderInformation leaderInformation) {
return leaderInformation.isEmpty() ? "<no leader>" : convertToString(leaderInformation.getLeaderSessionID(), leaderInformation.getLeaderAddress());
} | 3.26 |
flink_AbstractExternalOneInputPythonFunctionOperator_getInputTypeInfo_rdh | // ----------------------------------------------------------------------
// Getters
// ----------------------------------------------------------------------
public TypeInformation<IN> getInputTypeInfo() {
return inputTypeInfo;
} | 3.26 |
flink_NumberSequenceSource_getProducedType_rdh | // ------------------------------------------------------------------------
// source methods
// ------------------------------------------------------------------------
@Override
public TypeInformation<Long> getProducedType() {
return Types.LONG;
} | 3.26 |
flink_StickyAllocationAndLocalRecoveryTestJob_m0_rdh | /**
* This code is copied from Stack Overflow.
*
* <p><a
* href="https://stackoverflow.com/questions/35842">https://stackoverflow.com/questions/35842</a>,
* answer <a
* href="https://stackoverflow.com/a/12066696/9193881">https://stackoverflow.com/a/12066696/9193881</a>
*
* <p>Author: <a href="https://stackoverflow.com/users/446591/brad-mace">Brad Mace</a>)
*/
private static int m0() throws Exception {
RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
Field v31 = runtime.getClass().getDeclaredField("jvm");
v31.setAccessible(true);
VMManagement mgmt = ((VMManagement) (v31.get(runtime)));
Method pidMethod = mgmt.getClass().getDeclaredMethod("getProcessId");
pidMethod.setAccessible(true);
return (int) (Integer) pidMethod.invoke(mgmt);
} | 3.26 |
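For context, on Java 9 or newer the same process id is available from the standard library without reflecting into JVM internals; a minimal sketch:

```java
public class PidExample {
    public static void main(String[] args) {
        // Java 9+: ProcessHandle exposes the current JVM's pid directly.
        long pid = ProcessHandle.current().pid();
        System.out.println(pid);
    }
}
```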
flink_MaxwellJsonFormatFactory_validateEncodingFormatOptions_rdh | /**
* Validator for maxwell encoding format.
*/
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
} | 3.26 |
flink_MaxwellJsonFormatFactory_validateDecodingFormatOptions_rdh | /**
* Validator for maxwell decoding format.
*/
private static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions);
} | 3.26 |
flink_AvroWriters_forReflectRecord_rdh | /**
* Creates an {@link AvroWriterFactory} for the given type. The Avro writers will use reflection
* to create the schema for the type and use that schema to write the records.
*
* @param type
* The class of the type to write.
*/
public static <T> AvroWriterFactory<T> forReflectRecord(Class<T> type) {
String v4 = ReflectData.get().getSchema(type).toString();
AvroBuilder<T> builder = out -> createAvroDataFileWriter(v4, ReflectDatumWriter::new, out);
return new AvroWriterFactory<>(builder);
} | 3.26 |
flink_AvroWriters_forSpecificRecord_rdh | /**
* Creates an {@link AvroWriterFactory} for an Avro specific type. The Avro writers will use the
* schema of that specific type to build and write the records.
*
* @param type
* The class of the type to write.
*/
public static <T extends SpecificRecordBase> AvroWriterFactory<T> forSpecificRecord(Class<T> type) {
String schemaString = SpecificData.get().getSchema(type).toString();
AvroBuilder<T> builder = out -> createAvroDataFileWriter(schemaString, SpecificDatumWriter::new, out);
return new AvroWriterFactory<>(builder);
} | 3.26 |
flink_AvroWriters_forGenericRecord_rdh | /**
* Creates an {@link AvroWriterFactory} that accepts and writes Avro generic types. The Avro
* writers will use the given schema to build and write the records.
*
* @param schema
* The schema of the generic type.
*/
public static AvroWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
String schemaString = schema.toString();
// Must override the lambda representation because of a bug in shading lambda
// serialization, see similar issue FLINK-28043 for more details.
AvroBuilder<GenericRecord> builder = new AvroBuilder<GenericRecord>() {
@Override
public DataFileWriter<GenericRecord> createWriter(OutputStream outputStream) throws IOException
{
return createAvroDataFileWriter(schemaString, new Function<Schema, DatumWriter<GenericRecord>>() {
@Override
public DatumWriter<GenericRecord> apply(Schema schema) {
return new GenericDatumWriter<>(schema);
}
}, outputStream);
}
};
return new AvroWriterFactory<>(builder);
} | 3.26 |
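A hedged usage sketch of these factories (assuming the Flink file connector and Avro format modules are on the classpath; the schema literal and output path are placeholders): an `AvroWriterFactory` is a `BulkWriter.Factory`, so it can be handed to `FileSink.forBulkFormat`.

```java
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.avro.AvroWriters;

public class AvroSinkSketch {
    public static FileSink<GenericRecord> buildSink() {
        // Placeholder record schema; any valid Avro schema string works here.
        Schema schema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                        + "{\"name\":\"name\",\"type\":\"string\"}]}");
        // The factory writes Avro data files (one per part file) using the given schema.
        return FileSink
                .forBulkFormat(new Path("/tmp/avro-out"), AvroWriters.forGenericRecord(schema))
                .build();
    }
}
```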
flink_JobResult_isSuccess_rdh | /**
* Returns {@code true} if the job finished successfully.
*/
public boolean isSuccess() {
return (applicationStatus == ApplicationStatus.SUCCEEDED) || ((applicationStatus == ApplicationStatus.UNKNOWN) && (serializedThrowable == null));
} | 3.26 |
flink_JobResult_toJobExecutionResult_rdh | /**
* Converts the {@link JobResult} to a {@link JobExecutionResult}.
*
* @param classLoader
* to use for deserialization
* @return JobExecutionResult
* @throws JobCancellationException
* if the job was cancelled
* @throws JobExecutionException
* if the job execution did not succeed
* @throws IOException
* if the accumulator could not be deserialized
* @throws ClassNotFoundException
* if the accumulator could not deserialized
*/
public JobExecutionResult toJobExecutionResult(ClassLoader classLoader) throws JobExecutionException, IOException, ClassNotFoundException {
if (applicationStatus == ApplicationStatus.SUCCEEDED) {
return new JobExecutionResult(jobId, netRuntime, AccumulatorHelper.deserializeAccumulators(accumulatorResults,
classLoader));
} else {
final Throwable cause;
if (serializedThrowable == null) {
cause = null;
} else {
cause = serializedThrowable.deserializeError(classLoader);
}
final JobExecutionException exception;
if (applicationStatus == ApplicationStatus.FAILED) {
exception = new JobExecutionException(jobId, "Job execution failed.", cause);
} else if (applicationStatus == ApplicationStatus.CANCELED) {
exception = new JobCancellationException(jobId, "Job was cancelled.", cause);
} else {
exception = new JobExecutionException(jobId, "Job completed with illegal application status: " + applicationStatus + '.', cause);
}
throw exception;
}
} | 3.26 |
flink_JobResult_createFrom_rdh | /**
* Creates the {@link JobResult} from the given {@link AccessExecutionGraph} which must be in a
* globally terminal state.
*
* @param accessExecutionGraph
* to create the JobResult from
* @return JobResult of the given AccessExecutionGraph
*/
public static JobResult createFrom(AccessExecutionGraph accessExecutionGraph) {
final JobID jobId = accessExecutionGraph.getJobID();
final JobStatus jobStatus = accessExecutionGraph.getState();
checkArgument(jobStatus.isTerminalState(), "The job " + accessExecutionGraph.getJobName() + '(' + jobId + ") is not in a terminal state. It is in state " + jobStatus + '.');
final JobResult.Builder builder = new JobResult.Builder();
builder.jobId(jobId);
builder.applicationStatus(ApplicationStatus.fromJobStatus(accessExecutionGraph.getState()));
final long netRuntime = accessExecutionGraph.getStatusTimestamp(jobStatus) - accessExecutionGraph.getStatusTimestamp(JobStatus.INITIALIZING);
// guard against clock changes
final long guardedNetRuntime = Math.max(netRuntime, 0L);
builder.netRuntime(guardedNetRuntime);
builder.accumulatorResults(accessExecutionGraph.getAccumulatorsSerialized());
if (jobStatus == JobStatus.FAILED) {
final ErrorInfo errorInfo = accessExecutionGraph.getFailureInfo();
checkNotNull(errorInfo, "No root cause is found for the job failure.");
builder.serializedThrowable(errorInfo.getException());
}
return builder.build();
} | 3.26 |
flink_JsonSerdeUtil_hasJsonCreatorAnnotation_rdh | /**
 * Returns true if any of the given class's constructors has the {@code @JsonCreator} annotation, otherwise false.
*/
public static boolean hasJsonCreatorAnnotation(Class<?> clazz) {
for (Constructor<?> constructor : clazz.getDeclaredConstructors()) {
for (Annotation annotation : constructor.getAnnotations()) {
if (annotation instanceof JsonCreator) {
return true;
}
}
}
return false;
} | 3.26 |
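As an illustration of what this check detects, a minimal hypothetical class whose constructor carries the annotation, so `hasJsonCreatorAnnotation(Point.class)` would return true:

```java
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

// Hypothetical example type, not part of Flink.
public class Point {
    private final int x;
    private final int y;

    @JsonCreator // the annotation the utility scans declared constructors for
    public Point(@JsonProperty("x") int x, @JsonProperty("y") int y) {
        this.x = x;
        this.y = y;
    }
}
```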
flink_JsonSerdeUtil_traverse_rdh | // Utilities for SerDes implementations
static JsonParser traverse(TreeNode node, ObjectCodec objectCodec) throws IOException {
JsonParser jsonParser = node.traverse(objectCodec);
// https://stackoverflow.com/questions/55855414/custom-jackson-deserialization-getting-com-fasterxml-jackson-databind-exc-mism
if (!node.isMissingNode()) {
// Initialize first token
if (jsonParser.getCurrentToken() == null) {
jsonParser.nextToken();
}
}
return jsonParser;
} | 3.26 |
flink_HiveShimV100_getViews_rdh | // 1.x client doesn't support filtering tables by type, so here we need to get all tables and
// filter by ourselves
@Override
public List<String> getViews(IMetaStoreClient client, String databaseName) throws UnknownDBException, TException {
// We don't have to use reflection here because client.getAllTables(String) is supposed to
// be there for all versions.
List<String> tableNames = client.getAllTables(databaseName);
List<String> views = new ArrayList<>();
for (String name : tableNames) {
Table table = client.getTable(databaseName, name);
String viewDef = table.getViewOriginalText();
if ((viewDef != null) && (!viewDef.isEmpty())) {
views.add(table.getTableName());
}
}
return views;
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_recoverAndAbort_rdh | /**
* Abort a transaction that was rejected by a coordinator after a failure.
*/
protected void recoverAndAbort(TXN transaction) {
m0(transaction);
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_finishRecoveringContext_rdh | /**
* Callback for subclasses which is called after restoring (each) user context.
*
* @param handledTransactions
* transactions which were already committed or aborted and do not
* need further handling
*/
protected void finishRecoveringContext(Collection<TXN> handledTransactions) {
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_invoke_rdh | // ------ entry points for above methods implementing {@CheckPointedFunction} and
// {@CheckpointListener} ------
/**
* This should not be implemented by subclasses.
*/
@Override
public final void invoke(IN value) throws Exception {
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_finishProcessing_rdh | /**
* This method is called at the end of data processing.
*
* <p>The method is expected to flush all remaining buffered data. Exceptions will cause the
* pipeline to be recognized as failed, because the last data items are not processed properly.
* You may use this method to flush remaining buffered elements in the state into the current
* transaction which will be committed in the last checkpoint.
*/
protected void finishProcessing(@Nullable TXN transaction) {
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_setTransactionTimeout_rdh | /**
* Sets the transaction timeout. Setting only the transaction timeout has no effect in itself.
*
* @param transactionTimeout
* The transaction timeout in ms.
* @see #ignoreFailuresAfterTransactionTimeout()
* @see #enableTransactionTimeoutWarnings(double)
*/
protected TwoPhaseCommitSinkFunction<IN, TXN, CONTEXT> setTransactionTimeout(long transactionTimeout) {
checkArgument(transactionTimeout >= 0, "transactionTimeout must not be negative");
this.transactionTimeout = transactionTimeout;
return this;
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_ignoreFailuresAfterTransactionTimeout_rdh | /**
* If called, the sink will only log but not propagate exceptions thrown in {@link #recoverAndCommit(Object)} if the transaction is older than a specified transaction timeout.
 * The start time of a transaction is determined by {@link System#currentTimeMillis()}. By
* default, failures are propagated.
*/
protected TwoPhaseCommitSinkFunction<IN, TXN, CONTEXT> ignoreFailuresAfterTransactionTimeout() {
this.ignoreFailuresAfterTransactionTimeout = true;
return this;
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_enableTransactionTimeoutWarnings_rdh | /**
* Enables logging of warnings if a transaction's elapsed time reaches a specified ratio of the
 * <code>transactionTimeout</code>. If <code>warningRatio</code> is 0, a warning will always be
 * logged when committing the transaction.
*
* @param warningRatio
* A value in the range [0,1].
 * @return this instance, for chained invocation
 */
protected TwoPhaseCommitSinkFunction<IN, TXN, CONTEXT> enableTransactionTimeoutWarnings(double warningRatio) {
checkArgument((warningRatio >= 0) && (warningRatio <= 1), "warningRatio must be in range [0,1]");
this.transactionTimeoutWarningRatio = warningRatio;
return this;
} | 3.26 |
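A sketch of how the three protected setters above are typically wired up from a subclass constructor; the subclass, its no-op transaction handling, and the serializer choices are assumptions for illustration, not an official example:

```java
import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.api.common.typeutils.base.VoidSerializer;
import org.apache.flink.streaming.api.functions.sink.TwoPhaseCommitSinkFunction;

// Hypothetical sink: IN = String, TXN = String (a transaction id), CONTEXT = Void.
public class TimeoutConfiguredSink extends TwoPhaseCommitSinkFunction<String, String, Void> {

    public TimeoutConfiguredSink() {
        super(StringSerializer.INSTANCE, VoidSerializer.INSTANCE);
        setTransactionTimeout(15 * 60 * 1000L);   // 15 minutes, in milliseconds
        ignoreFailuresAfterTransactionTimeout();  // only log commit failures past the timeout
        enableTransactionTimeoutWarnings(0.8);    // warn once 80% of the timeout has elapsed
    }

    @Override
    protected String beginTransaction() {
        return "txn-" + System.nanoTime(); // placeholder transaction handle
    }

    @Override
    protected void invoke(String txn, String value, Context context) {
        // buffer value under txn (omitted in this sketch)
    }

    @Override
    protected void preCommit(String txn) { /* flush buffered data */ }

    @Override
    protected void commit(String txn) { /* make the buffered data visible */ }

    @Override
    protected void abort(String txn) { /* discard buffered data */ }
}
```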
flink_TwoPhaseCommitSinkFunction_recoverAndCommit_rdh | /**
* Invoked on recovered transactions after a failure. User implementation must ensure that this
* call will eventually succeed. If it fails, Flink application will be restarted and it will be
 * invoked again. If it does not eventually succeed, data loss will occur. Transactions will
 * be recovered in the order in which they were created.
*/
protected void recoverAndCommit(TXN transaction) {
commit(transaction);
} | 3.26 |
flink_TwoPhaseCommitSinkFunction_m1_rdh | /**
* This method must be the only place to call {@link #recoverAndCommit(Object)} to ensure that
* the configuration parameters {@link #transactionTimeout} and {@link #ignoreFailuresAfterTransactionTimeout} are respected.
*/
private void m1(TransactionHolder<TXN> transactionHolder) {
try {
logWarningIfTimeoutAlmostReached(transactionHolder);
recoverAndCommit(transactionHolder.handle);
} catch (final Exception e) {
final long elapsedTime = clock.millis() - transactionHolder.transactionStartTime;
if (ignoreFailuresAfterTransactionTimeout && (elapsedTime > transactionTimeout)) {
LOG.error("Error while committing transaction {}. " + "Transaction has been open for longer than the transaction timeout ({}). " + "Commit will not be attempted again. Data loss might have occurred.", transactionHolder.handle, transactionTimeout, e);
} else {
throw e;
}
}
} | 3.26 |
flink_LogicalTypeDataTypeConverter_m0_rdh | /**
 * Converts {@link LegacyTypeInformationType} to planner types.
*/
@Deprecated
public static LogicalType m0(DataType dataType) {
return PlannerTypeUtils.removeLegacyTypes(dataType.getLogicalType());
} | 3.26 |
flink_SingleInputGate_retriggerPartitionRequest_rdh | /**
* Retriggers a partition request.
*/
public void retriggerPartitionRequest(IntermediateResultPartitionID partitionId, int subpartitionIndex) throws IOException {
synchronized(requestLock) {
if (!closeFuture.isDone()) {
final InputChannel ch = inputChannels.get(new SubpartitionInfo(partitionId, subpartitionIndex));
checkNotNull(ch, "Unknown input channel with ID " + partitionId);
LOG.debug("{}: Retriggering partition request {}:{}.", owningTaskName, ch.partitionId, ch.getConsumedSubpartitionIndex());
if (ch.getClass() == RemoteInputChannel.class) {
final RemoteInputChannel rch = ((RemoteInputChannel) (ch));
rch.retriggerSubpartitionRequest();
} else if (ch.getClass() == LocalInputChannel.class) {
final LocalInputChannel ich = ((LocalInputChannel) (ch));
if (retriggerLocalRequestTimer == null) {
retriggerLocalRequestTimer = new Timer(true);
}
ich.retriggerSubpartitionRequest(retriggerLocalRequestTimer);
} else {
throw new IllegalStateException("Unexpected type of channel to retrigger partition: " + ch.getClass());
}
}
}
} | 3.26 |
flink_SingleInputGate_getConsumedPartitionType_rdh | /**
* Returns the type of this input channel's consumed result partition.
*
* @return consumed result partition type
*/
public ResultPartitionType getConsumedPartitionType() {
return consumedPartitionType;
} | 3.26 |
flink_SingleInputGate_notifyPriorityEvent_rdh | /**
* Notifies that the respective channel has a priority event at the head for the given buffer
* number.
*
* <p>The buffer number limits the notification to the respective buffer and voids the whole
* notification in case that the buffer has been polled in the meantime. That is, if task thread
* polls the enqueued priority buffer before this notification occurs (notification is not
* performed under lock), this buffer number allows {@link #queueChannel(InputChannel, Integer,
* boolean)} to avoid spurious priority wake-ups.
*/
void notifyPriorityEvent(InputChannel inputChannel, int prioritySequenceNumber) {
queueChannel(checkNotNull(inputChannel), prioritySequenceNumber, false);
} | 3.26 |
flink_SingleInputGate_queueChannelUnsafe_rdh | /**
* Queues the channel if not already enqueued and not received EndOfPartition, potentially
* raising the priority.
*
* @return true iff it has been enqueued/prioritized = some change to {@link #inputChannelsWithData} happened
*/
private boolean queueChannelUnsafe(InputChannel channel, boolean priority) {
assert Thread.holdsLock(inputChannelsWithData);
if (channelsWithEndOfPartitionEvents.get(channel.getChannelIndex())) {
return false;
}
final boolean alreadyEnqueued = enqueuedInputChannelsWithData.get(channel.getChannelIndex());
if (alreadyEnqueued && ((!priority) || inputChannelsWithData.containsPriorityElement(channel))) {
// already notified / prioritized (double notification), ignore
return false;
}
inputChannelsWithData.add(channel, priority, alreadyEnqueued);
if (!alreadyEnqueued) {
enqueuedInputChannelsWithData.set(channel.getChannelIndex());
}
return true;
} | 3.26 |
flink_SingleInputGate_getInputChannels_rdh | // ------------------------------------------------------------------------
public Map<SubpartitionInfo, InputChannel> getInputChannels() {
return inputChannels;
} | 3.26 |
flink_SingleInputGate_setBufferPool_rdh | // ------------------------------------------------------------------------
// Setup/Life-cycle
// ------------------------------------------------------------------------
public void setBufferPool(BufferPool bufferPool) {
checkState(this.bufferPool == null, "Bug in input gate setup logic: buffer pool has " + "already been set for this input gate.");
this.bufferPool = checkNotNull(bufferPool);
} | 3.26 |
flink_SingleInputGate_getNumberOfInputChannels_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
@Override
public int getNumberOfInputChannels() {
return numberOfInputChannels;
} | 3.26 |
flink_SingleInputGate_setupChannels_rdh | /**
* Assign the exclusive buffers to all remote input channels directly for credit-based mode.
*/
@VisibleForTesting
public void setupChannels() throws IOException {
// Allocate enough exclusive and floating buffers to guarantee that job can make progress.
// Note: An exception will be thrown if there is no buffer available in the given timeout.
// First allocate a single floating buffer to avoid potential deadlock when the exclusive
// buffer is 0. See FLINK-24035 for more information.
bufferPool.reserveSegments(1);
// Next allocate the exclusive buffers per channel when the number of exclusive buffer is
// larger than 0.
synchronized(requestLock) {
for (InputChannel inputChannel : inputChannels.values()) {
inputChannel.setup();
}
}
} | 3.26 |
flink_SingleInputGate_notifyChannelNonEmpty_rdh | // ------------------------------------------------------------------------
// Channel notifications
// ------------------------------------------------------------------------
void notifyChannelNonEmpty(InputChannel channel) {
if (enabledTieredStorage()) {
TieredStorageConsumerSpec tieredStorageConsumerSpec = checkNotNull(tieredStorageConsumerSpecs).get(channel.getChannelIndex());
checkNotNull(availabilityNotifier).notifyAvailable(tieredStorageConsumerSpec.getPartitionId(), tieredStorageConsumerSpec.getSubpartitionId());
} else {
queueChannel(checkNotNull(channel), null, false);
}
} | 3.26 |
flink_SingleInputGate_getNext_rdh | // ------------------------------------------------------------------------
@Override
public Optional<BufferOrEvent> getNext() throws IOException, InterruptedException {
return getNextBufferOrEvent(true);
} | 3.26 |
flink_DeduplicateFunctionHelper_processFirstRowOnProcTime_rdh | /**
* Processes element to deduplicate on keys with process time semantic, sends current element if
* it is first row.
*
* @param currentRow
* latest row received by deduplicate function
* @param state
* state of function
* @param out
* underlying collector
*/
static void processFirstRowOnProcTime(RowData currentRow, ValueState<Boolean> state, Collector<RowData> out) throws Exception {
checkInsertOnly(currentRow);
// ignore record if it is not first row
if (state.value() != null) {
return;
}
state.update(true);
// emit the first row which is INSERT message
out.collect(currentRow);
} | 3.26 |
flink_DeduplicateFunctionHelper_updateDeduplicateResult_rdh | /**
* Collect the updated result for duplicate row.
*
* @param generateUpdateBefore
* flag to generate UPDATE_BEFORE message or not
* @param generateInsert
* flag to generate INSERT message or not
* @param preRow
* previous row under the key
* @param currentRow
* current row under the key which is the duplicate row
* @param out
* underlying collector
*/
static void updateDeduplicateResult(boolean generateUpdateBefore, boolean generateInsert, RowData preRow, RowData currentRow, Collector<RowData> out) {
if (generateUpdateBefore || generateInsert) {
if (preRow == null) {
// the first row, send INSERT message
currentRow.setRowKind(RowKind.INSERT);
out.collect(currentRow);
} else {
if (generateUpdateBefore) {
final RowKind preRowKind = preRow.getRowKind();
preRow.setRowKind(RowKind.UPDATE_BEFORE);
out.collect(preRow);
preRow.setRowKind(preRowKind);
}
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
} else {
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
} | 3.26 |
flink_DeduplicateFunctionHelper_checkInsertOnly_rdh | /**
 * Checks that the message is insert-only.
*/
static void checkInsertOnly(RowData currentRow) {
Preconditions.checkArgument(currentRow.getRowKind() == RowKind.INSERT);
} | 3.26 |
flink_DeduplicateFunctionHelper_processLastRowOnProcTime_rdh | /**
* Utility for deduplicate function.
 */
public class DeduplicateFunctionHelper {
/**
* Processes element to deduplicate on keys with process time semantic, sends current element as
* last row, retracts previous element if needed.
*
* @param currentRow
* latest row received by deduplicate function
* @param generateUpdateBefore
* whether need to send UPDATE_BEFORE message for updates
* @param state
* state of function, null if generateUpdateBefore is false
* @param out
* underlying collector
* @param isStateTtlEnabled
 * whether state TTL is enabled
* @param equaliser
* the record equaliser used to equal RowData.
*/
static void processLastRowOnProcTime(RowData currentRow, boolean generateUpdateBefore, boolean generateInsert, ValueState<RowData> state, Collector<RowData> out, boolean isStateTtlEnabled, RecordEqualiser equaliser) throws Exception {
checkInsertOnly(currentRow);
if (generateUpdateBefore || generateInsert) {
// use state to keep the previous row content if we need to generate UPDATE_BEFORE
// or use to distinguish the first row, if we need to generate INSERT
RowData preRow = state.value();
state.update(currentRow);
if (preRow == null) {
// the first row, send INSERT message
currentRow.setRowKind(RowKind.INSERT);
out.collect(currentRow);
} else if ((!isStateTtlEnabled) && equaliser.equals(preRow, currentRow)) {
// currentRow is the same as preRow and state cleaning is not enabled.
// We do not emit retraction and update message.
// If state cleaning is enabled, we have to emit messages to prevent too early
// state eviction of downstream operators.
return;
} else {
if (generateUpdateBefore) {
preRow.setRowKind(RowKind.UPDATE_BEFORE);
out.collect(preRow);
}
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
} else {
// always send UPDATE_AFTER if INSERT is not needed
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
} | 3.26 |
flink_DeduplicateFunctionHelper_processLastRowOnChangelog_rdh | /**
* Processes element to deduplicate on keys, sends current element as last row, retracts
* previous element if needed.
*
* <p>Note: we don't support stateless mode yet. Because this is not safe for Kafka tombstone
* messages which doesn't contain full content. This can be a future improvement if the
* downstream (e.g. sink) doesn't require full content for DELETE messages.
*
* @param currentRow
* latest row received by deduplicate function
* @param generateUpdateBefore
* whether need to send UPDATE_BEFORE message for updates
* @param state
* state of function
* @param out
* underlying collector
*/
static void processLastRowOnChangelog(RowData currentRow, boolean generateUpdateBefore, ValueState<RowData> state, Collector<RowData> out, boolean isStateTtlEnabled, RecordEqualiser equaliser) throws Exception {
RowData preRow = state.value();
RowKind currentKind = currentRow.getRowKind();
if ((currentKind == RowKind.INSERT) || (currentKind == RowKind.UPDATE_AFTER)) {
if (preRow == null) {
// the first row, send INSERT message
currentRow.setRowKind(RowKind.INSERT);
out.collect(currentRow);
} else if ((!isStateTtlEnabled) && equaliser.equals(preRow, currentRow)) {
// currentRow is the same as preRow and state cleaning is not enabled.
// We do not emit retraction and update message.
// If state cleaning is enabled, we have to emit messages to prevent too early
// state eviction of downstream operators.
return;
} else {
if (generateUpdateBefore) {
preRow.setRowKind(RowKind.UPDATE_BEFORE);
out.collect(preRow);
}
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
// normalize row kind
currentRow.setRowKind(RowKind.INSERT);
// save to state
state.update(currentRow);
} else {
// DELETE or UPDATE_BEFORE
if (preRow != null) {
// always set to DELETE because this row has been removed
// even if the input is UPDATE_BEFORE, there may be no UPDATE_AFTER after it.
preRow.setRowKind(RowKind.DELETE);
// output the preRow instead of currentRow,
// because preRow always contains the full content.
// currentRow may only contain key parts (e.g. Kafka tombstone records).
out.collect(preRow);
// clear state as the row has been removed
state.clear();
}
// nothing to do if removing a non-existent row
}
} | 3.26 |
flink_DeduplicateFunctionHelper_isDuplicate_rdh | /**
 * Returns whether the current row is a duplicate compared to the previous row.
*/
public static boolean isDuplicate(RowData preRow, RowData currentRow, int rowtimeIndex, boolean keepLastRow) {
if (keepLastRow) {
return (preRow == null) || (getRowtime(preRow, rowtimeIndex) <= getRowtime(currentRow, rowtimeIndex));
} else {
return (preRow == null) || (getRowtime(currentRow, rowtimeIndex) < getRowtime(preRow, rowtimeIndex));
}
} | 3.26 |
flink_LeaderRetriever_getLeaderNow_rdh | /**
* Returns the current leader information if available. Otherwise it returns an empty optional.
*
* @return The current leader information if available. Otherwise it returns an empty optional.
* @throws Exception
* if the leader future has been completed with an exception
*/
public Optional<Tuple2<String, UUID>> getLeaderNow() throws Exception {
CompletableFuture<Tuple2<String, UUID>> leaderFuture = this.atomicLeaderFuture.get();
if (leaderFuture != null) {
if (leaderFuture.isDone()) {
return Optional.of(leaderFuture.get());
} else {
return Optional.empty();
}
} else {
return Optional.empty();
}
} | 3.26 |
flink_LeaderRetriever_getLeaderFuture_rdh | /**
* Returns the current JobManagerGateway future.
 */
public CompletableFuture<Tuple2<String, UUID>> getLeaderFuture() {
return atomicLeaderFuture.get();
} | 3.26 |
flink_TwoInputTransformation_getInput2_rdh | /**
* Returns the second input {@code Transformation} of this {@code TwoInputTransformation}.
*/
public Transformation<IN2> getInput2() {
return input2;
} | 3.26 |
flink_TwoInputTransformation_getInputType2_rdh | /**
* Returns the {@code TypeInformation} for the elements from the second input.
*/
public TypeInformation<IN2> getInputType2() {
return input2.getOutputType();
} | 3.26 |
flink_TwoInputTransformation_setStateKeySelectors_rdh | /**
* Sets the {@link KeySelector KeySelectors} that must be used for partitioning keyed state of
* this transformation.
*
* @param stateKeySelector1
* The {@code KeySelector} to set for the first input
* @param stateKeySelector2
 * The {@code KeySelector} to set for the second input
*/
public void setStateKeySelectors(KeySelector<IN1, ?> stateKeySelector1, KeySelector<IN2, ?> stateKeySelector2) {
this.stateKeySelector1 = stateKeySelector1;
this.stateKeySelector2 = stateKeySelector2;
updateManagedMemoryStateBackendUseCase((stateKeySelector1 != null) || (stateKeySelector2 != null));
} | 3.26 |
flink_TwoInputTransformation_m0_rdh | /**
* Returns the {@code TypeInformation} for the elements from the first input.
*/
public TypeInformation<IN1> m0() {
return input1.getOutputType();
} | 3.26 |
flink_TwoInputTransformation_getInput1_rdh | /**
* Returns the first input {@code Transformation} of this {@code TwoInputTransformation}.
*/
public Transformation<IN1> getInput1() {
return input1;
} | 3.26 |
flink_TwoInputTransformation_getStateKeySelector2_rdh | /**
* Returns the {@code KeySelector} that must be used for partitioning keyed state in this
* Operation for the second input.
*
* @see #setStateKeySelectors
*/
public KeySelector<IN2, ?> getStateKeySelector2() {
return stateKeySelector2;
} | 3.26 |
flink_TwoInputTransformation_getOperatorFactory_rdh | /**
* Returns the {@code StreamOperatorFactory} of this Transformation.
*/
public StreamOperatorFactory<OUT> getOperatorFactory() {
return operatorFactory;
} | 3.26 |
flink_RetryPolicy_fromConfig_rdh | /**
* Retry policy to use by {@link RetryingExecutor}.
*/
@Internal
public interface RetryPolicy {
static RetryPolicy fromConfig(ReadableConfig config) {
switch (config.get(FsStateChangelogOptions.RETRY_POLICY)) {
case "fixed" :
return fixed(config.get(FsStateChangelogOptions.RETRY_MAX_ATTEMPTS), config.get(FsStateChangelogOptions.UPLOAD_TIMEOUT).toMillis(), config.get(FsStateChangelogOptions.RETRY_DELAY_AFTER_FAILURE).toMillis());
case "none" :
return NONE;
default :
throw new IllegalConfigurationException("Unknown retry policy: " + config.get(FsStateChangelogOptions.RETRY_POLICY));
} | 3.26 |
flink_DependencyParser_getDepth_rdh | /**
* The depths returned by this method do NOT return a continuous sequence.
*
* <pre>
* +- org.apache.flink:...
* | +- org.apache.flink:...
* | | \- org.apache.flink:...
* ...
* </pre>
*/
private static int getDepth(String line) {
final int level = line.indexOf('+');
if (level != (-1)) {
return level;
}
return line.indexOf('\\');
} | 3.26 |
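A standalone illustration (re-implementing the same lookup, since the method itself is private) of why the returned depths are not a continuous sequence; the exact values depend on the indentation width Maven prints:

```java
public class DepthSketch {
    // Same lookup as getDepth: position of '+' if present, otherwise position of '\'.
    static int depthOf(String line) {
        int level = line.indexOf('+');
        return level != -1 ? level : line.indexOf('\\');
    }

    public static void main(String[] args) {
        System.out.println(depthOf("+- org.apache.flink:a"));        // 0
        System.out.println(depthOf("|  +- org.apache.flink:b"));     // 3, not 1
        System.out.println(depthOf("|  |  \\- org.apache.flink:c")); // 6, not 2
    }
}
```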
flink_DependencyParser_parseDependencyCopyOutput_rdh | /**
* Parses the output of a Maven build where {@code dependency:copy} was used, and returns a set
* of copied dependencies for each module.
*
* <p>The returned dependencies will NEVER contain the scope or optional flag.
*/
public static Map<String, Set<Dependency>> parseDependencyCopyOutput(Path buildOutput) throws IOException {
return processLines(buildOutput, DependencyParser::parseDependencyCopyOutput);
} | 3.26 |
flink_DependencyParser_parseDependencyTreeOutput_rdh | /**
* Parses the output of a Maven build where {@code dependency:tree} was used, and returns a set
* of dependencies for each module.
*/
public static Map<String, DependencyTree> parseDependencyTreeOutput(Path buildOutput) throws IOException {
return processLines(buildOutput, DependencyParser::parseDependencyTreeOutput);
} | 3.26 |
flink_TaskExecutorRegistrationSuccess_getClusterInformation_rdh | /**
* Gets the cluster information.
 */
public ClusterInformation getClusterInformation() {
return clusterInformation;
} | 3.26 |
flink_TaskExecutorRegistrationSuccess_getResourceManagerId_rdh | /**
* Gets the unique ID that identifies the ResourceManager.
*/
public ResourceID getResourceManagerId() {
return resourceManagerResourceId;
} | 3.26 |
flink_TaskExecutorRegistrationSuccess_getInitialTokens_rdh | /**
* Gets the initial tokens.
*/
public byte[] getInitialTokens() {
return initialTokens;
} | 3.26 |
flink_TaskExecutorRegistrationSuccess_getRegistrationId_rdh | /**
* Gets the ID that the ResourceManager assigned the registration.
*/
public InstanceID getRegistrationId() {
return registrationId;
} | 3.26 |
flink_BasicTypeInfo_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return (31 * Objects.hash(clazz, serializer, comparatorClass)) + Arrays.hashCode(possibleCastTargetTypes);
} | 3.26 |
flink_BasicTypeInfo_getInfoFor_rdh | // --------------------------------------------------------------------------------------------
@PublicEvolving
public static <X> BasicTypeInfo<X> getInfoFor(Class<X> type) {
if (type == null) {
throw new NullPointerException();
}
@SuppressWarnings("unchecked")
BasicTypeInfo<X> info = ((BasicTypeInfo<X>) (TYPES.get(type)));
return info;
} | 3.26 |
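A minimal usage sketch; the identity check reflects that the lookup is backed by a static map of the predefined constants such as `BasicTypeInfo.STRING_TYPE_INFO` (the printed representation is an assumption):

```java
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;

public class BasicTypeInfoSketch {
    public static void main(String[] args) {
        BasicTypeInfo<String> stringInfo = BasicTypeInfo.getInfoFor(String.class);
        // The lookup returns the cached predefined instance for basic Java types.
        System.out.println(stringInfo == BasicTypeInfo.STRING_TYPE_INFO); // expected: true
        System.out.println(stringInfo);
    }
}
```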
flink_BasicTypeInfo_shouldAutocastTo_rdh | // --------------------------------------------------------------------------------------------
/**
* Returns whether this type should be automatically casted to the target type in an arithmetic
* operation.
*/
@PublicEvolving
public boolean shouldAutocastTo(BasicTypeInfo<?> to) {
for (Class<?> possibleTo : possibleCastTargetTypes) {
if (possibleTo.equals(to.m0())) {
return true;
}
}
return false;
} | 3.26 |
flink_CopyableValueSerializer_snapshotConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<T> snapshotConfiguration() {
return new CopyableValueSerializerSnapshot<>(valueClass);
} | 3.26 |
flink_CopyableValueSerializer_ensureInstanceInstantiated_rdh | // --------------------------------------------------------------------------------------------
private void ensureInstanceInstantiated() {
if (instance == null) {
instance = createInstance();
}
} | 3.26 |
flink_SpecializedFunction_close_rdh | /**
* Closes the runtime implementation for expression evaluation. It performs clean up work.
*
* <p>This method should be called in {@link UserDefinedFunction#close()}.
*/
default void close() {
} | 3.26 |
flink_HiveParserQueryState_createConf_rdh | /**
 * If there are query-specific settings to overlay, then create a copy of the config. There are two
* cases we need to clone the session config that's being passed to hive driver 1. Async query -
* If the client changes a config setting, that shouldn't reflect in the execution already
* underway 2. confOverlay - The query specific settings should only be applied to the query
* config and not session
*
* @return new configuration
*/
private HiveConf createConf(HiveConf conf, Map<String, String> confOverlay, boolean runAsync) {
if ((confOverlay != null) && (!confOverlay.isEmpty())) {
conf = (conf == null) ? new HiveConf() : new HiveConf(conf);
// apply overlay query specific settings, if any
for (Map.Entry<String, String> confEntry : confOverlay.entrySet()) {
try {
conf.verifyAndSet(confEntry.getKey(), confEntry.getValue());
} catch (IllegalArgumentException e) {
throw new RuntimeException("Error applying statement specific settings", e);
}
}
} else if (runAsync) {
conf = (conf == null) ? new HiveConf() : new HiveConf(conf);
}
if (conf == null) {
conf = new HiveConf();
}
conf.setVar(ConfVars.HIVEQUERYID, QueryPlan.makeQueryId());
return conf;
} | 3.26 |
flink_AllReduceDriver_prepare_rdh | // --------------------------------------------------------------------------------------------
@Override
public void prepare() throws Exception {
final TaskConfig config = this.taskContext.getTaskConfig();
if (config.getDriverStrategy() != DriverStrategy.ALL_REDUCE) {
throw new Exception("Unrecognized driver strategy for AllReduce driver: " + config.getDriverStrategy().name());
}
TypeSerializerFactory<T> serializerFactory = this.taskContext.getInputSerializer(0);
this.serializer = serializerFactory.getSerializer();
this.input = this.taskContext.getInput(0);
ExecutionConfig executionConfig = f0.getExecutionConfig();
this.objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (LOG.isDebugEnabled()) {
LOG.debug(("AllReduceDriver object reuse: "
+ (this.objectReuseEnabled ? "ENABLED" : "DISABLED")) + ".");
}
} | 3.26 |
flink_AllReduceDriver_setup_rdh | // ------------------------------------------------------------------------
@Override
public void setup(TaskContext<ReduceFunction<T>, T> context) {
this.taskContext = context;
this.running = true;
} | 3.26 |
flink_RestServerEndpointConfiguration_getUploadDir_rdh | /**
* Returns the directory used to temporarily store multipart/form-data uploads.
*/
public Path getUploadDir() {
return uploadDir;
} | 3.26 |
flink_RestServerEndpointConfiguration_fromConfiguration_rdh | /**
* Creates and returns a new {@link RestServerEndpointConfiguration} from the given {@link Configuration}.
*
* @param config
* configuration from which the REST server endpoint configuration should be
* created from
* @return REST server endpoint configuration
* @throws ConfigurationException
* if SSL was configured incorrectly
*/
public static RestServerEndpointConfiguration fromConfiguration(Configuration config) throws ConfigurationException {
Preconditions.checkNotNull(config);
final String restAddress = Preconditions.checkNotNull(config.getString(RestOptions.ADDRESS), "%s must be set", RestOptions.ADDRESS.key());
final String restBindAddress = config.getString(RestOptions.BIND_ADDRESS);
final String portRangeDefinition = config.getString(RestOptions.BIND_PORT);
final SSLHandlerFactory sslHandlerFactory;
if (SecurityOptions.isRestSSLEnabled(config)) {
try {
sslHandlerFactory = SSLUtils.createRestServerSSLEngineFactory(config);
} catch (Exception e) {
throw new ConfigurationException("Failed to initialize SSLEngineFactory for REST server endpoint.", e);
}
} else {
sslHandlerFactory = null;
}
final Path uploadDir = Paths.get(config.getString(WebOptions.UPLOAD_DIR, config.getString(WebOptions.TMP_DIR)), "flink-web-upload");
final int maxContentLength = config.getInteger(RestOptions.SERVER_MAX_CONTENT_LENGTH);
final Map<String, String> responseHeaders = Collections.singletonMap(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN.toString(), config.getString(WebOptions.ACCESS_CONTROL_ALLOW_ORIGIN));
return new RestServerEndpointConfiguration(restAddress, restBindAddress, portRangeDefinition, sslHandlerFactory, uploadDir, maxContentLength, responseHeaders);
} | 3.26 |
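A hedged sketch of building the input configuration (only `RestOptions.ADDRESS` is mandatory per the check above; the bind-port range is optional and the value here is just an example):

```java
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.runtime.rest.RestServerEndpointConfiguration;
import org.apache.flink.util.ConfigurationException;

public class RestConfigSketch {
    public static RestServerEndpointConfiguration build() throws ConfigurationException {
        Configuration config = new Configuration();
        config.setString(RestOptions.ADDRESS, "localhost");   // required by fromConfiguration
        config.setString(RestOptions.BIND_PORT, "8081-8090"); // optional port range
        return RestServerEndpointConfiguration.fromConfiguration(config);
    }
}
```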
flink_RestServerEndpointConfiguration_getMaxContentLength_rdh | /**
* Returns the max content length that the REST server endpoint could handle.
*
* @return max content length that the REST server endpoint could handle
*/
public int getMaxContentLength() {
return maxContentLength;
} | 3.26 |
flink_RestServerEndpointConfiguration_getRestBindPortRange_rdh | /**
* Returns the port range that the REST server endpoint should listen on.
*
* @return port range that the REST server endpoint should listen on
 */
public String getRestBindPortRange() {
return restBindPortRange;
}
/**
* Returns the {@link SSLEngine} | 3.26 |
flink_RestServerEndpointConfiguration_getRestAddress_rdh | /**
*
* @see RestOptions#ADDRESS
*/
public String getRestAddress() {
return f0;
} | 3.26 |
flink_RestServerEndpointConfiguration_getResponseHeaders_rdh | /**
* Response headers that should be added to every HTTP response.
*/
public Map<String, String> getResponseHeaders() {
return responseHeaders;
} | 3.26 |
flink_HiveParserSemanticAnalyzer_genColListRegex_rdh | // TODO: make aliases unique, otherwise needless rewriting takes place
@SuppressWarnings("nls")
public Integer genColListRegex(String colRegex, String tabAlias, HiveParserASTNode sel, ArrayList<ExprNodeDesc> colList, HashSet<ColumnInfo> excludeCols, HiveParserRowResolver input, HiveParserRowResolver colSrcRR, Integer pos, HiveParserRowResolver output, List<String> aliases, boolean ensureUniqueCols) throws SemanticException {
if (colSrcRR == null) {
colSrcRR = input;
}
// The table alias should exist
if ((tabAlias != null) && (!colSrcRR.hasTableAlias(tabAlias))) {
throw new SemanticException(HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_TABLE_ALIAS, sel));
}
// TODO: Have to put in the support for AS clause
Pattern regex;
try {
regex = Pattern.compile(colRegex, Pattern.CASE_INSENSITIVE);
} catch (PatternSyntaxException e) {
throw new SemanticException(HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_COLUMN, sel, e.getMessage()));
}
StringBuilder replacementText = new StringBuilder();
int matched = 0;
// add empty string to the list of aliases. Some operators (ex. GroupBy) add
// ColumnInfos for table alias "".
if (!aliases.contains("")) {
aliases.add("");
}
/* track the input ColumnInfos that are added to the output.
if a columnInfo has multiple mappings; then add the column only once,
but carry the mappings forward.
*/
Map<ColumnInfo, ColumnInfo> inputColsProcessed = new HashMap<>();
// For expr "*", aliases should be iterated in the order they are specified in the query.
if (colSrcRR.getNamedJoinInfo() != null) {
// We got using() clause in previous join. Need to generate select list as
// per standard. For * we will have joining columns first non-repeated
// followed by other columns.
HashMap<String, ColumnInfo> leftMap = colSrcRR.getFieldMap(colSrcRR.getNamedJoinInfo().getAliases().get(0));
HashMap<String, ColumnInfo> rightMap = colSrcRR.getFieldMap(colSrcRR.getNamedJoinInfo().getAliases().get(1));
HashMap<String, ColumnInfo> chosenMap = null;
if (colSrcRR.getNamedJoinInfo().getHiveJoinType() != JoinType.RIGHTOUTER) {
chosenMap = leftMap;
} else {
chosenMap = rightMap;
}
// first get the columns in named columns
for (String columnName : colSrcRR.getNamedJoinInfo().getNamedColumns()) {
for (Map.Entry<String, ColumnInfo> entry : chosenMap.entrySet()) {
ColumnInfo colInfo = entry.getValue();
if (!columnName.equals(colInfo.getAlias())) {
continue;
}
String name = colInfo.getInternalName();
String[] tmp = colSrcRR.reverseLookup(name);
// Skip the colinfos which are not for this particular alias
if ((tabAlias != null) && (!tmp[0].equalsIgnoreCase(tabAlias))) {
continue;
}
if (colInfo.getIsVirtualCol() && colInfo.isHiddenVirtualCol()) {
continue;
}
ColumnInfo oColInfo = inputColsProcessed.get(colInfo);
if (oColInfo == null) {
ExprNodeColumnDesc expr = new ExprNodeColumnDesc(colInfo.getType(), name, colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isSkewedCol());
colList.add(expr);
oColInfo = new ColumnInfo(getColumnInternalName(pos), colInfo.getType(), colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isHiddenVirtualCol());
inputColsProcessed.put(colInfo, oColInfo);
}
if (ensureUniqueCols) {
if (!output.putWithCheck(tmp[0], tmp[1], null, oColInfo)) {
throw new SemanticException(((((("Cannot add column to RR: " + tmp[0]) + ".") + tmp[1]) + " => ") + oColInfo) + " due to duplication, see previous warnings");
}
} else {
output.put(tmp[0], tmp[1], oColInfo);
}
pos = pos + 1;
matched++;
if (unparseTranslator.isEnabled()) {
if (replacementText.length() > 0) {
replacementText.append(", ");
}
replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
replacementText.append(".");
replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
}
}
}
}
for (String alias : aliases) {
HashMap<String, ColumnInfo> fMap = colSrcRR.getFieldMap(alias);
if (fMap == null) {
continue;
}
// For the tab.* case, add all the columns to the fieldList from the input schema
for (Map.Entry<String, ColumnInfo> entry : fMap.entrySet()) {
ColumnInfo colInfo = entry.getValue();
if ((colSrcRR.getNamedJoinInfo() != null) && colSrcRR.getNamedJoinInfo().getNamedColumns().contains(colInfo.getAlias())) {
// we already added this column in select list.
continue;
}
if ((excludeCols != null) && excludeCols.contains(colInfo)) {
continue;// This was added during plan generation.
}
// First, look up the column from the source against which * is to be resolved.
// We'd later translated this into the column from proper input, if it's valid.
// TODO: excludeCols may be possible to remove using the same technique.
String name = colInfo.getInternalName();
String[] tmp = colSrcRR.reverseLookup(name);
// Skip the colinfos which are not for this particular alias
if ((tabAlias != null) && (!tmp[0].equalsIgnoreCase(tabAlias))) {
continue;
}
if (colInfo.getIsVirtualCol() && colInfo.isHiddenVirtualCol()) {
continue;
}
// Not matching the regex?
if (!regex.matcher(tmp[1]).matches()) {
continue;
}
// If input (GBY) is different than the source of columns, find the
// same column in input.
// TODO: This is fraught with peril.
if (input != colSrcRR) {
colInfo = input.get(tabAlias, tmp[1]);
if (colInfo == null) {
LOG.error("Cannot find colInfo for " + tabAlias + "." + tmp[1] + ", derived from [" + colSrcRR + "], in [" + input + "]");
throw new SemanticException(ErrorMsg.NON_KEY_EXPR_IN_GROUPBY, tmp[1]);
}
String oldCol = null;
if (LOG.isDebugEnabled()) {
oldCol = (name + " => ") + (tmp == null ? "null" : (tmp[0] + ".") + tmp[1]);
}
name = colInfo.getInternalName();
tmp = input.reverseLookup(name);
if (LOG.isDebugEnabled()) {
String newCol = (name + " => ") + (tmp == null ? "null" : (tmp[0] + ".") + tmp[1]);
LOG.debug("Translated [" + oldCol + "] to [" + newCol + "]");
}
}
ColumnInfo oColInfo = inputColsProcessed.get(colInfo);
if (oColInfo == null) {
ExprNodeColumnDesc expr = new ExprNodeColumnDesc(colInfo.getType(), name, colInfo.getTabAlias(), colInfo.getIsVirtualCol(),
colInfo.isSkewedCol());
colList.add(expr);
oColInfo = new ColumnInfo(getColumnInternalName(pos), colInfo.getType(), colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isHiddenVirtualCol());
inputColsProcessed.put(colInfo, oColInfo);
}
if (ensureUniqueCols) {
if (!output.putWithCheck(tmp[0], tmp[1], null, oColInfo)) {
throw new SemanticException(((((("Cannot add column to RR: " + tmp[0]) + ".") + tmp[1]) + " => ") + oColInfo) + " due to duplication, see previous warnings");
}
} else {
output.put(tmp[0], tmp[1], oColInfo);
}
pos++;
matched++;
if (unparseTranslator.isEnabled()) {
if (replacementText.length() > 0) {
replacementText.append(", ");
}
replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
replacementText.append(".");
replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
}
}
}
if (matched == 0) {
throw new SemanticException(HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_COLUMN, sel));
}
if (unparseTranslator.isEnabled()) {
unparseTranslator.addTranslation(sel, replacementText.toString());
}
return pos;
} | 3.26 |
flink_HiveParserSemanticAnalyzer_processTable_rdh | /**
* Goes though the tabref tree and finds the alias for the table. Once found, it records the
* table name-> alias association in aliasToTabs. It also makes an association from the alias to
* the table AST in parse info.
*/
private String processTable(HiveParserQB qb, HiveParserASTNode tabref) throws SemanticException {
// For each table reference get the table name
// and the alias (if alias is not present, the table name
// is used as an alias)
int[] indexes = findTabRefIdxs(tabref);
int aliasIndex = indexes[0];
int propsIndex = indexes[1];
int tsampleIndex = indexes[2];
int ssampleIndex = indexes[3];
HiveParserASTNode tableTree = ((HiveParserASTNode) (tabref.getChild(0)));
String qualifiedTableName = getUnescapedName(tableTree, catalogRegistry.getCurrentCatalog(), catalogRegistry.getCurrentDatabase()).toLowerCase();
String originTableName = getUnescapedOriginTableName(tableTree);
String alias = findSimpleTableName(tabref, aliasIndex);
if (propsIndex >= 0) {
Tree propsAST = tabref.getChild(propsIndex);
Map<String, String> props = HiveParserDDLSemanticAnalyzer.getProps(((HiveParserASTNode) (propsAST.getChild(0))));
// We get the information from Calcite.
if ("TRUE".equals(props.get("insideView"))) {qb.getAliasInsideView().add(alias.toLowerCase());
}
qb.setTabProps(alias, props);
}
// If the alias is already there then we have a conflict
if (qb.exists(alias)) {
throw new SemanticException(HiveParserErrorMsg.getMsg(ErrorMsg.AMBIGUOUS_TABLE_ALIAS, tabref.getChild(aliasIndex)));
}
if (tsampleIndex >= 0) {
HiveParserASTNode sampleClause = ((HiveParserASTNode) (tabref.getChild(tsampleIndex)));
ArrayList<HiveParserASTNode> sampleCols = new ArrayList<>();
if (sampleClause.getChildCount() > 2) {
for (int i = 2; i < sampleClause.getChildCount(); i++) {
sampleCols.add(((HiveParserASTNode) (sampleClause.getChild(i))));
}
}
// TODO: For now only support sampling on up to two columns
// Need to change it to list of columns
if (sampleCols.size() > 2) {
throw new SemanticException(HiveParserUtils.generateErrorMessage(((HiveParserASTNode) (tabref.getChild(0))), ErrorMsg.SAMPLE_RESTRICTION.getMsg()));
}
qb.getParseInfo().setTabSample(alias);
if (unparseTranslator.isEnabled()) {
for (HiveParserASTNode sampleCol : sampleCols) {
unparseTranslator.addIdentifierTranslation(((HiveParserASTNode) (sampleCol.getChild(0))));
}
}
} else if (ssampleIndex >= 0) {
HiveParserASTNode sampleClause = ((HiveParserASTNode) (tabref.getChild(ssampleIndex)));
Tree type = sampleClause.getChild(0);
Tree numerator = sampleClause.getChild(1);
String value = unescapeIdentifier(numerator.getText());
SplitSample sample;
if (type.getType() == HiveASTParser.TOK_PERCENT) {
double percent = Double.parseDouble(value);
if ((percent < 0) || (percent > 100)) {
throw new SemanticException(HiveParserUtils.generateErrorMessage(((HiveParserASTNode) (numerator)), "Sampling percentage should be between 0 and 100"));
}
int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM);
sample = new SplitSample(percent, seedNum);
} else if (type.getType() == HiveASTParser.TOK_ROWCOUNT) {
sample = new SplitSample(Integer.parseInt(value));
} else {
assert type.getType() == HiveASTParser.TOK_LENGTH;
long length = Integer.parseInt(value.substring(0, value.length() - 1));
char last = value.charAt(value.length() - 1);
if ((last == 'k') || (last == 'K')) {
length <<= 10;
} else if ((last == 'm') || (last == 'M')) {
length <<= 20;
} else if ((last == 'g') || (last == 'G')) {
length <<= 30;
}
int v45 = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM);
sample = new SplitSample(length, v45);
}
String aliasId = getAliasId(alias, qb);
nameToSplitSample.put(aliasId, sample);
}
// Insert this map into the stats
qb.setTabAlias(alias, originTableName, qualifiedTableName);
if (qb.isInsideView()) {
qb.getAliasInsideView().add(alias.toLowerCase());
}
qb.addAlias(alias);
qb.getParseInfo().setSrcForAlias(alias, tableTree);
// if alias to CTE contains the table name, we do not do the translation because
// cte is actually a subquery.
if (!this.aliasToCTEs.containsKey(qualifiedTableName)) {
unparseTranslator.addTableNameTranslation(tableTree, catalogRegistry.getCurrentCatalog(), catalogRegistry.getCurrentDatabase());
if (aliasIndex != 0) {
unparseTranslator.addIdentifierTranslation(((HiveParserASTNode) (tabref.getChild(aliasIndex))));
}
}
return alias;
} | 3.26 |
flink_HiveParserSemanticAnalyzer_gatherCTEReferences_rdh | // TODO: check view references, too
private void gatherCTEReferences(HiveParserQB qb, HiveParserBaseSemanticAnalyzer.CTEClause current) throws HiveException {
for (String alias : qb.getTabAliases()) {
String originTabName = qb.getOriginTabNameForAlias(alias);
String cteName = originTabName.toLowerCase();
HiveParserBaseSemanticAnalyzer.CTEClause v147 = findCTEFromName(qb, cteName);
if (v147 != null) {
if (ctesExpanded.contains(cteName)) {
throw new SemanticException("Recursive cte " + cteName + " detected (cycle: " + StringUtils.join(ctesExpanded, " -> ") + " -> " + cteName + ").");
}
v147.reference++;
current.parents.add(v147);
if (v147.qbExpr != null) {
continue;
}
v147.qbExpr = new HiveParserQBExpr(cteName);
doPhase1QBExpr(v147.cteNode, v147.qbExpr, qb.getId(), cteName);
ctesExpanded.add(cteName);
gatherCTEReferences(v147.qbExpr, v147);
ctesExpanded.remove(ctesExpanded.size() - 1);
}
}
for (String alias : qb.getSubqAliases()) {
gatherCTEReferences(qb.getSubqForAlias(alias), current);
}
} | 3.26 |
flink_HiveParserSemanticAnalyzer_doPhase1GetAllAggregations_rdh | // DFS-scan the expressionTree to find all aggregation subtrees and put them in aggregations.
private void doPhase1GetAllAggregations(HiveParserASTNode expressionTree, HashMap<String, HiveParserASTNode> aggregations, List<HiveParserASTNode> wdwFns) throws SemanticException {
int exprTokenType = expressionTree.getToken().getType();
if (exprTokenType == HiveASTParser.TOK_SUBQUERY_EXPR) {
// since now we have scalar subqueries we can get subquery expression in having
// we don't want to include aggregate from within subquery
return;
}
if (((exprTokenType == HiveASTParser.TOK_FUNCTION) || (exprTokenType == HiveASTParser.TOK_FUNCTIONDI)) || (exprTokenType == HiveASTParser.TOK_FUNCTIONSTAR)) {
assert expressionTree.getChildCount() != 0;
if (expressionTree.getChild(expressionTree.getChildCount() - 1).getType() == HiveASTParser.TOK_WINDOWSPEC) {
// If it is a windowing spec, we include it in the list
// Further, we will examine its children AST nodes to check whether there are
// aggregation functions within
wdwFns.add(expressionTree);
doPhase1GetAllAggregations(((HiveParserASTNode) (expressionTree.getChild(expressionTree.getChildCount() - 1))), aggregations, wdwFns);
return;
}
if (expressionTree.getChild(0).getType() == HiveASTParser.Identifier) {
String functionName = unescapeIdentifier(expressionTree.getChild(0).getText());
SqlOperator sqlOperator = HiveParserUtils.getAnySqlOperator(functionName, frameworkConfig.getOperatorTable());
if (sqlOperator == null) {
throw new SemanticException(ErrorMsg.INVALID_FUNCTION.getMsg(functionName));
}
if (FunctionRegistry.impliesOrder(functionName)) {
throw new SemanticException(ErrorMsg.MISSING_OVER_CLAUSE.getMsg(functionName));
}
if (HiveParserUtils.isUDAF(sqlOperator)) {
if (containsLeadLagUDF(expressionTree)) {
throw new SemanticException(ErrorMsg.MISSING_OVER_CLAUSE.getMsg(functionName));
}
aggregations.put(expressionTree.toStringTree(), expressionTree);
if (!HiveParserUtils.isNative(sqlOperator)) {
unparseTranslator.addIdentifierTranslation(((HiveParserASTNode) (expressionTree.getChild(0))));
}
return;
}
}
}
for (int i = 0; i < expressionTree.getChildCount(); i++) {
doPhase1GetAllAggregations(((HiveParserASTNode) (expressionTree.getChild(i))), aggregations, wdwFns);
}
} | 3.26 |
flink_HiveParserSemanticAnalyzer_genAllExprNodeDesc_rdh | /**
* Generates all of the expression node descriptors for the expression and children of it passed
* in the arguments. This function uses the row resolver and the metadata information that are
* passed as arguments to resolve the column names to internal names.
*/
@SuppressWarnings("nls")
public Map<HiveParserASTNode, ExprNodeDesc> genAllExprNodeDesc(HiveParserASTNode expr, HiveParserRowResolver input, HiveParserTypeCheckCtx tcCtx) throws SemanticException {
// Create the walker and the rules dispatcher.
tcCtx.setUnparseTranslator(unparseTranslator);
Map<HiveParserASTNode, ExprNodeDesc> nodeOutputs = HiveParserTypeCheckProcFactory.genExprNode(expr, tcCtx);
ExprNodeDesc desc = nodeOutputs.get(expr);
if (desc == null) {
String errMsg = tcCtx.getError();
if (errMsg == null) {
errMsg = "Error in parsing ";
}
throw new SemanticException(errMsg);
}
if (desc instanceof HiveParserExprNodeColumnListDesc) {
throw new SemanticException("TOK_ALLCOLREF is not supported in current context");}
if (!unparseTranslator.isEnabled()) {
// Not creating a view, so no need to track view expansions.
return nodeOutputs;
}
Map<ExprNodeDesc, String> nodeToText = new HashMap<>();
List<HiveParserASTNode> fieldDescList = new ArrayList<>();
for (Map.Entry<HiveParserASTNode, ExprNodeDesc> v245 : nodeOutputs.entrySet()) {
if (!(v245.getValue() instanceof ExprNodeColumnDesc)) {
// we need to translate the ExprNodeFieldDesc too, e.g., identifiers in
// struct<>.
if (v245.getValue() instanceof ExprNodeFieldDesc) {
fieldDescList.add(v245.getKey());
}
continue;
}
HiveParserASTNode node = v245.getKey();
ExprNodeColumnDesc columnDesc = ((ExprNodeColumnDesc) (v245.getValue()));
if ((columnDesc.getTabAlias() == null) || (columnDesc.getTabAlias().length() == 0)) {
// These aren't real column refs; instead, they are special
// internal expressions used in the representation of aggregation.
continue;
}
String[] tmp = input.reverseLookup(columnDesc.getColumn());
// in subquery case, tmp may be from outside.
if ((((tmp[0] != null) && (columnDesc.getTabAlias() != null)) && (!tmp[0].equals(columnDesc.getTabAlias()))) && (tcCtx.getOuterRR() != null)) {
tmp = tcCtx.getOuterRR().reverseLookup(columnDesc.getColumn());
}
StringBuilder replacementText = new StringBuilder();
replacementText.append(HiveUtils.unparseIdentifier(tmp[0], conf));
replacementText.append(".");
replacementText.append(HiveUtils.unparseIdentifier(tmp[1], conf));
nodeToText.put(columnDesc, replacementText.toString());
unparseTranslator.addTranslation(node, replacementText.toString());
}
for (HiveParserASTNode node : fieldDescList) {
Map<HiveParserASTNode, String> map = translateFieldDesc(node);
for (Entry<HiveParserASTNode, String> entry : map.entrySet()) {
unparseTranslator.addTranslation(entry.getKey(), entry.getValue());
}
}
return nodeOutputs;
} | 3.26 |
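The map returned by genAllExprNodeDesc is keyed by AST node, so callers look up the descriptor of the root expression (and of any sub-expression) directly. Below is a minimal usage sketch, not taken from the Flink sources; the analyzer, expression, row resolver, and type-check context are assumed to be provided by the surrounding planner code, and imports are omitted.

// Hedged usage sketch for genAllExprNodeDesc; all parameters are assumed to exist.
static ExprNodeDesc resolveRootExpr(
        HiveParserSemanticAnalyzer analyzer,
        HiveParserASTNode expr,
        HiveParserRowResolver rowResolver,
        HiveParserTypeCheckCtx tcCtx) throws SemanticException {
    Map<HiveParserASTNode, ExprNodeDesc> descs =
            analyzer.genAllExprNodeDesc(expr, rowResolver, tcCtx);
    // Every analyzed node appears as a key; the root's descriptor is the usual result.
    return descs.get(expr);
}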
flink_HiveParserSemanticAnalyzer_getExprNodeDescCached_rdh | // Finds the ExprNodeDesc for the expression cached in the HiveParserRowResolver. Returns null if it
// does not exist.
private ExprNodeDesc getExprNodeDescCached(HiveParserASTNode expr, HiveParserRowResolver input) throws SemanticException {
ColumnInfo colInfo = input.getExpression(expr);
if (colInfo != null) {
HiveParserASTNode source = input.getExpressionSource(expr);
if (source != null) {
unparseTranslator.addCopyTranslation(expr, source);
}
return new ExprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName(), colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isSkewedCol());
}
return null;
} | 3.26 |
flink_HiveParserSemanticAnalyzer_processLateralView_rdh | /**
* Given the AST with TOK_LATERAL_VIEW as the root, get the alias for the table or subquery in
 * the lateral view, and also make a mapping from the alias to all the lateral view ASTs.
*/
private String processLateralView(HiveParserQB qb, HiveParserASTNode lateralView) throws SemanticException {
int numChildren = lateralView.getChildCount();
assert numChildren == 2;
HiveParserASTNode next = ((HiveParserASTNode) (lateralView.getChild(1)));
String alias;
switch (next.getToken().getType()) {
case HiveASTParser.TOK_TABREF :
alias = processTable(qb, next);
break;
case HiveASTParser.TOK_SUBQUERY :
alias = processSubQuery(qb, next);
break;
case HiveASTParser.TOK_LATERAL_VIEW :
case HiveASTParser.TOK_LATERAL_VIEW_OUTER :
alias = processLateralView(qb, next);
break;
default :
throw new SemanticException(HiveParserErrorMsg.getMsg(ErrorMsg.LATERAL_VIEW_INVALID_CHILD, lateralView));
}
alias = alias.toLowerCase();
qb.getParseInfo().addLateralViewForAlias(alias, lateralView);
qb.addAlias(alias);
return alias;
} | 3.26 |
flink_HiveParserSemanticAnalyzer_genValuesTempTable_rdh | // Generate a temp table out of a values clause.
// See also preProcessForInsert(HiveParserASTNode, HiveParserQB)
private HiveParserASTNode genValuesTempTable(HiveParserASTNode originalFrom, HiveParserQB qb) throws SemanticException {
// hive creates a temp table and writes the values data into it
// here we skip writing the data but remember the values data instead
// later calcite planner can generate LogicalValues from it
// Step 1, parse the values clause we were handed
List<? extends Node> fromChildren = originalFrom.getChildren();
// First child should be the virtual table ref
HiveParserASTNode virtualTableRef = ((HiveParserASTNode) (fromChildren.get(0)));
Preconditions.checkArgument(virtualTableRef.getToken().getType() == HiveASTParser.TOK_VIRTUAL_TABREF, "Expected first child of TOK_VIRTUAL_TABLE to be TOK_VIRTUAL_TABREF but was " + virtualTableRef.getName());
List<? extends Node> virtualTableRefChildren = virtualTableRef.getChildren();
// First child of this should be the table name. If it's anonymous,
// then we don't have a table name.
HiveParserASTNode tabName = ((HiveParserASTNode) (virtualTableRefChildren.get(0)));
if (tabName.getToken().getType() != HiveASTParser.TOK_ANONYMOUS) {
// TODO, if you want to make select ... from (values(...) as foo(...) work,
// you need to parse this list of column names and build it into the table
throw new SemanticException(ErrorMsg.VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED.getMsg());
}
// The second child of the TOK_VIRTUAL_TABLE should be TOK_VALUES_TABLE
HiveParserASTNode valuesTable = ((HiveParserASTNode) (fromChildren.get(1)));
Preconditions.checkArgument(valuesTable.getToken().getType() == HiveASTParser.TOK_VALUES_TABLE, "Expected second child of TOK_VIRTUAL_TABLE to be TOK_VALUE_TABLE but was " + valuesTable.getName());
// Pick a name for the table
SessionState ss = SessionState.get();
String tableName = (VALUES_TMP_TABLE_NAME_PREFIX + ss.getNextValuesTempTableSuffix()).toLowerCase();
List<? extends Node> rows = valuesTable.getChildren();
List<List<String>> valuesData = new ArrayList<>(rows.size());
List<String> fieldsName = new ArrayList<>();
List<DataType> fieldsDataType = new ArrayList<>();
try {
boolean firstRow = true;
for (Node n : rows) {
// Each of the children of TOK_VALUES_TABLE will be a TOK_VALUE_ROW
HiveParserASTNode row = ((HiveParserASTNode) (n));
Preconditions.checkArgument(row.getToken().getType() == HiveASTParser.TOK_VALUE_ROW, "Expected child of TOK_VALUE_TABLE to be TOK_VALUE_ROW but was " + row.getName());
// Each of the children of this should be a literal
List<? extends Node> columns = row.getChildren();
List<String> data = new ArrayList<>(columns.size());
int nextColNum = 1;
for (Node n1 : columns) {
HiveParserASTNode column = ((HiveParserASTNode) (n1));
if (firstRow) {
fieldsName.add("tmp_values_col" + (nextColNum++));
fieldsDataType.add(DataTypes.STRING());
}
data.add(unparseExprForValuesClause(column));
}
firstRow = false;
valuesData.add(data);
}
// Step 2, create a temp table to maintain table schema
ResolvedSchema v66 = ResolvedSchema.physical(fieldsName, fieldsDataType);
ResolvedCatalogTable tempTable = new ResolvedCatalogTable(CatalogTable.of(Schema.newBuilder().fromResolvedSchema(v66).build(), "values temp table", new ArrayList<>(), Collections.emptyMap()), v66);
// remember the data for this table
qb.getValuesTableToData().put(tableName, Tuple2.of(tempTable, valuesData));
} catch (Exception e) {
throw new SemanticException("Failed to create temp table for VALUES", e);
}
// Step 3, return a new subtree with a from clause built around that temp table
// The form of the tree is TOK_TABREF->TOK_TABNAME->identifier(tablename)
Token t = new ClassicToken(HiveASTParser.TOK_TABREF);
HiveParserASTNode tabRef = new HiveParserASTNode(t);
t = new ClassicToken(HiveASTParser.TOK_TABNAME);
HiveParserASTNode tabNameNode = new HiveParserASTNode(t);
tabRef.addChild(tabNameNode);
t = new ClassicToken(HiveASTParser.Identifier, tableName);
HiveParserASTNode identifier = new HiveParserASTNode(t);
tabNameNode.addChild(identifier);
return tabRef;
} | 3.26 |
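To make the bookkeeping above concrete: a clause such as VALUES (1, 'a'), (2, 'b') is remembered as generated column names, string-typed columns, and the raw literal texts; the Calcite planner later builds LogicalValues from this data. The following is a hedged sketch of the recorded shape, with illustrative values and wrapped in a helper class so it compiles on its own.

import java.util.Arrays;
import java.util.List;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

class ValuesTempTableShape {
    // Hedged sketch of what genValuesTempTable records for VALUES (1, 'a'), (2, 'b').
    static void show() {
        List<String> fieldNames = Arrays.asList("tmp_values_col1", "tmp_values_col2");
        List<DataType> fieldTypes = Arrays.asList(DataTypes.STRING(), DataTypes.STRING());
        ResolvedSchema schema = ResolvedSchema.physical(fieldNames, fieldTypes);
        // Literals are kept as unparsed strings; typing happens later in the planner.
        List<List<String>> valuesData = Arrays.asList(
                Arrays.asList("1", "a"),
                Arrays.asList("2", "b"));
    }
}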
flink_HiveParserSemanticAnalyzer_processJoin_rdh | /**
* Given the AST with TOK_JOIN as the root, get all the aliases for the tables or subqueries in
* the join.
*/
@SuppressWarnings("nls")
private void processJoin(HiveParserQB qb, HiveParserASTNode join) throws SemanticException {
int numChildren = join.getChildCount();
if (((numChildren != 2) && (numChildren != 3)) && (join.getToken().getType() != HiveASTParser.TOK_UNIQUEJOIN)) {
throw new SemanticException(HiveParserUtils.generateErrorMessage(join, "Join with multiple children"));
}
queryProperties.incrementJoinCount(HiveParserUtils.isOuterJoinToken(join));
for (int num = 0; num < numChildren; num++) {
HiveParserASTNode child = ((HiveParserASTNode) (join.getChild(num)));
if (child.getToken().getType() == HiveASTParser.TOK_TABREF) {
processTable(qb, child);
} else if (child.getToken().getType() == HiveASTParser.TOK_SUBQUERY) {
processSubQuery(qb, child);
} else if (child.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION) {
queryProperties.setHasPTF(true);
processPTF(qb, child);
HiveParserPTFInvocationSpec ptfInvocationSpec = qb.getPTFInvocationSpec(child);
String inputAlias = (ptfInvocationSpec == null) ? null : ptfInvocationSpec.getFunction().getAlias();
if (inputAlias == null) {
throw new SemanticException(HiveParserUtils.generateErrorMessage(child, "PTF invocation in a Join must have an alias"));
}
} else if ((child.getToken().getType() == HiveASTParser.TOK_LATERAL_VIEW) || (child.getToken().getType() == HiveASTParser.TOK_LATERAL_VIEW_OUTER)) {
// SELECT * FROM src1 LATERAL VIEW udtf() AS myTable JOIN src2 ...
// is not supported. Instead, the lateral view must be in a subquery
// SELECT * FROM (SELECT * FROM src1 LATERAL VIEW udtf() AS myTable) a
// JOIN src2 ...
throw new SemanticException(HiveParserErrorMsg.getMsg(ErrorMsg.LATERAL_VIEW_WITH_JOIN, join));
} else if (HiveParserUtils.isJoinToken(child)) {
processJoin(qb, child);
}
}
} | 3.26 |
flink_HiveParserSemanticAnalyzer_genExprNodeDesc_rdh | // Generates an expression node descriptor for the expression with HiveParserTypeCheckCtx.
public ExprNodeDesc genExprNodeDesc(HiveParserASTNode expr, HiveParserRowResolver input) throws SemanticException {
// Since the user didn't supply a customized type-checking context,
// use default settings.
return genExprNodeDesc(expr, input, true, false);
} | 3.26 |
flink_PipelinedSubpartition_getBuffersInBacklogUnsafe_rdh | /**
* Gets the number of non-event buffers in this subpartition.
*/
@SuppressWarnings("FieldAccessNotGuarded")
@Override
public int getBuffersInBacklogUnsafe() {
if (isBlocked || buffers.isEmpty()) {
return 0;
}
if ((flushRequested || isFinished) || (!checkNotNull(buffers.peekLast()).getBufferConsumer().isBuffer())) {
return buffersInBacklog;
} else {
return Math.max(buffersInBacklog - 1, 0);
}
} | 3.26 |
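The conditional above encodes a single rule: the last data buffer is only announced once it is actually consumable, that is, when a flush was requested, the subpartition is finished, or the last queued entry is an event. The following standalone sketch captures that accounting rule as plain arithmetic; it is not Flink API, and the names are illustrative.

// Illustration of the backlog rule used by getBuffersInBacklogUnsafe.
static int visibleBacklog(
        int buffersInBacklog,
        boolean flushRequested,
        boolean finished,
        boolean lastEntryIsDataBuffer) {
    if (flushRequested || finished || !lastEntryIsDataBuffer) {
        // The last entry is consumable (or is an event), so the full backlog is visible.
        return buffersInBacklog;
    }
    // The last data buffer is still being written and is not announced yet.
    return Math.max(buffersInBacklog - 1, 0);
}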
flink_PipelinedSubpartition_getChannelStateFuture_rdh | /**
* for testing only.
*/
// suppress this warning as it is only for testing.
@SuppressWarnings("FieldAccessNotGuarded")
@VisibleForTesting
CompletableFuture<List<Buffer>> getChannelStateFuture() {
return channelStateFuture;
} | 3.26 |
flink_PipelinedSubpartition_getNumberOfQueuedBuffers_rdh | // ------------------------------------------------------------------------
@Override
public int getNumberOfQueuedBuffers() {
synchronized(buffers) {
return buffers.size();
}
} | 3.26 |
flink_PipelinedSubpartition_needNotifyPriorityEvent_rdh | // This is only called after a priority event has been added.
@GuardedBy("buffers")
private boolean needNotifyPriorityEvent() {
assert Thread.holdsLock(buffers);
// if subpartition is blocked then downstream doesn't expect any notifications
return (buffers.getNumPriorityElements() == 1) && (!isBlocked);
} | 3.26 |
flink_PipelinedSubpartition_increaseBuffersInBacklog_rdh | /**
* Increases the number of non-event buffers by one after adding a non-event buffer into this
* subpartition.
*/
@GuardedBy("buffers")
private void increaseBuffersInBacklog(BufferConsumer buffer) {
assert Thread.holdsLock(buffers);
if ((buffer != null) && buffer.isBuffer()) {
buffersInBacklog++;
}
} | 3.26 |
flink_PipelinedSubpartition_getNextBuffer_rdh | /**
* for testing only.
*/
@VisibleForTesting
BufferConsumerWithPartialRecordLength getNextBuffer() {
return buffers.poll();
} | 3.26 |
flink_PipelinedSubpartition_toString_rdh | // ------------------------------------------------------------------------
@Override
public String toString() {
final long numBuffers;
final long numBytes;
final boolean v36;
final boolean hasReadView;
synchronized(buffers) {
numBuffers = getTotalNumberOfBuffersUnsafe();
numBytes = m1();
v36 = isFinished;
hasReadView = readView != null;
}
return String.format("%s#%d [number of buffers: %d (%d bytes), number of buffers in backlog: %d, finished? %s, read view? %s]", this.getClass().getSimpleName(), getSubPartitionIndex(), numBuffers, numBytes, getBuffersInBacklogUnsafe(), v36, hasReadView);
} | 3.26 |
flink_JoinOperator_projectTuple18_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> ProjectJoin<I1, I2, Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> projectTuple18() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> tType = new TupleTypeInfo<Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(fTypes);
return new ProjectJoin<I1, I2, Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple4_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3> ProjectJoin<I1, I2, Tuple4<T0, T1, T2, T3>> projectTuple4() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType = new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes);
return new ProjectJoin<I1, I2, Tuple4<T0, T1, T2, T3>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
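The projectTupleN methods are not called directly by user code; they back the projection-join API of the DataSet world, where the arity of the produced tuple follows from the number of projected fields. Below is a hedged usage sketch of a projection join that ends up in a Tuple4; the field positions and sample data are illustrative.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple4;

public class ProjectJoinExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> users = env.fromElements(Tuple2.of(1, "alice"), Tuple2.of(2, "bob"));
        DataSet<Tuple2<Integer, Double>> scores = env.fromElements(Tuple2.of(1, 0.9), Tuple2.of(2, 0.7));
        DataSet<Tuple4<Integer, String, Integer, Double>> joined =
                users.join(scores)
                        .where(0)    // key field of the first input
                        .equalTo(0)  // key field of the second input
                        .projectFirst(0, 1)
                        .projectSecond(0, 1);
        joined.print();
    }
}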
flink_JoinOperator_projectTuple8_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7> ProjectJoin<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType = new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes);
return new ProjectJoin<I1, I2, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple25_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> ProjectJoin<I1, I2, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> projectTuple25() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> tType = new TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(fTypes);
return new ProjectJoin<I1, I2, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple14_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> ProjectJoin<I1, I2, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> projectTuple14() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>> tType = new TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(fTypes);
return new ProjectJoin<I1, I2, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple10_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> ProjectJoin<I1, I2, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> projectTuple10() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> tType = new TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fTypes);
return new ProjectJoin<I1, I2, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple21_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> ProjectJoin<I1, I2, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> projectTuple21() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>> tType = new TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(fTypes);
return new ProjectJoin<I1, I2, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple24_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> ProjectJoin<I1, I2, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> projectTuple24() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> tType = new TupleTypeInfo<Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>(fTypes);
return new ProjectJoin<I1, I2, Tuple24<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
} | 3.26 |
flink_JoinOperator_projectTuple9_rdh | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8> ProjectJoin<I1, I2, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> projectTuple9() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> v64 = new TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fTypes);
return new ProjectJoin<I1, I2, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, v64, this);
} | 3.26 |
flink_JoinOperator_projectSecond_rdh | /**
* Continues a ProjectJoin transformation and adds fields of the second join input.
*
* <p>If the second join input is a {@link Tuple} {@link DataSet}, fields can be selected by
* their index. If the second join input is not a Tuple DataSet, no parameters should be
* passed.
*
* <p>Fields of the first and second input can be added by chaining the method calls of
* {@link org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectFirst(int...)} and
* {@link org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectSecond(int...)}.
*
* @param secondFieldIndexes
* If the second input is a Tuple DataSet, the indexes of the
* selected fields. For a non-Tuple DataSet, do not provide parameters. The order of
     * fields in the output tuple is defined by the order of the field indexes.
* @return An extended JoinProjection.
* @see Tuple
* @see DataSet
*/
protected JoinProjection<I1, I2> projectSecond(int... secondFieldIndexes) {
boolean isSecondTuple = (ds2.getType() instanceof TupleTypeInfo) && (secondFieldIndexes.length > 0);
if ((!isSecondTuple) && (secondFieldIndexes.length != 0)) {
// field index provided for non-Tuple input
throw new IllegalArgumentException("Input is not a Tuple. Call projectSecond() without arguments to include it.");
} else if (secondFieldIndexes.length > (22 - this.fieldIndexes.length)) {
// too many field indexes provided
throw new IllegalArgumentException("You may select only up to twenty-two (22) fields in total.");
}
int offset = this.fieldIndexes.length;
if (isSecondTuple) {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + secondFieldIndexes.length);
this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + secondFieldIndexes.length);
// copy field indexes
int maxFieldIndex = numFieldsDs2;
for (int i = 0; i < secondFieldIndexes.length; i++) {
// check if indexes in range
Preconditions.checkElementIndex(secondFieldIndexes[i], maxFieldIndex);
this.isFieldInFirst[offset + i] = false;
this.fieldIndexes[offset + i] = secondFieldIndexes[i];
}
} else {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + 1);
this.isFieldInFirst = Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
// add input object to output tuple
this.isFieldInFirst[offset] = false;
this.fieldIndexes[offset] = -1;
}
return this;
} | 3.26 |
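One consequence of the else branch above: calling projectSecond() with no indexes appends the entire second-input element as a single field, which is also the only legal call when the second input is not a tuple type. A hedged sketch, reusing the users and scores datasets from the earlier projection-join example:

// Hedged sketch: with no indexes, the whole right-hand element is embedded as one field.
DataSet<Tuple2<String, Tuple2<Integer, Double>>> nested =
        users.join(scores)
                .where(0)
                .equalTo(0)
                .projectFirst(1)   // only the name from the left input
                .projectSecond();  // no indexes: embed the entire right element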