name | code_snippet | score
---|---|---|
flink_SubtaskStateStats_m0_rdh | /**
* Returns the size of the checkpointed state at this subtask.
*
 * @return Checkpoint state size of the subtask.
*/
public long m0() {
return stateSize;
} | 3.26 |
flink_SubtaskStateStats_getPersistedData_rdh | /**
*
* @return the total number of persisted bytes during the checkpoint.
*/
public long getPersistedData() {
return persistedData;
} | 3.26 |
flink_SubtaskStateStats_getEndToEndDuration_rdh | /**
* Computes the duration since the given trigger timestamp.
*
* <p>If the trigger timestamp is greater than the ACK timestamp, this returns <code>0</code>.
*
* @param triggerTimestamp
* Trigger timestamp of the checkpoint.
* @return Duration since the given trigger timestamp.
*/
public long getEndToEndDuration(long triggerTimestamp) {
return Math.max(0, ackTimestamp - triggerTimestamp);
} | 3.26 |
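A quick worked example of the clamping behaviour described above; the timestamps are purely illustrative:

```java
public class EndToEndDurationExample {
    public static void main(String[] args) {
        long triggerTimestamp = 1_000L; // checkpoint trigger time in ms (illustrative)
        long ackTimestamp = 1_450L;     // subtask ack time in ms (illustrative)
        // Same formula as getEndToEndDuration: negative results are clamped to 0.
        long duration = Math.max(0, ackTimestamp - triggerTimestamp);
        System.out.println(duration);   // prints 450
    }
}
```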
flink_SubtaskStateStats_getProcessedData_rdh | /**
*
* @return the total number of processed bytes during the checkpoint.
*/
public long getProcessedData() {
return processedData;
} | 3.26 |
flink_SubtaskStateStats_getCheckpointedSize_rdh | /**
* Returns the incremental state size.
*
* @return The incremental state size.
*/
public long getCheckpointedSize() {
return checkpointedSize;
} | 3.26 |
flink_SubtaskStateStats_getAckTimestamp_rdh | /**
* Returns the timestamp when the acknowledgement of this subtask was received at the
* coordinator.
*
* @return ACK timestamp at the coordinator.
*/
public long getAckTimestamp() {
return ackTimestamp;
} | 3.26 |
flink_AbstractHeapPriorityQueue_clear_rdh | /**
* Clears the queue.
*/
public void clear() {
final int arrayOffset = getHeadElementIndex();
Arrays.fill(queue, arrayOffset, arrayOffset + size, null);
size = 0;
} | 3.26 |
flink_AbstractHeapPriorityQueue_iterator_rdh | /**
* Returns an iterator over the elements in this queue. The iterator does not return the
* elements in any particular order.
*
* @return an iterator over the elements in this queue.
*/
@Nonnull
@Override
public CloseableIterator<T> iterator() {
return new HeapIterator();
} | 3.26 |
flink_GatewayRetriever_getNow_rdh | /**
* Returns the currently retrieved gateway if there is such an object. Otherwise it returns an
* empty optional.
*
 * @return Optional containing the currently retrieved gateway, or an empty Optional if none is available
*/
default Optional<T> getNow() {
CompletableFuture<T> leaderFuture = getFuture();
if (leaderFuture != null) {
if (leaderFuture.isCompletedExceptionally() || leaderFuture.isCancelled()) {
return Optional.empty();
} else if (leaderFuture.isDone()) {
try {
return Optional.of(leaderFuture.get());
} catch (Exception e) {
// this should never happen
throw new FlinkRuntimeException("Unexpected error while accessing the retrieved gateway.", e);
}
} else {
return Optional.empty();
}
} else {
return Optional.empty();
}
} | 3.26 |
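The method above only returns a value when the underlying future has already completed successfully. A minimal stand-alone sketch of that same pattern using plain `java.util.concurrent` (not Flink code; the gateway value is illustrative):

```java
import java.util.Optional;
import java.util.concurrent.CompletableFuture;

public class GetNowSketch {
    // Return the value only if the future already completed successfully; otherwise empty.
    static <T> Optional<T> getNow(CompletableFuture<T> future) {
        if (future == null || future.isCompletedExceptionally() || future.isCancelled() || !future.isDone()) {
            return Optional.empty();
        }
        return Optional.of(future.join());
    }

    public static void main(String[] args) {
        System.out.println(getNow(CompletableFuture.completedFuture("gateway"))); // Optional[gateway]
        System.out.println(getNow(new CompletableFuture<String>()));              // Optional.empty
    }
}
```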
flink_TimeIntervalTypeInfo_hashCode_rdh | // ----------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return Objects.hash(clazz, serializer, comparatorClass);
} | 3.26 |
flink_CoreOptions_fileSystemConnectionLimit_rdh | /**
* The total number of input plus output connections that a file system for the given scheme may
 * open. Unlimited by default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimit(String scheme) {
return ConfigOptions.key(("fs." + scheme) + ".limit.total").intType().defaultValue(-1);
} | 3.26 |
flink_CoreOptions_fileSystemConnectionLimitTimeout_rdh | /**
* If any connection limit is configured, this option can be optionally set to define after
* which time (in milliseconds) stream opening fails with a timeout exception, if no stream
 * connection becomes available. Unlimited timeout by default.
*/
public static ConfigOption<Long> fileSystemConnectionLimitTimeout(String scheme) {
return ConfigOptions.key(("fs." + scheme)
+ ".limit.timeout").longType().defaultValue(0L);
} | 3.26 |
flink_CoreOptions_fileSystemConnectionLimitOut_rdh | /**
* The total number of output connections that a file system for the given scheme may open.
 * Unlimited by default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimitOut(String scheme) {
return ConfigOptions.key(("fs." + scheme) + ".limit.output").intType().defaultValue(-1);
} | 3.26 |
flink_CoreOptions_fileSystemConnectionLimitIn_rdh | /**
* The total number of input connections that a file system for the given scheme may open.
 * Unlimited by default.
*/
public static ConfigOption<Integer> fileSystemConnectionLimitIn(String scheme) {
return ConfigOptions.key(("fs." + scheme) + ".limit.input").intType().defaultValue(-1);
} | 3.26 |
flink_CoreOptions_fileSystemConnectionLimitStreamInactivityTimeout_rdh | /**
* If any connection limit is configured, this option can be optionally set to define after
* which time (in milliseconds) inactive streams are reclaimed. This option can help to prevent
 * inactive streams from occupying the full pool of limited connections, which would block any
 * further connections from being established. Unlimited timeout by default.
*/
public static ConfigOption<Long> fileSystemConnectionLimitStreamInactivityTimeout(String scheme) {
return ConfigOptions.key(("fs." + scheme) + ".limit.stream-timeout").longType().defaultValue(0L);
} | 3.26 |
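The connection-limit options above are keyed per file-system scheme. A minimal sketch of how they might be set programmatically, assuming a Flink version where `Configuration#set(ConfigOption, value)` is available; the "s3" scheme and the concrete values are purely illustrative:

```java
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;

public class ConnectionLimitConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // At most 128 concurrent input + output streams for the "s3" scheme (fs.s3.limit.total).
        conf.set(CoreOptions.fileSystemConnectionLimit("s3"), 128);
        // Fail stream opening after 60s if no connection becomes available (fs.s3.limit.timeout).
        conf.set(CoreOptions.fileSystemConnectionLimitTimeout("s3"), 60_000L);
        // Reclaim streams that have been inactive for 5 minutes (fs.s3.limit.stream-timeout).
        conf.set(CoreOptions.fileSystemConnectionLimitStreamInactivityTimeout("s3"), 300_000L);
        System.out.println(conf);
    }
}
```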
flink_PrioritizedOperatorSubtaskState_getPrioritizedRawOperatorState_rdh | /**
* Returns an immutable list with all alternative snapshots to restore the raw operator state,
* in the order in which we should attempt to restore.
*/
@Nonnull
public List<StateObjectCollection<OperatorStateHandle>> getPrioritizedRawOperatorState() {
return prioritizedRawOperatorState;
} | 3.26 |
flink_PrioritizedOperatorSubtaskState_getPrioritizedManagedOperatorState_rdh | // -----------------------------------------------------------------------------------------------------------------
/**
* Returns an immutable list with all alternative snapshots to restore the managed operator
* state, in the order in which we should attempt to restore.
 */
@Nonnull
public List<StateObjectCollection<OperatorStateHandle>> getPrioritizedManagedOperatorState() {
return prioritizedManagedOperatorState;
} | 3.26 |
flink_PrioritizedOperatorSubtaskState_resolvePrioritizedAlternatives_rdh | /**
* This helper method resolves the dependencies between the ground truth of the operator
* state obtained from the job manager and potential alternatives for recovery, e.g. from a
* task-local source.
*/
<T extends StateObject> List<StateObjectCollection<T>> resolvePrioritizedAlternatives(StateObjectCollection<T> jobManagerState, List<StateObjectCollection<T>> alternativesByPriority, BiFunction<T, T, Boolean> approveFun) {
// Nothing to resolve if there are no alternatives, or the ground truth already has no
// state, or if we can assume that a rescaling happened because we find more than one
// handle in the JM state
// (this is more a sanity check).
if ((((alternativesByPriority == null) || alternativesByPriority.isEmpty()) || (!jobManagerState.hasState())) || (jobManagerState.size() != 1)) {
return Collections.singletonList(jobManagerState);
}
// As we know size is == 1
T reference = jobManagerState.iterator().next();
// This will contain the end result, we initialize it with the potential max. size.
List<StateObjectCollection<T>> approved = new ArrayList<>(1 + alternativesByPriority.size());
for (StateObjectCollection<T> alternative : alternativesByPriority) {
// We found an alternative to the JM state if it has state, we have a 1:1
// relationship, and the approve-function signaled true.
if ((((alternative != null) && alternative.hasState()) && (alternative.size() == 1)) && BooleanUtils.isTrue(approveFun.apply(reference, alternative.iterator().next()))) {
approved.add(alternative);
}
}
// Of course we include the ground truth as last alternative.
approved.add(jobManagerState);
return Collections.unmodifiableList(approved);
} | 3.26 |
flink_PrioritizedOperatorSubtaskState_getJobManagerManagedOperatorState_rdh | // -----------------------------------------------------------------------------------------------------------------
/**
* Returns the managed operator state from the job manager, which represents the ground truth
* about what this state should represent. This is the alternative with lowest priority.
*/
@Nonnull
public StateObjectCollection<OperatorStateHandle> getJobManagerManagedOperatorState() {
return lastElement(prioritizedManagedOperatorState);
} | 3.26 |
flink_PrioritizedOperatorSubtaskState_computePrioritizedAlternatives_rdh | /**
* This method creates an alternative recovery option by replacing as much job manager state
* with higher prioritized (=local) alternatives as possible.
*
* @param jobManagerState
* the state that the task got assigned from the job manager (this
* state lives in remote storage).
* @param alternativesByPriority
* local alternatives to the job manager state, ordered by
* priority.
* @param identityExtractor
* function to extract an identifier from a state object.
* @return prioritized state alternatives.
* @param <STATE_OBJ_TYPE>
* the type of the state objects we process.
* @param <ID_TYPE>
 * the type of object that represents the id of the state object type.
*/
<STATE_OBJ_TYPE extends StateObject, ID_TYPE> List<StateObjectCollection<STATE_OBJ_TYPE>> computePrioritizedAlternatives(StateObjectCollection<STATE_OBJ_TYPE> jobManagerState, List<StateObjectCollection<STATE_OBJ_TYPE>> alternativesByPriority, Function<STATE_OBJ_TYPE, ID_TYPE> identityExtractor) {
if (((alternativesByPriority != null) && (!alternativesByPriority.isEmpty())) && jobManagerState.hasState()) {
Optional<StateObjectCollection<STATE_OBJ_TYPE>> mergedAlternative = tryComputeMixedLocalAndRemoteAlternative(jobManagerState, alternativesByPriority, identityExtractor);
// Return the mix of local/remote state as first and pure remote state as second
// alternative (in case that we fail to recover from the local state, e.g. because
// of corruption).
if (mergedAlternative.isPresent()) {
return Arrays.asList(mergedAlternative.get(), jobManagerState);
}
}
return Collections.singletonList(jobManagerState);
} | 3.26 |
flink_PrioritizedOperatorSubtaskState_isRestored_rdh | // -----------------------------------------------------------------------------------------------------------------
/**
* Returns true if this was created for a restored operator, false otherwise. Restored operators
* are operators that participated in a previous checkpoint, even if they did not emit any state
* snapshots.
*/
public boolean isRestored() {
return restoredCheckpointId != null;
} | 3.26 |
flink_PrioritizedOperatorSubtaskState_getPrioritizedRawKeyedState_rdh | /**
* Returns an immutable list with all alternative snapshots to restore the raw keyed state, in
* the order in which we should attempt to restore.
*/
@Nonnull
public List<StateObjectCollection<KeyedStateHandle>> getPrioritizedRawKeyedState() {
return prioritizedRawKeyedState;
} | 3.26 |
flink_PrioritizedOperatorSubtaskState_m0_rdh | /**
* Returns the managed keyed state from the job manager, which represents the ground truth about
* what this state should represent. This is the alternative with lowest priority.
*/
@Nonnull
public StateObjectCollection<KeyedStateHandle> m0() {
return lastElement(prioritizedManagedKeyedState);
} | 3.26 |
flink_HadoopRecoverableFsDataOutputStream_waitUntilLeaseIsRevoked_rdh | /**
 * Called when resuming execution after a failure; waits until the lease of the file we are
* resuming is free.
*
* <p>The lease of the file we are resuming writing/committing to may still belong to the
* process that failed previously and whose state we are recovering.
*
* @param path
* The path to the file we want to resume writing to.
*/
private static boolean waitUntilLeaseIsRevoked(final FileSystem fs, final Path path) throws IOException {
Preconditions.checkState(fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = ((DistributedFileSystem) (fs));
dfs.recoverLease(path);
final Deadline deadline = Deadline.now().plus(Duration.ofMillis(f0));
boolean isClosed = dfs.isFileClosed(path);
while ((!isClosed) && deadline.hasTimeLeft()) {
try {
Thread.sleep(500L);
} catch (InterruptedException e1) {
throw new IOException("Recovering the lease failed: ", e1);
}
isClosed = dfs.isFileClosed(path);
}
return isClosed;
} | 3.26 |
flink_HadoopRecoverableFsDataOutputStream_revokeLeaseByFileSystem_rdh | /**
 * Resolves the real path if the given FileSystem is a {@link ViewFileSystem} and revokes the lease
 * of the file we are resuming using the resolved FileSystem.
*
* @param path
* The path to the file we want to resume writing to.
*/
private static boolean revokeLeaseByFileSystem(final FileSystem fs, final Path path) throws IOException {
if (fs instanceof ViewFileSystem) {
final ViewFileSystem vfs = ((ViewFileSystem) (fs));
final Path resolvePath = vfs.resolvePath(path);
final FileSystem resolveFs = resolvePath.getFileSystem(fs.getConf());
return waitUntilLeaseIsRevoked(resolveFs, resolvePath);
}
return waitUntilLeaseIsRevoked(fs, path);
} | 3.26 |
flink_HadoopRecoverableFsDataOutputStream_safelyTruncateFile_rdh | // ------------------------------------------------------------------------
// Reflection utils for truncation
// These are needed to compile against Hadoop versions before
// Hadoop 2.7, which have no truncation calls for HDFS.
// ------------------------------------------------------------------------
private static void safelyTruncateFile(final FileSystem fileSystem, final Path path, final HadoopFsRecoverable recoverable) throws IOException {
ensureTruncateInitialized();
revokeLeaseByFileSystem(fileSystem, path);
// truncate back and append
boolean truncated;
try {
truncated = truncate(fileSystem, path, recoverable.offset());
} catch (Exception e) {
throw new IOException("Problem while truncating file: " + path, e);
}
if (!truncated) {
// Truncate did not complete immediately, we must wait for
// the operation to complete and release the lease.
revokeLeaseByFileSystem(fileSystem, path);
}
} | 3.26 |
flink_CoGroupWithSolutionSetSecondDriver_m0_rdh | // --------------------------------------------------------------------------------------------
@Override
@SuppressWarnings("unchecked")
public void m0() throws Exception {
final TypeComparator<IT2> solutionSetComparator;
// grab a handle to the hash table from the iteration broker
if (taskContext instanceof AbstractIterativeTask) {
AbstractIterativeTask<?, ?> v2 = ((AbstractIterativeTask<?, ?>) (taskContext));
String v3 = v2.brokerKey();
Object table = SolutionSetBroker.instance().get(v3);
if (table instanceof CompactingHashTable) {
this.hashTable = ((CompactingHashTable<IT2>) (table));
f0 = this.hashTable.getBuildSideSerializer();
solutionSetComparator = this.hashTable.getBuildSideComparator().duplicate();
} else if (table instanceof JoinHashMap) {
this.objectMap = ((JoinHashMap<IT2>) (table));
f0 = this.objectMap.getBuildSerializer();
solutionSetComparator = this.objectMap.getBuildComparator().duplicate();
} else {
throw new RuntimeException("Unrecognized solution set index: " + table);
}
} else {
throw new Exception("The task context of this driver is no iterative task context.");
}
TaskConfig config = taskContext.getTaskConfig();
ClassLoader classLoader = taskContext.getUserCodeClassLoader();
TypeComparatorFactory<IT1> probeSideComparatorFactory = config.getDriverComparator(0, classLoader);
this.probeSideSerializer = taskContext.<IT1>getInputSerializer(0).getSerializer();
this.probeSideComparator = probeSideComparatorFactory.createComparator();
ExecutionConfig executionConfig = taskContext.getExecutionConfig();
objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (objectReuseEnabled) {
solutionSideRecord = f0.createInstance();
}
TypePairComparatorFactory<IT1, IT2> factory = taskContext.getTaskConfig().getPairComparatorFactory(taskContext.getUserCodeClassLoader());
pairComparator = factory.createComparator12(this.probeSideComparator, solutionSetComparator);
} | 3.26 |
flink_CoGroupWithSolutionSetSecondDriver_setup_rdh | // --------------------------------------------------------------------------------------------
@Override
public void setup(TaskContext<CoGroupFunction<IT1, IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
} | 3.26 |
flink_ExecutionFailureHandler_getFailureHandlingResult_rdh | /**
 * Returns the result of failure handling: either a set of task vertices to restart together
 * with a restart delay, or the reason why the failure is not recoverable.
*
* @param failedExecution
* is the failed execution
* @param cause
* of the task failure
* @param timestamp
* of the task failure
* @return result of the failure handling
*/
public FailureHandlingResult getFailureHandlingResult(Execution failedExecution, Throwable cause, long timestamp) {
return handleFailure(failedExecution, cause, timestamp, failoverStrategy.getTasksNeedingRestart(failedExecution.getVertex().getID(), cause), false);
} | 3.26 |
flink_ExecutionFailureHandler_getGlobalFailureHandlingResult_rdh | /**
 * Returns the result of handling a global failure: either a set of task vertices to restart
 * together with a restart delay, or the reason why the failure is not recoverable.
*
* @param cause
* of the task failure
* @param timestamp
* of the task failure
* @return result of the failure handling
*/
public FailureHandlingResult getGlobalFailureHandlingResult(final Throwable cause, long timestamp) {
return handleFailure(null, cause, timestamp, IterableUtils.toStream(schedulingTopology.getVertices()).map(SchedulingExecutionVertex::getId).collect(Collectors.toSet()), true);
} | 3.26 |
flink_BooleanValueComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_AnswerFormatter_format_rdh | /**
 * The TPC-DS answer set has three kinds of formats; recognize them and convert to a unified format.
*
* @param originFile
* origin answer set file from TPC-DS.
* @param destFile
* file to save formatted answer set.
* @throws Exception
*/
private static void format(File originFile, File destFile) throws Exception {
BufferedReader reader = new BufferedReader(new FileReader(originFile));
BufferedWriter writer = new BufferedWriter(new FileWriter(destFile));
String line;
List<Integer> colLengthList;
List<String> content = new ArrayList<>();
while ((line = reader.readLine()) != null) {
content.add(line);
}
if (isFormat1(content)) {
colLengthList = Arrays.stream(content.get(1).split(REGEX_SPLIT_BAR)).map(col -> col.length()).collect(Collectors.toList());
writeContent(writer, content, colLengthList);
} else if (isFormat2(content)) {
colLengthList = Arrays.stream(content.get(1).split(f0)).map(col -> col.length()).collect(Collectors.toList());
writeContent(writer, content, colLengthList);
} else {
writeContent(writer, content, null);
}
reader.close();
writer.close();
} | 3.26 |
flink_PythonCsvUtils_createCsvBulkWriterFactory_rdh | /**
* Util for creating a {@link BulkWriter.Factory} that wraps {@link CsvBulkWriter#forSchema}.
*/
public static BulkWriter.Factory<RowData> createCsvBulkWriterFactory(CsvSchema schema, DataType physicalDataType) {
return CsvFileFormatFactory.createCsvBulkWriterFactory(schema, ((RowType) (physicalDataType.getLogicalType())));
} | 3.26 |
flink_PythonCsvUtils_createRowDataToCsvFormatConverterContext_rdh | /**
* Util for creating a {@link RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext}.
*/
public static RowDataToCsvFormatConverterContext createRowDataToCsvFormatConverterContext(CsvMapper mapper, ContainerNode<?> container) {
return new RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext(mapper, container);
} | 3.26 |
flink_PythonCsvUtils_createCsvReaderFormat_rdh | /**
* Util for creating a {@link CsvReaderFormat}.
*/
public static CsvReaderFormat<Object> createCsvReaderFormat(CsvSchema schema, DataType dataType) {
Preconditions.checkArgument(dataType.getLogicalType() instanceof RowType);
return new CsvReaderFormat<>(CsvMapper::new, ignored -> schema, JsonNode.class, new CsvToRowDataConverters(false).createRowConverter(LogicalTypeUtils.toRowType(dataType.getLogicalType()), true), InternalTypeInfo.of(dataType.getLogicalType()), false);
} | 3.26 |
flink_SubtaskConnectionDescriptor_getInputSubtaskIndex_rdh | // ------------------------------------------------------------------------
public int getInputSubtaskIndex() {
return inputSubtaskIndex;
} | 3.26 |
flink_SubtaskConnectionDescriptor_write_rdh | // ------------------------------------------------------------------------
// Serialization
// ------------------------------------------------------------------------
@Override
public void write(DataOutputView out) {
throw new UnsupportedOperationException("This method should never be called");
} | 3.26 |
flink_DataSourceTask_initOutputs_rdh | /**
* Creates a writer for each output. Creates an OutputCollector which forwards its input to all
* writers. The output collector applies the configured shipping strategy.
*/
private void initOutputs(UserCodeClassLoader cl) throws Exception {
this.chainedTasks = new ArrayList<ChainedDriver<?, ?>>();
this.eventualOutputs = new ArrayList<RecordWriter<?>>();
this.output = BatchTask.initOutputs(this, cl, this.config, this.chainedTasks, this.eventualOutputs, getExecutionConfig(), getEnvironment().getAccumulatorRegistry().getUserMap());
} | 3.26 |
flink_DataSourceTask_initInputFormat_rdh | /**
* Initializes the InputFormat implementation and configuration.
*
* @throws RuntimeException
 * Thrown if an instance of the InputFormat implementation cannot be obtained.
*/
private void initInputFormat() {
ClassLoader userCodeClassLoader = getUserCodeClassLoader();
// obtain task configuration (including stub parameters)
Configuration taskConf = getTaskConfiguration();
this.config = new TaskConfig(taskConf);
final Pair<OperatorID, InputFormat<OT, InputSplit>> operatorIdAndInputFormat;
InputOutputFormatContainer formatContainer = new InputOutputFormatContainer(config, userCodeClassLoader);
try {
operatorIdAndInputFormat = formatContainer.getUniqueInputFormat();
this.format = operatorIdAndInputFormat.getValue();
// check if the class is a subclass, if the check is required
if (!InputFormat.class.isAssignableFrom(this.format.getClass())) {
throw new RuntimeException(((("The class '" + this.format.getClass().getName()) + "' is not a subclass of '") + InputFormat.class.getName()) + "' as is required.");
}
} catch (ClassCastException ccex) {
throw new RuntimeException("The stub class is not a proper subclass of " + InputFormat.class.getName(), ccex);
}
Thread thread = Thread.currentThread();
ClassLoader original = thread.getContextClassLoader();
// configure the stub. catch exceptions here extra, to report them as originating from the
// user code
try {
thread.setContextClassLoader(userCodeClassLoader);
this.format.configure(formatContainer.getParameters(operatorIdAndInputFormat.getKey()));
} catch (Throwable t) {
throw new RuntimeException("The user defined 'configure()' method caused an error: " + t.getMessage(), t);
} finally {
thread.setContextClassLoader(original);
}
// get the factory for the type serializer
this.serializerFactory = this.config.getOutputSerializer(userCodeClassLoader);
} | 3.26 |
flink_DataSourceTask_getLogString_rdh | /**
* Utility function that composes a string for logging purposes. The string includes the given
* message and the index of the task in its task group together with the number of tasks in the
* task group.
*
* @param message
* The main message for the log.
* @param taskName
* The name of the task.
* @return The string ready for logging.
*/
private String getLogString(String message, String taskName) {
return BatchTask.constructLogString(message, taskName, this);
} | 3.26 |
flink_BlobCacheService_setBlobServerAddress_rdh | /**
* Sets the address of the {@link BlobServer}.
*
* @param blobServerAddress
* address of the {@link BlobServer}.
*/
public void setBlobServerAddress(InetSocketAddress blobServerAddress) {
permanentBlobCache.setBlobServerAddress(blobServerAddress);
transientBlobCache.setBlobServerAddress(blobServerAddress);
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_getPartitionSpec_rdh | // get partition metadata
public static Map<String, String> getPartitionSpec(HiveParserASTNode ast) {
HiveParserASTNode partNode = null;
// if this ast has only one child, then no partition spec specified.
if (ast.getChildCount() == 1) {
return null;
}
// if ast has two children
// the 2nd child could be partition spec or columnName
// if the ast has 3 children, the second *has to* be partition spec
if ((ast.getChildCount() > 2) &&
(ast.getChild(1).getType() != HiveASTParser.TOK_PARTSPEC)) {
throw new ValidationException(ast.getChild(1).getType() + " is not a partition specification");
}
if (ast.getChild(1).getType() == HiveASTParser.TOK_PARTSPEC) {
partNode = ((HiveParserASTNode) (ast.getChild(1)));
}
if (partNode != null) {
return getPartSpec(partNode);
}
return null;
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_getFullyQualifiedName_rdh | // generate a name of the form a.b.c
public static String getFullyQualifiedName(HiveParserASTNode ast) {
if (ast.getChildCount() == 0) {
return ast.getText();
} else if (ast.getChildCount() == 2) {
return (getFullyQualifiedName(((HiveParserASTNode) (ast.getChild(0)))) + ".") + getFullyQualifiedName(((HiveParserASTNode) (ast.getChild(1))));
} else if (ast.getChildCount() == 3) {
return (((getFullyQualifiedName(((HiveParserASTNode) (ast.getChild(0)))) + ".") + getFullyQualifiedName(((HiveParserASTNode) (ast.getChild(1))))) + ".") + getFullyQualifiedName(((HiveParserASTNode) (ast.getChild(2))));
} else {
return null;
}
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_validatePartitionValues_rdh | /**
 * Certain partition values are used by Hive, e.g. the default partition in dynamic
 * partitioning and the intermediate partition values used in the archiving process. Naturally,
 * we prohibit the user from creating partitions with these reserved values. The check that this
 * function performs is more restrictive than the actual limitation, but it's simpler. Should be
 * okay since the reserved names are fairly long and uncommon.
*/
private void validatePartitionValues(Map<String, String> partSpec) {
for (Map.Entry<String, String> e : partSpec.entrySet()) {
for (String s : reservedPartitionValues) {
String value = e.getValue();
if ((value != null) && value.contains(s)) {
throw new ValidationException(ErrorMsg.RESERVED_PART_VAL.getMsg(((("(User value: " + e.getValue()) + " Reserved substring: ") + s) + ")"));
}
}
}
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_getColPath_rdh | // get the column path
// return column name if exists, column could be DOT separated.
// example: lintString.$elem$.myint
// return table name for column name if no column has been specified.
public static String getColPath(HiveParserASTNode node, String dbName, String tableName, Map<String, String> partSpec) {
// if this ast has only one child, then no column name specified.
if (node.getChildCount() == 1) {
return tableName;
}
HiveParserASTNode columnNode = null;
// Second child node could be partitionspec or column
if (node.getChildCount() > 1) {
if (partSpec == null) {
columnNode = ((HiveParserASTNode) (node.getChild(1)));
} else {
columnNode = ((HiveParserASTNode) (node.getChild(2)));
}
}
if (columnNode != null) {
if (dbName == null) {
return (tableName + ".") + QualifiedNameUtil.getFullyQualifiedName(columnNode);
} else {
return (tableName.substring(dbName.length() + 1) + ".") + QualifiedNameUtil.getFullyQualifiedName(columnNode);
}
} else {
return tableName;
}
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_getPartitionSpecs_rdh | // Get the partition specs from the tree
private List<Map<String, String>> getPartitionSpecs(CommonTree ast) {
List<Map<String, String>> partSpecs = new ArrayList<>();
// get partition metadata if partition specified
for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
HiveParserASTNode v202 = ((HiveParserASTNode) (ast.getChild(childIndex)));
// sanity check
if (v202.getType() == HiveASTParser.TOK_PARTSPEC) {
Map<String, String> partSpec = getPartSpec(v202);
partSpecs.add(partSpec);
}
}
return partSpecs;
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_convertAlterTableAddParts_rdh | /**
* Add one or more partitions to a table. Useful when the data has been copied to the right
* location by some other process.
*/
private Operation convertAlterTableAddParts(String[] qualified, CommonTree ast) {
// ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists?
// alterStatementSuffixAddPartitionsElement+)
boolean ifNotExists = ast.getChild(0).getType() == HiveASTParser.TOK_IFNOTEXISTS;
Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
boolean isView = tab.isView();
m0(tab);
int numCh = ast.getChildCount();
int start = (ifNotExists) ? 1 : 0;
String currentLocation = null;
Map<String, String> currentPartSpec = null;
// Parser has done some verification, so the order of tokens doesn't need to be verified
// here.
List<CatalogPartitionSpec> specs = new ArrayList<>();
List<CatalogPartition> partitions = new ArrayList<>();
for (int num = start; num < numCh; num++) {
HiveParserASTNode child = ((HiveParserASTNode) (ast.getChild(num)));
switch (child.getToken().getType()) {
case HiveASTParser.TOK_PARTSPEC :
if (currentPartSpec != null) {
specs.add(new CatalogPartitionSpec(currentPartSpec));
Map<String, String> v268 = new HashMap<>();
if (currentLocation != null) {
v268.put(TABLE_LOCATION_URI, currentLocation);
}
partitions.add(new CatalogPartitionImpl(v268, null));
currentLocation = null;
}
currentPartSpec = getPartSpec(child);
validatePartitionValues(currentPartSpec); // validate reserved values
break;
case HiveASTParser.TOK_PARTITIONLOCATION :
// if location specified, set in partition
if (isView) {
throw new ValidationException("LOCATION clause illegal for view partition");
}
currentLocation = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
break;
default:
throw new ValidationException("Unknown child: " + child);
}
}
// add the last one
if (currentPartSpec != null) {
specs.add(new CatalogPartitionSpec(currentPartSpec));
Map<String, String> props = new HashMap<>();
if (currentLocation != null) {
props.put(TABLE_LOCATION_URI, currentLocation);
}
partitions.add(new CatalogPartitionImpl(props, null));
}
ObjectIdentifier tableIdentifier = (tab.getDbName() == null) ? parseObjectIdentifier(tab.getTableName()) : catalogRegistry.qualifyIdentifier(UnresolvedIdentifier.of(tab.getDbName(), tab.getTableName()));
return new AddPartitionsOperation(tableIdentifier, ifNotExists, specs, partitions);
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_convertDescribeTable_rdh | /**
 * A query like "describe formatted default.maptable partition (b=100) id;" will generate a tree as follows:
 * TOK_TABTYPE
 *   TOK_TABNAME --> root for tablename, 2 child nodes mean DB specified
 *     default
 *     maptable
 *   TOK_PARTSPEC --> root node for partition spec. else columnName
 *     TOK_PARTVAL
 *       b
 *       100
 *   id --> root node for columnName
 * formatted
private Operation convertDescribeTable(HiveParserASTNode ast) {
HiveParserASTNode tableTypeExpr = ((HiveParserASTNode) (ast.getChild(0)));
String dbName = null;
String tableName;
String colPath;
Map<String, String> partSpec;
HiveParserASTNode tableNode;
// process the first node to extract tablename
// tablename is either TABLENAME or DBNAME.TABLENAME if db is given
if (tableTypeExpr.getChild(0).getType() == HiveASTParser.TOK_TABNAME) {
tableNode = ((HiveParserASTNode) (tableTypeExpr.getChild(0)));
if (tableNode.getChildCount() == 1) {
tableName = tableNode.getChild(0).getText();
} else if (tableNode.getChildCount() == 2) {
dbName = tableNode.getChild(0).getText();
tableName = (dbName + ".") + tableNode.getChild(1).getText();
} else { // tablename is CATALOGNAME.DBNAME.TABLENAME, which is not supported yet.
// todo: fix it in FLINK-29343
throw new ValidationException("Describe a table in specific catalog is not supported in HiveDialect," + " please switch to Flink default dialect.");
}
} else {
throw new ValidationException(tableTypeExpr.getChild(0).getText() + " is not an expected token type");
}
// process the second child,if exists, node to get partition spec(s)
partSpec = QualifiedNameUtil.getPartitionSpec(tableTypeExpr);
// process the third child node,if exists, to get partition spec(s)
colPath = QualifiedNameUtil.getColPath(tableTypeExpr, dbName, tableName, partSpec);
if (partSpec != null) {
handleUnsupportedOperation("DESCRIBE PARTITION is not supported");
}
if (!colPath.equals(tableName)) {
handleUnsupportedOperation("DESCRIBE COLUMNS is not supported");
}
boolean isExt = false;
boolean isFormatted = false;
if (ast.getChildCount() == 2) {
int descOptions = ast.getChild(1).getType();
isExt = descOptions == HiveASTParser.KW_EXTENDED;
isFormatted = descOptions == HiveASTParser.KW_FORMATTED;
if (descOptions == HiveASTParser.KW_PRETTY) {
handleUnsupportedOperation("DESCRIBE PRETTY is not supported.");
}
}
ObjectIdentifier tableIdentifier = parseObjectIdentifier(tableName);
return new HiveExecutableOperation(new DescribeTableOperation(tableIdentifier, isExt || isFormatted));
} | 3.26 |
flink_HiveParserDDLSemanticAnalyzer_convertShowFunctions_rdh | /**
* Add the task according to the parsed command tree. This is used for the CLI command "SHOW
* FUNCTIONS;".
*
* @param ast
* The parsed command tree.
*/
private Operation convertShowFunctions(HiveParserASTNode ast) {
if (ast.getChildCount() == 2) {
assert ast.getChild(0).getType() == HiveASTParser.KW_LIKE;
throw new ValidationException("SHOW FUNCTIONS LIKE is not supported yet");
}
return new ShowFunctionsOperation();
} | 3.26 |
flink_ExternalPythonKeyedProcessOperator_setCurrentKey_rdh | /**
* As the beam state gRPC service will access the KeyedStateBackend in parallel with this
* operator, we must override this method to prevent changing the current key of the
* KeyedStateBackend while the beam service is handling requests.
*/
@Override
public void setCurrentKey(Object key) {
if (inBatchExecutionMode(getKeyedStateBackend())) {
super.setCurrentKey(key);
}
keyForTimerService = key;
} | 3.26 |
flink_ExternalPythonKeyedProcessOperator_processTimer_rdh | /**
 * It is responsible for sending timer data to the Python worker when a registered timer is fired. The
* input data is a Row containing 4 fields: TimerFlag 0 for proc time, 1 for event time;
* Timestamp of the fired timer; Current watermark and the key of the timer.
*
* @param timeDomain
* The type of the timer.
* @param timer
* The internal timer.
* @throws Exception
* The runnerInputSerializer might throw exception.
*/
private void processTimer(TimeDomain timeDomain, InternalTimer<Row, Object> timer) throws Exception {
Object namespace = timer.getNamespace();
byte[] encodedNamespace;
if (VoidNamespace.INSTANCE.equals(namespace)) {
encodedNamespace = null;
} else {
namespaceSerializer.serialize(namespace, baosWrapper);
encodedNamespace = baos.toByteArray();
baos.reset();
}
Row timerData = timerHandler.buildTimerData(timeDomain, internalTimerService.currentWatermark(), timer.getTimestamp(), timer.getKey(),
encodedNamespace);
timerDataSerializer.serialize(timerData, baosWrapper);
pythonFunctionRunner.processTimer(baos.toByteArray());
baos.reset();
elementCount++;
checkInvokeFinishBundleByCount();
emitResults();
} | 3.26 |
flink_AbstractCheckpointStats_getTaskStateStats_rdh | /**
* Returns the task state stats for the given job vertex ID or <code>null</code> if no task with
* such an ID is available.
*
* @param jobVertexId
* Job vertex ID of the task stats to look up.
* @return The task state stats instance for the given ID or <code>null</code>.
*/
public TaskStateStats getTaskStateStats(JobVertexID jobVertexId) {
return taskStats.get(jobVertexId);
} | 3.26 |
flink_AbstractCheckpointStats_getAllTaskStateStats_rdh | /**
* Returns all task state stats instances.
*
* @return All task state stats instances.
 */
public Collection<TaskStateStats> getAllTaskStateStats() {
return taskStats.values();
} | 3.26 |
flink_AbstractCheckpointStats_getLatestAckTimestamp_rdh | /**
* Returns the ack timestamp of the latest acknowledged subtask or <code>-1</code> if none was
* acknowledged yet.
*
* @return Ack timestamp of the latest acknowledged subtask or <code>-1</code>.
*/
public long getLatestAckTimestamp() {
SubtaskStateStats subtask = getLatestAcknowledgedSubtaskStats();
if (subtask != null) {
return subtask.getAckTimestamp();
} else {
return -1;
}
} | 3.26 |
flink_AbstractPagedInputView_getHeaderLength_rdh | /**
*
* @return header length.
*/
public int getHeaderLength() {
return headerLength;
} | 3.26 |
flink_AbstractPagedInputView_getCurrentSegment_rdh | // --------------------------------------------------------------------------------------------
// Page Management
// --------------------------------------------------------------------------------------------
/**
* Gets the memory segment that will be used to read the next bytes from. If the segment is
* exactly exhausted, meaning that the last byte read was the last byte available in the
* segment, then this segment will not serve the next bytes. The segment to serve the next bytes
* will be obtained through the {@link #nextSegment(MemorySegment)} method.
*
* @return The current memory segment.
*/
public MemorySegment getCurrentSegment() {
return this.currentSegment;
} | 3.26 |
flink_AbstractPagedInputView_read_rdh | // --------------------------------------------------------------------------------------------
// Data Input Specific methods
// --------------------------------------------------------------------------------------------
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
} | 3.26 |
flink_AbstractPagedInputView_clear_rdh | /**
* Clears the internal state of the view. After this call, all read attempts will fail, until
* the {@link #advance()} or {@link #seekInput(MemorySegment, int, int)} method have been
* invoked.
*/
protected void clear() {
this.currentSegment = null;
this.positionInSegment = this.headerLength;
this.limitInSegment = headerLength;
} | 3.26 |
flink_AbstractPagedInputView_getCurrentSegmentLimit_rdh | /**
* Gets the current limit in the memory segment. This value points to the byte one after the
* last valid byte in the memory segment.
*
* @return The current limit in the memory segment.
* @see #getCurrentPositionInSegment()
*/
public int getCurrentSegmentLimit() {
return this.limitInSegment;
}
/**
* The method by which concrete subclasses realize page crossing. This method is invoked when
* the current page is exhausted and a new page is required to continue the reading. If no
* further page is available, this method must throw an {@link EOFException}.
*
* @param current
* The current page that was read to its limit. May be {@code null}, if this
* method is invoked for the first time.
* @return The next page from which the reading should continue. May not be {@code null}. If the
input is exhausted, an {@link EOFException} | 3.26 |
flink_AbstractPagedInputView_seekInput_rdh | /**
* Sets the internal state of the view such that the next bytes will be read from the given
* memory segment, starting at the given position. The memory segment will provide bytes up to
* the given limit position.
*
* @param segment
* The segment to read the next bytes from.
* @param positionInSegment
* The position in the segment to start reading from.
* @param limitInSegment
* The limit in the segment. When reached, the view will attempt to switch
* to the next segment.
*/
protected void seekInput(MemorySegment segment, int positionInSegment, int limitInSegment) {
this.currentSegment = segment;
this.positionInSegment = positionInSegment;
this.limitInSegment = limitInSegment;
} | 3.26 |
flink_AbstractPagedInputView_advance_rdh | /**
* Advances the view to the next memory segment. The reading will continue after the header of
* the next segment. This method uses {@link #nextSegment(MemorySegment)} and {@link #getLimitForSegment(MemorySegment)} to get the next segment and set its limit.
*
* @throws IOException
* Thrown, if the next segment could not be obtained.
* @see #nextSegment(MemorySegment)
* @see #getLimitForSegment(MemorySegment)
*/
public void advance() throws IOException {
doAdvance();
} | 3.26 |
flink_FactoryUtils_loadAndInvokeFactory_rdh | /**
* Loads all factories for the given class using the {@link ServiceLoader} and attempts to
* create an instance.
*
* @param factoryInterface
* factory interface
* @param factoryInvoker
* factory invoker
* @param defaultProvider
* default factory provider
* @param <R>
* resource type
* @param <F>
* factory type
* @throws RuntimeException
* if no or multiple resources could be instantiated
* @return created instance
*/
public static <R, F> R loadAndInvokeFactory(final Class<F> factoryInterface, final FactoryInvoker<F, R> factoryInvoker, final Supplier<F> defaultProvider) {
final ServiceLoader<F> factories = ServiceLoader.load(factoryInterface);
final List<R> instantiatedResources = new ArrayList<>();
final List<Exception> errorsDuringInitialization = new ArrayList<>();
for (F factory : factories) {
try {
R resource = factoryInvoker.invoke(factory);
instantiatedResources.add(resource);
f0.info("Instantiated {}.", resource.getClass().getSimpleName());
} catch (Exception e) {
f0.debug("Factory {} could not instantiate instance.", factory.getClass().getSimpleName(), e);
errorsDuringInitialization.add(e);
}
}
if (instantiatedResources.size() == 1) {
return instantiatedResources.get(0);
}
if (instantiatedResources.isEmpty()) {
try {
return factoryInvoker.invoke(defaultProvider.get());
} catch (Exception e) {
final RuntimeException exception = new RuntimeException("Could not instantiate any instance.");
final RuntimeException defaultException = new RuntimeException("Could not instantiate default instance.", e);
exception.addSuppressed(defaultException);
errorsDuringInitialization.forEach(exception::addSuppressed);
throw exception;
}
}
throw new RuntimeException("Multiple instances were created: " + instantiatedResources);
} | 3.26 |
flink_QueryOperation_asSerializableString_rdh | /**
* Returns a string that fully serializes this instance. The serialized string can be used for
* storing the query in e.g. a {@link org.apache.flink.table.catalog.Catalog} as a view.
*
* @return detailed string for persisting in a catalog
* @see Operation#asSummaryString()
*/
default String asSerializableString() {
throw new UnsupportedOperationException("QueryOperations are not string serializable for now.");
} | 3.26 |
flink_ContinuousEnumerationSettings_toString_rdh | // ------------------------------------------------------------------------
@Override
public String toString() {
return (("ContinuousEnumerationSettings{" + "discoveryInterval=") + discoveryInterval) + '}';
} | 3.26 |
flink_DataSource_getSplitDataProperties_rdh | /**
* Returns the {@link org.apache.flink.api.java.io.SplitDataProperties} for the {@link org.apache.flink.core.io.InputSplit}s of this DataSource for configurations.
*
* <p>SplitDataProperties can help to generate more efficient execution plans.
*
* <p><b> IMPORTANT: Incorrect configuration of SplitDataProperties can cause wrong results!
* </b>
*
* @return The SplitDataProperties for the InputSplits of this DataSource.
*/
@PublicEvolving
public SplitDataProperties<OUT> getSplitDataProperties() {
if (this.splitDataProperties == null) {
this.splitDataProperties = new SplitDataProperties<OUT>(this);
}
return this.splitDataProperties;
} | 3.26 |
flink_DataSource_getInputFormat_rdh | /**
* Gets the input format that is executed by this data source.
*
* @return The input format that is executed by this data source.
*/
@Internal
public InputFormat<OUT, ?> getInputFormat() {
return this.inputFormat;
} | 3.26 |
flink_DataSource_translateToDataFlow_rdh | // --------------------------------------------------------------------------------------------
protected GenericDataSourceBase<OUT, ?> translateToDataFlow() {
String v0 = (this.name != null) ? this.name : ((("at " + dataSourceLocationName) + " (") + inputFormat.getClass().getName()) + ")";
if (v0.length() > 150) {
v0 = v0.substring(0, 150);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
GenericDataSourceBase<OUT, ?> source = new GenericDataSourceBase(this.inputFormat, new OperatorInformation<OUT>(getType()), v0);
source.setParallelism(parallelism);
if (this.parameters != null) {
source.getParameters().addAll(this.parameters);
}
if (this.splitDataProperties != null) {
source.setSplitDataProperties(this.splitDataProperties);
}
return source;
} | 3.26 |
flink_DataSource_getParameters_rdh | /**
*
* @return Configuration for the InputFormat.
 */
public Configuration getParameters() {
return this.parameters;
} | 3.26 |
flink_DataSource_withParameters_rdh | /**
* Pass a configuration to the InputFormat.
*
* @param parameters
* Configuration parameters
*/
public DataSource<OUT> withParameters(Configuration parameters) {
this.parameters = parameters;
return this;
} | 3.26 |
flink_FieldAccessorFactory_getAccessor_rdh | /**
* Creates a {@link FieldAccessor} for the field that is given by a field expression, which can
* be used to get and set the specified field on instances of this type.
*
* @param field
* The field expression
* @param config
* Configuration object
* @param <F>
* The type of the field to access
* @return The created FieldAccessor
*/
@Internal
public static <T, F> FieldAccessor<T, F> getAccessor(TypeInformation<T> typeInfo, String field, ExecutionConfig config) {
// In case of arrays
if ((typeInfo instanceof BasicArrayTypeInfo) || (typeInfo instanceof PrimitiveArrayTypeInfo)) {
try {
return new FieldAccessor.ArrayFieldAccessor<>(Integer.parseInt(field), typeInfo);
} catch (NumberFormatException ex) {
throw new CompositeType.InvalidFieldReferenceException("A field expression on an array must be an integer index (that might be given as a string).");
}
// In case of basic types
} else if (typeInfo instanceof BasicTypeInfo) {
try {
int pos = (field.equals(ExpressionKeys.SELECT_ALL_CHAR)) ? 0 : Integer.parseInt(field);
return FieldAccessorFactory.getAccessor(typeInfo, pos, config);
} catch (NumberFormatException ex) {
throw new CompositeType.InvalidFieldReferenceException((((("You tried to select the field \"" + field) + "\" on a ") + typeInfo.toString()) + ". A field expression on a basic type can only be \"*\" or \"0\"") + " (both of which mean selecting the entire basic type).");
}
// In case of Pojos
} else if (typeInfo instanceof PojoTypeInfo) {
FieldExpression decomp = decomposeFieldExpression(field);
PojoTypeInfo<?> v6 = ((PojoTypeInfo) (typeInfo));
int fieldIndex = v6.getFieldIndex(decomp.head);
if (fieldIndex == (-1)) {
throw new CompositeType.InvalidFieldReferenceException(((("Unable to find field \"" + decomp.head) + "\" in type ") + typeInfo) + ".");
} else {
PojoField pojoField = v6.getPojoFieldAt(fieldIndex);
TypeInformation<?> fieldType = v6.getTypeAt(fieldIndex);
if (decomp.tail == null) {
@SuppressWarnings("unchecked")
FieldAccessor<F, F> innerAccessor = new FieldAccessor.SimpleFieldAccessor<>(((TypeInformation<F>) (fieldType)));
return new FieldAccessor.PojoFieldAccessor<>(pojoField.getField(), innerAccessor);
} else {
@SuppressWarnings("unchecked")
FieldAccessor<Object, F> innerAccessor = FieldAccessorFactory.getAccessor(((TypeInformation<Object>) (fieldType)), decomp.tail, config);
return new FieldAccessor.PojoFieldAccessor<>(pojoField.getField(), innerAccessor);
}
}
// In case of case classes
} else if (typeInfo.isTupleType() && ((TupleTypeInfoBase) (typeInfo)).isCaseClass()) {
TupleTypeInfoBase tupleTypeInfo = ((TupleTypeInfoBase) (typeInfo));
FieldExpression decomp = decomposeFieldExpression(field);
int fieldPos = tupleTypeInfo.getFieldIndex(decomp.head);
if (fieldPos < 0) {
throw new CompositeType.InvalidFieldReferenceException("Invalid field selected: " + field);
}
if (decomp.tail == null) {
if (scalaProductFieldAccessorFactory != null) {
return scalaProductFieldAccessorFactory.createSimpleProductFieldAccessor(fieldPos, typeInfo, config);
} else {
throw new IllegalStateException("Scala products are used but Scala API is not on the classpath.");
}
} else {
@SuppressWarnings("unchecked")
FieldAccessor<Object, F> innerAccessor = getAccessor(tupleTypeInfo.getTypeAt(fieldPos), decomp.tail, config);
if (scalaProductFieldAccessorFactory != null) {
return scalaProductFieldAccessorFactory.createRecursiveProductFieldAccessor(fieldPos, typeInfo, innerAccessor, config);
} else {
throw new IllegalStateException("Scala products are used but Scala API is not on the classpath.");
}
}
// In case of tuples
} else if (typeInfo.isTupleType()
&& (typeInfo instanceof TupleTypeInfo)) {
TupleTypeInfo tupleTypeInfo = ((TupleTypeInfo) (typeInfo));
FieldExpression decomp = decomposeFieldExpression(field);
int v18 = tupleTypeInfo.getFieldIndex(decomp.head);
if (v18 == (-1)) {
try {
v18 = Integer.parseInt(decomp.head);
} catch (NumberFormatException ex) {
throw new CompositeType.InvalidFieldReferenceException(((("Tried to select field \"" + decomp.head) + "\" on ") + typeInfo.toString()) + " . Only integer values are allowed here.");}
}
if (decomp.tail == null) {
@SuppressWarnings("unchecked")
FieldAccessor<T, F> result = new FieldAccessor.SimpleTupleFieldAccessor(v18, tupleTypeInfo);
return result;
} else {
@SuppressWarnings("unchecked")
FieldAccessor<?, F> innerAccessor = getAccessor(tupleTypeInfo.getTypeAt(v18), decomp.tail, config);
@SuppressWarnings("unchecked")
FieldAccessor<T, F> result = new FieldAccessor.RecursiveTupleFieldAccessor(v18, innerAccessor, tupleTypeInfo);
return result;
}
// Default statement
} else {
throw new CompositeType.InvalidFieldReferenceException((("Cannot reference field by field expression on " + typeInfo.toString()) + "Field expressions are only supported on POJO types, tuples, and case classes. ") + "(See the Flink documentation on what is considered a POJO.)");
}
} | 3.26 |
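A small usage sketch for getAccessor on a tuple type. FieldAccessorFactory is an internal Flink API; the exact package (assumed org.apache.flink.streaming.util.typeutils) and the get/set shapes of FieldAccessor (`F get(T)` and `T set(T, F)`) are assumptions here rather than facts confirmed by the snippet above:

```java
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.util.typeutils.FieldAccessor;
import org.apache.flink.streaming.util.typeutils.FieldAccessorFactory;

public class FieldAccessorSketch {
    public static void main(String[] args) {
        TypeInformation<Tuple2<String, Integer>> info = Types.TUPLE(Types.STRING, Types.INT);

        // "f1" selects the second tuple field; nested expressions like "outer.inner" work for POJOs.
        FieldAccessor<Tuple2<String, Integer>, Integer> accessor =
                FieldAccessorFactory.getAccessor(info, "f1", new ExecutionConfig());

        Tuple2<String, Integer> record = Tuple2.of("a", 1);
        Integer value = accessor.get(record);   // 1
        record = accessor.set(record, 42);      // record.f1 is now 42
        System.out.println(value + " -> " + record);
    }
}
```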
flink_AllWindowedStream_allowedLateness_rdh | /**
* Sets the time by which elements are allowed to be late. Elements that arrive behind the
* watermark by more than the specified time will be dropped. By default, the allowed lateness
* is {@code 0L}.
*
* <p>Setting an allowed lateness is only valid for event-time windows.
*/
@PublicEvolving
public AllWindowedStream<T, W> allowedLateness(Time lateness) {
final long millis = lateness.toMilliseconds();
checkArgument(millis >= 0, "The allowed lateness cannot be negative.");
this.allowedLateness = millis;
return this;
} | 3.26 |
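A minimal end-to-end sketch of allowedLateness on a non-keyed (windowAll) stream, assuming the Flink DataStream API of recent 1.x releases; the source, timestamp assignment, and window sizes are purely illustrative:

```java
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class AllowedLatenessSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Long> values = env
                .fromSequence(0, 999)
                // Treat each value as its own event time (in seconds), purely for illustration.
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<Long>forMonotonousTimestamps()
                                .withTimestampAssigner((value, ts) -> value * 1000L));

        values.windowAll(TumblingEventTimeWindows.of(Time.seconds(10)))
                .allowedLateness(Time.seconds(5)) // late records within 5s still update their window
                .reduce(Long::sum)
                .print();

        env.execute("allowed-lateness-sketch");
    }
}
```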
flink_AllWindowedStream_m1_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window. The output of the window function is interpreted as a regular
* non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function
* The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> m1(ProcessAllWindowFunction<T, R, W> function, TypeInformation<R> resultType) {
String callLocation = Utils.getCallLocationName();
function = input.getExecutionEnvironment().clean(function);
return apply(new InternalIterableProcessAllWindowFunction<>(function), resultType, callLocation);
} | 3.26 |
flink_AllWindowedStream_trigger_rdh | /**
* Sets the {@code Trigger} that should be used to trigger window emission.
*/
@PublicEvolving
public AllWindowedStream<T, W> trigger(Trigger<? super T, ? super W> trigger) {
if ((windowAssigner instanceof MergingWindowAssigner) && (!trigger.canMerge())) {
throw new UnsupportedOperationException("A merging window assigner cannot be used with a trigger that does not support merging.");
}
this.trigger = trigger;
return this;
} | 3.26 |
flink_AllWindowedStream_m0_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given reducer.
*
* @param reduceFunction
* The reduce function that is used for incremental aggregation.
* @param function
* The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> m0(ReduceFunction<T> reduceFunction, ProcessAllWindowFunction<T, R, W> function) {
TypeInformation<R> resultType = getProcessAllWindowFunctionReturnType(function, input.getType());
return reduce(reduceFunction, function, resultType);
} | 3.26 |
flink_AllWindowedStream_apply_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given reducer.
*
* @param reduceFunction
* The reduce function that is used for incremental aggregation.
* @param function
* The window function.
* @param resultType
* Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
* @deprecated Use {@link #reduce(ReduceFunction, AllWindowFunction, TypeInformation)} instead.
*/
@Deprecated
public <R> SingleOutputStreamOperator<R> apply(ReduceFunction<T> reduceFunction, AllWindowFunction<T, R, W> function, TypeInformation<R> resultType) {
if (reduceFunction instanceof RichFunction) {
throw new UnsupportedOperationException("ReduceFunction of apply can not be a RichFunction.");
}
// clean the closures
function = input.getExecutionEnvironment().clean(function);
reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);
String callLocation = Utils.getCallLocationName();
String udfName = "AllWindowedStream." + callLocation;
String opName;
KeySelector<T, Byte> keySel = input.getKeySelector();
OneInputStreamOperator<T, R> operator;
if (evictor != null) {
@SuppressWarnings({ "unchecked", "rawtypes" })
TypeSerializer<StreamRecord<T>> streamRecordSerializer = ((TypeSerializer<StreamRecord<T>>) (new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()))));
ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer);
opName = ((((((((("TriggerWindow(" + windowAssigner) + ", ") + stateDesc) + ", ") + trigger) + ", ") + evictor) + ", ") + udfName) + ")";
operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableAllWindowFunction<>(new ReduceApplyAllWindowFunction<>(reduceFunction, function)), trigger, evictor, allowedLateness, lateDataOutputTag);
} else {
ReducingStateDescriptor<T> stateDesc = new ReducingStateDescriptor<>("window-contents", reduceFunction, input.getType().createSerializer(getExecutionEnvironment().getConfig()));
opName = ((((((("TriggerWindow(" + windowAssigner) + ", ") + stateDesc) + ", ") + trigger) + ", ") + udfName) + ")";
operator = new WindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc,
new InternalSingleValueAllWindowFunction<>(function), trigger,
allowedLateness, lateDataOutputTag);
}
return input.transform(opName, resultType, operator).forceNonParallel();
} | 3.26 |
flink_AllWindowedStream_getExecutionEnvironment_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
public StreamExecutionEnvironment getExecutionEnvironment() {
return input.getExecutionEnvironment();
} | 3.26 |
flink_AllWindowedStream_minBy_rdh | /**
* Applies an aggregation that gives the minimum element of the pojo data stream by the given
* field expression for every window. A field expression is either the name of a public field or
* a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot
* can be used to drill down into objects, as in {@code "field1.getInnerField2()"}.
*
* @param field
* The field expression based on which the aggregation will be applied.
* @param first
* If True then in case of field equality the first object will be returned
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> minBy(String field, boolean first) {
return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationType.MINBY, first, input.getExecutionConfig()));
} | 3.26 |
flink_AllWindowedStream_aggregate_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggregateFunction
* The aggregation function that is used for incremental aggregation.
* @param windowFunction
* The process window function.
* @param accumulatorType
* Type information for the internal accumulator type of the aggregation
* function
* @param aggregateResultType
* Type information for the result type of the aggregate function
* @param resultType
* Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
* @param <ACC>
* The type of the AggregateFunction's accumulator
* @param <V>
* The type of AggregateFunction's result, and the WindowFunction's input
* @param <R>
* The type of the elements in the resulting stream, equal to the WindowFunction's
* result type
*/
@PublicEvolving
public <ACC, V, R> SingleOutputStreamOperator<R> aggregate(AggregateFunction<T, ACC, V> aggregateFunction, ProcessAllWindowFunction<V, R, W> windowFunction, TypeInformation<ACC> accumulatorType, TypeInformation<V> aggregateResultType, TypeInformation<R> resultType) {
    checkNotNull(aggregateFunction, "aggregateFunction");
    checkNotNull(windowFunction, "windowFunction");
    checkNotNull(accumulatorType, "accumulatorType");
    checkNotNull(aggregateResultType, "aggregateResultType");
    checkNotNull(resultType, "resultType");
    if (aggregateFunction instanceof RichFunction) {
        throw new UnsupportedOperationException("This aggregate function cannot be a RichFunction.");
    }
    // clean the closures
    windowFunction = input.getExecutionEnvironment().clean(windowFunction);
    aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction);
    final String callLocation = Utils.getCallLocationName();
    final String udfName = "AllWindowedStream." + callLocation;
    final String opName = windowAssigner.getClass().getSimpleName();
    final String opDescription;
    final KeySelector<T, Byte> keySel = input.getKeySelector();
    OneInputStreamOperator<T, R> operator;
    if (evictor != null) {
        @SuppressWarnings({ "unchecked", "rawtypes" })
        TypeSerializer<StreamRecord<T>> streamRecordSerializer = ((TypeSerializer<StreamRecord<T>>) (new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()))));
        ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer);
        opDescription = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")";
        operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalAggregateProcessAllWindowFunction<>(aggregateFunction, windowFunction), trigger, evictor, allowedLateness, lateDataOutputTag);
    } else {
        AggregatingStateDescriptor<T, ACC, V> stateDesc = new AggregatingStateDescriptor<>("window-contents", aggregateFunction, accumulatorType.createSerializer(getExecutionEnvironment().getConfig()));
        opDescription = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")";
        operator = new WindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueProcessAllWindowFunction<>(windowFunction), trigger, allowedLateness, lateDataOutputTag);
    }
return input.transform(opName, resultType, operator).setDescription(opDescription).forceNonParallel();
} | 3.26 |
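For context, a small end-to-end sketch of how such an aggregate call is typically used from the API side; the averaging AggregateFunction, the sample elements and the window size are assumptions, and the simpler one-argument aggregate(AggregateFunction) overload is used so the type information is derived automatically.

import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class AggregateAllWindowExample {

    /** Computes the average of the Integer field; the accumulator is (sum, count). */
    public static class AverageAggregate
            implements AggregateFunction<Tuple2<String, Integer>, Tuple2<Long, Long>, Double> {
        @Override
        public Tuple2<Long, Long> createAccumulator() {
            return Tuple2.of(0L, 0L);
        }

        @Override
        public Tuple2<Long, Long> add(Tuple2<String, Integer> value, Tuple2<Long, Long> acc) {
            return Tuple2.of(acc.f0 + value.f1, acc.f1 + 1);
        }

        @Override
        public Double getResult(Tuple2<Long, Long> acc) {
            return acc.f1 == 0 ? 0.0 : ((double) acc.f0) / acc.f1;
        }

        @Override
        public Tuple2<Long, Long> merge(Tuple2<Long, Long> a, Tuple2<Long, Long> b) {
            return Tuple2.of(a.f0 + b.f0, a.f1 + b.f1);
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Tuple2<String, Integer>> input =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("b", 5), Tuple2.of("c", 6));

        // one Double (the average) is emitted per 5-second window
        input.windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .aggregate(new AverageAggregate())
                .print();

        env.execute("aggregate sketch");
    }
}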
flink_AllWindowedStream_sideOutputLateData_rdh | /**
* Send late arriving data to the side output identified by the given {@link OutputTag}. Data is
* considered late after the watermark has passed the end of the window plus the allowed
* lateness set using {@link #allowedLateness(Time)}.
*
* <p>You can get the stream of late data using {@link SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link SingleOutputStreamOperator} resulting from the windowed operation with the same {@link OutputTag}.
*/
@PublicEvolving
public AllWindowedStream<T, W> sideOutputLateData(OutputTag<T> outputTag) {
    Preconditions.checkNotNull(outputTag, "Side output tag must not be null.");
this.lateDataOutputTag = input.getExecutionEnvironment().clean(outputTag);
return this;
} | 3.26 |
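A hedged usage sketch of the late-data side output described above; the event-time stream 'events' (with timestamps and watermarks already assigned), the window size and the lateness values are assumptions, not part of the code above.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;

public class LateDataExample {
    // 'events' is an assumed event-time stream with timestamps and watermarks already assigned.
    static DataStream<Tuple2<String, Integer>> handleLateData(DataStream<Tuple2<String, Integer>> events) {
        final OutputTag<Tuple2<String, Integer>> lateTag =
                new OutputTag<Tuple2<String, Integer>>("late-events") {}; // anonymous subclass captures the type

        SingleOutputStreamOperator<Tuple2<String, Integer>> summed =
                events.windowAll(TumblingEventTimeWindows.of(Time.minutes(1)))
                        .allowedLateness(Time.seconds(30))
                        .sideOutputLateData(lateTag)
                        .reduce((a, b) -> Tuple2.of(a.f0, a.f1 + b.f1));

        // elements arriving after watermark + allowed lateness end up in the side output
        return summed.getSideOutput(lateTag);
    }
}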
flink_AllWindowedStream_process_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window. The output of the window function is interpreted as a regular
* non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function
* The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> process(ProcessAllWindowFunction<T, R, W> function) {
String callLocation = Utils.getCallLocationName();
function = input.getExecutionEnvironment().clean(function);
TypeInformation<R> resultType = getProcessAllWindowFunctionReturnType(function, getInputType());
return apply(new InternalIterableProcessAllWindowFunction<>(function), resultType, callLocation);
} | 3.26 |
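A minimal sketch of process with a ProcessAllWindowFunction; the String input stream and the 10-second window are assumptions. Because nothing is pre-aggregated, the function receives every buffered element of the window.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.windowing.ProcessAllWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

public class ProcessAllWindowExample {
    // 'lines' is an assumed DataStream<String>; all buffered elements of a window are handed in at once.
    static DataStream<String> countPerWindow(DataStream<String> lines) {
        return lines.windowAll(TumblingProcessingTimeWindows.of(Time.seconds(10)))
                .process(new ProcessAllWindowFunction<String, String, TimeWindow>() {
                    @Override
                    public void process(Context context, Iterable<String> elements, Collector<String> out) {
                        long count = 0;
                        for (String ignored : elements) {
                            count++;
                        }
                        out.collect(context.window() + " contained " + count + " elements");
                    }
                });
    }
}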
flink_AllWindowedStream_sum_rdh | /**
* Applies an aggregation that sums the pojo data stream at the given field for every window.
*
* <p>A field expression is either the name of a public field or a getter method with
* parentheses of the stream's underlying type. A dot can be used to drill down into objects, as
* in {@code "field1.getInnerField2()"}.
*
* @param field
* The field to sum
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> sum(String field) {
return aggregate(new SumAggregator<>(field, input.getType(), input.getExecutionConfig()));
} | 3.26 |
flink_AllWindowedStream_maxBy_rdh | /**
* Applies an aggregation that gives the maximum element of the pojo data stream by the given
* field expression for every window. A field expression is either the name of a public field or
* a getter method with parentheses of the {@link DataStream}S underlying type. A dot can be
* used to drill down into objects, as in {@code "field1.getInnerField2()"}.
*
* @param field
* The field expression based on which the aggregation will be applied.
* @param first
* If True then in case of field equality the first object will be returned
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> maxBy(String field, boolean first) {
return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationType.MAXBY, first, input.getExecutionConfig()));
} | 3.26 |
flink_AllWindowedStream_reduce_rdh | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given reducer.
*
* @param reduceFunction
* The reduce function that is used for incremental aggregation.
* @param function
* The process window function.
* @param resultType
* Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> reduce(ReduceFunction<T> reduceFunction, ProcessAllWindowFunction<T, R, W> function, TypeInformation<R> resultType) {
if (reduceFunction instanceof RichFunction) {
throw new UnsupportedOperationException("ReduceFunction of reduce can not be a RichFunction.");
}
// clean the closures
function = input.getExecutionEnvironment().clean(function);
reduceFunction = input.getExecutionEnvironment().clean(reduceFunction);
String callLocation = Utils.getCallLocationName();
String udfName = "AllWindowedStream." + callLocation;
String opName;
KeySelector<T, Byte> keySel = input.getKeySelector();
OneInputStreamOperator<T, R> operator;
if (evictor != null) {
@SuppressWarnings({ "unchecked", "rawtypes" })
TypeSerializer<StreamRecord<T>> streamRecordSerializer = ((TypeSerializer<StreamRecord<T>>) (new StreamElementSerializer(input.getType().createSerializer(getExecutionEnvironment().getConfig()))));
ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer);
opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")";
operator = new EvictingWindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableProcessAllWindowFunction<>(new ReduceApplyProcessAllWindowFunction<>(reduceFunction, function)), trigger, evictor, allowedLateness, lateDataOutputTag);
} else {
ReducingStateDescriptor<T> stateDesc = new ReducingStateDescriptor<>("window-contents", reduceFunction, input.getType().createSerializer(getExecutionEnvironment().getConfig()));
opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")";
operator = new WindowOperator<>(windowAssigner, windowAssigner.getWindowSerializer(getExecutionEnvironment().getConfig()), keySel, input.getKeyType().createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueProcessAllWindowFunction<>(function), trigger, allowedLateness, lateDataOutputTag);
}
return input.transform(opName, resultType, operator).forceNonParallel();
} | 3.26 |
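A usage sketch for this reduce variant, assuming a (sensorId, temperature) stream that is not part of the code above; the ReduceFunction keeps only the hottest reading per window, and the ProcessAllWindowFunction then attaches window metadata to that single value.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.functions.windowing.ProcessAllWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

public class ReduceWithProcessExample {
    // 'readings' is an assumed DataStream<Tuple2<String, Double>> of (sensorId, temperature).
    static SingleOutputStreamOperator<String> hottestPerWindow(DataStream<Tuple2<String, Double>> readings) {
        return readings.windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .reduce(
                        // incremental pre-aggregation: keep the hotter reading
                        (a, b) -> a.f1 >= b.f1 ? a : b,
                        // only the single pre-aggregated element per window arrives here
                        new ProcessAllWindowFunction<Tuple2<String, Double>, String, TimeWindow>() {
                            @Override
                            public void process(Context ctx,
                                                Iterable<Tuple2<String, Double>> hottest,
                                                Collector<String> out) {
                                Tuple2<String, Double> max = hottest.iterator().next();
                                out.collect(ctx.window().getEnd() + ": " + max.f0 + " at " + max.f1);
                            }
                        });
    }
}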
flink_AllWindowedStream_min_rdh | /**
* Applies an aggregation that gives the minimum value of the pojo data stream at the given
* field expression for every window.
*
* <p>A field expression is either the name of a public field or a getter method with
* parentheses of the {@link DataStream DataStreams} underlying type. A dot can be used to drill down into
* objects, as in {@code "field1.getInnerField2()"}.
*
* @param field
* The field expression based on which the aggregation will be applied.
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> min(String field) {
return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationType.MIN, false, input.getExecutionConfig()));
} | 3.26 |
flink_AllWindowedStream_max_rdh | /**
* Applies an aggregation that gives the maximum value of the pojo data stream at the given
* field expression for every window. A field expression is either the name of a public field or
* a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot
* can be used to drill down into objects, as in {@code "field1.getInnerField2()"}.
*
* @param field
* The field expression based on which the aggregation will be applied.
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> max(String field) {
return aggregate(new ComparableAggregator<>(field, input.getType(), AggregationType.MAX, false, input.getExecutionConfig()));
} | 3.26 |
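A small sketch of the field-expression based aggregations (sum, min, max, minBy, maxBy) shown in this class; the Reading POJO and the stream are assumptions. maxBy returns the complete element that holds the maximum of the named field, while max only guarantees the maximum for the aggregated field itself.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class FieldExpressionExample {
    /** Assumed POJO; field expressions like "temperature" refer to its public fields or getters. */
    public static class Reading {
        public String sensorId;
        public double temperature;

        public Reading() {}

        public Reading(String sensorId, double temperature) {
            this.sensorId = sensorId;
            this.temperature = temperature;
        }
    }

    static void fieldExpressionAggregations(DataStream<Reading> readings) {
        readings.windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .maxBy("temperature", true) // on ties, the first element seen wins
                .print();
    }
}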
flink_AllWindowedStream_evictor_rdh | /**
* Sets the {@code Evictor} that should be used to evict elements from a window before emission.
*
* <p>Note: When using an evictor, window performance will degrade significantly, since
* incremental aggregation of window results cannot be used.
*/
@PublicEvolving
public AllWindowedStream<T, W> evictor(Evictor<? super T, ? super W> evictor) {
this.evictor = evictor;
return this;
} | 3.26 |
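A hedged sketch of a global, count-based window that uses an evictor; the String stream, the trigger count and the evictor size are assumptions. As the note above warns, the evictor forces all elements to be buffered, so there is no incremental aggregation here.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.windowing.ProcessAllWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.GlobalWindows;
import org.apache.flink.streaming.api.windowing.evictors.CountEvictor;
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger;
import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;
import org.apache.flink.util.Collector;

public class EvictorExample {
    // 'events' is an assumed DataStream<String>.
    static DataStream<String> lastHundred(DataStream<String> events) {
        return events.windowAll(GlobalWindows.create())
                .trigger(CountTrigger.of(10))   // evaluate after every 10 new elements
                .evictor(CountEvictor.of(100))  // keep at most the 100 most recent elements buffered
                .process(new ProcessAllWindowFunction<String, String, GlobalWindow>() {
                    @Override
                    public void process(Context ctx, Iterable<String> elements, Collector<String> out) {
                        long n = 0;
                        for (String ignored : elements) {
                            n++;
                        }
                        out.collect("elements retained at evaluation: " + n);
                    }
                });
    }
}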
flink_PartialCachingAsyncLookupProvider_of_rdh | /**
* Build a {@link PartialCachingAsyncLookupProvider} from the specified {@link AsyncLookupFunction} and {@link LookupCache}.
*/
static PartialCachingAsyncLookupProvider of(AsyncLookupFunction asyncLookupFunction, LookupCache cache) {
return new PartialCachingAsyncLookupProvider() {
@Override
public LookupCache getCache() {
return cache;
}
@Override
public AsyncLookupFunction createAsyncLookupFunction() {
return asyncLookupFunction;
}
};
} | 3.26 |
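A hedged sketch of how a connector might wire this up inside LookupTableSource#getLookupRuntimeProvider; MyAsyncLookupFunction is a hypothetical AsyncLookupFunction of the connector, and the DefaultLookupCache builder calls are written from memory, so treat the exact method names as assumptions rather than a definitive API reference.

import java.time.Duration;

import org.apache.flink.table.connector.source.lookup.PartialCachingAsyncLookupProvider;
import org.apache.flink.table.connector.source.lookup.cache.DefaultLookupCache;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;

// Fragment of a LookupTableSource implementation; MyAsyncLookupFunction is hypothetical.
@Override
public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
    LookupCache cache = DefaultLookupCache.newBuilder()
            .maximumSize(10_000)                      // cache at most 10k lookup keys
            .expireAfterWrite(Duration.ofMinutes(10)) // entries are re-looked-up after 10 minutes
            .build();
    return PartialCachingAsyncLookupProvider.of(new MyAsyncLookupFunction(), cache);
}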
flink_TaskExecutorManager_getTotalRegisteredResources_rdh | // ---------------------------------------------------------------------------------------------
// slot / resource counts
// ---------------------------------------------------------------------------------------------
public ResourceProfile getTotalRegisteredResources() {
return taskManagerRegistrations.values().stream().map(TaskManagerRegistration::getTotalResource).reduce(ResourceProfile.ZERO, ResourceProfile::merge);
} | 3.26 |
flink_TaskExecutorManager_clearPendingTaskManagerSlots_rdh | /**
* Clears all pending task manager slots.
*/
public void clearPendingTaskManagerSlots() {
if (!resourceAllocator.isSupported()) {
return;
}
if (!pendingSlots.isEmpty()) {
this.pendingSlots.clear();
declareNeededResourcesWithDelay();
}
} | 3.26 |
flink_TaskExecutorManager_occupySlot_rdh | // ---------------------------------------------------------------------------------------------
// TaskExecutor slot book-keeping
// ---------------------------------------------------------------------------------------------
public void occupySlot(InstanceID instanceId) {
taskManagerRegistrations.get(instanceId).occupySlot();
} | 3.26 |
flink_TaskExecutorManager_isTaskManagerRegistered_rdh | // ---------------------------------------------------------------------------------------------
// TaskExecutor (un)registration
// ---------------------------------------------------------------------------------------------
public boolean isTaskManagerRegistered(InstanceID instanceId) {
return taskManagerRegistrations.containsKey(instanceId);
} | 3.26 |
flink_TaskExecutorManager_checkTaskManagerTimeoutsAndRedundancy_rdh | // ---------------------------------------------------------------------------------------------
// TaskExecutor idleness / redundancy
// ---------------------------------------------------------------------------------------------
private void checkTaskManagerTimeoutsAndRedundancy() {
if (!taskManagerRegistrations.isEmpty()) {
long currentTime = System.currentTimeMillis();
ArrayList<TaskManagerRegistration> timedOutTaskManagers = new ArrayList<>(taskManagerRegistrations.size());
// first retrieve the timed out TaskManagers
for (TaskManagerRegistration taskManagerRegistration : taskManagerRegistrations.values()) {
    if ((currentTime - taskManagerRegistration.getIdleSince()) >= taskManagerTimeout.toMilliseconds()) {
        // we collect the instance ids first in order to avoid concurrent modifications
        // by the ResourceAllocator.releaseResource call
        timedOutTaskManagers.add(taskManagerRegistration);
    }
}
int slotsDiff = (redundantTaskManagerNum * numSlotsPerWorker) - getNumberFreeSlots();
if (slotsDiff > 0) {
if (pendingSlots.isEmpty()) {
// Keep enough redundant taskManagers from time to time.
int requiredTaskManagers = MathUtils.divideRoundUp(slotsDiff, numSlotsPerWorker);
m1(requiredTaskManagers);
} else {
LOG.debug("There are some pending slots, skip allocate redundant task manager and wait them fulfilled.");
}
} else {
// second we trigger the release resource callback which can decide upon the
// resource release
int maxReleaseNum = (-slotsDiff) / numSlotsPerWorker;
releaseIdleTaskExecutors(timedOutTaskManagers, Math.min(maxReleaseNum, timedOutTaskManagers.size()));
}
}
} | 3.26 |
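A brief worked example of the arithmetic above (the numbers are illustrative, not taken from the code): with redundantTaskManagerNum = 1, numSlotsPerWorker = 4 and 1 free slot, slotsDiff = 1 * 4 - 1 = 3 > 0, so MathUtils.divideRoundUp(3, 4) = 1 additional task manager is requested, provided no slots are already pending. With 9 free slots instead, slotsDiff = 4 - 9 = -5, so maxReleaseNum = 5 / 4 = 1 and at most one of the timed-out task managers is released.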
flink_TaskExecutorManager_removePendingTaskManagerSlots_rdh | /**
* remove unused pending task manager slots.
*
* @param unusedResourceCounter
* the count of unused resources.
*/
public void removePendingTaskManagerSlots(ResourceCounter unusedResourceCounter) {
if (!resourceAllocator.isSupported()) {
return;
}
Preconditions.checkState(unusedResourceCounter.getResources().size() == 1);
Preconditions.checkState(unusedResourceCounter.getResources().contains(defaultSlotResourceProfile));
int wantedPendingSlotsNumber = pendingSlots.size() - unusedResourceCounter.getResourceCount(defaultSlotResourceProfile);
pendingSlots.entrySet().removeIf(ignore -> pendingSlots.size() > wantedPendingSlotsNumber);
declareNeededResourcesWithDelay();
} | 3.26 |
flink_TaskExecutorManager_declareNeededResources_rdh | /**
* DO NOT call this method directly. Use {@link #declareNeededResourcesWithDelay()} instead.
*/
private void declareNeededResources() {
resourceAllocator.declareResourceNeeded(getResourceDeclaration());
} | 3.26 |
flink_TaskExecutorManager_allocateWorkers_rdh | /**
* Allocate a number of workers based on the input param.
*
* @param workerNum
* the number of workers to allocate
* @return the number of successfully allocated workers
*/
private int allocateWorkers(int workerNum) {
int allocatedWorkerNum = 0;
for (int i = 0; i < workerNum; ++i) {
if (allocateWorker(defaultSlotResourceProfile).isPresent()) {
++allocatedWorkerNum;
} else {
break;
}
}
return allocatedWorkerNum;
} | 3.26 |
flink_StreamWindowSQLExample_createTempFile_rdh | /**
* Creates a temporary file with the given contents and returns its URI as a string.
*/
private static String createTempFile(String contents) throws IOException {
File tempFile = File.createTempFile("orders", ".csv");
tempFile.deleteOnExit();
FileUtils.writeFileUtf8(tempFile, contents);
return tempFile.toURI().toString();
} | 3.26 |
flink_DoubleZeroConvergence_isConverged_rdh | /**
* Returns true, if the aggregator value is zero, false otherwise.
*
* @param iteration
* The number of the iteration superstep. Ignored in this case.
* @param value
* The aggregator value, which is compared to zero.
* @return True, if the aggregator value is zero, false otherwise.
*/
@Override
public boolean isConverged(int iteration, DoubleValue value) {
    return value.getValue() == 0;
} | 3.26 |
flink_SortMergeResultPartition_writeLargeRecord_rdh | /**
* Spills the large record into the target {@link PartitionedFile} as a separate data region.
*/
private void writeLargeRecord(ByteBuffer record, int targetSubpartition, DataType dataType, boolean isBroadcast) throws IOException {
// a large record will be spilled to a separated data region
fileWriter.startNewRegion(isBroadcast);
List<BufferWithChannel> toWrite = new ArrayList<>();
Queue<MemorySegment> segments = new ArrayDeque<>(freeSegments);
while (record.hasRemaining()) {
if (segments.isEmpty()) {
fileWriter.writeBuffers(toWrite);
toWrite.clear();
segments = new ArrayDeque<>(freeSegments);
}
int toCopy = Math.min(record.remaining(), networkBufferSize);
MemorySegment writeBuffer = checkNotNull(segments.poll());
writeBuffer.put(0, record, toCopy);
NetworkBuffer buffer = new NetworkBuffer(writeBuffer, buf -> {}, dataType, toCopy);
BufferWithChannel bufferWithChannel = new BufferWithChannel(buffer, targetSubpartition);
updateStatistics(bufferWithChannel, isBroadcast);
toWrite.add(compressBufferIfPossible(bufferWithChannel));
}
fileWriter.writeBuffers(toWrite);
releaseFreeBuffers();
} | 3.26 |
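The loop above slices one oversized record into network-buffer-sized pieces and flushes whenever the free segments run out. A stripped-down illustration of the same chunking idea in plain Java follows; the chunk size and byte-array chunks stand in for Flink's MemorySegment and NetworkBuffer types and are assumptions for the sketch only.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class ChunkingSketch {
    /** Splits a large record into chunks of at most chunkSize bytes, preserving order. */
    static List<byte[]> chunkLargeRecord(ByteBuffer record, int chunkSize) {
        List<byte[]> chunks = new ArrayList<>();
        while (record.hasRemaining()) {
            int toCopy = Math.min(record.remaining(), chunkSize); // one "network buffer" worth of bytes
            byte[] chunk = new byte[toCopy];
            record.get(chunk); // advances the record's position by toCopy bytes
            chunks.add(chunk);
        }
        return chunks;
    }
}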
flink_GroupReduceIterator_reduce_rdh | // -------------------------------------------------------------------------------------------
@Override
public final void reduce(Iterable<IN> values, Collector<OUT> out) throws Exception {
for (Iterator<OUT> iter = reduceGroup(values); iter.hasNext();) {
out.collect(iter.next());
}
} | 3.26 |
flink_TypeInferenceUtil_createInvalidInputException_rdh | /**
* Returns an exception for invalid input arguments.
*/
public static ValidationException createInvalidInputException(TypeInference typeInference, CallContext callContext, ValidationException cause) {
return new ValidationException(String.format("Invalid input arguments. Expected signatures are:\n%s", generateSignature(typeInference, callContext.getName(), callContext.getFunctionDefinition())), cause);
} | 3.26 |